v4.19.13 snapshot.
diff --git a/drivers/s390/Makefile b/drivers/s390/Makefile
new file mode 100644
index 0000000..a863b04
--- /dev/null
+++ b/drivers/s390/Makefile
@@ -0,0 +1,9 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for the S/390 specific device drivers
+#
+
+obj-y += cio/ block/ char/ crypto/ net/ scsi/ virtio/
+
+drivers-y += drivers/s390/built-in.a
+
diff --git a/drivers/s390/block/Kconfig b/drivers/s390/block/Kconfig
new file mode 100644
index 0000000..9ac7574
--- /dev/null
+++ b/drivers/s390/block/Kconfig
@@ -0,0 +1,86 @@
+# SPDX-License-Identifier: GPL-2.0
+comment "S/390 block device drivers"
+	depends on S390 && BLOCK
+
+config BLK_DEV_XPRAM
+	def_tristate m
+	prompt "XPRAM disk support"
+	depends on S390 && BLOCK
+	help
+	  Select this option if you want to use your expanded storage on S/390
+	  or zSeries as a disk.  This is useful as a _fast_ swap device if you
+	  want to access more than 2G of memory when running in 31 bit mode.
+	  This option is also available as a module which will be called
+	  xpram.  If unsure, say "N".
+
+config DCSSBLK
+	def_tristate m
+	select FS_DAX_LIMITED
+	select DAX_DRIVER
+	prompt "DCSSBLK support"
+	depends on S390 && BLOCK
+	help
+	  Support for dcss block device
+
+config DASD
+	def_tristate y
+	prompt "Support for DASD devices"
+	depends on CCW && BLOCK
+	select IOSCHED_DEADLINE
+	help
+	  Enable this option if you want to access DASDs directly utilizing
+	  the S/390 channel subsystem commands. This is necessary for running
+	  natively on a single image or an LPAR.
+
+config DASD_PROFILE
+	def_bool y
+	prompt "Profiling support for dasd devices"
+	depends on DASD
+	help
+	  Enable this option if you want to see profiling information
+	  in /proc/dasd/statistics.
+
+config DASD_ECKD
+	def_tristate y
+	prompt "Support for ECKD Disks"
+	depends on DASD
+	help
+	  ECKD devices are the most commonly used devices. You should enable
+	  this option unless you are very sure that you have no ECKD device.
+
+config DASD_FBA
+	def_tristate y
+	prompt "Support for FBA  Disks"
+	depends on DASD
+	help
+	  Select this option to be able to access FBA devices. It is safe to
+	  say "Y".
+
+config DASD_DIAG
+	def_tristate y
+	prompt "Support for DIAG access to Disks"
+	depends on DASD
+	help
+	  Select this option if you want to use the Diagnose250 command to
+	  access disks under VM.  If you are not running under VM or are
+	  unsure what it is, say "N".
+
+config DASD_EER
+	def_bool y
+	prompt "Extended error reporting (EER)"
+	depends on DASD
+	help
+	  This driver provides a character device interface to the
+	  DASD extended error reporting. This is only needed if you want to
+	  use applications written for the EER facility.
+
+config SCM_BLOCK
+	def_tristate m
+	prompt "Support for Storage Class Memory"
+	depends on S390 && BLOCK && EADM_SCH && SCM_BUS
+	help
+	  Block device driver for Storage Class Memory (SCM). This driver
+	  provides a block device interface for each available SCM increment.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called scm_block.
diff --git a/drivers/s390/block/Makefile b/drivers/s390/block/Makefile
new file mode 100644
index 0000000..60c85cf
--- /dev/null
+++ b/drivers/s390/block/Makefile
@@ -0,0 +1,23 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# S/390 block devices
+#
+
+dasd_eckd_mod-objs := dasd_eckd.o dasd_3990_erp.o dasd_alias.o
+dasd_fba_mod-objs  := dasd_fba.o
+dasd_diag_mod-objs := dasd_diag.o
+dasd_mod-objs      := dasd.o dasd_ioctl.o dasd_proc.o dasd_devmap.o \
+			dasd_genhd.o dasd_erp.o
+ifdef CONFIG_DASD_EER
+dasd_mod-objs      += dasd_eer.o
+endif
+
+obj-$(CONFIG_DASD) += dasd_mod.o
+obj-$(CONFIG_DASD_DIAG) += dasd_diag_mod.o
+obj-$(CONFIG_DASD_ECKD) += dasd_eckd_mod.o
+obj-$(CONFIG_DASD_FBA)  += dasd_fba_mod.o
+obj-$(CONFIG_BLK_DEV_XPRAM) += xpram.o
+obj-$(CONFIG_DCSSBLK) += dcssblk.o
+
+scm_block-objs := scm_drv.o scm_blk.o
+obj-$(CONFIG_SCM_BLOCK) += scm_block.o
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
new file mode 100644
index 0000000..a23e7d3
--- /dev/null
+++ b/drivers/s390/block/dasd.c
@@ -0,0 +1,4143 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
+ *		    Horst Hummel <Horst.Hummel@de.ibm.com>
+ *		    Carsten Otte <Cotte@de.ibm.com>
+ *		    Martin Schwidefsky <schwidefsky@de.ibm.com>
+ * Bugreports.to..: <Linux390@de.ibm.com>
+ * Copyright IBM Corp. 1999, 2009
+ */
+
+#define KMSG_COMPONENT "dasd"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <linux/kmod.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/ctype.h>
+#include <linux/major.h>
+#include <linux/slab.h>
+#include <linux/hdreg.h>
+#include <linux/async.h>
+#include <linux/mutex.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include <linux/vmalloc.h>
+
+#include <asm/ccwdev.h>
+#include <asm/ebcdic.h>
+#include <asm/idals.h>
+#include <asm/itcw.h>
+#include <asm/diag.h>
+
+/* This is ugly... */
+#define PRINTK_HEADER "dasd:"
+
+#include "dasd_int.h"
+/*
+ * SECTION: Constant definitions to be used within this file
+ */
+#define DASD_CHANQ_MAX_SIZE 4
+
+#define DASD_DIAG_MOD		"dasd_diag_mod"
+
+static unsigned int queue_depth = 32;
+static unsigned int nr_hw_queues = 4;
+
+module_param(queue_depth, uint, 0444);
+MODULE_PARM_DESC(queue_depth, "Default queue depth for new DASD devices");
+
+module_param(nr_hw_queues, uint, 0444);
+MODULE_PARM_DESC(nr_hw_queues, "Default number of hardware queues for new DASD devices");
+
+/*
+ * SECTION: exported variables of dasd.c
+ */
+debug_info_t *dasd_debug_area;
+EXPORT_SYMBOL(dasd_debug_area);
+static struct dentry *dasd_debugfs_root_entry;
+struct dasd_discipline *dasd_diag_discipline_pointer;
+EXPORT_SYMBOL(dasd_diag_discipline_pointer);
+void dasd_int_handler(struct ccw_device *, unsigned long, struct irb *);
+
+MODULE_AUTHOR("Holger Smolinski <Holger.Smolinski@de.ibm.com>");
+MODULE_DESCRIPTION("Linux on S/390 DASD device driver,"
+		   " Copyright IBM Corp. 2000");
+MODULE_SUPPORTED_DEVICE("dasd");
+MODULE_LICENSE("GPL");
+
+/*
+ * SECTION: prototypes for static functions of dasd.c
+ */
+static int  dasd_alloc_queue(struct dasd_block *);
+static void dasd_setup_queue(struct dasd_block *);
+static void dasd_free_queue(struct dasd_block *);
+static int dasd_flush_block_queue(struct dasd_block *);
+static void dasd_device_tasklet(unsigned long);
+static void dasd_block_tasklet(unsigned long);
+static void do_kick_device(struct work_struct *);
+static void do_restore_device(struct work_struct *);
+static void do_reload_device(struct work_struct *);
+static void do_requeue_requests(struct work_struct *);
+static void dasd_return_cqr_cb(struct dasd_ccw_req *, void *);
+static void dasd_device_timeout(struct timer_list *);
+static void dasd_block_timeout(struct timer_list *);
+static void __dasd_process_erp(struct dasd_device *, struct dasd_ccw_req *);
+static void dasd_profile_init(struct dasd_profile *, struct dentry *);
+static void dasd_profile_exit(struct dasd_profile *);
+static void dasd_hosts_init(struct dentry *, struct dasd_device *);
+static void dasd_hosts_exit(struct dasd_device *);
+
+/*
+ * SECTION: Operations on the device structure.
+ */
+static wait_queue_head_t dasd_init_waitq;
+static wait_queue_head_t dasd_flush_wq;
+static wait_queue_head_t generic_waitq;
+static wait_queue_head_t shutdown_waitq;
+
+/*
+ * Allocate memory for a new device structure.
+ */
+struct dasd_device *dasd_alloc_device(void)
+{
+	struct dasd_device *device;
+
+	device = kzalloc(sizeof(struct dasd_device), GFP_ATOMIC);
+	if (!device)
+		return ERR_PTR(-ENOMEM);
+
+	/* Get two pages for normal block device operations. */
+	device->ccw_mem = (void *) __get_free_pages(GFP_ATOMIC | GFP_DMA, 1);
+	if (!device->ccw_mem) {
+		kfree(device);
+		return ERR_PTR(-ENOMEM);
+	}
+	/* Get one page for error recovery. */
+	device->erp_mem = (void *) get_zeroed_page(GFP_ATOMIC | GFP_DMA);
+	if (!device->erp_mem) {
+		free_pages((unsigned long) device->ccw_mem, 1);
+		kfree(device);
+		return ERR_PTR(-ENOMEM);
+	}
+
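+	/*
+	 * Hand the preallocated pages to the dasd chunk allocator so that
+	 * channel programs can later be built from them without further
+	 * kmalloc calls in the I/O path.
+	 */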
+	dasd_init_chunklist(&device->ccw_chunks, device->ccw_mem, PAGE_SIZE*2);
+	dasd_init_chunklist(&device->erp_chunks, device->erp_mem, PAGE_SIZE);
+	spin_lock_init(&device->mem_lock);
+	atomic_set(&device->tasklet_scheduled, 0);
+	tasklet_init(&device->tasklet, dasd_device_tasklet,
+		     (unsigned long) device);
+	INIT_LIST_HEAD(&device->ccw_queue);
+	timer_setup(&device->timer, dasd_device_timeout, 0);
+	INIT_WORK(&device->kick_work, do_kick_device);
+	INIT_WORK(&device->restore_device, do_restore_device);
+	INIT_WORK(&device->reload_device, do_reload_device);
+	INIT_WORK(&device->requeue_requests, do_requeue_requests);
+	device->state = DASD_STATE_NEW;
+	device->target = DASD_STATE_NEW;
+	mutex_init(&device->state_mutex);
+	spin_lock_init(&device->profile.lock);
+	return device;
+}
+
+/*
+ * Free memory of a device structure.
+ */
+void dasd_free_device(struct dasd_device *device)
+{
+	kfree(device->private);
+	free_page((unsigned long) device->erp_mem);
+	free_pages((unsigned long) device->ccw_mem, 1);
+	kfree(device);
+}
+
+/*
+ * Allocate memory for a new dasd block structure.
+ */
+struct dasd_block *dasd_alloc_block(void)
+{
+	struct dasd_block *block;
+
+	block = kzalloc(sizeof(*block), GFP_ATOMIC);
+	if (!block)
+		return ERR_PTR(-ENOMEM);
+	/* open_count = 0 means device online but not in use */
+	atomic_set(&block->open_count, -1);
+
+	atomic_set(&block->tasklet_scheduled, 0);
+	tasklet_init(&block->tasklet, dasd_block_tasklet,
+		     (unsigned long) block);
+	INIT_LIST_HEAD(&block->ccw_queue);
+	spin_lock_init(&block->queue_lock);
+	timer_setup(&block->timer, dasd_block_timeout, 0);
+	spin_lock_init(&block->profile.lock);
+
+	return block;
+}
+EXPORT_SYMBOL_GPL(dasd_alloc_block);
+
+/*
+ * Free memory of a dasd block structure.
+ */
+void dasd_free_block(struct dasd_block *block)
+{
+	kfree(block);
+}
+EXPORT_SYMBOL_GPL(dasd_free_block);
+
+/*
+ * Make a new device known to the system.
+ */
+static int dasd_state_new_to_known(struct dasd_device *device)
+{
+	int rc;
+
+	/*
+	 * As long as the device is not in state DASD_STATE_NEW we want to
+	 * keep the reference count > 0.
+	 */
+	dasd_get_device(device);
+
+	if (device->block) {
+		rc = dasd_alloc_queue(device->block);
+		if (rc) {
+			dasd_put_device(device);
+			return rc;
+		}
+	}
+	device->state = DASD_STATE_KNOWN;
+	return 0;
+}
+
+/*
+ * Let the system forget about a device.
+ */
+static int dasd_state_known_to_new(struct dasd_device *device)
+{
+	/* Disable extended error reporting for this device. */
+	dasd_eer_disable(device);
+	device->state = DASD_STATE_NEW;
+
+	if (device->block)
+		dasd_free_queue(device->block);
+
+	/* Give up reference we took in dasd_state_new_to_known. */
+	dasd_put_device(device);
+	return 0;
+}
+
+static struct dentry *dasd_debugfs_setup(const char *name,
+					 struct dentry *base_dentry)
+{
+	struct dentry *pde;
+
+	if (!base_dentry)
+		return NULL;
+	pde = debugfs_create_dir(name, base_dentry);
+	if (!pde || IS_ERR(pde))
+		return NULL;
+	return pde;
+}
+
+/*
+ * Allocate the block device structures, debugfs entries and the debug
+ * area for the device.
+ */
+static int dasd_state_known_to_basic(struct dasd_device *device)
+{
+	struct dasd_block *block = device->block;
+	int rc = 0;
+
+	/* Allocate and register gendisk structure. */
+	if (block) {
+		rc = dasd_gendisk_alloc(block);
+		if (rc)
+			return rc;
+		block->debugfs_dentry =
+			dasd_debugfs_setup(block->gdp->disk_name,
+					   dasd_debugfs_root_entry);
+		dasd_profile_init(&block->profile, block->debugfs_dentry);
+		if (dasd_global_profile_level == DASD_PROFILE_ON)
+			dasd_profile_on(&device->block->profile);
+	}
+	device->debugfs_dentry =
+		dasd_debugfs_setup(dev_name(&device->cdev->dev),
+				   dasd_debugfs_root_entry);
+	dasd_profile_init(&device->profile, device->debugfs_dentry);
+	dasd_hosts_init(device->debugfs_dentry, device);
+
+	/* register 'device' debug area, used for all DBF_DEV_XXX calls */
+	device->debug_area = debug_register(dev_name(&device->cdev->dev), 4, 1,
+					    8 * sizeof(long));
+	debug_register_view(device->debug_area, &debug_sprintf_view);
+	debug_set_level(device->debug_area, DBF_WARNING);
+	DBF_DEV_EVENT(DBF_EMERG, device, "%s", "debug area created");
+
+	device->state = DASD_STATE_BASIC;
+
+	return rc;
+}
+
+/*
+ * Remove the block device structures and the debug area again.
+ * Terminate any running i/o.
+ */
+static int dasd_state_basic_to_known(struct dasd_device *device)
+{
+	int rc;
+
+	if (device->discipline->basic_to_known) {
+		rc = device->discipline->basic_to_known(device);
+		if (rc)
+			return rc;
+	}
+
+	if (device->block) {
+		dasd_profile_exit(&device->block->profile);
+		debugfs_remove(device->block->debugfs_dentry);
+		dasd_gendisk_free(device->block);
+		dasd_block_clear_timer(device->block);
+	}
+	rc = dasd_flush_device_queue(device);
+	if (rc)
+		return rc;
+	dasd_device_clear_timer(device);
+	dasd_profile_exit(&device->profile);
+	dasd_hosts_exit(device);
+	debugfs_remove(device->debugfs_dentry);
+	DBF_DEV_EVENT(DBF_EMERG, device, "%p debug area deleted", device);
+	if (device->debug_area != NULL) {
+		debug_unregister(device->debug_area);
+		device->debug_area = NULL;
+	}
+	device->state = DASD_STATE_KNOWN;
+	return 0;
+}
+
+/*
+ * Do the initial analysis. The do_analysis function may return
+ * -EAGAIN in which case the device keeps the state DASD_STATE_BASIC
+ * until the discipline decides to continue the startup sequence
+ * by calling the function dasd_change_state. The eckd disciplines
+ * uses this to start a ccw that detects the format. The completion
+ * interrupt for this detection ccw uses the kernel event daemon to
+ * trigger the call to dasd_change_state. All this is done in the
+ * discipline code, see dasd_eckd.c.
+ * After the analysis ccw is done (do_analysis returned 0) the block
+ * device is setup.
+ * In case the analysis returns an error, the device setup is stopped
+ * (a fake disk was already added to allow formatting).
+ */
+static int dasd_state_basic_to_ready(struct dasd_device *device)
+{
+	int rc;
+	struct dasd_block *block;
+	struct gendisk *disk;
+
+	rc = 0;
+	block = device->block;
+	/* make disk known with correct capacity */
+	if (block) {
+		if (block->base->discipline->do_analysis != NULL)
+			rc = block->base->discipline->do_analysis(block);
+		if (rc) {
+			if (rc != -EAGAIN) {
+				device->state = DASD_STATE_UNFMT;
+				disk = device->block->gdp;
+				kobject_uevent(&disk_to_dev(disk)->kobj,
+					       KOBJ_CHANGE);
+				goto out;
+			}
+			return rc;
+		}
+		dasd_setup_queue(block);
+		set_capacity(block->gdp,
+			     block->blocks << block->s2b_shift);
+		device->state = DASD_STATE_READY;
+		rc = dasd_scan_partitions(block);
+		if (rc) {
+			device->state = DASD_STATE_BASIC;
+			return rc;
+		}
+	} else {
+		device->state = DASD_STATE_READY;
+	}
+out:
+	if (device->discipline->basic_to_ready)
+		rc = device->discipline->basic_to_ready(device);
+	return rc;
+}
+
+static inline
+int _wait_for_empty_queues(struct dasd_device *device)
+{
+	if (device->block)
+		return list_empty(&device->ccw_queue) &&
+			list_empty(&device->block->ccw_queue);
+	else
+		return list_empty(&device->ccw_queue);
+}
+
+/*
+ * Remove device from block device layer. Destroy dirty buffers.
+ * Forget format information. Check if the target level is basic
+ * and, if it is, create a fake disk for formatting.
+ */
+static int dasd_state_ready_to_basic(struct dasd_device *device)
+{
+	int rc;
+
+	device->state = DASD_STATE_BASIC;
+	if (device->block) {
+		struct dasd_block *block = device->block;
+		rc = dasd_flush_block_queue(block);
+		if (rc) {
+			device->state = DASD_STATE_READY;
+			return rc;
+		}
+		dasd_destroy_partitions(block);
+		block->blocks = 0;
+		block->bp_block = 0;
+		block->s2b_shift = 0;
+	}
+	return 0;
+}
+
+/*
+ * Back to basic.
+ */
+static int dasd_state_unfmt_to_basic(struct dasd_device *device)
+{
+	device->state = DASD_STATE_BASIC;
+	return 0;
+}
+
+/*
+ * Make the device online and schedule the bottom half to start
+ * the requeueing of requests from the Linux request queue to the
+ * ccw queue.
+ */
+static int dasd_state_ready_to_online(struct dasd_device *device)
+{
+	struct gendisk *disk;
+	struct disk_part_iter piter;
+	struct hd_struct *part;
+
+	device->state = DASD_STATE_ONLINE;
+	if (device->block) {
+		dasd_schedule_block_bh(device->block);
+		if ((device->features & DASD_FEATURE_USERAW)) {
+			disk = device->block->gdp;
+			kobject_uevent(&disk_to_dev(disk)->kobj, KOBJ_CHANGE);
+			return 0;
+		}
+		disk = device->block->bdev->bd_disk;
+		disk_part_iter_init(&piter, disk, DISK_PITER_INCL_PART0);
+		while ((part = disk_part_iter_next(&piter)))
+			kobject_uevent(&part_to_dev(part)->kobj, KOBJ_CHANGE);
+		disk_part_iter_exit(&piter);
+	}
+	return 0;
+}
+
+/*
+ * Stop the requeueing of requests again.
+ */
+static int dasd_state_online_to_ready(struct dasd_device *device)
+{
+	int rc;
+	struct gendisk *disk;
+	struct disk_part_iter piter;
+	struct hd_struct *part;
+
+	if (device->discipline->online_to_ready) {
+		rc = device->discipline->online_to_ready(device);
+		if (rc)
+			return rc;
+	}
+
+	device->state = DASD_STATE_READY;
+	if (device->block && !(device->features & DASD_FEATURE_USERAW)) {
+		disk = device->block->bdev->bd_disk;
+		disk_part_iter_init(&piter, disk, DISK_PITER_INCL_PART0);
+		while ((part = disk_part_iter_next(&piter)))
+			kobject_uevent(&part_to_dev(part)->kobj, KOBJ_CHANGE);
+		disk_part_iter_exit(&piter);
+	}
+	return 0;
+}
+
+/*
+ * Device startup state changes.
+ */
+static int dasd_increase_state(struct dasd_device *device)
+{
+	int rc;
+
+	rc = 0;
+	if (device->state == DASD_STATE_NEW &&
+	    device->target >= DASD_STATE_KNOWN)
+		rc = dasd_state_new_to_known(device);
+
+	if (!rc &&
+	    device->state == DASD_STATE_KNOWN &&
+	    device->target >= DASD_STATE_BASIC)
+		rc = dasd_state_known_to_basic(device);
+
+	if (!rc &&
+	    device->state == DASD_STATE_BASIC &&
+	    device->target >= DASD_STATE_READY)
+		rc = dasd_state_basic_to_ready(device);
+
+	if (!rc &&
+	    device->state == DASD_STATE_UNFMT &&
+	    device->target > DASD_STATE_UNFMT)
+		rc = -EPERM;
+
+	if (!rc &&
+	    device->state == DASD_STATE_READY &&
+	    device->target >= DASD_STATE_ONLINE)
+		rc = dasd_state_ready_to_online(device);
+
+	return rc;
+}
+
+/*
+ * Device shutdown state changes.
+ */
+static int dasd_decrease_state(struct dasd_device *device)
+{
+	int rc;
+
+	rc = 0;
+	if (device->state == DASD_STATE_ONLINE &&
+	    device->target <= DASD_STATE_READY)
+		rc = dasd_state_online_to_ready(device);
+
+	if (!rc &&
+	    device->state == DASD_STATE_READY &&
+	    device->target <= DASD_STATE_BASIC)
+		rc = dasd_state_ready_to_basic(device);
+
+	if (!rc &&
+	    device->state == DASD_STATE_UNFMT &&
+	    device->target <= DASD_STATE_BASIC)
+		rc = dasd_state_unfmt_to_basic(device);
+
+	if (!rc &&
+	    device->state == DASD_STATE_BASIC &&
+	    device->target <= DASD_STATE_KNOWN)
+		rc = dasd_state_basic_to_known(device);
+
+	if (!rc &&
+	    device->state == DASD_STATE_KNOWN &&
+	    device->target <= DASD_STATE_NEW)
+		rc = dasd_state_known_to_new(device);
+
+	return rc;
+}
+
+/*
+ * This is the main startup/shutdown routine.
+ */
+static void dasd_change_state(struct dasd_device *device)
+{
+	int rc;
+
+	if (device->state == device->target)
+		/* Already where we want to go today... */
+		return;
+	if (device->state < device->target)
+		rc = dasd_increase_state(device);
+	else
+		rc = dasd_decrease_state(device);
+	if (rc == -EAGAIN)
+		return;
+	if (rc)
+		device->target = device->state;
+
+	/* let user-space know that the device status changed */
+	kobject_uevent(&device->cdev->dev.kobj, KOBJ_CHANGE);
+
+	if (device->state == device->target)
+		wake_up(&dasd_init_waitq);
+}
+
+/*
+ * Kick starter for devices that did not complete the startup/shutdown
+ * procedure or were sleeping because of a pending state.
+ * dasd_kick_device will schedule a call to do_kick_device via the kernel
+ * event daemon.
+ */
+static void do_kick_device(struct work_struct *work)
+{
+	struct dasd_device *device = container_of(work, struct dasd_device,
+						  kick_work);
+
+	mutex_lock(&device->state_mutex);
+	dasd_change_state(device);
+	mutex_unlock(&device->state_mutex);
+	dasd_schedule_device_bh(device);
+	dasd_put_device(device);
+}
+
+void dasd_kick_device(struct dasd_device *device)
+{
+	dasd_get_device(device);
+	/* queue call to dasd_kick_device to the kernel event daemon. */
+	if (!schedule_work(&device->kick_work))
+		dasd_put_device(device);
+}
+EXPORT_SYMBOL(dasd_kick_device);
+
+/*
+ * dasd_reload_device will schedule a call to do_reload_device via the kernel
+ * event daemon.
+ */
+static void do_reload_device(struct work_struct *work)
+{
+	struct dasd_device *device = container_of(work, struct dasd_device,
+						  reload_device);
+	device->discipline->reload(device);
+	dasd_put_device(device);
+}
+
+void dasd_reload_device(struct dasd_device *device)
+{
+	dasd_get_device(device);
+	/* queue call to dasd_reload_device to the kernel event daemon. */
+	if (!schedule_work(&device->reload_device))
+		dasd_put_device(device);
+}
+EXPORT_SYMBOL(dasd_reload_device);
+
+/*
+ * dasd_restore_device will schedule a call to do_restore_device via the kernel
+ * event daemon.
+ */
+static void do_restore_device(struct work_struct *work)
+{
+	struct dasd_device *device = container_of(work, struct dasd_device,
+						  restore_device);
+	device->cdev->drv->restore(device->cdev);
+	dasd_put_device(device);
+}
+
+void dasd_restore_device(struct dasd_device *device)
+{
+	dasd_get_device(device);
+	/* queue call to dasd_restore_device to the kernel event daemon. */
+	if (!schedule_work(&device->restore_device))
+		dasd_put_device(device);
+}
+
+/*
+ * Set the target state for a device and start the state change.
+ */
+void dasd_set_target_state(struct dasd_device *device, int target)
+{
+	dasd_get_device(device);
+	mutex_lock(&device->state_mutex);
+	/* If we are in probeonly mode stop at DASD_STATE_READY. */
+	if (dasd_probeonly && target > DASD_STATE_READY)
+		target = DASD_STATE_READY;
+	if (device->target != target) {
+		if (device->state == target)
+			wake_up(&dasd_init_waitq);
+		device->target = target;
+	}
+	if (device->state != device->target)
+		dasd_change_state(device);
+	mutex_unlock(&device->state_mutex);
+	dasd_put_device(device);
+}
+EXPORT_SYMBOL(dasd_set_target_state);
+
+/*
+ * Enable a device and wait until it has reached its target state.
+ */
+static inline int _wait_for_device(struct dasd_device *device)
+{
+	return (device->state == device->target);
+}
+
+void dasd_enable_device(struct dasd_device *device)
+{
+	dasd_set_target_state(device, DASD_STATE_ONLINE);
+	if (device->state <= DASD_STATE_KNOWN)
+		/* No discipline for device found. */
+		dasd_set_target_state(device, DASD_STATE_NEW);
+	/* Now wait for the devices to come up. */
+	wait_event(dasd_init_waitq, _wait_for_device(device));
+
+	dasd_reload_device(device);
+	if (device->discipline->kick_validate)
+		device->discipline->kick_validate(device);
+}
+EXPORT_SYMBOL(dasd_enable_device);
+
+/*
+ * SECTION: device operation (interrupt handler, start i/o, term i/o ...)
+ */
+
+unsigned int dasd_global_profile_level = DASD_PROFILE_OFF;
+
+#ifdef CONFIG_DASD_PROFILE
+struct dasd_profile dasd_global_profile = {
+	.lock = __SPIN_LOCK_UNLOCKED(dasd_global_profile.lock),
+};
+static struct dentry *dasd_debugfs_global_entry;
+
+/*
+ * Add profiling information for cqr before execution.
+ */
+static void dasd_profile_start(struct dasd_block *block,
+			       struct dasd_ccw_req *cqr,
+			       struct request *req)
+{
+	struct list_head *l;
+	unsigned int counter;
+	struct dasd_device *device;
+
+	/* count the length of the chanq for statistics, capped at 31 */
+	counter = 0;
+	if (dasd_global_profile_level || block->profile.data)
+		list_for_each(l, &block->ccw_queue)
+			if (++counter >= 31)
+				break;
+
+	spin_lock(&dasd_global_profile.lock);
+	if (dasd_global_profile.data) {
+		dasd_global_profile.data->dasd_io_nr_req[counter]++;
+		if (rq_data_dir(req) == READ)
+			dasd_global_profile.data->dasd_read_nr_req[counter]++;
+	}
+	spin_unlock(&dasd_global_profile.lock);
+
+	spin_lock(&block->profile.lock);
+	if (block->profile.data) {
+		block->profile.data->dasd_io_nr_req[counter]++;
+		if (rq_data_dir(req) == READ)
+			block->profile.data->dasd_read_nr_req[counter]++;
+	}
+	spin_unlock(&block->profile.lock);
+
+	/*
+	 * We count the request for the start device, even though it may run on
+	 * some other device due to error recovery. This way we make sure that
+	 * we count each request only once.
+	 */
+	device = cqr->startdev;
+	if (device->profile.data) {
+		counter = 1; /* request is not yet queued on the start device */
+		list_for_each(l, &device->ccw_queue)
+			if (++counter >= 31)
+				break;
+	}
+	spin_lock(&device->profile.lock);
+	if (device->profile.data) {
+		device->profile.data->dasd_io_nr_req[counter]++;
+		if (rq_data_dir(req) == READ)
+			device->profile.data->dasd_read_nr_req[counter]++;
+	}
+	spin_unlock(&device->profile.lock);
+}
+
+/*
+ * Add profiling information for cqr after execution.
+ */
+
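+/*
+ * Map a value to its histogram bucket: the loop stops at the first
+ * index i with value < 2^(i+2), e.g. 100 lands in bucket 5; bucket 31
+ * collects everything from 2^32 upwards.
+ */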
+#define dasd_profile_counter(value, index)			   \
+{								   \
+	for (index = 0; index < 31 && value >> (2+index); index++) \
+		;						   \
+}
+
+static void dasd_profile_end_add_data(struct dasd_profile_info *data,
+				      int is_alias,
+				      int is_tpm,
+				      int is_read,
+				      long sectors,
+				      int sectors_ind,
+				      int tottime_ind,
+				      int tottimeps_ind,
+				      int strtime_ind,
+				      int irqtime_ind,
+				      int irqtimeps_ind,
+				      int endtime_ind)
+{
+	/* in case of an overflow, reset the whole profile */
+	if (data->dasd_io_reqs == UINT_MAX) {
+		memset(data, 0, sizeof(*data));
+		ktime_get_real_ts64(&data->starttod);
+	}
+	data->dasd_io_reqs++;
+	data->dasd_io_sects += sectors;
+	if (is_alias)
+		data->dasd_io_alias++;
+	if (is_tpm)
+		data->dasd_io_tpm++;
+
+	data->dasd_io_secs[sectors_ind]++;
+	data->dasd_io_times[tottime_ind]++;
+	data->dasd_io_timps[tottimeps_ind]++;
+	data->dasd_io_time1[strtime_ind]++;
+	data->dasd_io_time2[irqtime_ind]++;
+	data->dasd_io_time2ps[irqtimeps_ind]++;
+	data->dasd_io_time3[endtime_ind]++;
+
+	if (is_read) {
+		data->dasd_read_reqs++;
+		data->dasd_read_sects += sectors;
+		if (is_alias)
+			data->dasd_read_alias++;
+		if (is_tpm)
+			data->dasd_read_tpm++;
+		data->dasd_read_secs[sectors_ind]++;
+		data->dasd_read_times[tottime_ind]++;
+		data->dasd_read_time1[strtime_ind]++;
+		data->dasd_read_time2[irqtime_ind]++;
+		data->dasd_read_time3[endtime_ind]++;
+	}
+}
+
+static void dasd_profile_end(struct dasd_block *block,
+			     struct dasd_ccw_req *cqr,
+			     struct request *req)
+{
+	unsigned long strtime, irqtime, endtime, tottime;
+	unsigned long tottimeps, sectors;
+	struct dasd_device *device;
+	int sectors_ind, tottime_ind, tottimeps_ind, strtime_ind;
+	int irqtime_ind, irqtimeps_ind, endtime_ind;
+	struct dasd_profile_info *data;
+
+	device = cqr->startdev;
+	if (!(dasd_global_profile_level ||
+	      block->profile.data ||
+	      device->profile.data))
+		return;
+
+	sectors = blk_rq_sectors(req);
+	if (!cqr->buildclk || !cqr->startclk ||
+	    !cqr->stopclk || !cqr->endclk ||
+	    !sectors)
+		return;
+
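+	/* TOD clock deltas shifted right by 12 bits are microseconds */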
+	strtime = ((cqr->startclk - cqr->buildclk) >> 12);
+	irqtime = ((cqr->stopclk - cqr->startclk) >> 12);
+	endtime = ((cqr->endclk - cqr->stopclk) >> 12);
+	tottime = ((cqr->endclk - cqr->buildclk) >> 12);
+	tottimeps = tottime / sectors;
+
+	dasd_profile_counter(sectors, sectors_ind);
+	dasd_profile_counter(tottime, tottime_ind);
+	dasd_profile_counter(tottimeps, tottimeps_ind);
+	dasd_profile_counter(strtime, strtime_ind);
+	dasd_profile_counter(irqtime, irqtime_ind);
+	dasd_profile_counter(irqtime / sectors, irqtimeps_ind);
+	dasd_profile_counter(endtime, endtime_ind);
+
+	spin_lock(&dasd_global_profile.lock);
+	if (dasd_global_profile.data) {
+		data = dasd_global_profile.data;
+		data->dasd_sum_times += tottime;
+		data->dasd_sum_time_str += strtime;
+		data->dasd_sum_time_irq += irqtime;
+		data->dasd_sum_time_end += endtime;
+		dasd_profile_end_add_data(dasd_global_profile.data,
+					  cqr->startdev != block->base,
+					  cqr->cpmode == 1,
+					  rq_data_dir(req) == READ,
+					  sectors, sectors_ind, tottime_ind,
+					  tottimeps_ind, strtime_ind,
+					  irqtime_ind, irqtimeps_ind,
+					  endtime_ind);
+	}
+	spin_unlock(&dasd_global_profile.lock);
+
+	spin_lock(&block->profile.lock);
+	if (block->profile.data) {
+		data = block->profile.data;
+		data->dasd_sum_times += tottime;
+		data->dasd_sum_time_str += strtime;
+		data->dasd_sum_time_irq += irqtime;
+		data->dasd_sum_time_end += endtime;
+		dasd_profile_end_add_data(block->profile.data,
+					  cqr->startdev != block->base,
+					  cqr->cpmode == 1,
+					  rq_data_dir(req) == READ,
+					  sectors, sectors_ind, tottime_ind,
+					  tottimeps_ind, strtime_ind,
+					  irqtime_ind, irqtimeps_ind,
+					  endtime_ind);
+	}
+	spin_unlock(&block->profile.lock);
+
+	spin_lock(&device->profile.lock);
+	if (device->profile.data) {
+		data = device->profile.data;
+		data->dasd_sum_times += tottime;
+		data->dasd_sum_time_str += strtime;
+		data->dasd_sum_time_irq += irqtime;
+		data->dasd_sum_time_end += endtime;
+		dasd_profile_end_add_data(device->profile.data,
+					  cqr->startdev != block->base,
+					  cqr->cpmode == 1,
+					  rq_data_dir(req) == READ,
+					  sectors, sectors_ind, tottime_ind,
+					  tottimeps_ind, strtime_ind,
+					  irqtime_ind, irqtimeps_ind,
+					  endtime_ind);
+	}
+	spin_unlock(&device->profile.lock);
+}
+
+void dasd_profile_reset(struct dasd_profile *profile)
+{
+	struct dasd_profile_info *data;
+
+	spin_lock_bh(&profile->lock);
+	data = profile->data;
+	if (!data) {
+		spin_unlock_bh(&profile->lock);
+		return;
+	}
+	memset(data, 0, sizeof(*data));
+	ktime_get_real_ts64(&data->starttod);
+	spin_unlock_bh(&profile->lock);
+}
+
+int dasd_profile_on(struct dasd_profile *profile)
+{
+	struct dasd_profile_info *data;
+
+	data = kzalloc(sizeof(*data), GFP_KERNEL);
+	if (!data)
+		return -ENOMEM;
+	spin_lock_bh(&profile->lock);
+	if (profile->data) {
+		spin_unlock_bh(&profile->lock);
+		kfree(data);
+		return 0;
+	}
+	ktime_get_real_ts64(&data->starttod);
+	profile->data = data;
+	spin_unlock_bh(&profile->lock);
+	return 0;
+}
+
+void dasd_profile_off(struct dasd_profile *profile)
+{
+	spin_lock_bh(&profile->lock);
+	kfree(profile->data);
+	profile->data = NULL;
+	spin_unlock_bh(&profile->lock);
+}
+
+char *dasd_get_user_string(const char __user *user_buf, size_t user_len)
+{
+	char *buffer;
+
+	buffer = vmalloc(user_len + 1);
+	if (buffer == NULL)
+		return ERR_PTR(-ENOMEM);
+	if (copy_from_user(buffer, user_buf, user_len) != 0) {
+		vfree(buffer);
+		return ERR_PTR(-EFAULT);
+	}
+	/* got the string, now strip linefeed. */
+	if (buffer[user_len - 1] == '\n')
+		buffer[user_len - 1] = 0;
+	else
+		buffer[user_len] = 0;
+	return buffer;
+}
+
+static ssize_t dasd_stats_write(struct file *file,
+				const char __user *user_buf,
+				size_t user_len, loff_t *pos)
+{
+	char *buffer, *str;
+	int rc;
+	struct seq_file *m = (struct seq_file *)file->private_data;
+	struct dasd_profile *prof = m->private;
+
+	if (user_len > 65536)
+		user_len = 65536;
+	buffer = dasd_get_user_string(user_buf, user_len);
+	if (IS_ERR(buffer))
+		return PTR_ERR(buffer);
+
+	str = skip_spaces(buffer);
+	rc = user_len;
+	if (strncmp(str, "reset", 5) == 0) {
+		dasd_profile_reset(prof);
+	} else if (strncmp(str, "on", 2) == 0) {
+		rc = dasd_profile_on(prof);
+		if (rc)
+			goto out;
+		rc = user_len;
+		if (prof == &dasd_global_profile) {
+			dasd_profile_reset(prof);
+			dasd_global_profile_level = DASD_PROFILE_GLOBAL_ONLY;
+		}
+	} else if (strncmp(str, "off", 3) == 0) {
+		if (prof == &dasd_global_profile)
+			dasd_global_profile_level = DASD_PROFILE_OFF;
+		dasd_profile_off(prof);
+	} else
+		rc = -EINVAL;
+out:
+	vfree(buffer);
+	return rc;
+}
+
+static void dasd_stats_array(struct seq_file *m, unsigned int *array)
+{
+	int i;
+
+	for (i = 0; i < 32; i++)
+		seq_printf(m, "%u ", array[i]);
+	seq_putc(m, '\n');
+}
+
+static void dasd_stats_seq_print(struct seq_file *m,
+				 struct dasd_profile_info *data)
+{
+	seq_printf(m, "start_time %lld.%09ld\n",
+		   (s64)data->starttod.tv_sec, data->starttod.tv_nsec);
+	seq_printf(m, "total_requests %u\n", data->dasd_io_reqs);
+	seq_printf(m, "total_sectors %u\n", data->dasd_io_sects);
+	seq_printf(m, "total_pav %u\n", data->dasd_io_alias);
+	seq_printf(m, "total_hpf %u\n", data->dasd_io_tpm);
+	seq_printf(m, "avg_total %lu\n", data->dasd_io_reqs ?
+		   data->dasd_sum_times / data->dasd_io_reqs : 0UL);
+	seq_printf(m, "avg_build_to_ssch %lu\n", data->dasd_io_reqs ?
+		   data->dasd_sum_time_str / data->dasd_io_reqs : 0UL);
+	seq_printf(m, "avg_ssch_to_irq %lu\n", data->dasd_io_reqs ?
+		   data->dasd_sum_time_irq / data->dasd_io_reqs : 0UL);
+	seq_printf(m, "avg_irq_to_end %lu\n", data->dasd_io_reqs ?
+		   data->dasd_sum_time_end / data->dasd_io_reqs : 0UL);
+	seq_puts(m, "histogram_sectors ");
+	dasd_stats_array(m, data->dasd_io_secs);
+	seq_puts(m, "histogram_io_times ");
+	dasd_stats_array(m, data->dasd_io_times);
+	seq_puts(m, "histogram_io_times_weighted ");
+	dasd_stats_array(m, data->dasd_io_timps);
+	seq_puts(m, "histogram_time_build_to_ssch ");
+	dasd_stats_array(m, data->dasd_io_time1);
+	seq_puts(m, "histogram_time_ssch_to_irq ");
+	dasd_stats_array(m, data->dasd_io_time2);
+	seq_puts(m, "histogram_time_ssch_to_irq_weighted ");
+	dasd_stats_array(m, data->dasd_io_time2ps);
+	seq_puts(m, "histogram_time_irq_to_end ");
+	dasd_stats_array(m, data->dasd_io_time3);
+	seq_puts(m, "histogram_ccw_queue_length ");
+	dasd_stats_array(m, data->dasd_io_nr_req);
+	seq_printf(m, "total_read_requests %u\n", data->dasd_read_reqs);
+	seq_printf(m, "total_read_sectors %u\n", data->dasd_read_sects);
+	seq_printf(m, "total_read_pav %u\n", data->dasd_read_alias);
+	seq_printf(m, "total_read_hpf %u\n", data->dasd_read_tpm);
+	seq_puts(m, "histogram_read_sectors ");
+	dasd_stats_array(m, data->dasd_read_secs);
+	seq_puts(m, "histogram_read_times ");
+	dasd_stats_array(m, data->dasd_read_times);
+	seq_puts(m, "histogram_read_time_build_to_ssch ");
+	dasd_stats_array(m, data->dasd_read_time1);
+	seq_puts(m, "histogram_read_time_ssch_to_irq ");
+	dasd_stats_array(m, data->dasd_read_time2);
+	seq_puts(m, "histogram_read_time_irq_to_end ");
+	dasd_stats_array(m, data->dasd_read_time3);
+	seq_puts(m, "histogram_read_ccw_queue_length ");
+	dasd_stats_array(m, data->dasd_read_nr_req);
+}
+
+static int dasd_stats_show(struct seq_file *m, void *v)
+{
+	struct dasd_profile *profile;
+	struct dasd_profile_info *data;
+
+	profile = m->private;
+	spin_lock_bh(&profile->lock);
+	data = profile->data;
+	if (!data) {
+		spin_unlock_bh(&profile->lock);
+		seq_puts(m, "disabled\n");
+		return 0;
+	}
+	dasd_stats_seq_print(m, data);
+	spin_unlock_bh(&profile->lock);
+	return 0;
+}
+
+static int dasd_stats_open(struct inode *inode, struct file *file)
+{
+	struct dasd_profile *profile = inode->i_private;
+	return single_open(file, dasd_stats_show, profile);
+}
+
+static const struct file_operations dasd_stats_raw_fops = {
+	.owner		= THIS_MODULE,
+	.open		= dasd_stats_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= single_release,
+	.write		= dasd_stats_write,
+};
+
+static void dasd_profile_init(struct dasd_profile *profile,
+			      struct dentry *base_dentry)
+{
+	umode_t mode;
+	struct dentry *pde;
+
+	if (!base_dentry)
+		return;
+	profile->dentry = NULL;
+	profile->data = NULL;
+	mode = (S_IRUSR | S_IWUSR | S_IFREG);
+	pde = debugfs_create_file("statistics", mode, base_dentry,
+				  profile, &dasd_stats_raw_fops);
+	if (pde && !IS_ERR(pde))
+		profile->dentry = pde;
+	return;
+}
+
+static void dasd_profile_exit(struct dasd_profile *profile)
+{
+	dasd_profile_off(profile);
+	debugfs_remove(profile->dentry);
+	profile->dentry = NULL;
+}
+
+static void dasd_statistics_removeroot(void)
+{
+	dasd_global_profile_level = DASD_PROFILE_OFF;
+	dasd_profile_exit(&dasd_global_profile);
+	debugfs_remove(dasd_debugfs_global_entry);
+	debugfs_remove(dasd_debugfs_root_entry);
+}
+
+static void dasd_statistics_createroot(void)
+{
+	struct dentry *pde;
+
+	dasd_debugfs_root_entry = NULL;
+	pde = debugfs_create_dir("dasd", NULL);
+	if (!pde || IS_ERR(pde))
+		goto error;
+	dasd_debugfs_root_entry = pde;
+	pde = debugfs_create_dir("global", dasd_debugfs_root_entry);
+	if (!pde || IS_ERR(pde))
+		goto error;
+	dasd_debugfs_global_entry = pde;
+	dasd_profile_init(&dasd_global_profile, dasd_debugfs_global_entry);
+	return;
+
+error:
+	DBF_EVENT(DBF_ERR, "%s",
+		  "Creation of the dasd debugfs interface failed");
+	dasd_statistics_removeroot();
+	return;
+}
+
+#else
+#define dasd_profile_start(block, cqr, req) do {} while (0)
+#define dasd_profile_end(block, cqr, req) do {} while (0)
+
+static void dasd_statistics_createroot(void)
+{
+	return;
+}
+
+static void dasd_statistics_removeroot(void)
+{
+	return;
+}
+
+int dasd_stats_generic_show(struct seq_file *m, void *v)
+{
+	seq_puts(m, "Statistics are not activated in this kernel\n");
+	return 0;
+}
+
+static void dasd_profile_init(struct dasd_profile *profile,
+			      struct dentry *base_dentry)
+{
+	return;
+}
+
+static void dasd_profile_exit(struct dasd_profile *profile)
+{
+	return;
+}
+
+int dasd_profile_on(struct dasd_profile *profile)
+{
+	return 0;
+}
+
+#endif				/* CONFIG_DASD_PROFILE */
+
+static int dasd_hosts_show(struct seq_file *m, void *v)
+{
+	struct dasd_device *device;
+	int rc = -EOPNOTSUPP;
+
+	device = m->private;
+	dasd_get_device(device);
+
+	if (device->discipline->hosts_print)
+		rc = device->discipline->hosts_print(device, m);
+
+	dasd_put_device(device);
+	return rc;
+}
+
+static int dasd_hosts_open(struct inode *inode, struct file *file)
+{
+	struct dasd_device *device = inode->i_private;
+
+	return single_open(file, dasd_hosts_show, device);
+}
+
+static const struct file_operations dasd_hosts_fops = {
+	.owner		= THIS_MODULE,
+	.open		= dasd_hosts_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= single_release,
+};
+
+static void dasd_hosts_exit(struct dasd_device *device)
+{
+	debugfs_remove(device->hosts_dentry);
+	device->hosts_dentry = NULL;
+}
+
+static void dasd_hosts_init(struct dentry *base_dentry,
+			    struct dasd_device *device)
+{
+	struct dentry *pde;
+	umode_t mode;
+
+	if (!base_dentry)
+		return;
+
+	mode = S_IRUSR | S_IFREG;
+	pde = debugfs_create_file("host_access_list", mode, base_dentry,
+				  device, &dasd_hosts_fops);
+	if (pde && !IS_ERR(pde))
+		device->hosts_dentry = pde;
+}
+
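+/*
+ * Allocate a ccw request from the device's preallocated chunk pool.
+ * If no cqr is passed in, space for the struct itself is carved out
+ * of the chunk as well. Takes a reference on the device that is
+ * dropped again in dasd_sfree_request.
+ */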
+struct dasd_ccw_req *dasd_smalloc_request(int magic, int cplength, int datasize,
+					  struct dasd_device *device,
+					  struct dasd_ccw_req *cqr)
+{
+	unsigned long flags;
+	char *data, *chunk;
+	int size = 0;
+
+	if (cplength > 0)
+		size += cplength * sizeof(struct ccw1);
+	if (datasize > 0)
+		size += datasize;
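+	/* reserve space for the cqr itself, rounded up to 8 bytes */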
+	if (!cqr)
+		size += (sizeof(*cqr) + 7L) & -8L;
+
+	spin_lock_irqsave(&device->mem_lock, flags);
+	data = chunk = dasd_alloc_chunk(&device->ccw_chunks, size);
+	spin_unlock_irqrestore(&device->mem_lock, flags);
+	if (!chunk)
+		return ERR_PTR(-ENOMEM);
+	if (!cqr) {
+		cqr = (void *) data;
+		data += (sizeof(*cqr) + 7L) & -8L;
+	}
+	memset(cqr, 0, sizeof(*cqr));
+	cqr->mem_chunk = chunk;
+	if (cplength > 0) {
+		cqr->cpaddr = data;
+		data += cplength * sizeof(struct ccw1);
+		memset(cqr->cpaddr, 0, cplength * sizeof(struct ccw1));
+	}
+	if (datasize > 0) {
+		cqr->data = data;
+		memset(cqr->data, 0, datasize);
+	}
+	cqr->magic = magic;
+	set_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
+	dasd_get_device(device);
+	return cqr;
+}
+EXPORT_SYMBOL(dasd_smalloc_request);
+
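+/*
+ * Return the request memory to the chunk pool and drop the device
+ * reference taken at allocation time.
+ */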
+void dasd_sfree_request(struct dasd_ccw_req *cqr, struct dasd_device *device)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&device->mem_lock, flags);
+	dasd_free_chunk(&device->ccw_chunks, cqr->mem_chunk);
+	spin_unlock_irqrestore(&device->mem_lock, flags);
+	dasd_put_device(device);
+}
+EXPORT_SYMBOL(dasd_sfree_request);
+
+/*
+ * Check discipline magic in cqr.
+ */
+static inline int dasd_check_cqr(struct dasd_ccw_req *cqr)
+{
+	struct dasd_device *device;
+
+	if (cqr == NULL)
+		return -EINVAL;
+	device = cqr->startdev;
+	if (strncmp((char *) &cqr->magic, device->discipline->ebcname, 4)) {
+		DBF_DEV_EVENT(DBF_WARNING, device,
+			    " dasd_ccw_req 0x%08x magic doesn't match"
+			    " discipline 0x%08x",
+			    cqr->magic,
+			    *(unsigned int *) device->discipline->name);
+		return -EINVAL;
+	}
+	return 0;
+}
+
+/*
+ * Terminate the current i/o and set the request to clear_pending.
+ * Timer keeps device running.
+ * ccw_device_clear can fail if the i/o subsystem
+ * is in a bad mood.
+ */
+int dasd_term_IO(struct dasd_ccw_req *cqr)
+{
+	struct dasd_device *device;
+	int retries, rc;
+	char errorstring[ERRORLENGTH];
+
+	/* Check the cqr */
+	rc = dasd_check_cqr(cqr);
+	if (rc)
+		return rc;
+	retries = 0;
+	device = (struct dasd_device *) cqr->startdev;
+	while ((retries < 5) && (cqr->status == DASD_CQR_IN_IO)) {
+		rc = ccw_device_clear(device->cdev, (long) cqr);
+		switch (rc) {
+		case 0:	/* termination successful */
+			cqr->status = DASD_CQR_CLEAR_PENDING;
+			cqr->stopclk = get_tod_clock();
+			cqr->starttime = 0;
+			DBF_DEV_EVENT(DBF_DEBUG, device,
+				      "terminate cqr %p successful",
+				      cqr);
+			break;
+		case -ENODEV:
+			DBF_DEV_EVENT(DBF_ERR, device, "%s",
+				      "device gone, retry");
+			break;
+		case -EINVAL:
+			/*
+			 * device not valid so no I/O could be running
+			 * handle CQR as termination successful
+			 */
+			cqr->status = DASD_CQR_CLEARED;
+			cqr->stopclk = get_tod_clock();
+			cqr->starttime = 0;
+			/* no retries for invalid devices */
+			cqr->retries = -1;
+			DBF_DEV_EVENT(DBF_ERR, device, "%s",
+				      "EINVAL, handle as terminated");
+			/* fake rc to success */
+			rc = 0;
+			break;
+		default:
+			/* internal error 10 - unknown rc */
+			snprintf(errorstring, ERRORLENGTH, "10 %d", rc);
+			dev_err(&device->cdev->dev, "An error occurred in the "
+				"DASD device driver, reason=%s\n", errorstring);
+			BUG();
+			break;
+		}
+		retries++;
+	}
+	dasd_schedule_device_bh(device);
+	return rc;
+}
+EXPORT_SYMBOL(dasd_term_IO);
+
+/*
+ * Start the i/o. This start_IO can fail if the channel is really busy.
+ * In that case set up a timer to start the request later.
+ */
+int dasd_start_IO(struct dasd_ccw_req *cqr)
+{
+	struct dasd_device *device;
+	int rc;
+	char errorstring[ERRORLENGTH];
+
+	/* Check the cqr */
+	rc = dasd_check_cqr(cqr);
+	if (rc) {
+		cqr->intrc = rc;
+		return rc;
+	}
+	device = (struct dasd_device *) cqr->startdev;
+	if (((cqr->block &&
+	      test_bit(DASD_FLAG_LOCK_STOLEN, &cqr->block->base->flags)) ||
+	     test_bit(DASD_FLAG_LOCK_STOLEN, &device->flags)) &&
+	    !test_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags)) {
+		DBF_DEV_EVENT(DBF_DEBUG, device, "start_IO: return request %p "
+			      "because of stolen lock", cqr);
+		cqr->status = DASD_CQR_ERROR;
+		cqr->intrc = -EPERM;
+		return -EPERM;
+	}
+	if (cqr->retries < 0) {
+		/* internal error 14 - start_IO ran out of retries */
+		sprintf(errorstring, "14 %p", cqr);
+		dev_err(&device->cdev->dev, "An error occurred in the DASD "
+			"device driver, reason=%s\n", errorstring);
+		cqr->status = DASD_CQR_ERROR;
+		return -EIO;
+	}
+	cqr->startclk = get_tod_clock();
+	cqr->starttime = jiffies;
+	cqr->retries--;
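+	/*
+	 * Restrict the path mask to the operational paths; if none of
+	 * the requested paths is left, fall back to all operational
+	 * paths. Path verification requests manage their own path mask.
+	 */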
+	if (!test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags)) {
+		cqr->lpm &= dasd_path_get_opm(device);
+		if (!cqr->lpm)
+			cqr->lpm = dasd_path_get_opm(device);
+	}
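+	/* cpmode == 1 means a transport mode (fcx) channel program */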
+	if (cqr->cpmode == 1) {
+		rc = ccw_device_tm_start(device->cdev, cqr->cpaddr,
+					 (long) cqr, cqr->lpm);
+	} else {
+		rc = ccw_device_start(device->cdev, cqr->cpaddr,
+				      (long) cqr, cqr->lpm, 0);
+	}
+	switch (rc) {
+	case 0:
+		cqr->status = DASD_CQR_IN_IO;
+		break;
+	case -EBUSY:
+		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
+			      "start_IO: device busy, retry later");
+		break;
+	case -EACCES:
+		/* -EACCES indicates that the request used only a subset of the
+		 * available paths and all these paths are gone. If the lpm of
+		 * this request was only a subset of the opm (e.g. the ppm) then
+		 * we just do a retry with all available paths.
+		 * If we already use the full opm, something is amiss, and we
+		 * need a full path verification.
+		 */
+		if (test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags)) {
+			DBF_DEV_EVENT(DBF_WARNING, device,
+				      "start_IO: selected paths gone (%x)",
+				      cqr->lpm);
+		} else if (cqr->lpm != dasd_path_get_opm(device)) {
+			cqr->lpm = dasd_path_get_opm(device);
+			DBF_DEV_EVENT(DBF_DEBUG, device, "%s",
+				      "start_IO: selected paths gone,"
+				      " retry on all paths");
+		} else {
+			DBF_DEV_EVENT(DBF_WARNING, device, "%s",
+				      "start_IO: all paths in opm gone,"
+				      " do path verification");
+			dasd_generic_last_path_gone(device);
+			dasd_path_no_path(device);
+			dasd_path_set_tbvpm(device,
+					  ccw_device_get_path_mask(
+						  device->cdev));
+		}
+		break;
+	case -ENODEV:
+		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
+			      "start_IO: -ENODEV device gone, retry");
+		break;
+	case -EIO:
+		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
+			      "start_IO: -EIO device gone, retry");
+		break;
+	case -EINVAL:
+		/* most likely caused in power management context */
+		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
+			      "start_IO: -EINVAL device currently "
+			      "not accessible");
+		break;
+	default:
+		/* internal error 11 - unknown rc */
+		snprintf(errorstring, ERRORLENGTH, "11 %d", rc);
+		dev_err(&device->cdev->dev,
+			"An error occurred in the DASD device driver, "
+			"reason=%s\n", errorstring);
+		BUG();
+		break;
+	}
+	cqr->intrc = rc;
+	return rc;
+}
+EXPORT_SYMBOL(dasd_start_IO);
+
+/*
+ * Timeout function for dasd devices. This is used for different purposes
+ *  1) missing interrupt handler for normal operation
+ *  2) delayed start of request where start_IO failed with -EBUSY
+ *  3) timeout for missing state change interrupts
+ * The head of the ccw queue will have status DASD_CQR_IN_IO for 1),
+ * DASD_CQR_QUEUED for 2) and 3).
+ */
+static void dasd_device_timeout(struct timer_list *t)
+{
+	unsigned long flags;
+	struct dasd_device *device;
+
+	device = from_timer(device, t, timer);
+	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
+	/* re-activate request queue */
+	dasd_device_remove_stop_bits(device, DASD_STOPPED_PENDING);
+	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
+	dasd_schedule_device_bh(device);
+}
+
+/*
+ * Setup timeout for a device in jiffies.
+ */
+void dasd_device_set_timer(struct dasd_device *device, int expires)
+{
+	if (expires == 0)
+		del_timer(&device->timer);
+	else
+		mod_timer(&device->timer, jiffies + expires);
+}
+EXPORT_SYMBOL(dasd_device_set_timer);
+
+/*
+ * Clear timeout for a device.
+ */
+void dasd_device_clear_timer(struct dasd_device *device)
+{
+	del_timer(&device->timer);
+}
+EXPORT_SYMBOL(dasd_device_clear_timer);
+
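+/*
+ * Called when the interrupt handler receives an error pointer instead
+ * of an irb: requeue the killed request so the bottom half retries it.
+ */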
+static void dasd_handle_killed_request(struct ccw_device *cdev,
+				       unsigned long intparm)
+{
+	struct dasd_ccw_req *cqr;
+	struct dasd_device *device;
+
+	if (!intparm)
+		return;
+	cqr = (struct dasd_ccw_req *) intparm;
+	if (cqr->status != DASD_CQR_IN_IO) {
+		DBF_EVENT_DEVID(DBF_DEBUG, cdev,
+				"invalid status in handle_killed_request: "
+				"%02x", cqr->status);
+		return;
+	}
+
+	device = dasd_device_from_cdev_locked(cdev);
+	if (IS_ERR(device)) {
+		DBF_EVENT_DEVID(DBF_DEBUG, cdev, "%s",
+				"unable to get device from cdev");
+		return;
+	}
+
+	if (!cqr->startdev ||
+	    device != cqr->startdev ||
+	    strncmp(cqr->startdev->discipline->ebcname,
+		    (char *) &cqr->magic, 4)) {
+		DBF_EVENT_DEVID(DBF_DEBUG, cdev, "%s",
+				"invalid device in request");
+		dasd_put_device(device);
+		return;
+	}
+
+	/* Schedule request to be retried. */
+	cqr->status = DASD_CQR_QUEUED;
+
+	dasd_device_clear_timer(device);
+	dasd_schedule_device_bh(device);
+	dasd_put_device(device);
+}
+
+void dasd_generic_handle_state_change(struct dasd_device *device)
+{
+	/* First of all start sense subsystem status request. */
+	dasd_eer_snss(device);
+
+	dasd_device_remove_stop_bits(device, DASD_STOPPED_PENDING);
+	dasd_schedule_device_bh(device);
+	if (device->block) {
+		dasd_schedule_block_bh(device->block);
+		if (device->block->request_queue)
+			blk_mq_run_hw_queues(device->block->request_queue,
+					     true);
+	}
+}
+EXPORT_SYMBOL_GPL(dasd_generic_handle_state_change);
+
+static int dasd_check_hpf_error(struct irb *irb)
+{
+	return (scsw_tm_is_valid_schxs(&irb->scsw) &&
+	    (irb->scsw.tm.sesq == SCSW_SESQ_DEV_NOFCX ||
+	     irb->scsw.tm.sesq == SCSW_SESQ_PATH_NOFCX));
+}
+
+/*
+ * Interrupt handler for "normal" ssch-io based dasd devices.
+ */
+void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm,
+		      struct irb *irb)
+{
+	struct dasd_ccw_req *cqr, *next;
+	struct dasd_device *device;
+	unsigned long now;
+	int nrf_suppressed = 0;
+	int fp_suppressed = 0;
+	u8 *sense = NULL;
+	int expires;
+
+	cqr = (struct dasd_ccw_req *) intparm;
+	if (IS_ERR(irb)) {
+		switch (PTR_ERR(irb)) {
+		case -EIO:
+			if (cqr && cqr->status == DASD_CQR_CLEAR_PENDING) {
+				device = cqr->startdev;
+				cqr->status = DASD_CQR_CLEARED;
+				dasd_device_clear_timer(device);
+				wake_up(&dasd_flush_wq);
+				dasd_schedule_device_bh(device);
+				return;
+			}
+			break;
+		case -ETIMEDOUT:
+			DBF_EVENT_DEVID(DBF_WARNING, cdev, "%s: "
+					"request timed out\n", __func__);
+			break;
+		default:
+			DBF_EVENT_DEVID(DBF_WARNING, cdev, "%s: "
+					"unknown error %ld\n", __func__,
+					PTR_ERR(irb));
+		}
+		dasd_handle_killed_request(cdev, intparm);
+		return;
+	}
+
+	now = get_tod_clock();
+	/* check for conditions that should be handled immediately */
+	if (!cqr ||
+	    !(scsw_dstat(&irb->scsw) == (DEV_STAT_CHN_END | DEV_STAT_DEV_END) &&
+	      scsw_cstat(&irb->scsw) == 0)) {
+		if (cqr)
+			memcpy(&cqr->irb, irb, sizeof(*irb));
+		device = dasd_device_from_cdev_locked(cdev);
+		if (IS_ERR(device))
+			return;
+		/* ignore unsolicited interrupts for DIAG discipline */
+		if (device->discipline == dasd_diag_discipline_pointer) {
+			dasd_put_device(device);
+			return;
+		}
+
+		/*
+		 * In some cases 'File Protected' or 'No Record Found' errors
+		 * might be expected and debug log messages for the
+		 * corresponding interrupts shouldn't be written then.
+		 * Check if either of the according suppress bits is set.
+		 */
+		sense = dasd_get_sense(irb);
+		if (sense) {
+			fp_suppressed = (sense[1] & SNS1_FILE_PROTECTED) &&
+				test_bit(DASD_CQR_SUPPRESS_FP, &cqr->flags);
+			nrf_suppressed = (sense[1] & SNS1_NO_REC_FOUND) &&
+				test_bit(DASD_CQR_SUPPRESS_NRF, &cqr->flags);
+		}
+		if (!(fp_suppressed || nrf_suppressed))
+			device->discipline->dump_sense_dbf(device, irb, "int");
+
+		if (device->features & DASD_FEATURE_ERPLOG)
+			device->discipline->dump_sense(device, cqr, irb);
+		device->discipline->check_for_device_change(device, cqr, irb);
+		dasd_put_device(device);
+	}
+
+	/* check for attention message */
+	if (scsw_dstat(&irb->scsw) & DEV_STAT_ATTENTION) {
+		device = dasd_device_from_cdev_locked(cdev);
+		if (!IS_ERR(device)) {
+			device->discipline->check_attention(device,
+							    irb->esw.esw1.lpum);
+			dasd_put_device(device);
+		}
+	}
+
+	if (!cqr)
+		return;
+
+	device = (struct dasd_device *) cqr->startdev;
+	if (!device ||
+	    strncmp(device->discipline->ebcname, (char *) &cqr->magic, 4)) {
+		DBF_EVENT_DEVID(DBF_DEBUG, cdev, "%s",
+				"invalid device in request");
+		return;
+	}
+
+	/* Check for clear pending */
+	if (cqr->status == DASD_CQR_CLEAR_PENDING &&
+	    scsw_fctl(&irb->scsw) & SCSW_FCTL_CLEAR_FUNC) {
+		cqr->status = DASD_CQR_CLEARED;
+		dasd_device_clear_timer(device);
+		wake_up(&dasd_flush_wq);
+		dasd_schedule_device_bh(device);
+		return;
+	}
+
+	/* check status - the request might have been killed by dyn detach */
+	if (cqr->status != DASD_CQR_IN_IO) {
+		DBF_DEV_EVENT(DBF_DEBUG, device, "invalid status: bus_id %s, "
+			      "status %02x", dev_name(&cdev->dev), cqr->status);
+		return;
+	}
+
+	next = NULL;
+	expires = 0;
+	if (scsw_dstat(&irb->scsw) == (DEV_STAT_CHN_END | DEV_STAT_DEV_END) &&
+	    scsw_cstat(&irb->scsw) == 0) {
+		/* request was completed successfully */
+		cqr->status = DASD_CQR_SUCCESS;
+		cqr->stopclk = now;
+		/* Start first request on queue if possible -> fast_io. */
+		if (cqr->devlist.next != &device->ccw_queue) {
+			next = list_entry(cqr->devlist.next,
+					  struct dasd_ccw_req, devlist);
+		}
+	} else {  /* error */
+		/* check for HPF error
+		 * call discipline function to requeue all requests
+		 * and disable HPF accordingly
+		 */
+		if (cqr->cpmode && dasd_check_hpf_error(irb) &&
+		    device->discipline->handle_hpf_error)
+			device->discipline->handle_hpf_error(device, irb);
+		/*
+		 * If we don't want complex ERP for this request, then just
+		 * reset this and retry it in the fastpath
+		 */
+		if (!test_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags) &&
+		    cqr->retries > 0) {
+			if (cqr->lpm == dasd_path_get_opm(device))
+				DBF_DEV_EVENT(DBF_DEBUG, device,
+					      "default ERP in fastpath "
+					      "(%i retries left)",
+					      cqr->retries);
+			if (!test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags))
+				cqr->lpm = dasd_path_get_opm(device);
+			cqr->status = DASD_CQR_QUEUED;
+			next = cqr;
+		} else
+			cqr->status = DASD_CQR_ERROR;
+	}
+	if (next && (next->status == DASD_CQR_QUEUED) &&
+	    (!device->stopped)) {
+		if (device->discipline->start_IO(next) == 0)
+			expires = next->expires;
+	}
+	if (expires != 0)
+		dasd_device_set_timer(device, expires);
+	else
+		dasd_device_clear_timer(device);
+	dasd_schedule_device_bh(device);
+}
+EXPORT_SYMBOL(dasd_int_handler);
+
+enum uc_todo dasd_generic_uc_handler(struct ccw_device *cdev, struct irb *irb)
+{
+	struct dasd_device *device;
+
+	device = dasd_device_from_cdev_locked(cdev);
+
+	if (IS_ERR(device))
+		goto out;
+	if (test_bit(DASD_FLAG_OFFLINE, &device->flags) ||
+	    device->state != device->target ||
+	    !device->discipline->check_for_device_change) {
+		dasd_put_device(device);
+		goto out;
+	}
+	if (device->discipline->dump_sense_dbf)
+		device->discipline->dump_sense_dbf(device, irb, "uc");
+	device->discipline->check_for_device_change(device, NULL, irb);
+	dasd_put_device(device);
+out:
+	return UC_TODO_RETRY;
+}
+EXPORT_SYMBOL_GPL(dasd_generic_uc_handler);
+
+/*
+ * If we have an error on a dasd_block layer request then we cancel
+ * and return all further requests from the same dasd_block as well.
+ */
+static void __dasd_device_recovery(struct dasd_device *device,
+				   struct dasd_ccw_req *ref_cqr)
+{
+	struct list_head *l, *n;
+	struct dasd_ccw_req *cqr;
+
+	/*
+	 * only requeue request that came from the dasd_block layer
+	 */
+	if (!ref_cqr->block)
+		return;
+
+	list_for_each_safe(l, n, &device->ccw_queue) {
+		cqr = list_entry(l, struct dasd_ccw_req, devlist);
+		if (cqr->status == DASD_CQR_QUEUED &&
+		    ref_cqr->block == cqr->block) {
+			cqr->status = DASD_CQR_CLEARED;
+		}
+	}
+}
+
+/*
+ * Remove those ccw requests from the queue that need to be returned
+ * to the upper layer.
+ */
+static void __dasd_device_process_ccw_queue(struct dasd_device *device,
+					    struct list_head *final_queue)
+{
+	struct list_head *l, *n;
+	struct dasd_ccw_req *cqr;
+
+	/* Process request with final status. */
+	list_for_each_safe(l, n, &device->ccw_queue) {
+		cqr = list_entry(l, struct dasd_ccw_req, devlist);
+
+		/* Skip any non-final request. */
+		if (cqr->status == DASD_CQR_QUEUED ||
+		    cqr->status == DASD_CQR_IN_IO ||
+		    cqr->status == DASD_CQR_CLEAR_PENDING)
+			continue;
+		if (cqr->status == DASD_CQR_ERROR) {
+			__dasd_device_recovery(device, cqr);
+		}
+		/* Rechain finished requests to final queue */
+		list_move_tail(&cqr->devlist, final_queue);
+	}
+}
+
+static void __dasd_process_cqr(struct dasd_device *device,
+			       struct dasd_ccw_req *cqr)
+{
+	char errorstring[ERRORLENGTH];
+
+	switch (cqr->status) {
+	case DASD_CQR_SUCCESS:
+		cqr->status = DASD_CQR_DONE;
+		break;
+	case DASD_CQR_ERROR:
+		cqr->status = DASD_CQR_NEED_ERP;
+		break;
+	case DASD_CQR_CLEARED:
+		cqr->status = DASD_CQR_TERMINATED;
+		break;
+	default:
+		/* internal error 12 - wrong cqr status */
+		snprintf(errorstring, ERRORLENGTH, "12 %p %02x", cqr, cqr->status);
+		dev_err(&device->cdev->dev,
+			"An error occurred in the DASD device driver, "
+			"reason=%s\n", errorstring);
+		BUG();
+	}
+	if (cqr->callback)
+		cqr->callback(cqr, cqr->callback_data);
+}
+
+/*
+ * the cqrs from the final queue are returned to the upper layer
+ * by setting a dasd_block state and calling the callback function
+ */
+static void __dasd_device_process_final_queue(struct dasd_device *device,
+					      struct list_head *final_queue)
+{
+	struct list_head *l, *n;
+	struct dasd_ccw_req *cqr;
+	struct dasd_block *block;
+
+	list_for_each_safe(l, n, final_queue) {
+		cqr = list_entry(l, struct dasd_ccw_req, devlist);
+		list_del_init(&cqr->devlist);
+		block = cqr->block;
+		if (!block) {
+			__dasd_process_cqr(device, cqr);
+		} else {
+			spin_lock_bh(&block->queue_lock);
+			__dasd_process_cqr(device, cqr);
+			spin_unlock_bh(&block->queue_lock);
+		}
+	}
+}
+
+/*
+ * Take a look at the first request on the ccw queue and check
+ * if it reached its expire time. If so, terminate the IO.
+ */
+static void __dasd_device_check_expire(struct dasd_device *device)
+{
+	struct dasd_ccw_req *cqr;
+
+	if (list_empty(&device->ccw_queue))
+		return;
+	cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, devlist);
+	if ((cqr->status == DASD_CQR_IN_IO && cqr->expires != 0) &&
+	    (time_after_eq(jiffies, cqr->expires + cqr->starttime))) {
+		if (test_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) {
+			/*
+			 * IO in safe offline processing should not
+			 * run out of retries
+			 */
+			cqr->retries++;
+		}
+		if (device->discipline->term_IO(cqr) != 0) {
+			/* Hmpf, try again in 5 sec */
+			dev_err(&device->cdev->dev,
+				"cqr %p timed out (%lus) but cannot be "
+				"ended, retrying in 5 s\n",
+				cqr, (cqr->expires/HZ));
+			cqr->expires += 5*HZ;
+			dasd_device_set_timer(device, 5*HZ);
+		} else {
+			dev_err(&device->cdev->dev,
+				"cqr %p timed out (%lus), %i retries "
+				"remaining\n", cqr, (cqr->expires/HZ),
+				cqr->retries);
+		}
+	}
+}
+
+/*
+ * return 1 when device is not eligible for IO
+ */
+static int __dasd_device_is_unusable(struct dasd_device *device,
+				     struct dasd_ccw_req *cqr)
+{
+	int mask = ~(DASD_STOPPED_DC_WAIT | DASD_UNRESUMED_PM);
+
+	if (test_bit(DASD_FLAG_OFFLINE, &device->flags) &&
+	    !test_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) {
+		/*
+		 * The dasd is being set offline, and it is not a safe
+		 * offline, where we would have to allow I/O.
+		 */
+		return 1;
+	}
+	if (device->stopped) {
+		if (device->stopped & mask) {
+			/* stopped and CQR will not change that. */
+			return 1;
+		}
+		if (!test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags)) {
+			/* CQR is not able to change device to
+			 * operational. */
+			return 1;
+		}
+		/* CQR required to get device operational. */
+	}
+	return 0;
+}
+
+/*
+ * Take a look at the first request on the ccw queue and check
+ * if it needs to be started.
+ */
+static void __dasd_device_start_head(struct dasd_device *device)
+{
+	struct dasd_ccw_req *cqr;
+	int rc;
+
+	if (list_empty(&device->ccw_queue))
+		return;
+	cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, devlist);
+	if (cqr->status != DASD_CQR_QUEUED)
+		return;
+	/* if device is not usable return request to upper layer */
+	if (__dasd_device_is_unusable(device, cqr)) {
+		cqr->intrc = -EAGAIN;
+		cqr->status = DASD_CQR_CLEARED;
+		dasd_schedule_device_bh(device);
+		return;
+	}
+
+	rc = device->discipline->start_IO(cqr);
+	if (rc == 0)
+		dasd_device_set_timer(device, cqr->expires);
+	else if (rc == -EACCES) {
+		dasd_schedule_device_bh(device);
+	} else
+		/* Hmpf, try again in 1/2 sec */
+		dasd_device_set_timer(device, 50);
+}
+
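+/*
+ * Check if any paths are flagged for verification (tbvpm mask) and, if
+ * the device is not stopped for other reasons, let the discipline
+ * verify them; retry via the device timer if verification cannot run.
+ */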
+static void __dasd_device_check_path_events(struct dasd_device *device)
+{
+	int rc;
+
+	if (!dasd_path_get_tbvpm(device))
+		return;
+
+	if (device->stopped &
+	    ~(DASD_STOPPED_DC_WAIT | DASD_UNRESUMED_PM))
+		return;
+	rc = device->discipline->verify_path(device,
+					     dasd_path_get_tbvpm(device));
+	if (rc)
+		dasd_device_set_timer(device, 50);
+	else
+		dasd_path_clear_all_verify(device);
+}
+
+/*
+ * Go through all requests on the dasd_device request queue, terminate
+ * them on the cdev if necessary, and return them to the submitting
+ * layer via callback.
+ * Note:
+ * Make sure that all 'submitting layers' still exist when this
+ * function is called! In other words, when 'device' is a base device,
+ * all block layer requests must have been removed beforehand via
+ * dasd_flush_block_queue.
+ */
+int dasd_flush_device_queue(struct dasd_device *device)
+{
+	struct dasd_ccw_req *cqr, *n;
+	int rc;
+	struct list_head flush_queue;
+
+	INIT_LIST_HEAD(&flush_queue);
+	spin_lock_irq(get_ccwdev_lock(device->cdev));
+	rc = 0;
+	list_for_each_entry_safe(cqr, n, &device->ccw_queue, devlist) {
+		/* Check status and move request to flush_queue */
+		switch (cqr->status) {
+		case DASD_CQR_IN_IO:
+			rc = device->discipline->term_IO(cqr);
+			if (rc) {
+				/* unable to terminate request */
+				dev_err(&device->cdev->dev,
+					"Flushing the DASD request queue "
+					"failed for request %p\n", cqr);
+				/* stop flush processing */
+				goto finished;
+			}
+			break;
+		case DASD_CQR_QUEUED:
+			cqr->stopclk = get_tod_clock();
+			cqr->status = DASD_CQR_CLEARED;
+			break;
+		default: /* no need to modify the others */
+			break;
+		}
+		list_move_tail(&cqr->devlist, &flush_queue);
+	}
+finished:
+	spin_unlock_irq(get_ccwdev_lock(device->cdev));
+	/*
+	 * After this point all requests must be in state CLEAR_PENDING,
+	 * CLEARED, SUCCESS or ERROR. Now wait for CLEAR_PENDING to become
+	 * one of the others.
+	 */
+	list_for_each_entry_safe(cqr, n, &flush_queue, devlist)
+		wait_event(dasd_flush_wq,
+			   (cqr->status != DASD_CQR_CLEAR_PENDING));
+	/*
+	 * Now set each request back to TERMINATED, DONE or NEED_ERP
+	 * and call the callback function of flushed requests
+	 */
+	__dasd_device_process_final_queue(device, &flush_queue);
+	return rc;
+}
+EXPORT_SYMBOL_GPL(dasd_flush_device_queue);
+
+/*
+ * Acquire the device lock and process queues for the device.
+ */
+static void dasd_device_tasklet(unsigned long data)
+{
+	struct dasd_device *device = (struct dasd_device *) data;
+	struct list_head final_queue;
+
+	atomic_set(&device->tasklet_scheduled, 0);
+	INIT_LIST_HEAD(&final_queue);
+	spin_lock_irq(get_ccwdev_lock(device->cdev));
+	/* Check expire time of first request on the ccw queue. */
+	__dasd_device_check_expire(device);
+	/* find final requests on ccw queue */
+	__dasd_device_process_ccw_queue(device, &final_queue);
+	__dasd_device_check_path_events(device);
+	spin_unlock_irq(get_ccwdev_lock(device->cdev));
+	/* Now call the callback function of requests with final status */
+	__dasd_device_process_final_queue(device, &final_queue);
+	spin_lock_irq(get_ccwdev_lock(device->cdev));
+	/* Now check if the head of the ccw queue needs to be started. */
+	__dasd_device_start_head(device);
+	spin_unlock_irq(get_ccwdev_lock(device->cdev));
+	if (waitqueue_active(&shutdown_waitq))
+		wake_up(&shutdown_waitq);
+	dasd_put_device(device);
+}
+
+/*
+ * Schedule a call to dasd_device_tasklet over the device tasklet.
+ */
+void dasd_schedule_device_bh(struct dasd_device *device)
+{
+	/* Protect against rescheduling. */
+	if (atomic_cmpxchg(&device->tasklet_scheduled, 0, 1) != 0)
+		return;
+	dasd_get_device(device);
+	tasklet_hi_schedule(&device->tasklet);
+}
+EXPORT_SYMBOL(dasd_schedule_device_bh);
+
+void dasd_device_set_stop_bits(struct dasd_device *device, int bits)
+{
+	device->stopped |= bits;
+}
+EXPORT_SYMBOL_GPL(dasd_device_set_stop_bits);
+
+void dasd_device_remove_stop_bits(struct dasd_device *device, int bits)
+{
+	device->stopped &= ~bits;
+	if (!device->stopped)
+		wake_up(&generic_waitq);
+}
+EXPORT_SYMBOL_GPL(dasd_device_remove_stop_bits);
+
+/*
+ * Queue a request to the head of the device ccw_queue.
+ * Start the I/O if possible.
+ */
+void dasd_add_request_head(struct dasd_ccw_req *cqr)
+{
+	struct dasd_device *device;
+	unsigned long flags;
+
+	device = cqr->startdev;
+	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
+	cqr->status = DASD_CQR_QUEUED;
+	list_add(&cqr->devlist, &device->ccw_queue);
+	/* let the bh start the request to keep them in order */
+	dasd_schedule_device_bh(device);
+	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
+}
+EXPORT_SYMBOL(dasd_add_request_head);
+
+/*
+ * Queue a request to the tail of the device ccw_queue.
+ * Start the I/O if possible.
+ */
+void dasd_add_request_tail(struct dasd_ccw_req *cqr)
+{
+	struct dasd_device *device;
+	unsigned long flags;
+
+	device = cqr->startdev;
+	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
+	cqr->status = DASD_CQR_QUEUED;
+	list_add_tail(&cqr->devlist, &device->ccw_queue);
+	/* let the bh start the request to keep them in order */
+	dasd_schedule_device_bh(device);
+	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
+}
+EXPORT_SYMBOL(dasd_add_request_tail);
+
+/*
+ * Wakeup helper for the 'sleep_on' functions.
+ */
+void dasd_wakeup_cb(struct dasd_ccw_req *cqr, void *data)
+{
+	spin_lock_irq(get_ccwdev_lock(cqr->startdev->cdev));
+	cqr->callback_data = DASD_SLEEPON_END_TAG;
+	spin_unlock_irq(get_ccwdev_lock(cqr->startdev->cdev));
+	wake_up(&generic_waitq);
+}
+EXPORT_SYMBOL_GPL(dasd_wakeup_cb);
+
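+/*
+ * Test under the ccw device lock whether a sleep_on request has reached
+ * its final state, i.e. dasd_wakeup_cb has set its callback_data to
+ * DASD_SLEEPON_END_TAG.
+ */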
+static inline int _wait_for_wakeup(struct dasd_ccw_req *cqr)
+{
+	struct dasd_device *device;
+	int rc;
+
+	device = cqr->startdev;
+	spin_lock_irq(get_ccwdev_lock(device->cdev));
+	rc = (cqr->callback_data == DASD_SLEEPON_END_TAG);
+	spin_unlock_irq(get_ccwdev_lock(device->cdev));
+	return rc;
+}
+
+/*
+ * Check if error recovery is necessary; return 1 if yes, 0 otherwise.
+ */
+static int __dasd_sleep_on_erp(struct dasd_ccw_req *cqr)
+{
+	struct dasd_device *device;
+	dasd_erp_fn_t erp_fn;
+
+	if (cqr->status == DASD_CQR_FILLED)
+		return 0;
+	device = cqr->startdev;
+	if (test_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags)) {
+		if (cqr->status == DASD_CQR_TERMINATED) {
+			device->discipline->handle_terminated_request(cqr);
+			return 1;
+		}
+		if (cqr->status == DASD_CQR_NEED_ERP) {
+			erp_fn = device->discipline->erp_action(cqr);
+			erp_fn(cqr);
+			return 1;
+		}
+		if (cqr->status == DASD_CQR_FAILED)
+			dasd_log_sense(cqr, &cqr->irb);
+		if (cqr->refers) {
+			__dasd_process_erp(device, cqr);
+			return 1;
+		}
+	}
+	return 0;
+}
+
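+/*
+ * Loop condition for the sleep_on functions: keep going as long as the
+ * request, or the ERP chain spawned for it, has not reached a final
+ * state.
+ */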
+static int __dasd_sleep_on_loop_condition(struct dasd_ccw_req *cqr)
+{
+	if (test_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags)) {
+		if (cqr->refers) /* erp is not done yet */
+			return 1;
+		return ((cqr->status != DASD_CQR_DONE) &&
+			(cqr->status != DASD_CQR_FAILED));
+	} else
+		return (cqr->status == DASD_CQR_FILLED);
+}
+
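+/*
+ * Start a request on the device queue and wait for its completion,
+ * driving any error recovery requests spawned for it. With
+ * 'interruptible' set, a signal cancels the request and the function
+ * waits for its final state before returning.
+ */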
+static int _dasd_sleep_on(struct dasd_ccw_req *maincqr, int interruptible)
+{
+	struct dasd_device *device;
+	int rc;
+	struct list_head ccw_queue;
+	struct dasd_ccw_req *cqr;
+
+	INIT_LIST_HEAD(&ccw_queue);
+	maincqr->status = DASD_CQR_FILLED;
+	device = maincqr->startdev;
+	list_add(&maincqr->blocklist, &ccw_queue);
+	for (cqr = maincqr;  __dasd_sleep_on_loop_condition(cqr);
+	     cqr = list_first_entry(&ccw_queue,
+				    struct dasd_ccw_req, blocklist)) {
+
+		if (__dasd_sleep_on_erp(cqr))
+			continue;
+		if (cqr->status != DASD_CQR_FILLED) /* could be failed */
+			continue;
+		if (test_bit(DASD_FLAG_LOCK_STOLEN, &device->flags) &&
+		    !test_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags)) {
+			cqr->status = DASD_CQR_FAILED;
+			cqr->intrc = -EPERM;
+			continue;
+		}
+		/* Non-temporary stop condition will trigger fail fast */
+		if (device->stopped & ~DASD_STOPPED_PENDING &&
+		    test_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags) &&
+		    (!dasd_eer_enabled(device))) {
+			cqr->status = DASD_CQR_FAILED;
+			cqr->intrc = -ENOLINK;
+			continue;
+		}
+		/*
+		 * Don't try to start requests if device is in
+		 * offline processing, it might wait forever
+		 */
+		if (test_bit(DASD_FLAG_OFFLINE, &device->flags)) {
+			cqr->status = DASD_CQR_FAILED;
+			cqr->intrc = -ENODEV;
+			continue;
+		}
+		/*
+		 * Don't try to start requests if device is stopped
+		 * except path verification requests
+		 */
+		if (!test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags)) {
+			if (interruptible) {
+				rc = wait_event_interruptible(
+					generic_waitq, !(device->stopped));
+				if (rc == -ERESTARTSYS) {
+					cqr->status = DASD_CQR_FAILED;
+					maincqr->intrc = rc;
+					continue;
+				}
+			} else
+				wait_event(generic_waitq, !(device->stopped));
+		}
+		if (!cqr->callback)
+			cqr->callback = dasd_wakeup_cb;
+
+		cqr->callback_data = DASD_SLEEPON_START_TAG;
+		dasd_add_request_tail(cqr);
+		if (interruptible) {
+			rc = wait_event_interruptible(
+				generic_waitq, _wait_for_wakeup(cqr));
+			if (rc == -ERESTARTSYS) {
+				dasd_cancel_req(cqr);
+				/* wait (non-interruptible) for final status */
+				wait_event(generic_waitq,
+					   _wait_for_wakeup(cqr));
+				cqr->status = DASD_CQR_FAILED;
+				maincqr->intrc = rc;
+				continue;
+			}
+		} else
+			wait_event(generic_waitq, _wait_for_wakeup(cqr));
+	}
+
+	maincqr->endclk = get_tod_clock();
+	if ((maincqr->status != DASD_CQR_DONE) &&
+	    (maincqr->intrc != -ERESTARTSYS))
+		dasd_log_sense(maincqr, &maincqr->irb);
+	if (maincqr->status == DASD_CQR_DONE)
+		rc = 0;
+	else if (maincqr->intrc)
+		rc = maincqr->intrc;
+	else
+		rc = -EIO;
+	return rc;
+}
+
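+/*
+ * Test whether all requests on the given queue have completed, i.e.
+ * every callback_data carries the DASD_SLEEPON_END_TAG.
+ */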
+static inline int _wait_for_wakeup_queue(struct list_head *ccw_queue)
+{
+	struct dasd_ccw_req *cqr;
+
+	list_for_each_entry(cqr, ccw_queue, blocklist) {
+		if (cqr->callback_data != DASD_SLEEPON_END_TAG)
+			return 0;
+	}
+
+	return 1;
+}
+
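+/*
+ * Start all requests on the given list and wait for their completion.
+ * Error recovery is driven in place; for alias devices -EAGAIN is
+ * returned so the caller can retry on the base device.
+ */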
+static int _dasd_sleep_on_queue(struct list_head *ccw_queue, int interruptible)
+{
+	struct dasd_device *device;
+	struct dasd_ccw_req *cqr, *n;
+	u8 *sense = NULL;
+	int rc;
+
+retry:
+	list_for_each_entry_safe(cqr, n, ccw_queue, blocklist) {
+		device = cqr->startdev;
+		if (cqr->status != DASD_CQR_FILLED) /* could be failed */
+			continue;
+
+		if (test_bit(DASD_FLAG_LOCK_STOLEN, &device->flags) &&
+		    !test_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags)) {
+			cqr->status = DASD_CQR_FAILED;
+			cqr->intrc = -EPERM;
+			continue;
+		}
+		/* Non-temporary stop condition will trigger fail fast */
+		if (device->stopped & ~DASD_STOPPED_PENDING &&
+		    test_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags) &&
+		    !dasd_eer_enabled(device)) {
+			cqr->status = DASD_CQR_FAILED;
+			cqr->intrc = -EAGAIN;
+			continue;
+		}
+
+		/* Don't try to start requests if device is stopped */
+		if (interruptible) {
+			rc = wait_event_interruptible(
+				generic_waitq, !device->stopped);
+			if (rc == -ERESTARTSYS) {
+				cqr->status = DASD_CQR_FAILED;
+				cqr->intrc = rc;
+				continue;
+			}
+		} else
+			wait_event(generic_waitq, !(device->stopped));
+
+		if (!cqr->callback)
+			cqr->callback = dasd_wakeup_cb;
+		cqr->callback_data = DASD_SLEEPON_START_TAG;
+		dasd_add_request_tail(cqr);
+	}
+
+	wait_event(generic_waitq, _wait_for_wakeup_queue(ccw_queue));
+
+	rc = 0;
+	list_for_each_entry_safe(cqr, n, ccw_queue, blocklist) {
+		/*
+		 * In some cases the 'File Protected' or 'Incorrect Length'
+		 * error might be expected and error recovery would be
+		 * unnecessary.  Check if the corresponding suppress bit is
+		 * set.
+		 */
+		sense = dasd_get_sense(&cqr->irb);
+		if (sense && sense[1] & SNS1_FILE_PROTECTED &&
+		    test_bit(DASD_CQR_SUPPRESS_FP, &cqr->flags))
+			continue;
+		if (scsw_cstat(&cqr->irb.scsw) == 0x40 &&
+		    test_bit(DASD_CQR_SUPPRESS_IL, &cqr->flags))
+			continue;
+
+		/*
+		 * For alias devices simplify error recovery and return
+		 * the request to the upper layer; do not skip ERP
+		 * requests.
+		 */
+		if (cqr->startdev != cqr->basedev && !cqr->refers &&
+		    (cqr->status == DASD_CQR_TERMINATED ||
+		     cqr->status == DASD_CQR_NEED_ERP))
+			return -EAGAIN;
+
+		/* normal recovery for basedev IO */
+		if (__dasd_sleep_on_erp(cqr))
+			/* handle erp first */
+			goto retry;
+	}
+
+	return 0;
+}
+
+/*
+ * Queue a request to the tail of the device ccw_queue and wait for
+ * its completion.
+ */
+int dasd_sleep_on(struct dasd_ccw_req *cqr)
+{
+	return _dasd_sleep_on(cqr, 0);
+}
+EXPORT_SYMBOL(dasd_sleep_on);
+
+/*
+ * Start requests from a ccw_queue and wait for their completion.
+ */
+int dasd_sleep_on_queue(struct list_head *ccw_queue)
+{
+	return _dasd_sleep_on_queue(ccw_queue, 0);
+}
+EXPORT_SYMBOL(dasd_sleep_on_queue);
+
+/*
+ * Queue a request to the tail of the device ccw_queue and wait
+ * interruptible for its completion.
+ */
+int dasd_sleep_on_interruptible(struct dasd_ccw_req *cqr)
+{
+	return _dasd_sleep_on(cqr, 1);
+}
+EXPORT_SYMBOL(dasd_sleep_on_interruptible);
+
+/*
+ * Whoa nelly, now it gets really hairy. For some functions (e.g. steal lock
+ * for eckd devices) the currently running request has to be terminated
+ * and put back to status queued, before the special request is added
+ * to the head of the queue. Then the special request is waited on normally.
+ */
+static inline int _dasd_term_running_cqr(struct dasd_device *device)
+{
+	struct dasd_ccw_req *cqr;
+	int rc;
+
+	if (list_empty(&device->ccw_queue))
+		return 0;
+	cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, devlist);
+	rc = device->discipline->term_IO(cqr);
+	if (!rc)
+		/*
+		 * CQR terminated because a more important request is pending.
+		 * Undo decreasing of retry counter because this is
+		 * not an error case.
+		 */
+		cqr->retries++;
+	return rc;
+}
+
+int dasd_sleep_on_immediatly(struct dasd_ccw_req *cqr)
+{
+	struct dasd_device *device;
+	int rc;
+
+	device = cqr->startdev;
+	if (test_bit(DASD_FLAG_LOCK_STOLEN, &device->flags) &&
+	    !test_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags)) {
+		cqr->status = DASD_CQR_FAILED;
+		cqr->intrc = -EPERM;
+		return -EIO;
+	}
+	spin_lock_irq(get_ccwdev_lock(device->cdev));
+	rc = _dasd_term_running_cqr(device);
+	if (rc) {
+		spin_unlock_irq(get_ccwdev_lock(device->cdev));
+		return rc;
+	}
+	cqr->callback = dasd_wakeup_cb;
+	cqr->callback_data = DASD_SLEEPON_START_TAG;
+	cqr->status = DASD_CQR_QUEUED;
+	/*
+	 * Add the new request as second on the queue; first the
+	 * terminated cqr needs to be finished.
+	 */
+	list_add(&cqr->devlist, device->ccw_queue.next);
+
+	/* let the bh start the request to keep them in order */
+	dasd_schedule_device_bh(device);
+
+	spin_unlock_irq(get_ccwdev_lock(device->cdev));
+
+	wait_event(generic_waitq, _wait_for_wakeup(cqr));
+
+	if (cqr->status == DASD_CQR_DONE)
+		rc = 0;
+	else if (cqr->intrc)
+		rc = cqr->intrc;
+	else
+		rc = -EIO;
+
+	/* kick tasklets */
+	dasd_schedule_device_bh(device);
+	if (device->block)
+		dasd_schedule_block_bh(device->block);
+
+	return rc;
+}
+EXPORT_SYMBOL(dasd_sleep_on_immediatly);
+
+/*
+ * Cancel a request that was started with one of the dasd_sleep_on*
+ * functions. This is useful for timing out requests. The request will
+ * be terminated if it is currently in I/O.
+ * Returns 0 if request termination was successful
+ *	   negative error code if termination failed
+ * Cancellation of a request is an asynchronous operation! The calling
+ * function has to wait until the request is properly returned via callback.
+ */
+static int __dasd_cancel_req(struct dasd_ccw_req *cqr)
+{
+	struct dasd_device *device = cqr->startdev;
+	int rc = 0;
+
+	switch (cqr->status) {
+	case DASD_CQR_QUEUED:
+		/* request was not started - just set to cleared */
+		cqr->status = DASD_CQR_CLEARED;
+		break;
+	case DASD_CQR_IN_IO:
+		/* request in IO - terminate IO and release again */
+		rc = device->discipline->term_IO(cqr);
+		if (rc) {
+			dev_err(&device->cdev->dev,
+				"Cancelling request %p failed with rc=%d\n",
+				cqr, rc);
+		} else {
+			cqr->stopclk = get_tod_clock();
+		}
+		break;
+	default: /* already finished or clear pending - do nothing */
+		break;
+	}
+	dasd_schedule_device_bh(device);
+	return rc;
+}
+
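+/*
+ * Locked wrapper around __dasd_cancel_req.
+ */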
+int dasd_cancel_req(struct dasd_ccw_req *cqr)
+{
+	struct dasd_device *device = cqr->startdev;
+	unsigned long flags;
+	int rc;
+
+	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
+	rc = __dasd_cancel_req(cqr);
+	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
+	return rc;
+}
+
+/*
+ * SECTION: Operations of the dasd_block layer.
+ */
+
+/*
+ * Timeout function for dasd_block. This is used when the block layer
+ * is waiting for something that may not come reliably (e.g. a state
+ * change interrupt).
+ */
+static void dasd_block_timeout(struct timer_list *t)
+{
+	unsigned long flags;
+	struct dasd_block *block;
+
+	block = from_timer(block, t, timer);
+	spin_lock_irqsave(get_ccwdev_lock(block->base->cdev), flags);
+	/* re-activate request queue */
+	dasd_device_remove_stop_bits(block->base, DASD_STOPPED_PENDING);
+	spin_unlock_irqrestore(get_ccwdev_lock(block->base->cdev), flags);
+	dasd_schedule_block_bh(block);
+	blk_mq_run_hw_queues(block->request_queue, true);
+}
+
+/*
+ * Setup timeout for a dasd_block in jiffies.
+ */
+void dasd_block_set_timer(struct dasd_block *block, int expires)
+{
+	if (expires == 0)
+		del_timer(&block->timer);
+	else
+		mod_timer(&block->timer, jiffies + expires);
+}
+EXPORT_SYMBOL(dasd_block_set_timer);
+
+/*
+ * Clear timeout for a dasd_block.
+ */
+void dasd_block_clear_timer(struct dasd_block *block)
+{
+	del_timer(&block->timer);
+}
+EXPORT_SYMBOL(dasd_block_clear_timer);
+
+/*
+ * Process finished error recovery ccw.
+ */
+static void __dasd_process_erp(struct dasd_device *device,
+			       struct dasd_ccw_req *cqr)
+{
+	dasd_erp_fn_t erp_fn;
+
+	if (cqr->status == DASD_CQR_DONE)
+		DBF_DEV_EVENT(DBF_NOTICE, device, "%s", "ERP successful");
+	else
+		dev_err(&device->cdev->dev, "ERP failed for the DASD\n");
+	erp_fn = device->discipline->erp_postaction(cqr);
+	erp_fn(cqr);
+}
+
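+/*
+ * Finish a block layer request: free the ccw program, map the internal
+ * return code to a blk_status_t and complete (or, on error, directly
+ * end) the request.
+ */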
+static void __dasd_cleanup_cqr(struct dasd_ccw_req *cqr)
+{
+	struct request *req;
+	blk_status_t error = BLK_STS_OK;
+	int status;
+
+	req = (struct request *) cqr->callback_data;
+	dasd_profile_end(cqr->block, cqr, req);
+
+	status = cqr->block->base->discipline->free_cp(cqr, req);
+	if (status < 0)
+		error = errno_to_blk_status(status);
+	else if (status == 0) {
+		switch (cqr->intrc) {
+		case -EPERM:
+			error = BLK_STS_NEXUS;
+			break;
+		case -ENOLINK:
+			error = BLK_STS_TRANSPORT;
+			break;
+		case -ETIMEDOUT:
+			error = BLK_STS_TIMEOUT;
+			break;
+		default:
+			error = BLK_STS_IOERR;
+			break;
+		}
+	}
+
+	/*
+	 * We need to take care of ETIMEDOUT errors here since the
+	 * complete callback does not get called in this case.
+	 * Take care of all errors here and avoid additional code to
+	 * transfer the error value to the complete callback.
+	 */
+	if (error) {
+		blk_mq_end_request(req, error);
+		blk_mq_run_hw_queues(req->q, true);
+	} else {
+		blk_mq_complete_request(req);
+	}
+}
+
+/*
+ * Process ccw request queue.
+ */
+static void __dasd_process_block_ccw_queue(struct dasd_block *block,
+					   struct list_head *final_queue)
+{
+	struct list_head *l, *n;
+	struct dasd_ccw_req *cqr;
+	dasd_erp_fn_t erp_fn;
+	unsigned long flags;
+	struct dasd_device *base = block->base;
+
+restart:
+	/* Process request with final status. */
+	list_for_each_safe(l, n, &block->ccw_queue) {
+		cqr = list_entry(l, struct dasd_ccw_req, blocklist);
+		if (cqr->status != DASD_CQR_DONE &&
+		    cqr->status != DASD_CQR_FAILED &&
+		    cqr->status != DASD_CQR_NEED_ERP &&
+		    cqr->status != DASD_CQR_TERMINATED)
+			continue;
+
+		if (cqr->status == DASD_CQR_TERMINATED) {
+			base->discipline->handle_terminated_request(cqr);
+			goto restart;
+		}
+
+		/* Process requests that may be recovered */
+		if (cqr->status == DASD_CQR_NEED_ERP) {
+			erp_fn = base->discipline->erp_action(cqr);
+			if (IS_ERR(erp_fn(cqr)))
+				continue;
+			goto restart;
+		}
+
+		/* log sense for fatal error */
+		if (cqr->status == DASD_CQR_FAILED) {
+			dasd_log_sense(cqr, &cqr->irb);
+		}
+
+		/* First of all call extended error reporting. */
+		if (dasd_eer_enabled(base) &&
+		    cqr->status == DASD_CQR_FAILED) {
+			dasd_eer_write(base, cqr, DASD_EER_FATALERROR);
+
+			/* restart request */
+			cqr->status = DASD_CQR_FILLED;
+			cqr->retries = 255;
+			spin_lock_irqsave(get_ccwdev_lock(base->cdev), flags);
+			dasd_device_set_stop_bits(base, DASD_STOPPED_QUIESCE);
+			spin_unlock_irqrestore(get_ccwdev_lock(base->cdev),
+					       flags);
+			goto restart;
+		}
+
+		/* Process finished ERP request. */
+		if (cqr->refers) {
+			__dasd_process_erp(base, cqr);
+			goto restart;
+		}
+
+		/* Rechain finished requests to final queue */
+		cqr->endclk = get_tod_clock();
+		list_move_tail(&cqr->blocklist, final_queue);
+	}
+}
+
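+/*
+ * Default callback for block layer requests: once the dasd_device layer
+ * is done with a cqr, kick the block tasklet to post-process it.
+ */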
+static void dasd_return_cqr_cb(struct dasd_ccw_req *cqr, void *data)
+{
+	dasd_schedule_block_bh(cqr->block);
+}
+
+static void __dasd_block_start_head(struct dasd_block *block)
+{
+	struct dasd_ccw_req *cqr;
+
+	if (list_empty(&block->ccw_queue))
+		return;
+	/* We always begin with the first request on the queue, as some
+	 * of the previously started requests have to be enqueued on a
+	 * dasd_device again for error recovery.
+	 */
+	list_for_each_entry(cqr, &block->ccw_queue, blocklist) {
+		if (cqr->status != DASD_CQR_FILLED)
+			continue;
+		if (test_bit(DASD_FLAG_LOCK_STOLEN, &block->base->flags) &&
+		    !test_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags)) {
+			cqr->status = DASD_CQR_FAILED;
+			cqr->intrc = -EPERM;
+			dasd_schedule_block_bh(block);
+			continue;
+		}
+		/* Non-temporary stop condition will trigger fail fast */
+		if (block->base->stopped & ~DASD_STOPPED_PENDING &&
+		    test_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags) &&
+		    (!dasd_eer_enabled(block->base))) {
+			cqr->status = DASD_CQR_FAILED;
+			cqr->intrc = -ENOLINK;
+			dasd_schedule_block_bh(block);
+			continue;
+		}
+		/* Don't try to start requests if device is stopped */
+		if (block->base->stopped)
+			return;
+
+		/* just a fail safe check, should not happen */
+		if (!cqr->startdev)
+			cqr->startdev = block->base;
+
+		/* make sure that the requests we submit find their way back */
+		cqr->callback = dasd_return_cqr_cb;
+
+		dasd_add_request_tail(cqr);
+	}
+}
+
+/*
+ * Central dasd_block layer routine. Takes requests from the generic
+ * block layer request queue, creates ccw requests, enqueues them on
+ * a dasd_device and processes ccw requests that have been returned.
+ */
+static void dasd_block_tasklet(unsigned long data)
+{
+	struct dasd_block *block = (struct dasd_block *) data;
+	struct list_head final_queue;
+	struct list_head *l, *n;
+	struct dasd_ccw_req *cqr;
+	struct dasd_queue *dq;
+
+	atomic_set(&block->tasklet_scheduled, 0);
+	INIT_LIST_HEAD(&final_queue);
+	spin_lock_irq(&block->queue_lock);
+	/* Finish off requests on ccw queue */
+	__dasd_process_block_ccw_queue(block, &final_queue);
+	spin_unlock_irq(&block->queue_lock);
+
+	/* Now call the callback function of requests with final status */
+	list_for_each_safe(l, n, &final_queue) {
+		cqr = list_entry(l, struct dasd_ccw_req, blocklist);
+		dq = cqr->dq;
+		spin_lock_irq(&dq->lock);
+		list_del_init(&cqr->blocklist);
+		__dasd_cleanup_cqr(cqr);
+		spin_unlock_irq(&dq->lock);
+	}
+
+	spin_lock_irq(&block->queue_lock);
+	/* Now check if the head of the ccw queue needs to be started. */
+	__dasd_block_start_head(block);
+	spin_unlock_irq(&block->queue_lock);
+
+	if (waitqueue_active(&shutdown_waitq))
+		wake_up(&shutdown_waitq);
+	dasd_put_device(block->base);
+}
+
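+/*
+ * Callback used while flushing the block queue; wakes up the waiters
+ * on dasd_flush_wq that poll for the request status.
+ */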
+static void _dasd_wake_block_flush_cb(struct dasd_ccw_req *cqr, void *data)
+{
+	wake_up(&dasd_flush_wq);
+}
+
+/*
+ * Requeue a request back to the block layer request queue;
+ * this only works for block layer requests.
+ */
+static int _dasd_requeue_request(struct dasd_ccw_req *cqr)
+{
+	struct dasd_block *block = cqr->block;
+	struct request *req;
+
+	if (!block)
+		return -EINVAL;
+	spin_lock_irq(&cqr->dq->lock);
+	req = (struct request *) cqr->callback_data;
+	blk_mq_requeue_request(req, false);
+	spin_unlock_irq(&cqr->dq->lock);
+
+	return 0;
+}
+
+/*
+ * Go through all requests on the dasd_block request queue, cancel them
+ * on the respective dasd_device, and return them to the generic
+ * block layer.
+ */
+static int dasd_flush_block_queue(struct dasd_block *block)
+{
+	struct dasd_ccw_req *cqr, *n;
+	int rc, i;
+	struct list_head flush_queue;
+	unsigned long flags;
+
+	INIT_LIST_HEAD(&flush_queue);
+	spin_lock_bh(&block->queue_lock);
+	rc = 0;
+restart:
+	list_for_each_entry_safe(cqr, n, &block->ccw_queue, blocklist) {
+		/* if this request is currently owned by a dasd_device, cancel it */
+		if (cqr->status >= DASD_CQR_QUEUED)
+			rc = dasd_cancel_req(cqr);
+		if (rc < 0)
+			break;
+		/* Rechain request (including erp chain) so it won't be
+		 * touched by the dasd_block_tasklet anymore.
+		 * Replace the callback so we notice when the request
+		 * is returned from the dasd_device layer.
+		 */
+		cqr->callback = _dasd_wake_block_flush_cb;
+		for (i = 0; cqr != NULL; cqr = cqr->refers, i++)
+			list_move_tail(&cqr->blocklist, &flush_queue);
+		if (i > 1)
+			/* moved more than one request - need to restart */
+			goto restart;
+	}
+	spin_unlock_bh(&block->queue_lock);
+	/* Now call the callback function of flushed requests */
+restart_cb:
+	list_for_each_entry_safe(cqr, n, &flush_queue, blocklist) {
+		wait_event(dasd_flush_wq, (cqr->status < DASD_CQR_QUEUED));
+		/* Process finished ERP request. */
+		if (cqr->refers) {
+			spin_lock_bh(&block->queue_lock);
+			__dasd_process_erp(block->base, cqr);
+			spin_unlock_bh(&block->queue_lock);
+			/* restart list_for_xx loop since dasd_process_erp
+			 * might remove multiple elements */
+			goto restart_cb;
+		}
+		/* call the callback function */
+		spin_lock_irqsave(&cqr->dq->lock, flags);
+		cqr->endclk = get_tod_clock();
+		list_del_init(&cqr->blocklist);
+		__dasd_cleanup_cqr(cqr);
+		spin_unlock_irqrestore(&cqr->dq->lock, flags);
+	}
+	return rc;
+}
+
+/*
+ * Schedule a call to dasd_block_tasklet over the block tasklet.
+ */
+void dasd_schedule_block_bh(struct dasd_block *block)
+{
+	/* Protect against rescheduling. */
+	if (atomic_cmpxchg(&block->tasklet_scheduled, 0, 1) != 0)
+		return;
+	/* life cycle of block is bound to its base device */
+	dasd_get_device(block->base);
+	tasklet_hi_schedule(&block->tasklet);
+}
+EXPORT_SYMBOL(dasd_schedule_block_bh);
+
+
+/*
+ * SECTION: external block device operations
+ * (request queue handling, open, release, etc.)
+ */
+
+/*
+ * DASD request queue function. Called from the block layer (blk-mq).
+ */
+static blk_status_t do_dasd_request(struct blk_mq_hw_ctx *hctx,
+				    const struct blk_mq_queue_data *qd)
+{
+	struct dasd_block *block = hctx->queue->queuedata;
+	struct dasd_queue *dq = hctx->driver_data;
+	struct request *req = qd->rq;
+	struct dasd_device *basedev;
+	struct dasd_ccw_req *cqr;
+	blk_status_t rc = BLK_STS_OK;
+
+	basedev = block->base;
+	spin_lock_irq(&dq->lock);
+	if (basedev->state < DASD_STATE_READY) {
+		DBF_DEV_EVENT(DBF_ERR, basedev,
+			      "device not ready for request %p", req);
+		rc = BLK_STS_IOERR;
+		goto out;
+	}
+
+	/*
+	 * If the device is stopped, do not fetch new requests,
+	 * unless failfast is active, which will let requests fail
+	 * immediately in __dasd_block_start_head().
+	 */
+	if (basedev->stopped && !(basedev->features & DASD_FEATURE_FAILFAST)) {
+		DBF_DEV_EVENT(DBF_ERR, basedev,
+			      "device stopped request %p", req);
+		rc = BLK_STS_RESOURCE;
+		goto out;
+	}
+
+	if (basedev->features & DASD_FEATURE_READONLY &&
+	    rq_data_dir(req) == WRITE) {
+		DBF_DEV_EVENT(DBF_ERR, basedev,
+			      "Rejecting write request %p", req);
+		rc = BLK_STS_IOERR;
+		goto out;
+	}
+
+	if (test_bit(DASD_FLAG_ABORTALL, &basedev->flags) &&
+	    (basedev->features & DASD_FEATURE_FAILFAST ||
+	     blk_noretry_request(req))) {
+		DBF_DEV_EVENT(DBF_ERR, basedev,
+			      "Rejecting failfast request %p", req);
+		rc = BLK_STS_IOERR;
+		goto out;
+	}
+
+	cqr = basedev->discipline->build_cp(basedev, block, req);
+	if (IS_ERR(cqr)) {
+		if (PTR_ERR(cqr) == -EBUSY ||
+		    PTR_ERR(cqr) == -ENOMEM ||
+		    PTR_ERR(cqr) == -EAGAIN) {
+			rc = BLK_STS_RESOURCE;
+			goto out;
+		}
+		DBF_DEV_EVENT(DBF_ERR, basedev,
+			      "CCW creation failed (rc=%ld) on request %p",
+			      PTR_ERR(cqr), req);
+		rc = BLK_STS_IOERR;
+		goto out;
+	}
+	/*
+	 * Note: callback is set to dasd_return_cqr_cb in
+	 * __dasd_block_start_head to cover erp requests as well.
+	 */
+	cqr->callback_data = req;
+	cqr->status = DASD_CQR_FILLED;
+	cqr->dq = dq;
+
+	blk_mq_start_request(req);
+	spin_lock(&block->queue_lock);
+	list_add_tail(&cqr->blocklist, &block->ccw_queue);
+	INIT_LIST_HEAD(&cqr->devlist);
+	dasd_profile_start(block, cqr, req);
+	dasd_schedule_block_bh(block);
+	spin_unlock(&block->queue_lock);
+
+out:
+	spin_unlock_irq(&dq->lock);
+	return rc;
+}
+
+/*
+ * Block timeout callback, called from the block layer
+ *
+ * Return values:
+ * BLK_EH_RESET_TIMER if the request should be left running
+ * BLK_EH_DONE if the request is handled or terminated
+ *		      by the driver.
+ */
+enum blk_eh_timer_return dasd_times_out(struct request *req, bool reserved)
+{
+	struct dasd_block *block = req->q->queuedata;
+	struct dasd_device *device;
+	struct dasd_ccw_req *cqr;
+	unsigned long flags;
+	int rc = 0;
+
+	cqr = blk_mq_rq_to_pdu(req);
+	if (!cqr)
+		return BLK_EH_DONE;
+
+	spin_lock_irqsave(&cqr->dq->lock, flags);
+	device = cqr->startdev ? cqr->startdev : block->base;
+	if (!device->blk_timeout) {
+		spin_unlock_irqrestore(&cqr->dq->lock, flags);
+		return BLK_EH_RESET_TIMER;
+	}
+	DBF_DEV_EVENT(DBF_WARNING, device,
+		      " dasd_times_out cqr %p status %x",
+		      cqr, cqr->status);
+
+	spin_lock(&block->queue_lock);
+	spin_lock(get_ccwdev_lock(device->cdev));
+	cqr->retries = -1;
+	cqr->intrc = -ETIMEDOUT;
+	if (cqr->status >= DASD_CQR_QUEUED) {
+		rc = __dasd_cancel_req(cqr);
+	} else if (cqr->status == DASD_CQR_FILLED ||
+		   cqr->status == DASD_CQR_NEED_ERP) {
+		cqr->status = DASD_CQR_TERMINATED;
+	} else if (cqr->status == DASD_CQR_IN_ERP) {
+		struct dasd_ccw_req *searchcqr, *nextcqr, *tmpcqr;
+
+		list_for_each_entry_safe(searchcqr, nextcqr,
+					 &block->ccw_queue, blocklist) {
+			tmpcqr = searchcqr;
+			while (tmpcqr->refers)
+				tmpcqr = tmpcqr->refers;
+			if (tmpcqr != cqr)
+				continue;
+			/* searchcqr is an ERP request for cqr */
+			searchcqr->retries = -1;
+			searchcqr->intrc = -ETIMEDOUT;
+			if (searchcqr->status >= DASD_CQR_QUEUED) {
+				rc = __dasd_cancel_req(searchcqr);
+			} else if ((searchcqr->status == DASD_CQR_FILLED) ||
+				   (searchcqr->status == DASD_CQR_NEED_ERP)) {
+				searchcqr->status = DASD_CQR_TERMINATED;
+				rc = 0;
+			} else if (searchcqr->status == DASD_CQR_IN_ERP) {
+				/*
+				 * Shouldn't happen; most recent ERP
+				 * request is at the front of the queue.
+				 */
+				continue;
+			}
+			break;
+		}
+	}
+	spin_unlock(get_ccwdev_lock(device->cdev));
+	dasd_schedule_block_bh(block);
+	spin_unlock(&block->queue_lock);
+	spin_unlock_irqrestore(&cqr->dq->lock, flags);
+
+	return rc ? BLK_EH_RESET_TIMER : BLK_EH_DONE;
+}
+
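+/*
+ * Allocate the per-hardware-queue dasd_queue structure that carries the
+ * lock serializing request submission on this hctx.
+ */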
+static int dasd_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
+			  unsigned int idx)
+{
+	struct dasd_queue *dq = kzalloc(sizeof(*dq), GFP_KERNEL);
+
+	if (!dq)
+		return -ENOMEM;
+
+	spin_lock_init(&dq->lock);
+	hctx->driver_data = dq;
+
+	return 0;
+}
+
+static void dasd_exit_hctx(struct blk_mq_hw_ctx *hctx, unsigned int idx)
+{
+	kfree(hctx->driver_data);
+	hctx->driver_data = NULL;
+}
+
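+/*
+ * blk-mq completion callback: end the request and rerun the hardware
+ * queues, since a finished request may unblock queued work.
+ */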
+static void dasd_request_done(struct request *req)
+{
+	blk_mq_end_request(req, 0);
+	blk_mq_run_hw_queues(req->q, true);
+}
+
+static struct blk_mq_ops dasd_mq_ops = {
+	.queue_rq = do_dasd_request,
+	.complete = dasd_request_done,
+	.timeout = dasd_times_out,
+	.init_hctx = dasd_init_hctx,
+	.exit_hctx = dasd_exit_hctx,
+};
+
+/*
+ * Allocate and initialize request queue and default I/O scheduler.
+ */
+static int dasd_alloc_queue(struct dasd_block *block)
+{
+	int rc;
+
+	block->tag_set.ops = &dasd_mq_ops;
+	block->tag_set.cmd_size = sizeof(struct dasd_ccw_req);
+	block->tag_set.nr_hw_queues = nr_hw_queues;
+	block->tag_set.queue_depth = queue_depth;
+	block->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
+	block->tag_set.numa_node = NUMA_NO_NODE;
+
+	rc = blk_mq_alloc_tag_set(&block->tag_set);
+	if (rc)
+		return rc;
+
+	block->request_queue = blk_mq_init_queue(&block->tag_set);
+	if (IS_ERR(block->request_queue))
+		return PTR_ERR(block->request_queue);
+
+	block->request_queue->queuedata = block;
+
+	return 0;
+}
+
+/*
+ * Allocate and initialize request queue.
+ */
+static void dasd_setup_queue(struct dasd_block *block)
+{
+	unsigned int logical_block_size = block->bp_block;
+	struct request_queue *q = block->request_queue;
+	unsigned int max_bytes, max_discard_sectors;
+	int max;
+
+	if (block->base->features & DASD_FEATURE_USERAW) {
+		/*
+		 * The max_blocks value for raw_track access is 256;
+		 * it is higher than the native ECKD value because we
+		 * only need one ccw per track, so the max_hw_sectors are
+		 * 2048 x 512B = 1024kB = 16 tracks.
+		 */
+		max = 2048;
+	} else {
+		max = block->base->discipline->max_blocks << block->s2b_shift;
+	}
+	blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
+	q->limits.max_dev_sectors = max;
+	blk_queue_logical_block_size(q, logical_block_size);
+	blk_queue_max_hw_sectors(q, max);
+	blk_queue_max_segments(q, USHRT_MAX);
+	/* With page sized segments we can translate each segment into
+	 * one idaw/tidaw.
+	 */
+	blk_queue_max_segment_size(q, PAGE_SIZE);
+	blk_queue_segment_boundary(q, PAGE_SIZE - 1);
+
+	/* Only activate block layer discard support for devices that support it */
+	if (block->base->features & DASD_FEATURE_DISCARD) {
+		q->limits.discard_granularity = logical_block_size;
+		q->limits.discard_alignment = PAGE_SIZE;
+
+		/* Calculate max_discard_sectors and make it PAGE aligned */
+		max_bytes = USHRT_MAX * logical_block_size;
+		max_bytes = ALIGN(max_bytes, PAGE_SIZE) - PAGE_SIZE;
+		max_discard_sectors = max_bytes / logical_block_size;
+
+		blk_queue_max_discard_sectors(q, max_discard_sectors);
+		blk_queue_max_write_zeroes_sectors(q, max_discard_sectors);
+		blk_queue_flag_set(QUEUE_FLAG_DISCARD, q);
+	}
+}
+
+/*
+ * Deactivate and free request queue.
+ */
+static void dasd_free_queue(struct dasd_block *block)
+{
+	if (block->request_queue) {
+		blk_cleanup_queue(block->request_queue);
+		blk_mq_free_tag_set(&block->tag_set);
+		block->request_queue = NULL;
+	}
+}
+
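+/*
+ * Block device open: take an open count reference and pin the discipline
+ * module; reject devices that are offline, not yet recognized, in
+ * probeonly mode, or read-only for write opens.
+ */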
+static int dasd_open(struct block_device *bdev, fmode_t mode)
+{
+	struct dasd_device *base;
+	int rc;
+
+	base = dasd_device_from_gendisk(bdev->bd_disk);
+	if (!base)
+		return -ENODEV;
+
+	atomic_inc(&base->block->open_count);
+	if (test_bit(DASD_FLAG_OFFLINE, &base->flags)) {
+		rc = -ENODEV;
+		goto unlock;
+	}
+
+	if (!try_module_get(base->discipline->owner)) {
+		rc = -EINVAL;
+		goto unlock;
+	}
+
+	if (dasd_probeonly) {
+		dev_info(&base->cdev->dev,
+			 "Accessing the DASD failed because it is in "
+			 "probeonly mode\n");
+		rc = -EPERM;
+		goto out;
+	}
+
+	if (base->state <= DASD_STATE_BASIC) {
+		DBF_DEV_EVENT(DBF_ERR, base, " %s",
+			      " Cannot open unrecognized device");
+		rc = -ENODEV;
+		goto out;
+	}
+
+	if ((mode & FMODE_WRITE) &&
+	    (test_bit(DASD_FLAG_DEVICE_RO, &base->flags) ||
+	     (base->features & DASD_FEATURE_READONLY))) {
+		rc = -EROFS;
+		goto out;
+	}
+
+	dasd_put_device(base);
+	return 0;
+
+out:
+	module_put(base->discipline->owner);
+unlock:
+	atomic_dec(&base->block->open_count);
+	dasd_put_device(base);
+	return rc;
+}
+
+static void dasd_release(struct gendisk *disk, fmode_t mode)
+{
+	struct dasd_device *base = dasd_device_from_gendisk(disk);
+	if (base) {
+		atomic_dec(&base->block->open_count);
+		module_put(base->discipline->owner);
+		dasd_put_device(base);
+	}
+}
+
+/*
+ * Return disk geometry.
+ */
+static int dasd_getgeo(struct block_device *bdev, struct hd_geometry *geo)
+{
+	struct dasd_device *base;
+
+	base = dasd_device_from_gendisk(bdev->bd_disk);
+	if (!base)
+		return -ENODEV;
+
+	if (!base->discipline ||
+	    !base->discipline->fill_geometry) {
+		dasd_put_device(base);
+		return -EINVAL;
+	}
+	base->discipline->fill_geometry(base->block, geo);
+	geo->start = get_start_sect(bdev) >> base->block->s2b_shift;
+	dasd_put_device(base);
+	return 0;
+}
+
+const struct block_device_operations
+dasd_device_operations = {
+	.owner		= THIS_MODULE,
+	.open		= dasd_open,
+	.release	= dasd_release,
+	.ioctl		= dasd_ioctl,
+	.compat_ioctl	= dasd_ioctl,
+	.getgeo		= dasd_getgeo,
+};
+
+/*******************************************************************************
+ * end of block device operations
+ */
+
+static void
+dasd_exit(void)
+{
+#ifdef CONFIG_PROC_FS
+	dasd_proc_exit();
+#endif
+	dasd_eer_exit();
+	if (dasd_page_cache != NULL) {
+		kmem_cache_destroy(dasd_page_cache);
+		dasd_page_cache = NULL;
+	}
+	dasd_gendisk_exit();
+	dasd_devmap_exit();
+	if (dasd_debug_area != NULL) {
+		debug_unregister(dasd_debug_area);
+		dasd_debug_area = NULL;
+	}
+	dasd_statistics_removeroot();
+}
+
+/*
+ * SECTION: common functions for ccw_driver use
+ */
+
+/*
+ * Is the device read-only?
+ * Note that this function does not report the setting of the
+ * readonly device attribute, but how it is configured in z/VM.
+ */
+int dasd_device_is_ro(struct dasd_device *device)
+{
+	struct ccw_dev_id dev_id;
+	struct diag210 diag_data;
+	int rc;
+
+	if (!MACHINE_IS_VM)
+		return 0;
+	ccw_device_get_id(device->cdev, &dev_id);
+	memset(&diag_data, 0, sizeof(diag_data));
+	diag_data.vrdcdvno = dev_id.devno;
+	diag_data.vrdclen = sizeof(diag_data);
+	rc = diag210(&diag_data);
+	if (rc == 0 || rc == 2) {
+		return diag_data.vrdcvfla & 0x80;
+	} else {
+		DBF_EVENT(DBF_WARNING, "diag210 failed for dev=%04x with rc=%d",
+			  dev_id.devno, rc);
+		return 0;
+	}
+}
+EXPORT_SYMBOL_GPL(dasd_device_is_ro);
+
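+/*
+ * Async worker used during initial probe to set a device online.
+ */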
+static void dasd_generic_auto_online(void *data, async_cookie_t cookie)
+{
+	struct ccw_device *cdev = data;
+	int ret;
+
+	ret = ccw_device_set_online(cdev);
+	if (ret)
+		pr_warn("%s: Setting the DASD online failed with rc=%d\n",
+			dev_name(&cdev->dev), ret);
+}
+
+/*
+ * Initial attempt at a probe function. This can be simplified once
+ * the other detection code is gone.
+ */
+int dasd_generic_probe(struct ccw_device *cdev,
+		       struct dasd_discipline *discipline)
+{
+	int ret;
+
+	ret = dasd_add_sysfs_files(cdev);
+	if (ret) {
+		DBF_EVENT_DEVID(DBF_WARNING, cdev, "%s",
+				"dasd_generic_probe: could not add "
+				"sysfs entries");
+		return ret;
+	}
+	cdev->handler = &dasd_int_handler;
+
+	/*
+	 * Automatically online either all dasd devices (dasd_autodetect)
+	 * or all devices specified with dasd= parameters during
+	 * initial probe.
+	 */
+	if ((dasd_get_feature(cdev, DASD_FEATURE_INITIAL_ONLINE) > 0) ||
+	    (dasd_autodetect && dasd_busid_known(dev_name(&cdev->dev)) != 0))
+		async_schedule(dasd_generic_auto_online, cdev);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(dasd_generic_probe);
+
+void dasd_generic_free_discipline(struct dasd_device *device)
+{
+	/* Forget the discipline information. */
+	if (device->discipline) {
+		if (device->discipline->uncheck_device)
+			device->discipline->uncheck_device(device);
+		module_put(device->discipline->owner);
+		device->discipline = NULL;
+	}
+	if (device->base_discipline) {
+		module_put(device->base_discipline->owner);
+		device->base_discipline = NULL;
+	}
+}
+EXPORT_SYMBOL_GPL(dasd_generic_free_discipline);
+
+/*
+ * This will one day be called from a global not_oper handler.
+ * It is also used by driver_unregister during module unload.
+ */
+void dasd_generic_remove(struct ccw_device *cdev)
+{
+	struct dasd_device *device;
+	struct dasd_block *block;
+
+	cdev->handler = NULL;
+
+	device = dasd_device_from_cdev(cdev);
+	if (IS_ERR(device)) {
+		dasd_remove_sysfs_files(cdev);
+		return;
+	}
+	if (test_and_set_bit(DASD_FLAG_OFFLINE, &device->flags) &&
+	    !test_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) {
+		/* Already doing offline processing */
+		dasd_put_device(device);
+		dasd_remove_sysfs_files(cdev);
+		return;
+	}
+	/*
+	 * This device is removed unconditionally. Set the offline
+	 * flag to prevent dasd_open from opening it while it is
+	 * not quite down yet.
+	 */
+	dasd_set_target_state(device, DASD_STATE_NEW);
+	/* dasd_delete_device destroys the device reference. */
+	block = device->block;
+	dasd_delete_device(device);
+	/*
+	 * life cycle of block is bound to device, so delete it after
+	 * device was safely removed
+	 */
+	if (block)
+		dasd_free_block(block);
+
+	dasd_remove_sysfs_files(cdev);
+}
+EXPORT_SYMBOL_GPL(dasd_generic_remove);
+
+/*
+ * Activate a device. This is called from dasd_{eckd,fba}_probe() when either
+ * the device is detected for the first time and is supposed to be used
+ * or the user has started activation through sysfs.
+ */
+int dasd_generic_set_online(struct ccw_device *cdev,
+			    struct dasd_discipline *base_discipline)
+{
+	struct dasd_discipline *discipline;
+	struct dasd_device *device;
+	int rc;
+
+	/* first online clears initial online feature flag */
+	dasd_set_feature(cdev, DASD_FEATURE_INITIAL_ONLINE, 0);
+	device = dasd_create_device(cdev);
+	if (IS_ERR(device))
+		return PTR_ERR(device);
+
+	discipline = base_discipline;
+	if (device->features & DASD_FEATURE_USEDIAG) {
+		if (!dasd_diag_discipline_pointer) {
+			/* Try to load the required module. */
+			rc = request_module(DASD_DIAG_MOD);
+			if (rc) {
+				pr_warn("%s Setting the DASD online failed "
+					"because the required module %s "
+					"could not be loaded (rc=%d)\n",
+					dev_name(&cdev->dev), DASD_DIAG_MOD,
+					rc);
+				dasd_delete_device(device);
+				return -ENODEV;
+			}
+		}
+		/* Module init could have failed, so check again here after
+		 * request_module(). */
+		if (!dasd_diag_discipline_pointer) {
+			pr_warn("%s Setting the DASD online failed because of missing DIAG discipline\n",
+				dev_name(&cdev->dev));
+			dasd_delete_device(device);
+			return -ENODEV;
+		}
+		discipline = dasd_diag_discipline_pointer;
+	}
+	if (!try_module_get(base_discipline->owner)) {
+		dasd_delete_device(device);
+		return -EINVAL;
+	}
+	if (!try_module_get(discipline->owner)) {
+		module_put(base_discipline->owner);
+		dasd_delete_device(device);
+		return -EINVAL;
+	}
+	device->base_discipline = base_discipline;
+	device->discipline = discipline;
+
+	/* check_device will allocate block device if necessary */
+	rc = discipline->check_device(device);
+	if (rc) {
+		pr_warn("%s Setting the DASD online with discipline %s failed with rc=%i\n",
+			dev_name(&cdev->dev), discipline->name, rc);
+		module_put(discipline->owner);
+		module_put(base_discipline->owner);
+		dasd_delete_device(device);
+		return rc;
+	}
+
+	dasd_set_target_state(device, DASD_STATE_ONLINE);
+	if (device->state <= DASD_STATE_KNOWN) {
+		pr_warn("%s Setting the DASD online failed because of a missing discipline\n",
+			dev_name(&cdev->dev));
+		rc = -ENODEV;
+		dasd_set_target_state(device, DASD_STATE_NEW);
+		if (device->block)
+			dasd_free_block(device->block);
+		dasd_delete_device(device);
+	} else
+		pr_debug("dasd_generic device %s found\n",
+				dev_name(&cdev->dev));
+
+	wait_event(dasd_init_waitq, _wait_for_device(device));
+
+	dasd_put_device(device);
+	return rc;
+}
+EXPORT_SYMBOL_GPL(dasd_generic_set_online);
+
+int dasd_generic_set_offline(struct ccw_device *cdev)
+{
+	struct dasd_device *device;
+	struct dasd_block *block;
+	int max_count, open_count, rc;
+	unsigned long flags;
+
+	rc = 0;
+	spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
+	device = dasd_device_from_cdev_locked(cdev);
+	if (IS_ERR(device)) {
+		spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
+		return PTR_ERR(device);
+	}
+
+	/*
+	 * We must make sure that this device is currently not in use.
+	 * The open_count is increased for every opener; that includes
+	 * the blkdev_get in dasd_scan_partitions. We are only interested
+	 * in the other openers.
+	 */
+	if (device->block) {
+		max_count = device->block->bdev ? 0 : -1;
+		open_count = atomic_read(&device->block->open_count);
+		if (open_count > max_count) {
+			if (open_count > 0)
+				pr_warn("%s: The DASD cannot be set offline with open count %i\n",
+					dev_name(&cdev->dev), open_count);
+			else
+				pr_warn("%s: The DASD cannot be set offline while it is in use\n",
+					dev_name(&cdev->dev));
+			rc = -EBUSY;
+			goto out_err;
+		}
+	}
+
+	/*
+	 * Test if the offline processing is already running and exit if so.
+	 * If a safe offline is being processed, this can only be a normal
+	 * offline that is allowed to overtake the safe offline and cancel
+	 * any I/O we no longer want to wait for.
+	 */
+	if (test_bit(DASD_FLAG_OFFLINE, &device->flags)) {
+		if (test_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) {
+			clear_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING,
+				  &device->flags);
+		} else {
+			rc = -EBUSY;
+			goto out_err;
+		}
+	}
+	set_bit(DASD_FLAG_OFFLINE, &device->flags);
+
+	/*
+	 * If safe_offline is called, set the safe_offline_running flag
+	 * and clear safe_offline, so that a subsequent call to normal
+	 * offline can overtake the safe_offline processing.
+	 */
+	if (test_and_clear_bit(DASD_FLAG_SAFE_OFFLINE, &device->flags) &&
+	    !test_and_set_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) {
+		/* need to unlock here to wait for outstanding I/O */
+		spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
+		/*
+		 * If we want to set the device safe offline, all I/O
+		 * operations should be finished before continuing the
+		 * offline process, so sync the bdev first and then wait
+		 * for our queues to become empty.
+		 */
+		if (device->block) {
+			rc = fsync_bdev(device->block->bdev);
+			if (rc != 0)
+				goto interrupted;
+		}
+		dasd_schedule_device_bh(device);
+		rc = wait_event_interruptible(shutdown_waitq,
+					      _wait_for_empty_queues(device));
+		if (rc != 0)
+			goto interrupted;
+
+		/*
+		 * Check if a normal offline process overtook the safe
+		 * offline processing. In this case simply do nothing
+		 * besides returning that we got interrupted; otherwise
+		 * mark safe offline as not running any longer and
+		 * continue with normal offline.
+		 */
+		spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
+		if (!test_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) {
+			rc = -ERESTARTSYS;
+			goto out_err;
+		}
+		clear_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags);
+	}
+	spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
+
+	dasd_set_target_state(device, DASD_STATE_NEW);
+	/* dasd_delete_device destroys the device reference. */
+	block = device->block;
+	dasd_delete_device(device);
+	/*
+	 * life cycle of block is bound to device, so delete it after
+	 * device was safely removed
+	 */
+	if (block)
+		dasd_free_block(block);
+
+	return 0;
+
+interrupted:
+	/* interrupted by signal */
+	spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
+	clear_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags);
+	clear_bit(DASD_FLAG_OFFLINE, &device->flags);
+out_err:
+	dasd_put_device(device);
+	spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
+	return rc;
+}
+EXPORT_SYMBOL_GPL(dasd_generic_set_offline);
+
+int dasd_generic_last_path_gone(struct dasd_device *device)
+{
+	struct dasd_ccw_req *cqr;
+
+	dev_warn(&device->cdev->dev, "No operational channel path is left "
+		 "for the device\n");
+	DBF_DEV_EVENT(DBF_WARNING, device, "%s", "last path gone");
+	/* First of all call extended error reporting. */
+	dasd_eer_write(device, NULL, DASD_EER_NOPATH);
+
+	if (device->state < DASD_STATE_BASIC)
+		return 0;
+	/* Device is active. We want to keep it. */
+	list_for_each_entry(cqr, &device->ccw_queue, devlist)
+		if ((cqr->status == DASD_CQR_IN_IO) ||
+		    (cqr->status == DASD_CQR_CLEAR_PENDING)) {
+			cqr->status = DASD_CQR_QUEUED;
+			cqr->retries++;
+		}
+	dasd_device_set_stop_bits(device, DASD_STOPPED_DC_WAIT);
+	dasd_device_clear_timer(device);
+	dasd_schedule_device_bh(device);
+	return 1;
+}
+EXPORT_SYMBOL_GPL(dasd_generic_last_path_gone);
+
+int dasd_generic_path_operational(struct dasd_device *device)
+{
+	dev_info(&device->cdev->dev, "A channel path to the device has become "
+		 "operational\n");
+	DBF_DEV_EVENT(DBF_WARNING, device, "%s", "path operational");
+	dasd_device_remove_stop_bits(device, DASD_STOPPED_DC_WAIT);
+	if (device->stopped & DASD_UNRESUMED_PM) {
+		dasd_device_remove_stop_bits(device, DASD_UNRESUMED_PM);
+		dasd_restore_device(device);
+		return 1;
+	}
+	dasd_schedule_device_bh(device);
+	if (device->block) {
+		dasd_schedule_block_bh(device->block);
+		if (device->block->request_queue)
+			blk_mq_run_hw_queues(device->block->request_queue,
+					     true);
+	}
+
+	if (!device->stopped)
+		wake_up(&generic_waitq);
+
+	return 1;
+}
+EXPORT_SYMBOL_GPL(dasd_generic_path_operational);
+
+int dasd_generic_notify(struct ccw_device *cdev, int event)
+{
+	struct dasd_device *device;
+	int ret;
+
+	device = dasd_device_from_cdev_locked(cdev);
+	if (IS_ERR(device))
+		return 0;
+	ret = 0;
+	switch (event) {
+	case CIO_GONE:
+	case CIO_BOXED:
+	case CIO_NO_PATH:
+		dasd_path_no_path(device);
+		ret = dasd_generic_last_path_gone(device);
+		break;
+	case CIO_OPER:
+		ret = 1;
+		if (dasd_path_get_opm(device))
+			ret = dasd_generic_path_operational(device);
+		break;
+	}
+	dasd_put_device(device);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(dasd_generic_notify);
+
+void dasd_generic_path_event(struct ccw_device *cdev, int *path_event)
+{
+	struct dasd_device *device;
+	int chp, oldopm, hpfpm, ifccpm;
+
+	device = dasd_device_from_cdev_locked(cdev);
+	if (IS_ERR(device))
+		return;
+
+	oldopm = dasd_path_get_opm(device);
+	for (chp = 0; chp < 8; chp++) {
+		if (path_event[chp] & PE_PATH_GONE) {
+			dasd_path_notoper(device, chp);
+		}
+		if (path_event[chp] & PE_PATH_AVAILABLE) {
+			dasd_path_available(device, chp);
+			dasd_schedule_device_bh(device);
+		}
+		if (path_event[chp] & PE_PATHGROUP_ESTABLISHED) {
+			if (!dasd_path_is_operational(device, chp) &&
+			    !dasd_path_need_verify(device, chp)) {
+				/*
+				 * We cannot establish a pathgroup on an
+				 * unavailable path, so trigger a path
+				 * verification first.
+				 */
+				dasd_path_available(device, chp);
+				dasd_schedule_device_bh(device);
+			}
+			DBF_DEV_EVENT(DBF_WARNING, device, "%s",
+				      "Pathgroup re-established\n");
+			if (device->discipline->kick_validate)
+				device->discipline->kick_validate(device);
+		}
+	}
+	hpfpm = dasd_path_get_hpfpm(device);
+	ifccpm = dasd_path_get_ifccpm(device);
+	if (!dasd_path_get_opm(device) && hpfpm) {
+		/*
+		 * The device has no operational paths, but at least one
+		 * path is disabled due to HPF errors: disable HPF
+		 * altogether and use the path(s) again.
+		 */
+		if (device->discipline->disable_hpf)
+			device->discipline->disable_hpf(device);
+		dasd_device_set_stop_bits(device, DASD_STOPPED_NOT_ACC);
+		dasd_path_set_tbvpm(device, hpfpm);
+		dasd_schedule_device_bh(device);
+		dasd_schedule_requeue(device);
+	} else if (!dasd_path_get_opm(device) && ifccpm) {
+		/*
+		 * The device has no operational paths, but at least one
+		 * path is disabled due to IFCC errors: trigger path
+		 * verification on the paths with IFCC errors.
+		 */
+		dasd_path_set_tbvpm(device, ifccpm);
+		dasd_schedule_device_bh(device);
+	}
+	if (oldopm && !dasd_path_get_opm(device) && !hpfpm && !ifccpm) {
+		dev_warn(&device->cdev->dev,
+			 "No verified channel paths remain for the device\n");
+		DBF_DEV_EVENT(DBF_WARNING, device,
+			      "%s", "last verified path gone");
+		dasd_eer_write(device, NULL, DASD_EER_NOPATH);
+		dasd_device_set_stop_bits(device,
+					  DASD_STOPPED_DC_WAIT);
+	}
+	dasd_put_device(device);
+}
+EXPORT_SYMBOL_GPL(dasd_generic_path_event);
+
+int dasd_generic_verify_path(struct dasd_device *device, __u8 lpm)
+{
+	if (!dasd_path_get_opm(device) && lpm) {
+		dasd_path_set_opm(device, lpm);
+		dasd_generic_path_operational(device);
+	} else
+		dasd_path_add_opm(device, lpm);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(dasd_generic_verify_path);
+
+/*
+ * Clear active requests and requeue them to the block layer if possible.
+ */
+static int dasd_generic_requeue_all_requests(struct dasd_device *device)
+{
+	struct list_head requeue_queue;
+	struct dasd_ccw_req *cqr, *n;
+	struct dasd_ccw_req *refers;
+	int rc;
+
+	INIT_LIST_HEAD(&requeue_queue);
+	spin_lock_irq(get_ccwdev_lock(device->cdev));
+	rc = 0;
+	list_for_each_entry_safe(cqr, n, &device->ccw_queue, devlist) {
+		/* Check status and move request to requeue_queue */
+		if (cqr->status == DASD_CQR_IN_IO) {
+			rc = device->discipline->term_IO(cqr);
+			if (rc) {
+				/* unable to terminate request */
+				dev_err(&device->cdev->dev,
+					"Unable to terminate request %p "
+					"on suspend\n", cqr);
+				spin_unlock_irq(get_ccwdev_lock(device->cdev));
+				dasd_put_device(device);
+				return rc;
+			}
+		}
+		list_move_tail(&cqr->devlist, &requeue_queue);
+	}
+	spin_unlock_irq(get_ccwdev_lock(device->cdev));
+
+	list_for_each_entry_safe(cqr, n, &requeue_queue, devlist) {
+		wait_event(dasd_flush_wq,
+			   (cqr->status != DASD_CQR_CLEAR_PENDING));
+
+		/*
+		 * Requeuing requests to the block layer only works
+		 * for block device requests.
+		 */
+		if (_dasd_requeue_request(cqr))
+			continue;
+
+		/* remove requests from device and block queue */
+		list_del_init(&cqr->devlist);
+		while (cqr->refers != NULL) {
+			refers = cqr->refers;
+			/* remove the request from the block queue */
+			list_del(&cqr->blocklist);
+			/* free the finished erp request */
+			dasd_free_erp_request(cqr, cqr->memdev);
+			cqr = refers;
+		}
+
+		/*
+		 * _dasd_requeue_request already checked for a valid
+		 * block device, no need to check again; all ERP
+		 * requests (cqr->refers) have a cqr->block pointer
+		 * copied from the original cqr
+		 */
+		list_del_init(&cqr->blocklist);
+		cqr->block->base->discipline->free_cp(
+			cqr, (struct request *) cqr->callback_data);
+	}
+
+	/*
+	 * if requests remain then they are internal requests
+	 * and go back to the device queue
+	 */
+	if (!list_empty(&requeue_queue)) {
+		/* move requeue_queue to the end of the ccw_queue */
+		spin_lock_irq(get_ccwdev_lock(device->cdev));
+		list_splice_tail(&requeue_queue, &device->ccw_queue);
+		spin_unlock_irq(get_ccwdev_lock(device->cdev));
+	}
+	dasd_schedule_device_bh(device);
+	return rc;
+}
+
+static void do_requeue_requests(struct work_struct *work)
+{
+	struct dasd_device *device = container_of(work, struct dasd_device,
+						  requeue_requests);
+	dasd_generic_requeue_all_requests(device);
+	dasd_device_remove_stop_bits(device, DASD_STOPPED_NOT_ACC);
+	if (device->block)
+		dasd_schedule_block_bh(device->block);
+	dasd_put_device(device);
+}
+
+void dasd_schedule_requeue(struct dasd_device *device)
+{
+	dasd_get_device(device);
+	/* queue call to do_requeue_requests to the kernel event daemon. */
+	if (!schedule_work(&device->requeue_requests))
+		dasd_put_device(device);
+}
+EXPORT_SYMBOL(dasd_schedule_requeue);
+
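+/*
+ * Power management freeze callback: mark the device as suspended, let the
+ * discipline save its state, stop acceptance of new I/O and push all
+ * queued requests back to the block layer so they survive the
+ * suspend/resume cycle.
+ */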
+int dasd_generic_pm_freeze(struct ccw_device *cdev)
+{
+	struct dasd_device *device = dasd_device_from_cdev(cdev);
+
+	if (IS_ERR(device))
+		return PTR_ERR(device);
+
+	/* mark device as suspended */
+	set_bit(DASD_FLAG_SUSPENDED, &device->flags);
+
+	if (device->discipline->freeze)
+		device->discipline->freeze(device);
+
+	/* disallow new I/O  */
+	dasd_device_set_stop_bits(device, DASD_STOPPED_PM);
+
+	return dasd_generic_requeue_all_requests(device);
+}
+EXPORT_SYMBOL_GPL(dasd_generic_pm_freeze);
+
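+/*
+ * Power management restore callback, the counterpart of
+ * dasd_generic_pm_freeze: allow I/O again, let the discipline revalidate
+ * the device and restart the block layer queues.
+ */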
+int dasd_generic_restore_device(struct ccw_device *cdev)
+{
+	struct dasd_device *device = dasd_device_from_cdev(cdev);
+	int rc = 0;
+
+	if (IS_ERR(device))
+		return PTR_ERR(device);
+
+	/* allow new IO again */
+	dasd_device_remove_stop_bits(device,
+				     (DASD_STOPPED_PM | DASD_UNRESUMED_PM));
+
+	dasd_schedule_device_bh(device);
+
+	/*
+	 * call the discipline restore function;
+	 * if the device is stopped do nothing, e.g. for disconnected devices
+	 */
+	if (device->discipline->restore && !(device->stopped))
+		rc = device->discipline->restore(device);
+	if (rc || device->stopped)
+		/*
+		 * if the resume failed for the DASD we put it in
+		 * an UNRESUMED stop state
+		 */
+		device->stopped |= DASD_UNRESUMED_PM;
+
+	if (device->block) {
+		dasd_schedule_block_bh(device->block);
+		if (device->block->request_queue)
+			blk_mq_run_hw_queues(device->block->request_queue,
+					     true);
+	}
+
+	clear_bit(DASD_FLAG_SUSPENDED, &device->flags);
+	dasd_put_device(device);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(dasd_generic_restore_device);
+
+static struct dasd_ccw_req *dasd_generic_build_rdc(struct dasd_device *device,
+						   void *rdc_buffer,
+						   int rdc_buffer_size,
+						   int magic)
+{
+	struct dasd_ccw_req *cqr;
+	struct ccw1 *ccw;
+	unsigned long *idaw;
+
+	cqr = dasd_smalloc_request(magic, 1 /* RDC */, rdc_buffer_size, device,
+				   NULL);
+
+	if (IS_ERR(cqr)) {
+		/* internal error 13 - Allocating the RDC request failed */
+		dev_err(&device->cdev->dev,
+			 "An error occurred in the DASD device driver, "
+			 "reason=%s\n", "13");
+		return cqr;
+	}
+
+	ccw = cqr->cpaddr;
+	ccw->cmd_code = CCW_CMD_RDC;
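+	/*
+	 * A CCW can only carry a 31-bit data address; buffers that are not
+	 * directly addressable must be described by an indirect data
+	 * address list (IDAL) built in the request's data area.
+	 */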
+	if (idal_is_needed(rdc_buffer, rdc_buffer_size)) {
+		idaw = (unsigned long *) (cqr->data);
+		ccw->cda = (__u32)(addr_t) idaw;
+		ccw->flags = CCW_FLAG_IDA;
+		idaw = idal_create_words(idaw, rdc_buffer, rdc_buffer_size);
+	} else {
+		ccw->cda = (__u32)(addr_t) rdc_buffer;
+		ccw->flags = 0;
+	}
+
+	ccw->count = rdc_buffer_size;
+	cqr->startdev = device;
+	cqr->memdev = device;
+	cqr->expires = 10*HZ;
+	cqr->retries = 256;
+	cqr->buildclk = get_tod_clock();
+	cqr->status = DASD_CQR_FILLED;
+	return cqr;
+}
+
+
+int dasd_generic_read_dev_chars(struct dasd_device *device, int magic,
+				void *rdc_buffer, int rdc_buffer_size)
+{
+	int ret;
+	struct dasd_ccw_req *cqr;
+
+	cqr = dasd_generic_build_rdc(device, rdc_buffer, rdc_buffer_size,
+				     magic);
+	if (IS_ERR(cqr))
+		return PTR_ERR(cqr);
+
+	ret = dasd_sleep_on(cqr);
+	dasd_sfree_request(cqr, cqr->memdev);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(dasd_generic_read_dev_chars);
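+/*
+ * Illustrative use (a sketch, not lifted from a particular discipline):
+ * a discipline typically reads the device characteristics into its
+ * private data during device setup, e.g.
+ *
+ *	rc = dasd_generic_read_dev_chars(device, DASD_ECKD_MAGIC,
+ *					 &private->rdc_data,
+ *					 sizeof(private->rdc_data));
+ *
+ * where private->rdc_data stands for the discipline's RDC buffer.
+ */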
+
+/*
+ *   In command mode and transport mode we need to look for sense
+ *   data in different places. The sense data itself is always
+ *   an array of 32 bytes, so we can unify the sense data access
+ *   for both modes.
+ */
+char *dasd_get_sense(struct irb *irb)
+{
+	struct tsb *tsb = NULL;
+	char *sense = NULL;
+
+	if (scsw_is_tm(&irb->scsw) && (irb->scsw.tm.fcxs == 0x01)) {
+		if (irb->scsw.tm.tcw)
+			tsb = tcw_get_tsb((struct tcw *)(unsigned long)
+					  irb->scsw.tm.tcw);
+		if (tsb && tsb->length == 64 && tsb->flags)
+			switch (tsb->flags & 0x07) {
+			case 1:	/* tsa_iostat */
+				sense = tsb->tsa.iostat.sense;
+				break;
+			case 2: /* tsa_ddpc */
+				sense = tsb->tsa.ddpc.sense;
+				break;
+			default:
+				/* currently we don't use interrogate data */
+				break;
+			}
+	} else if (irb->esw.esw0.erw.cons) {
+		sense = irb->ecw;
+	}
+	return sense;
+}
+EXPORT_SYMBOL_GPL(dasd_get_sense);
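+/*
+ * Callers must treat a NULL return as "no sense data available"; e.g.
+ * (illustrative sketch, handle_command_reject() is a placeholder for the
+ * caller's own recovery):
+ *
+ *	sense = dasd_get_sense(&irb);
+ *	if (sense && (sense[0] & SNS0_CMD_REJECT))
+ *		handle_command_reject();
+ */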
+
+void dasd_generic_shutdown(struct ccw_device *cdev)
+{
+	struct dasd_device *device;
+
+	device = dasd_device_from_cdev(cdev);
+	if (IS_ERR(device))
+		return;
+
+	if (device->block)
+		dasd_schedule_block_bh(device->block);
+
+	dasd_schedule_device_bh(device);
+
+	wait_event(shutdown_waitq, _wait_for_empty_queues(device));
+}
+EXPORT_SYMBOL_GPL(dasd_generic_shutdown);
+
+static int __init dasd_init(void)
+{
+	int rc;
+
+	init_waitqueue_head(&dasd_init_waitq);
+	init_waitqueue_head(&dasd_flush_wq);
+	init_waitqueue_head(&generic_waitq);
+	init_waitqueue_head(&shutdown_waitq);
+
+	/* register 'common' DASD debug area, used for all DBF_XXX calls */
+	dasd_debug_area = debug_register("dasd", 1, 1, 8 * sizeof(long));
+	if (dasd_debug_area == NULL) {
+		rc = -ENOMEM;
+		goto failed;
+	}
+	debug_register_view(dasd_debug_area, &debug_sprintf_view);
+	debug_set_level(dasd_debug_area, DBF_WARNING);
+
+	DBF_EVENT(DBF_EMERG, "%s", "debug area created");
+
+	dasd_diag_discipline_pointer = NULL;
+
+	dasd_statistics_createroot();
+
+	rc = dasd_devmap_init();
+	if (rc)
+		goto failed;
+	rc = dasd_gendisk_init();
+	if (rc)
+		goto failed;
+	rc = dasd_parse();
+	if (rc)
+		goto failed;
+	rc = dasd_eer_init();
+	if (rc)
+		goto failed;
+#ifdef CONFIG_PROC_FS
+	rc = dasd_proc_init();
+	if (rc)
+		goto failed;
+#endif
+
+	return 0;
+failed:
+	pr_info("The DASD device driver could not be initialized\n");
+	dasd_exit();
+	return rc;
+}
+
+module_init(dasd_init);
+module_exit(dasd_exit);
diff --git a/drivers/s390/block/dasd_3990_erp.c b/drivers/s390/block/dasd_3990_erp.c
new file mode 100644
index 0000000..ee73b06
--- /dev/null
+++ b/drivers/s390/block/dasd_3990_erp.c
@@ -0,0 +1,2854 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Author(s)......: Horst  Hummel    <Horst.Hummel@de.ibm.com>
+ *		    Holger Smolinski <Holger.Smolinski@de.ibm.com>
+ * Bugreports.to..: <Linux390@de.ibm.com>
+ * Copyright IBM Corp. 2000, 2001
+ *
+ */
+
+#define KMSG_COMPONENT "dasd-eckd"
+
+#include <linux/timer.h>
+#include <asm/idals.h>
+
+#define PRINTK_HEADER "dasd_erp(3990): "
+
+#include "dasd_int.h"
+#include "dasd_eckd.h"
+
+
+struct DCTL_data {
+	unsigned char subcommand;  /* e.g. Inhibit Write, Enable Write,... */
+	unsigned char modifier;	   /* Subcommand modifier */
+	unsigned short res;	   /* reserved */
+} __attribute__ ((packed));
+
+/*
+ *****************************************************************************
+ * SECTION ERP HANDLING
+ *****************************************************************************
+ */
+/*
+ *****************************************************************************
+ * 24 and 32 byte sense ERP functions
+ *****************************************************************************
+ */
+
+/*
+ * DASD_3990_ERP_CLEANUP
+ *
+ * DESCRIPTION
+ *   Removes the already built but not needed ERP request and sets
+ *   the status of the original cqr / erp to the given (final) status
+ *
+ *  PARAMETER
+ *   erp		request to be blocked
+ *   final_status	either DASD_CQR_DONE or DASD_CQR_FAILED
+ *
+ * RETURN VALUES
+ *   cqr		original cqr
+ */
+static struct dasd_ccw_req *
+dasd_3990_erp_cleanup(struct dasd_ccw_req * erp, char final_status)
+{
+	struct dasd_ccw_req *cqr = erp->refers;
+
+	dasd_free_erp_request(erp, erp->memdev);
+	cqr->status = final_status;
+	return cqr;
+
+}				/* end dasd_3990_erp_cleanup */
+
+/*
+ * DASD_3990_ERP_BLOCK_QUEUE
+ *
+ * DESCRIPTION
+ *   Block the given device request queue to prevent further
+ *   processing until the started timer has expired or a related
+ *   interrupt was received.
+ */
+static void dasd_3990_erp_block_queue(struct dasd_ccw_req *erp, int expires)
+{
+
+	struct dasd_device *device = erp->startdev;
+	unsigned long flags;
+
+	DBF_DEV_EVENT(DBF_INFO, device,
+		    "blocking request queue for %is", expires/HZ);
+
+	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
+	dasd_device_set_stop_bits(device, DASD_STOPPED_PENDING);
+	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
+	erp->status = DASD_CQR_FILLED;
+	if (erp->block)
+		dasd_block_set_timer(erp->block, expires);
+	else
+		dasd_device_set_timer(device, expires);
+}
+
+/*
+ * DASD_3990_ERP_INT_REQ
+ *
+ * DESCRIPTION
+ *   Handles 'Intervention Required' error.
+ *   This means either device offline or not installed.
+ *
+ * PARAMETER
+ *   erp		current erp
+ * RETURN VALUES
+ *   erp		modified erp
+ */
+static struct dasd_ccw_req *
+dasd_3990_erp_int_req(struct dasd_ccw_req * erp)
+{
+
+	struct dasd_device *device = erp->startdev;
+
+	/* first time set initial retry counter and erp_function */
+	/* and retry once without blocking queue		 */
+	/* (this enables easier enqueueing of the cqr)		 */
+	if (erp->function != dasd_3990_erp_int_req) {
+
+		erp->retries = 256;
+		erp->function = dasd_3990_erp_int_req;
+
+	} else {
+
+		/* issue a message and wait for 'device ready' interrupt */
+		dev_err(&device->cdev->dev,
+			    "is offline or not installed - "
+			    "INTERVENTION REQUIRED!!\n");
+
+		dasd_3990_erp_block_queue(erp, 60*HZ);
+	}
+
+	return erp;
+
+}				/* end dasd_3990_erp_int_req */
+
+/*
+ * DASD_3990_ERP_ALTERNATE_PATH
+ *
+ * DESCRIPTION
+ *   Repeat the operation on a different channel path.
+ *   If all alternate paths have been tried, the request is posted with a
+ *   permanent error.
+ *
+ *  PARAMETER
+ *   erp		pointer to the current ERP
+ *
+ * RETURN VALUES
+ *   erp		modified pointer to the ERP
+ */
+static void
+dasd_3990_erp_alternate_path(struct dasd_ccw_req * erp)
+{
+	struct dasd_device *device = erp->startdev;
+	__u8 opm;
+	unsigned long flags;
+
+	/* try alternate valid path */
+	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
+	opm = ccw_device_get_path_mask(device->cdev);
+	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
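+	/*
+	 * erp->lpm tracks the paths not yet tried for this request: it
+	 * starts out as the operational path mask and the path that just
+	 * failed (lpum) is removed on every pass.
+	 */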
+	if (erp->lpm == 0)
+		erp->lpm = dasd_path_get_opm(device) &
+			~(erp->irb.esw.esw0.sublog.lpum);
+	else
+		erp->lpm &= ~(erp->irb.esw.esw0.sublog.lpum);
+
+	if ((erp->lpm & opm) != 0x00) {
+
+		DBF_DEV_EVENT(DBF_WARNING, device,
+			    "try alternate lpm=%x (lpum=%x / opm=%x)",
+			    erp->lpm, erp->irb.esw.esw0.sublog.lpum, opm);
+
+		/* reset status to submit the request again... */
+		erp->status = DASD_CQR_FILLED;
+		erp->retries = 10;
+	} else {
+		dev_err(&device->cdev->dev,
+			"The DASD cannot be reached on any path (lpum=%x"
+			"/opm=%x)\n", erp->irb.esw.esw0.sublog.lpum, opm);
+
+		/* post request with permanent error */
+		erp->status = DASD_CQR_FAILED;
+	}
+}				/* end dasd_3990_erp_alternate_path */
+
+/*
+ * DASD_3990_ERP_DCTL
+ *
+ * DESCRIPTION
+ *   Setup cqr to do the Diagnostic Control (DCTL) command with an
+ *   Inhibit Write subcommand and the given modifier.
+ *
+ *  PARAMETER
+ *   erp		pointer to the current (failed) ERP
+ *   modifier		subcommand modifier
+ *
+ * RETURN VALUES
+ *   dctl_cqr		pointer to NEW dctl_cqr
+ *
+ */
+static struct dasd_ccw_req *
+dasd_3990_erp_DCTL(struct dasd_ccw_req * erp, char modifier)
+{
+
+	struct dasd_device *device = erp->startdev;
+	struct DCTL_data *DCTL_data;
+	struct ccw1 *ccw;
+	struct dasd_ccw_req *dctl_cqr;
+
+	dctl_cqr = dasd_alloc_erp_request((char *) &erp->magic, 1,
+					  sizeof(struct DCTL_data),
+					  device);
+	if (IS_ERR(dctl_cqr)) {
+		dev_err(&device->cdev->dev,
+			    "Unable to allocate DCTL-CQR\n");
+		erp->status = DASD_CQR_FAILED;
+		return erp;
+	}
+
+	DCTL_data = dctl_cqr->data;
+
+	DCTL_data->subcommand = 0x02;	/* Inhibit Write */
+	DCTL_data->modifier = modifier;
+
+	ccw = dctl_cqr->cpaddr;
+	memset(ccw, 0, sizeof(struct ccw1));
+	ccw->cmd_code = CCW_CMD_DCTL;
+	ccw->count = 4;
+	ccw->cda = (__u32)(addr_t) DCTL_data;
+	dctl_cqr->flags = erp->flags;
+	dctl_cqr->function = dasd_3990_erp_DCTL;
+	dctl_cqr->refers = erp;
+	dctl_cqr->startdev = device;
+	dctl_cqr->memdev = device;
+	dctl_cqr->magic = erp->magic;
+	dctl_cqr->expires = 5 * 60 * HZ;
+	dctl_cqr->retries = 2;
+
+	dctl_cqr->buildclk = get_tod_clock();
+
+	dctl_cqr->status = DASD_CQR_FILLED;
+
+	return dctl_cqr;
+
+}				/* end dasd_3990_erp_DCTL */
+
+/*
+ * DASD_3990_ERP_ACTION_1
+ *
+ * DESCRIPTION
+ *   Setup ERP to do the ERP action 1 (see Reference manual).
+ *   Repeat the operation on a different channel path.
+ *   As a deviation from the recommended recovery action, we reset the path mask
+ *   after we have tried each path and go through all paths a second time.
+ *   This will cover situations where only one path at a time is actually down,
+ *   but all paths fail and recover just with the same sequence and timing as
+ *   we try to use them (flapping links).
+ *   If all alternate paths have been tried twice, the request is posted with
+ *   a permanent error.
+ *
+ *  PARAMETER
+ *   erp		pointer to the current ERP
+ *
+ * RETURN VALUES
+ *   erp		pointer to the ERP
+ *
+ */
+static struct dasd_ccw_req *dasd_3990_erp_action_1_sec(struct dasd_ccw_req *erp)
+{
+	erp->function = dasd_3990_erp_action_1_sec;
+	dasd_3990_erp_alternate_path(erp);
+	return erp;
+}
+
+static struct dasd_ccw_req *dasd_3990_erp_action_1(struct dasd_ccw_req *erp)
+{
+	erp->function = dasd_3990_erp_action_1;
+	dasd_3990_erp_alternate_path(erp);
+	if (erp->status == DASD_CQR_FAILED &&
+	    !test_bit(DASD_CQR_VERIFY_PATH, &erp->flags)) {
+		erp->status = DASD_CQR_FILLED;
+		erp->retries = 10;
+		erp->lpm = dasd_path_get_opm(erp->startdev);
+		erp->function = dasd_3990_erp_action_1_sec;
+	}
+	return erp;
+}				/* end dasd_3990_erp_action_1(b) */
+
+/*
+ * DASD_3990_ERP_ACTION_4
+ *
+ * DESCRIPTION
+ *   Setup ERP to do the ERP action 4 (see Reference manual).
+ *   Set the current request to PENDING to block the CQR queue for that device
+ *   until the state change interrupt appears.
+ *   Use a timer (20 seconds) to retry the cqr if the interrupt is still
+ *   missing.
+ *
+ *  PARAMETER
+ *   sense		sense data of the actual error
+ *   erp		pointer to the current ERP
+ *
+ * RETURN VALUES
+ *   erp		pointer to the ERP
+ *
+ */
+static struct dasd_ccw_req *
+dasd_3990_erp_action_4(struct dasd_ccw_req * erp, char *sense)
+{
+
+	struct dasd_device *device = erp->startdev;
+
+	/* first time set initial retry counter and erp_function    */
+	/* and retry once without waiting for state change pending  */
+	/* interrupt (this enables easier enqueueing of the cqr)   */
+	if (erp->function != dasd_3990_erp_action_4) {
+
+		DBF_DEV_EVENT(DBF_INFO, device, "%s",
+			    "dasd_3990_erp_action_4: first time retry");
+
+		erp->retries = 256;
+		erp->function = dasd_3990_erp_action_4;
+
+	} else {
+		if (sense && (sense[25] == 0x1D)) { /* state change pending */
+
+			DBF_DEV_EVENT(DBF_INFO, device,
+				    "waiting for state change pending "
+				    "interrupt, %d retries left",
+				    erp->retries);
+
+			dasd_3990_erp_block_queue(erp, 30*HZ);
+
+		} else if (sense && (sense[25] == 0x1E)) {	/* busy */
+			DBF_DEV_EVENT(DBF_INFO, device,
+				    "busy - redriving request later, "
+				    "%d retries left",
+				    erp->retries);
+			dasd_3990_erp_block_queue(erp, HZ);
+		} else {
+			/* no state change pending - retry */
+			DBF_DEV_EVENT(DBF_INFO, device,
+				     "redriving request immediately, "
+				     "%d retries left",
+				     erp->retries);
+			erp->status = DASD_CQR_FILLED;
+		}
+	}
+
+	return erp;
+
+}				/* end dasd_3990_erp_action_4 */
+
+/*
+ *****************************************************************************
+ * 24 byte sense ERP functions (only)
+ *****************************************************************************
+ */
+
+/*
+ * DASD_3990_ERP_ACTION_5
+ *
+ * DESCRIPTION
+ *   Setup ERP to do the ERP action 5 (see Reference manual).
+ *   NOTE: Further handling is done in xxx_further_erp after the retries.
+ *
+ *  PARAMETER
+ *   erp		pointer to the current ERP
+ *
+ * RETURN VALUES
+ *   erp		pointer to the ERP
+ *
+ */
+static struct dasd_ccw_req *
+dasd_3990_erp_action_5(struct dasd_ccw_req * erp)
+{
+
+	/* first of all retry */
+	erp->retries = 10;
+	erp->function = dasd_3990_erp_action_5;
+
+	return erp;
+
+}				/* end dasd_3990_erp_action_5 */
+
+/*
+ * DASD_3990_HANDLE_ENV_DATA
+ *
+ * DESCRIPTION
+ *   Handles 24 byte 'Environmental data present'.
+ *   Does an analysis of the sense data (message format)
+ *   and prints the error messages.
+ *
+ * PARAMETER
+ *   sense		current sense data
+ *
+ * RETURN VALUES
+ *   void
+ */
+static void
+dasd_3990_handle_env_data(struct dasd_ccw_req * erp, char *sense)
+{
+
+	struct dasd_device *device = erp->startdev;
+	char msg_format = (sense[7] & 0xF0);
+	char msg_no = (sense[7] & 0x0F);
+	char errorstring[ERRORLENGTH];
+
+	switch (msg_format) {
+	case 0x00:		/* Format 0 - Program or System Checks */
+
+		if (sense[1] & 0x10) {	/* check message to operator bit */
+
+			switch (msg_no) {
+			case 0x00:	/* No Message */
+				break;
+			case 0x01:
+				dev_warn(&device->cdev->dev,
+					    "FORMAT 0 - Invalid Command\n");
+				break;
+			case 0x02:
+				dev_warn(&device->cdev->dev,
+					    "FORMAT 0 - Invalid Command "
+					    "Sequence\n");
+				break;
+			case 0x03:
+				dev_warn(&device->cdev->dev,
+					    "FORMAT 0 - CCW Count less than "
+					    "required\n");
+				break;
+			case 0x04:
+				dev_warn(&device->cdev->dev,
+					    "FORMAT 0 - Invalid Parameter\n");
+				break;
+			case 0x05:
+				dev_warn(&device->cdev->dev,
+					    "FORMAT 0 - Diagnostic of Special"
+					    " Command Violates File Mask\n");
+				break;
+			case 0x07:
+				dev_warn(&device->cdev->dev,
+					    "FORMAT 0 - Channel Returned with "
+					    "Incorrect retry CCW\n");
+				break;
+			case 0x08:
+				dev_warn(&device->cdev->dev,
+					    "FORMAT 0 - Reset Notification\n");
+				break;
+			case 0x09:
+				dev_warn(&device->cdev->dev,
+					 "FORMAT 0 - Storage Path Restart\n");
+				break;
+			case 0x0A:
+				dev_warn(&device->cdev->dev,
+					    "FORMAT 0 - Channel requested "
+					    "... %02x\n", sense[8]);
+				break;
+			case 0x0B:
+				dev_warn(&device->cdev->dev,
+					    "FORMAT 0 - Invalid Defective/"
+					    "Alternate Track Pointer\n");
+				break;
+			case 0x0C:
+				dev_warn(&device->cdev->dev,
+					    "FORMAT 0 - DPS Installation "
+					    "Check\n");
+				break;
+			case 0x0E:
+				dev_warn(&device->cdev->dev,
+					    "FORMAT 0 - Command Invalid on "
+					    "Secondary Address\n");
+				break;
+			case 0x0F:
+				dev_warn(&device->cdev->dev,
+					    "FORMAT 0 - Status Not As "
+					    "Required: reason %02x\n",
+					 sense[8]);
+				break;
+			default:
+				dev_warn(&device->cdev->dev,
+					    "FORMAT 0 - Reserved\n");
+			}
+		} else {
+			switch (msg_no) {
+			case 0x00:	/* No Message */
+				break;
+			case 0x01:
+				dev_warn(&device->cdev->dev,
+					 "FORMAT 0 - Device Error "
+					 "Source\n");
+				break;
+			case 0x02:
+				dev_warn(&device->cdev->dev,
+					    "FORMAT 0 - Reserved\n");
+				break;
+			case 0x03:
+				dev_warn(&device->cdev->dev,
+					    "FORMAT 0 - Device Fenced - "
+					    "device = %02x\n", sense[4]);
+				break;
+			case 0x04:
+				dev_warn(&device->cdev->dev,
+					    "FORMAT 0 - Data Pinned for "
+					    "Device\n");
+				break;
+			default:
+				dev_warn(&device->cdev->dev,
+					    "FORMAT 0 - Reserved\n");
+			}
+		}
+		break;
+
+	case 0x10:		/* Format 1 - Device Equipment Checks */
+		switch (msg_no) {
+		case 0x00:	/* No Message */
+			break;
+		case 0x01:
+			dev_warn(&device->cdev->dev,
+				    "FORMAT 1 - Device Status 1 not as "
+				    "expected\n");
+			break;
+		case 0x03:
+			dev_warn(&device->cdev->dev,
+				    "FORMAT 1 - Index missing\n");
+			break;
+		case 0x04:
+			dev_warn(&device->cdev->dev,
+				 "FORMAT 1 - Interruption cannot be "
+				 "reset\n");
+			break;
+		case 0x05:
+			dev_warn(&device->cdev->dev,
+				    "FORMAT 1 - Device did not respond to "
+				    "selection\n");
+			break;
+		case 0x06:
+			dev_warn(&device->cdev->dev,
+				    "FORMAT 1 - Device check-2 error or Set "
+				    "Sector is not complete\n");
+			break;
+		case 0x07:
+			dev_warn(&device->cdev->dev,
+				    "FORMAT 1 - Head address does not "
+				    "compare\n");
+			break;
+		case 0x08:
+			dev_warn(&device->cdev->dev,
+				    "FORMAT 1 - Device status 1 not valid\n");
+			break;
+		case 0x09:
+			dev_warn(&device->cdev->dev,
+				    "FORMAT 1 - Device not ready\n");
+			break;
+		case 0x0A:
+			dev_warn(&device->cdev->dev,
+				    "FORMAT 1 - Track physical address did "
+				    "not compare\n");
+			break;
+		case 0x0B:
+			dev_warn(&device->cdev->dev,
+				    "FORMAT 1 - Missing device address bit\n");
+			break;
+		case 0x0C:
+			dev_warn(&device->cdev->dev,
+				    "FORMAT 1 - Drive motor switch is off\n");
+			break;
+		case 0x0D:
+			dev_warn(&device->cdev->dev,
+				    "FORMAT 1 - Seek incomplete\n");
+			break;
+		case 0x0E:
+			dev_warn(&device->cdev->dev,
+				    "FORMAT 1 - Cylinder address did not "
+				    "compare\n");
+			break;
+		case 0x0F:
+			dev_warn(&device->cdev->dev,
+				    "FORMAT 1 - Offset active cannot be "
+				    "reset\n");
+			break;
+		default:
+			dev_warn(&device->cdev->dev,
+				    "FORMAT 1 - Reserved\n");
+		}
+		break;
+
+	case 0x20:		/* Format 2 - 3990 Equipment Checks */
+		switch (msg_no) {
+		case 0x08:
+			dev_warn(&device->cdev->dev,
+				    "FORMAT 2 - 3990 check-2 error\n");
+			break;
+		case 0x0E:
+			dev_warn(&device->cdev->dev,
+				    "FORMAT 2 - Support facility errors\n");
+			break;
+		case 0x0F:
+			dev_warn(&device->cdev->dev,
+				 "FORMAT 2 - Microcode detected error "
+				 "%02x\n",
+				 sense[8]);
+			break;
+		default:
+			dev_warn(&device->cdev->dev,
+				    "FORMAT 2 - Reserved\n");
+		}
+		break;
+
+	case 0x30:		/* Format 3 - 3990 Control Checks */
+		switch (msg_no) {
+		case 0x0F:
+			dev_warn(&device->cdev->dev,
+				    "FORMAT 3 - Allegiance terminated\n");
+			break;
+		default:
+			dev_warn(&device->cdev->dev,
+				    "FORMAT 3 - Reserved\n");
+		}
+		break;
+
+	case 0x40:		/* Format 4 - Data Checks */
+		switch (msg_no) {
+		case 0x00:
+			dev_warn(&device->cdev->dev,
+				    "FORMAT 4 - Home address area error\n");
+			break;
+		case 0x01:
+			dev_warn(&device->cdev->dev,
+				    "FORMAT 4 - Count area error\n");
+			break;
+		case 0x02:
+			dev_warn(&device->cdev->dev,
+				    "FORMAT 4 - Key area error\n");
+			break;
+		case 0x03:
+			dev_warn(&device->cdev->dev,
+				    "FORMAT 4 - Data area error\n");
+			break;
+		case 0x04:
+			dev_warn(&device->cdev->dev,
+				    "FORMAT 4 - No sync byte in home address "
+				    "area\n");
+			break;
+		case 0x05:
+			dev_warn(&device->cdev->dev,
+				    "FORMAT 4 - No sync byte in count address "
+				    "area\n");
+			break;
+		case 0x06:
+			dev_warn(&device->cdev->dev,
+				    "FORMAT 4 - No sync byte in key area\n");
+			break;
+		case 0x07:
+			dev_warn(&device->cdev->dev,
+				    "FORMAT 4 - No sync byte in data area\n");
+			break;
+		case 0x08:
+			dev_warn(&device->cdev->dev,
+				    "FORMAT 4 - Home address area error; "
+				    "offset active\n");
+			break;
+		case 0x09:
+			dev_warn(&device->cdev->dev,
+				    "FORMAT 4 - Count area error; offset "
+				    "active\n");
+			break;
+		case 0x0A:
+			dev_warn(&device->cdev->dev,
+				    "FORMAT 4 - Key area error; offset "
+				    "active\n");
+			break;
+		case 0x0B:
+			dev_warn(&device->cdev->dev,
+				    "FORMAT 4 - Data area error; "
+				    "offset active\n");
+			break;
+		case 0x0C:
+			dev_warn(&device->cdev->dev,
+				    "FORMAT 4 - No sync byte in home "
+				    "address area; offset active\n");
+			break;
+		case 0x0D:
+			dev_warn(&device->cdev->dev,
+				    "FORMAT 4 - No sync byte in count "
+				    "address area; offset active\n");
+			break;
+		case 0x0E:
+			dev_warn(&device->cdev->dev,
+				    "FORMAT 4 - No sync byte in key area; "
+				    "offset active\n");
+			break;
+		case 0x0F:
+			dev_warn(&device->cdev->dev,
+				    "FORMAT 4 - No sync byte in data area; "
+				    "offset active\n");
+			break;
+		default:
+			dev_warn(&device->cdev->dev,
+				    "FORMAT 4 - Reserved\n");
+		}
+		break;
+
+	case 0x50:  /* Format 5 - Data Check with displacement information */
+		switch (msg_no) {
+		case 0x00:
+			dev_warn(&device->cdev->dev,
+				    "FORMAT 5 - Data Check in the "
+				    "home address area\n");
+			break;
+		case 0x01:
+			dev_warn(&device->cdev->dev,
+				 "FORMAT 5 - Data Check in the count "
+				 "area\n");
+			break;
+		case 0x02:
+			dev_warn(&device->cdev->dev,
+				    "FORMAT 5 - Data Check in the key area\n");
+			break;
+		case 0x03:
+			dev_warn(&device->cdev->dev,
+				 "FORMAT 5 - Data Check in the data "
+				 "area\n");
+			break;
+		case 0x08:
+			dev_warn(&device->cdev->dev,
+				    "FORMAT 5 - Data Check in the "
+				    "home address area; offset active\n");
+			break;
+		case 0x09:
+			dev_warn(&device->cdev->dev,
+				    "FORMAT 5 - Data Check in the count area; "
+				    "offset active\n");
+			break;
+		case 0x0A:
+			dev_warn(&device->cdev->dev,
+				    "FORMAT 5 - Data Check in the key area; "
+				    "offset active\n");
+			break;
+		case 0x0B:
+			dev_warn(&device->cdev->dev,
+				    "FORMAT 5 - Data Check in the data area; "
+				    "offset active\n");
+			break;
+		default:
+			dev_warn(&device->cdev->dev,
+				    "FORMAT 5 - Reserved\n");
+		}
+		break;
+
+	case 0x60:  /* Format 6 - Usage Statistics/Overrun Errors */
+		switch (msg_no) {
+		case 0x00:
+			dev_warn(&device->cdev->dev,
+				    "FORMAT 6 - Overrun on channel A\n");
+			break;
+		case 0x01:
+			dev_warn(&device->cdev->dev,
+				    "FORMAT 6 - Overrun on channel B\n");
+			break;
+		case 0x02:
+			dev_warn(&device->cdev->dev,
+				    "FORMAT 6 - Overrun on channel C\n");
+			break;
+		case 0x03:
+			dev_warn(&device->cdev->dev,
+				    "FORMAT 6 - Overrun on channel D\n");
+			break;
+		case 0x04:
+			dev_warn(&device->cdev->dev,
+				    "FORMAT 6 - Overrun on channel E\n");
+			break;
+		case 0x05:
+			dev_warn(&device->cdev->dev,
+				    "FORMAT 6 - Overrun on channel F\n");
+			break;
+		case 0x06:
+			dev_warn(&device->cdev->dev,
+				    "FORMAT 6 - Overrun on channel G\n");
+			break;
+		case 0x07:
+			dev_warn(&device->cdev->dev,
+				    "FORMAT 6 - Overrun on channel H\n");
+			break;
+		default:
+			dev_warn(&device->cdev->dev,
+				    "FORMAT 6 - Reserved\n");
+		}
+		break;
+
+	case 0x70:  /* Format 7 - Device Connection Control Checks */
+		switch (msg_no) {
+		case 0x00:
+			dev_warn(&device->cdev->dev,
+				    "FORMAT 7 - RCC initiated by a connection "
+				    "check alert\n");
+			break;
+		case 0x01:
+			dev_warn(&device->cdev->dev,
+				    "FORMAT 7 - RCC 1 sequence not "
+				    "successful\n");
+			break;
+		case 0x02:
+			dev_warn(&device->cdev->dev,
+				    "FORMAT 7 - RCC 1 and RCC 2 sequences not "
+				    "successful\n");
+			break;
+		case 0x03:
+			dev_warn(&device->cdev->dev,
+				    "FORMAT 7 - Invalid tag-in during "
+				    "selection sequence\n");
+			break;
+		case 0x04:
+			dev_warn(&device->cdev->dev,
+				    "FORMAT 7 - extra RCC required\n");
+			break;
+		case 0x05:
+			dev_warn(&device->cdev->dev,
+				    "FORMAT 7 - Invalid DCC selection "
+				    "response or timeout\n");
+			break;
+		case 0x06:
+			dev_warn(&device->cdev->dev,
+				    "FORMAT 7 - Missing end operation; device "
+				    "transfer complete\n");
+			break;
+		case 0x07:
+			dev_warn(&device->cdev->dev,
+				    "FORMAT 7 - Missing end operation; device "
+				    "transfer incomplete\n");
+			break;
+		case 0x08:
+			dev_warn(&device->cdev->dev,
+				    "FORMAT 7 - Invalid tag-in for an "
+				    "immediate command sequence\n");
+			break;
+		case 0x09:
+			dev_warn(&device->cdev->dev,
+				    "FORMAT 7 - Invalid tag-in for an "
+				    "extended command sequence\n");
+			break;
+		case 0x0A:
+			dev_warn(&device->cdev->dev,
+				    "FORMAT 7 - 3990 microcode time out when "
+				    "stopping selection\n");
+			break;
+		case 0x0B:
+			dev_warn(&device->cdev->dev,
+				    "FORMAT 7 - No response to selection "
+				    "after a poll interruption\n");
+			break;
+		case 0x0C:
+			dev_warn(&device->cdev->dev,
+				    "FORMAT 7 - Permanent path error (DASD "
+				    "controller not available)\n");
+			break;
+		case 0x0D:
+			dev_warn(&device->cdev->dev,
+				    "FORMAT 7 - DASD controller not available"
+				    " on disconnected command chain\n");
+			break;
+		default:
+			dev_warn(&device->cdev->dev,
+				    "FORMAT 7 - Reserved\n");
+		}
+		break;
+
+	case 0x80:  /* Format 8 - Additional Device Equipment Checks */
+		switch (msg_no) {
+		case 0x00:	/* No Message */
+		case 0x01:
+			dev_warn(&device->cdev->dev,
+				    "FORMAT 8 - Error correction code "
+				    "hardware fault\n");
+			break;
+		case 0x03:
+			dev_warn(&device->cdev->dev,
+				    "FORMAT 8 - Unexpected end operation "
+				    "response code\n");
+			break;
+		case 0x04:
+			dev_warn(&device->cdev->dev,
+				    "FORMAT 8 - End operation with transfer "
+				    "count not zero\n");
+			break;
+		case 0x05:
+			dev_warn(&device->cdev->dev,
+				    "FORMAT 8 - End operation with transfer "
+				    "count zero\n");
+			break;
+		case 0x06:
+			dev_warn(&device->cdev->dev,
+				    "FORMAT 8 - DPS checks after a system "
+				    "reset or selective reset\n");
+			break;
+		case 0x07:
+			dev_warn(&device->cdev->dev,
+				    "FORMAT 8 - DPS cannot be filled\n");
+			break;
+		case 0x08:
+			dev_warn(&device->cdev->dev,
+				    "FORMAT 8 - Short busy time-out during "
+				    "device selection\n");
+			break;
+		case 0x09:
+			dev_warn(&device->cdev->dev,
+				    "FORMAT 8 - DASD controller failed to "
+				    "set or reset the long busy latch\n");
+			break;
+		case 0x0A:
+			dev_warn(&device->cdev->dev,
+				    "FORMAT 8 - No interruption from device "
+				    "during a command chain\n");
+			break;
+		default:
+			dev_warn(&device->cdev->dev,
+				    "FORMAT 8 - Reserved\n");
+		}
+		break;
+
+	case 0x90:  /* Format 9 - Device Read, Write, and Seek Checks */
+		switch (msg_no) {
+		case 0x00:
+			break;	/* No Message */
+		case 0x06:
+			dev_warn(&device->cdev->dev,
+				    "FORMAT 9 - Device check-2 error\n");
+			break;
+		case 0x07:
+			dev_warn(&device->cdev->dev,
+				 "FORMAT 9 - Head address did not "
+				 "compare\n");
+			break;
+		case 0x0A:
+			dev_warn(&device->cdev->dev,
+				    "FORMAT 9 - Track physical address did "
+				    "not compare while oriented\n");
+			break;
+		case 0x0E:
+			dev_warn(&device->cdev->dev,
+				    "FORMAT 9 - Cylinder address did not "
+				    "compare\n");
+			break;
+		default:
+			dev_warn(&device->cdev->dev,
+				    "FORMAT 9 - Reserved\n");
+		}
+		break;
+
+	case 0xF0:		/* Format F - Cache Storage Checks */
+		switch (msg_no) {
+		case 0x00:
+			dev_warn(&device->cdev->dev,
+				    "FORMAT F - Operation Terminated\n");
+			break;
+		case 0x01:
+			dev_warn(&device->cdev->dev,
+				    "FORMAT F - Subsystem Processing Error\n");
+			break;
+		case 0x02:
+			dev_warn(&device->cdev->dev,
+				    "FORMAT F - Cache or nonvolatile storage "
+				    "equipment failure\n");
+			break;
+		case 0x04:
+			dev_warn(&device->cdev->dev,
+				    "FORMAT F - Caching terminated\n");
+			break;
+		case 0x06:
+			dev_warn(&device->cdev->dev,
+				    "FORMAT F - Cache fast write access not "
+				    "authorized\n");
+			break;
+		case 0x07:
+			dev_warn(&device->cdev->dev,
+				    "FORMAT F - Track format incorrect\n");
+			break;
+		case 0x09:
+			dev_warn(&device->cdev->dev,
+				    "FORMAT F - Caching reinitiated\n");
+			break;
+		case 0x0A:
+			dev_warn(&device->cdev->dev,
+				    "FORMAT F - Nonvolatile storage "
+				    "terminated\n");
+			break;
+		case 0x0B:
+			dev_warn(&device->cdev->dev,
+				    "FORMAT F - Volume is suspended duplex\n");
+			/* call extended error reporting (EER) */
+			dasd_eer_write(device, erp->refers,
+				       DASD_EER_PPRCSUSPEND);
+			break;
+		case 0x0C:
+			dev_warn(&device->cdev->dev,
+				    "FORMAT F - Subsystem status cannot be "
+				    "determined\n");
+			break;
+		case 0x0D:
+			dev_warn(&device->cdev->dev,
+				    "FORMAT F - Caching status reset to "
+				    "default\n");
+			break;
+		case 0x0E:
+			dev_warn(&device->cdev->dev,
+				    "FORMAT F - DASD Fast Write inhibited\n");
+			break;
+		default:
+			dev_warn(&device->cdev->dev,
+				    "FORMAT F - Reserved\n");
+		}
+		break;
+
+	default:	/* unknown message format - should not happen;
+			   internal error 03 - unknown message format */
+		snprintf(errorstring, ERRORLENGTH, "03 %02x", msg_format);
+		dev_err(&device->cdev->dev,
+			 "An error occurred in the DASD device driver, "
+			 "reason=%s\n", errorstring);
+		break;
+	}			/* end switch message format */
+
+}				/* end dasd_3990_handle_env_data */
+
+/*
+ * DASD_3990_ERP_COM_REJ
+ *
+ * DESCRIPTION
+ *   Handles 24 byte 'Command Reject' error.
+ *
+ * PARAMETER
+ *   erp		current erp_head
+ *   sense		current sense data
+ *
+ * RETURN VALUES
+ *   erp		'new' erp_head - pointer to new ERP
+ */
+static struct dasd_ccw_req *
+dasd_3990_erp_com_rej(struct dasd_ccw_req * erp, char *sense)
+{
+
+	struct dasd_device *device = erp->startdev;
+
+	erp->function = dasd_3990_erp_com_rej;
+
+	/* env data present (ACTION 10 - retry should work) */
+	if (sense[2] & SNS2_ENV_DATA_PRESENT) {
+
+		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
+			    "Command Reject - environmental data present");
+
+		dasd_3990_handle_env_data(erp, sense);
+
+		erp->retries = 5;
+
+	} else if (sense[1] & SNS1_WRITE_INHIBITED) {
+		dev_err(&device->cdev->dev, "An I/O request was rejected"
+			" because writing is inhibited\n");
+		erp = dasd_3990_erp_cleanup(erp, DASD_CQR_FAILED);
+	} else {
+		/* fatal error -  set status to FAILED
+		   internal error 09 - Command Reject */
+		if (!test_bit(DASD_CQR_SUPPRESS_CR, &erp->flags))
+			dev_err(&device->cdev->dev,
+				"An error occurred in the DASD device driver, reason=09\n");
+
+		erp = dasd_3990_erp_cleanup(erp, DASD_CQR_FAILED);
+	}
+
+	return erp;
+
+}				/* end dasd_3990_erp_com_rej */
+
+/*
+ * DASD_3990_ERP_BUS_OUT
+ *
+ * DESCRIPTION
+ *   Handles 24 byte 'Bus Out Parity Check' error.
+ *
+ * PARAMETER
+ *   erp		current erp_head
+ * RETURN VALUES
+ *   erp		new erp_head - pointer to new ERP
+ */
+static struct dasd_ccw_req *
+dasd_3990_erp_bus_out(struct dasd_ccw_req * erp)
+{
+
+	struct dasd_device *device = erp->startdev;
+
+	/* first time set initial retry counter and erp_function */
+	/* and retry once without blocking queue		 */
+	/* (this enables easier enqueueing of the cqr)		 */
+	if (erp->function != dasd_3990_erp_bus_out) {
+		erp->retries = 256;
+		erp->function = dasd_3990_erp_bus_out;
+
+	} else {
+
+		/* issue a message and wait for 'device ready' interrupt */
+		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
+			    "bus out parity error or BOPC requested by "
+			    "channel");
+
+		dasd_3990_erp_block_queue(erp, 60*HZ);
+
+	}
+
+	return erp;
+
+}				/* end dasd_3990_erp_bus_out */
+
+/*
+ * DASD_3990_ERP_EQUIP_CHECK
+ *
+ * DESCRIPTION
+ *   Handles 24 byte 'Equipment Check' error.
+ *
+ * PARAMETER
+ *   erp		current erp_head
+ * RETURN VALUES
+ *   erp		new erp_head - pointer to new ERP
+ */
+static struct dasd_ccw_req *
+dasd_3990_erp_equip_check(struct dasd_ccw_req * erp, char *sense)
+{
+
+	struct dasd_device *device = erp->startdev;
+
+	erp->function = dasd_3990_erp_equip_check;
+
+	if (sense[1] & SNS1_WRITE_INHIBITED) {
+		dev_info(&device->cdev->dev,
+			    "Write inhibited path encountered\n");
+
+		/* vary path offline
+		   internal error 04 - Path should be varied off-line. */
+		dev_err(&device->cdev->dev, "An error occurred in the DASD "
+			"device driver, reason=%s\n", "04");
+
+		erp = dasd_3990_erp_action_1(erp);
+
+	} else if (sense[2] & SNS2_ENV_DATA_PRESENT) {
+
+		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
+			    "Equipment Check - environmental data present");
+
+		dasd_3990_handle_env_data(erp, sense);
+
+		erp = dasd_3990_erp_action_4(erp, sense);
+
+	} else if (sense[1] & SNS1_PERM_ERR) {
+
+		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
+			    "Equipment Check - retry exhausted or "
+			    "undesirable");
+
+		erp = dasd_3990_erp_action_1(erp);
+
+	} else {
+		/* all other equipment checks - Action 5 */
+		/* rest is done when retries == 0 */
+		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
+			    "Equipment check or processing error");
+
+		erp = dasd_3990_erp_action_5(erp);
+	}
+	return erp;
+
+}				/* end dasd_3990_erp_equip_check */
+
+/*
+ * DASD_3990_ERP_DATA_CHECK
+ *
+ * DESCRIPTION
+ *   Handles 24 byte 'Data Check' error.
+ *
+ * PARAMETER
+ *   erp		current erp_head
+ * RETURN VALUES
+ *   erp		new erp_head - pointer to new ERP
+ */
+static struct dasd_ccw_req *
+dasd_3990_erp_data_check(struct dasd_ccw_req * erp, char *sense)
+{
+
+	struct dasd_device *device = erp->startdev;
+
+	erp->function = dasd_3990_erp_data_check;
+
+	if (sense[2] & SNS2_CORRECTABLE) {	/* correctable data check */
+
+		/* issue message that the data has been corrected */
+		dev_emerg(&device->cdev->dev,
+			    "Data recovered during retry with PCI "
+			    "fetch mode active\n");
+
+		/* not possible to handle this situation in Linux */
+		panic("No way to inform application about the possibly "
+		      "incorrect data");
+
+	} else if (sense[2] & SNS2_ENV_DATA_PRESENT) {
+
+		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
+			    "Uncorrectable data check recovered secondary "
+			    "addr of duplex pair");
+
+		erp = dasd_3990_erp_action_4(erp, sense);
+
+	} else if (sense[1] & SNS1_PERM_ERR) {
+
+		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
+			    "Uncorrectable data check with internal "
+			    "retry exhausted");
+
+		erp = dasd_3990_erp_action_1(erp);
+
+	} else {
+		/* all other data checks */
+		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
+			    "Uncorrectable data check with retry count "
+			    "exhausted...");
+
+		erp = dasd_3990_erp_action_5(erp);
+	}
+
+	return erp;
+
+}				/* end dasd_3990_erp_data_check */
+
+/*
+ * DASD_3990_ERP_OVERRUN
+ *
+ * DESCRIPTION
+ *   Handles 24 byte 'Overrun' error.
+ *
+ * PARAMETER
+ *   erp		current erp_head
+ * RETURN VALUES
+ *   erp		new erp_head - pointer to new ERP
+ */
+static struct dasd_ccw_req *
+dasd_3990_erp_overrun(struct dasd_ccw_req * erp, char *sense)
+{
+
+	struct dasd_device *device = erp->startdev;
+
+	erp->function = dasd_3990_erp_overrun;
+
+	DBF_DEV_EVENT(DBF_WARNING, device, "%s",
+		    "Overrun - service overrun or overrun"
+		    " error requested by channel");
+
+	erp = dasd_3990_erp_action_5(erp);
+
+	return erp;
+
+}				/* end dasd_3990_erp_overrun */
+
+/*
+ * DASD_3990_ERP_INV_FORMAT
+ *
+ * DESCRIPTION
+ *   Handles 24 byte 'Invalid Track Format' error.
+ *
+ * PARAMETER
+ *   erp		current erp_head
+ * RETURN VALUES
+ *   erp		new erp_head - pointer to new ERP
+ */
+static struct dasd_ccw_req *
+dasd_3990_erp_inv_format(struct dasd_ccw_req * erp, char *sense)
+{
+
+	struct dasd_device *device = erp->startdev;
+
+	erp->function = dasd_3990_erp_inv_format;
+
+	if (sense[2] & SNS2_ENV_DATA_PRESENT) {
+
+		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
+			    "Track format error when destaging or "
+			    "staging data");
+
+		dasd_3990_handle_env_data(erp, sense);
+
+		erp = dasd_3990_erp_action_4(erp, sense);
+
+	} else {
+		/* internal error 06 - The track format is not valid */
+		dev_err(&device->cdev->dev,
+			"An error occurred in the DASD device driver, "
+			"reason=%s\n", "06");
+
+		erp = dasd_3990_erp_cleanup(erp, DASD_CQR_FAILED);
+	}
+
+	return erp;
+
+}				/* end dasd_3990_erp_inv_format */
+
+/*
+ * DASD_3990_ERP_EOC
+ *
+ * DESCRIPTION
+ *   Handles 24 byte 'End-of-Cylinder' error.
+ *
+ * PARAMETER
+ *   erp		already added default erp
+ * RETURN VALUES
+ *   erp		pointer to original (failed) cqr.
+ */
+static struct dasd_ccw_req *
+dasd_3990_erp_EOC(struct dasd_ccw_req * default_erp, char *sense)
+{
+
+	struct dasd_device *device = default_erp->startdev;
+
+	dev_err(&device->cdev->dev,
+		"The cylinder data for accessing the DASD is inconsistent\n");
+
+	/* implement action 7 - BUG */
+	return dasd_3990_erp_cleanup(default_erp, DASD_CQR_FAILED);
+
+}				/* end dasd_3990_erp_EOC */
+
+/*
+ * DASD_3990_ERP_ENV_DATA
+ *
+ * DESCRIPTION
+ *   Handles 24 byte 'Environmental-Data Present' error.
+ *
+ * PARAMETER
+ *   erp		current erp_head
+ * RETURN VALUES
+ *   erp		new erp_head - pointer to new ERP
+ */
+static struct dasd_ccw_req *
+dasd_3990_erp_env_data(struct dasd_ccw_req * erp, char *sense)
+{
+
+	struct dasd_device *device = erp->startdev;
+
+	erp->function = dasd_3990_erp_env_data;
+
+	DBF_DEV_EVENT(DBF_WARNING, device, "%s", "Environmental data present");
+
+	dasd_3990_handle_env_data(erp, sense);
+
+	/* don't retry on disabled interface */
+	if (sense[7] != 0x0F) {
+		erp = dasd_3990_erp_action_4(erp, sense);
+	} else {
+		erp->status = DASD_CQR_FILLED;
+	}
+
+	return erp;
+
+}				/* end dasd_3990_erp_env_data */
+
+/*
+ * DASD_3990_ERP_NO_REC
+ *
+ * DESCRIPTION
+ *   Handles 24 byte 'No Record Found' error.
+ *
+ * PARAMETER
+ *   erp		already added default ERP
+ *
+ * RETURN VALUES
+ *   erp		new erp_head - pointer to new ERP
+ */
+static struct dasd_ccw_req *
+dasd_3990_erp_no_rec(struct dasd_ccw_req * default_erp, char *sense)
+{
+
+	struct dasd_device *device = default_erp->startdev;
+
+	/*
+	 * In some cases the 'No Record Found' error might be expected and
+	 * log messages shouldn't be written then.
+	 * Check if the corresponding suppress bit is set.
+	 */
+	if (!test_bit(DASD_CQR_SUPPRESS_NRF, &default_erp->flags))
+		dev_err(&device->cdev->dev,
+			"The specified record was not found\n");
+
+	return dasd_3990_erp_cleanup(default_erp, DASD_CQR_FAILED);
+
+}				/* end dasd_3990_erp_no_rec */
+
+/*
+ * DASD_3990_ERP_FILE_PROT
+ *
+ * DESCRIPTION
+ *   Handles 24 byte 'File Protected' error.
+ *   Note: Seek related recovery is not implemented because
+ *	   we don't use the seek command yet.
+ *
+ * PARAMETER
+ *   erp		current erp_head
+ * RETURN VALUES
+ *   erp		new erp_head - pointer to new ERP
+ */
+static struct dasd_ccw_req *
+dasd_3990_erp_file_prot(struct dasd_ccw_req * erp)
+{
+
+	struct dasd_device *device = erp->startdev;
+
+	/*
+	 * In some cases the 'File Protected' error might be expected and
+	 * log messages shouldn't be written then.
+	 * Check if the according suppress bit is set.
+	 * Check if the corresponding suppress bit is set.
+	if (!test_bit(DASD_CQR_SUPPRESS_FP, &erp->flags))
+		dev_err(&device->cdev->dev,
+			"Accessing the DASD failed because of a hardware error\n");
+
+	return dasd_3990_erp_cleanup(erp, DASD_CQR_FAILED);
+
+}				/* end dasd_3990_erp_file_prot */
+
+/*
+ * DASD_3990_ERP_INSPECT_ALIAS
+ *
+ * DESCRIPTION
+ *   Checks if the original request was started on an alias device.
+ *   If yes, it modifies the original and the erp request so that
+ *   the erp request can be started on a base device.
+ *
+ * PARAMETER
+ *   erp		pointer to the currently created default ERP
+ *
+ * RETURN VALUES
+ *   erp		pointer to the modified ERP, or NULL
+ */
+
+static struct dasd_ccw_req *dasd_3990_erp_inspect_alias(
+						struct dasd_ccw_req *erp)
+{
+	struct dasd_ccw_req *cqr = erp->refers;
+	char *sense;
+
+	if (cqr->block &&
+	    (cqr->block->base != cqr->startdev)) {
+
+		sense = dasd_get_sense(&erp->refers->irb);
+		/*
+		 * dynamic pav may have changed base alias mapping
+		 */
+		if (!test_bit(DASD_FLAG_OFFLINE, &cqr->startdev->flags) && sense
+		    && (sense[0] == 0x10) && (sense[7] == 0x0F)
+		    && (sense[8] == 0x67)) {
+			/*
+			 * remove device from alias handling to prevent new
+			 * requests from being scheduled on the
+			 * wrong alias device
+			 */
+			dasd_alias_remove_device(cqr->startdev);
+
+			/* schedule worker to reload device */
+			dasd_reload_device(cqr->startdev);
+		}
+
+		if (cqr->startdev->features & DASD_FEATURE_ERPLOG) {
+			DBF_DEV_EVENT(DBF_ERR, cqr->startdev,
+				    "ERP on alias device for request %p,"
+				    " recover on base device %s", cqr,
+				    dev_name(&cqr->block->base->cdev->dev));
+		}
+		dasd_eckd_reset_ccw_to_base_io(cqr);
+		erp->startdev = cqr->block->base;
+		erp->function = dasd_3990_erp_inspect_alias;
+		return erp;
+	} else
+		return NULL;
+}
+
+
+/*
+ * DASD_3990_ERP_INSPECT_24
+ *
+ * DESCRIPTION
+ *   Does a detailed inspection of the 24 byte sense data
+ *   and sets up a related error recovery action.
+ *
+ * PARAMETER
+ *   sense		sense data of the actual error
+ *   erp		pointer to the currently created default ERP
+ *
+ * RETURN VALUES
+ *   erp		pointer to the (additional) ERP
+ */
+static struct dasd_ccw_req *
+dasd_3990_erp_inspect_24(struct dasd_ccw_req * erp, char *sense)
+{
+
+	struct dasd_ccw_req *erp_filled = NULL;
+
+	/* Check sense for ....	   */
+	/* 'Command Reject'	   */
+	if ((erp_filled == NULL) && (sense[0] & SNS0_CMD_REJECT)) {
+		erp_filled = dasd_3990_erp_com_rej(erp, sense);
+	}
+	/* 'Intervention Required' */
+	if ((erp_filled == NULL) && (sense[0] & SNS0_INTERVENTION_REQ)) {
+		erp_filled = dasd_3990_erp_int_req(erp);
+	}
+	/* 'Bus Out Parity Check'  */
+	if ((erp_filled == NULL) && (sense[0] & SNS0_BUS_OUT_CHECK)) {
+		erp_filled = dasd_3990_erp_bus_out(erp);
+	}
+	/* 'Equipment Check'	   */
+	if ((erp_filled == NULL) && (sense[0] & SNS0_EQUIPMENT_CHECK)) {
+		erp_filled = dasd_3990_erp_equip_check(erp, sense);
+	}
+	/* 'Data Check'		   */
+	if ((erp_filled == NULL) && (sense[0] & SNS0_DATA_CHECK)) {
+		erp_filled = dasd_3990_erp_data_check(erp, sense);
+	}
+	/* 'Overrun'		   */
+	if ((erp_filled == NULL) && (sense[0] & SNS0_OVERRUN)) {
+		erp_filled = dasd_3990_erp_overrun(erp, sense);
+	}
+	/* 'Invalid Track Format'  */
+	if ((erp_filled == NULL) && (sense[1] & SNS1_INV_TRACK_FORMAT)) {
+		erp_filled = dasd_3990_erp_inv_format(erp, sense);
+	}
+	/* 'End-of-Cylinder'	   */
+	if ((erp_filled == NULL) && (sense[1] & SNS1_EOC)) {
+		erp_filled = dasd_3990_erp_EOC(erp, sense);
+	}
+	/* 'Environmental Data'	   */
+	if ((erp_filled == NULL) && (sense[2] & SNS2_ENV_DATA_PRESENT)) {
+		erp_filled = dasd_3990_erp_env_data(erp, sense);
+	}
+	/* 'No Record Found'	   */
+	if ((erp_filled == NULL) && (sense[1] & SNS1_NO_REC_FOUND)) {
+		erp_filled = dasd_3990_erp_no_rec(erp, sense);
+	}
+	/* 'File Protected'	   */
+	if ((erp_filled == NULL) && (sense[1] & SNS1_FILE_PROTECTED)) {
+		erp_filled = dasd_3990_erp_file_prot(erp);
+	}
+	/* other (unknown) error - do default ERP */
+	if (erp_filled == NULL) {
+
+		erp_filled = erp;
+	}
+
+	return erp_filled;
+
+}				/* END dasd_3990_erp_inspect_24 */
+
+/*
+ *****************************************************************************
+ * 32 byte sense ERP functions (only)
+ *****************************************************************************
+ */
+
+/*
+ * DASD_3990_ERPACTION_10_32
+ *
+ * DESCRIPTION
+ *   Handles 32 byte 'Action 10' of Single Program Action Codes.
+ *   Just retry and if retry doesn't work, return with error.
+ *
+ * PARAMETER
+ *   erp		current erp_head
+ *   sense		current sense data
+ * RETURN VALUES
+ *   erp		modified erp_head
+ */
+static struct dasd_ccw_req *
+dasd_3990_erp_action_10_32(struct dasd_ccw_req * erp, char *sense)
+{
+
+	struct dasd_device *device = erp->startdev;
+
+	erp->retries = 256;
+	erp->function = dasd_3990_erp_action_10_32;
+
+	DBF_DEV_EVENT(DBF_WARNING, device, "%s", "Perform logging requested");
+
+	return erp;
+
+}				/* end dasd_3990_erp_action_10_32 */
+
+/*
+ * DASD_3990_ERP_ACTION_1B_32
+ *
+ * DESCRIPTION
+ *   Handles 32 byte 'Action 1B' of Single Program Action Codes.
+ *   A write operation could not be finished because of an unexpected
+ *   condition.
+ *   The already created 'default erp' is used to get the link to
+ *   the erp chain, but it can not be used for this recovery
+ *   action because it contains no DE/LO data space.
+ *
+ * PARAMETER
+ *   default_erp	already added default erp.
+ *   sense		current sense data
+ *
+ * RETURN VALUES
+ *   erp		new erp or
+ *			default_erp in case of imprecise ending or error
+ */
+static struct dasd_ccw_req *
+dasd_3990_erp_action_1B_32(struct dasd_ccw_req * default_erp, char *sense)
+{
+
+	struct dasd_device *device = default_erp->startdev;
+	__u32 cpa = 0;
+	struct dasd_ccw_req *cqr;
+	struct dasd_ccw_req *erp;
+	struct DE_eckd_data *DE_data;
+	struct PFX_eckd_data *PFX_data;
+	char *LO_data;		/* LO_eckd_data_t */
+	struct ccw1 *ccw, *oldccw;
+
+	DBF_DEV_EVENT(DBF_WARNING, device, "%s",
+		    "Write not finished because of unexpected condition");
+
+	default_erp->function = dasd_3990_erp_action_1B_32;
+
+	/* determine the original cqr */
+	cqr = default_erp;
+
+	while (cqr->refers != NULL) {
+		cqr = cqr->refers;
+	}
+
+	if (scsw_is_tm(&cqr->irb.scsw)) {
+		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
+			      "32 byte sense, action 1B is not defined"
+			      " in transport mode - just retry");
+		return default_erp;
+	}
+
+	/* for imprecise ending just do default erp */
+	if (sense[1] & 0x01) {
+		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
+			    "Imprecise ending is set - just retry");
+
+		return default_erp;
+	}
+
+	/* determine the address of the CCW to be restarted */
+	/* Imprecise ending is not set -> addr from IRB-SCSW */
+	cpa = default_erp->refers->irb.scsw.cmd.cpa;
+
+	if (cpa == 0) {
+		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
+			    "Unable to determine address of the CCW "
+			    "to be restarted");
+
+		return dasd_3990_erp_cleanup(default_erp, DASD_CQR_FAILED);
+	}
+
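+	/*
+	 * The replacement channel program re-establishes orientation with a
+	 * fresh Define Extent and Locate Record and then TICs back into the
+	 * original chain at the CCW that failed (cpa).
+	 */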
+	/* Build new ERP request including DE/LO */
+	erp = dasd_alloc_erp_request((char *) &cqr->magic,
+				     2 + 1, /* DE/LO + TIC */
+				     sizeof(struct DE_eckd_data) +
+				     sizeof(struct LO_eckd_data), device);
+
+	if (IS_ERR(erp)) {
+		/* internal error 01 - Unable to allocate ERP */
+		dev_err(&device->cdev->dev, "An error occurred in the DASD "
+			"device driver, reason=%s\n", "01");
+		return dasd_3990_erp_cleanup(default_erp, DASD_CQR_FAILED);
+	}
+
+	/* use original DE */
+	DE_data = erp->data;
+	oldccw = cqr->cpaddr;
+	if (oldccw->cmd_code == DASD_ECKD_CCW_PFX) {
+		PFX_data = cqr->data;
+		memcpy(DE_data, &PFX_data->define_extent,
+		       sizeof(struct DE_eckd_data));
+	} else
+		memcpy(DE_data, cqr->data, sizeof(struct DE_eckd_data));
+
+	/* create LO */
+	LO_data = erp->data + sizeof(struct DE_eckd_data);
+
+	if ((sense[3] == 0x01) && (LO_data[1] & 0x01)) {
+		/* should not happen */
+		return dasd_3990_erp_cleanup(default_erp, DASD_CQR_FAILED);
+	}
+
+	if ((sense[7] & 0x3F) == 0x01) {
+		/* operation code is WRITE DATA -> data area orientation */
+		LO_data[0] = 0x81;
+
+	} else if ((sense[7] & 0x3F) == 0x03) {
+		/* operation code is FORMAT WRITE -> index orientation */
+		LO_data[0] = 0xC3;
+
+	} else {
+		LO_data[0] = sense[7];	/* operation */
+	}
+
+	LO_data[1] = sense[8];	/* auxiliary */
+	LO_data[2] = sense[9];
+	LO_data[3] = sense[3];	/* count */
+	LO_data[4] = sense[29];	/* seek_addr.cyl */
+	LO_data[5] = sense[30];	/* seek_addr.cyl 2nd byte */
+	LO_data[7] = sense[31];	/* seek_addr.head 2nd byte */
+
+	memcpy(&(LO_data[8]), &(sense[11]), 8);
+
+	/* create DE ccw */
+	ccw = erp->cpaddr;
+	memset(ccw, 0, sizeof(struct ccw1));
+	ccw->cmd_code = DASD_ECKD_CCW_DEFINE_EXTENT;
+	ccw->flags = CCW_FLAG_CC;
+	ccw->count = 16;
+	ccw->cda = (__u32)(addr_t) DE_data;
+
+	/* create LO ccw */
+	ccw++;
+	memset(ccw, 0, sizeof(struct ccw1));
+	ccw->cmd_code = DASD_ECKD_CCW_LOCATE_RECORD;
+	ccw->flags = CCW_FLAG_CC;
+	ccw->count = 16;
+	ccw->cda = (__u32)(addr_t) LO_data;
+
+	/* TIC to the failed ccw */
+	ccw++;
+	ccw->cmd_code = CCW_CMD_TIC;
+	ccw->cda = cpa;
+
+	/* fill erp related fields */
+	erp->flags = default_erp->flags;
+	erp->function = dasd_3990_erp_action_1B_32;
+	erp->refers = default_erp->refers;
+	erp->startdev = device;
+	erp->memdev = device;
+	erp->magic = default_erp->magic;
+	erp->expires = default_erp->expires;
+	erp->retries = 256;
+	erp->buildclk = get_tod_clock();
+	erp->status = DASD_CQR_FILLED;
+
+	/* remove the default erp */
+	dasd_free_erp_request(default_erp, device);
+
+	return erp;
+
+}				/* end dasd_3990_erp_action_1B_32 */
+
+/*
+ * DASD_3990_UPDATE_1B
+ *
+ * DESCRIPTION
+ *   Handles the update to the 32 byte 'Action 1B' of Single Program
+ *   Action Codes in case the first action was not successful.
+ *   The already created 'previous_erp' is the currently not successful
+ *   ERP.
+ *
+ * PARAMETER
+ *   previous_erp	already created previous erp.
+ *   sense		current sense data
+ * RETURN VALUES
+ *   erp		modified erp
+ */
+static struct dasd_ccw_req *
+dasd_3990_update_1B(struct dasd_ccw_req * previous_erp, char *sense)
+{
+
+	struct dasd_device *device = previous_erp->startdev;
+	__u32 cpa = 0;
+	struct dasd_ccw_req *cqr;
+	struct dasd_ccw_req *erp;
+	char *LO_data;		/* struct LO_eckd_data */
+	struct ccw1 *ccw;
+
+	DBF_DEV_EVENT(DBF_WARNING, device, "%s",
+		    "Write not finished because of unexpected condition"
+		    " - follow on");
+
+	/* determine the original cqr */
+	cqr = previous_erp;
+
+	while (cqr->refers != NULL) {
+		cqr = cqr->refers;
+	}
+
+	if (scsw_is_tm(&cqr->irb.scsw)) {
+		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
+			      "32 byte sense, action 1B, update,"
+			      " in transport mode - just retry");
+		return previous_erp;
+	}
+
+	/* for imprecise ending just do default erp */
+	if (sense[1] & 0x01) {
+		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
+			    "Imprecise ending is set - just retry");
+
+		previous_erp->status = DASD_CQR_FILLED;
+
+		return previous_erp;
+	}
+
+	/* determine the address of the CCW to be restarted */
+	/* Imprecise ending is not set -> addr from IRB-SCSW */
+	cpa = previous_erp->irb.scsw.cmd.cpa;
+
+	if (cpa == 0) {
+		/* internal error 02 -
+		   Unable to determine address of the CCW to be restarted */
+		dev_err(&device->cdev->dev, "An error occurred in the DASD "
+			"device driver, reason=%s\n", "02");
+
+		previous_erp->status = DASD_CQR_FAILED;
+
+		return previous_erp;
+	}
+
+	erp = previous_erp;
+
+	/* update the LO with the new returned sense data  */
+	LO_data = erp->data + sizeof(struct DE_eckd_data);
+
+	if ((sense[3] == 0x01) && (LO_data[1] & 0x01)) {
+		/* should not happen */
+		previous_erp->status = DASD_CQR_FAILED;
+
+		return previous_erp;
+	}
+
+	if ((sense[7] & 0x3F) == 0x01) {
+		/* operation code is WRITE DATA -> data area orientation */
+		LO_data[0] = 0x81;
+
+	} else if ((sense[7] & 0x3F) == 0x03) {
+		/* operation code is FORMAT WRITE -> index orientation */
+		LO_data[0] = 0xC3;
+
+	} else {
+		LO_data[0] = sense[7];	/* operation */
+	}
+
+	LO_data[1] = sense[8];	/* auxiliary */
+	LO_data[2] = sense[9];
+	LO_data[3] = sense[3];	/* count */
+	LO_data[4] = sense[29];	/* seek_addr.cyl */
+	LO_data[5] = sense[30];	/* seek_addr.cyl 2nd byte */
+	LO_data[7] = sense[31];	/* seek_addr.head 2nd byte */
+
+	memcpy(&(LO_data[8]), &(sense[11]), 8);
+
+	/* TIC to the failed ccw */
+	ccw = erp->cpaddr;	/* addr of DE ccw */
+	ccw++;			/* addr of LO ccw */
+	ccw++;			/* addr of TIC ccw */
+	ccw->cda = cpa;
+
+	erp->status = DASD_CQR_FILLED;
+
+	return erp;
+
+}				/* end dasd_3990_update_1B */
+
+/*
+ * DASD_3990_ERP_COMPOUND_RETRY
+ *
+ * DESCRIPTION
+ *   Handles the compound ERP action retry code.
+ *   NOTE: At least one retry is done even if zero is specified
+ *	   by the sense data. This makes enqueueing of the request
+ *	   easier.
+ *
+ * PARAMETER
+ *   sense		sense data of the actual error
+ *   erp		pointer to the currently created ERP
+ *
+ * RETURN VALUES
+ *   erp		modified ERP pointer
+ *
+ */
+static void
+dasd_3990_erp_compound_retry(struct dasd_ccw_req * erp, char *sense)
+{
+
+	switch (sense[25] & 0x03) {
+	case 0x00:		/* do not retry */
+		erp->retries = 1;
+		break;
+
+	case 0x01:		/* retry 2 times */
+		erp->retries = 2;
+		break;
+
+	case 0x02:		/* retry 10 times */
+		erp->retries = 10;
+		break;
+
+	case 0x03:		/* retry 256 times */
+		erp->retries = 256;
+		break;
+
+	default:
+		BUG();
+	}
+
+	erp->function = dasd_3990_erp_compound_retry;
+
+}				/* end dasd_3990_erp_compound_retry */
+
+/*
+ * DASD_3990_ERP_COMPOUND_PATH
+ *
+ * DESCRIPTION
+ *   Handles the compound ERP action for retry on alternate
+ *   channel path.
+ *
+ * PARAMETER
+ *   sense		sense data of the actual error
+ *   erp		pointer to the currently created ERP
+ *
+ * RETURN VALUES
+ *   erp		modified ERP pointer
+ *
+ */
+static void
+dasd_3990_erp_compound_path(struct dasd_ccw_req * erp, char *sense)
+{
+	if (sense[25] & DASD_SENSE_BIT_3) {
+		dasd_3990_erp_alternate_path(erp);
+
+		if (erp->status == DASD_CQR_FAILED &&
+		    !test_bit(DASD_CQR_VERIFY_PATH, &erp->flags)) {
+			/* reset the lpm and the status to be able to
+			 * try further actions. */
+			erp->lpm = dasd_path_get_opm(erp->startdev);
+			erp->status = DASD_CQR_NEED_ERP;
+		}
+	}
+
+	erp->function = dasd_3990_erp_compound_path;
+
+}				/* end dasd_3990_erp_compound_path */
+
+/*
+ * DASD_3990_ERP_COMPOUND_CODE
+ *
+ * DESCRIPTION
+ *   Handles the compound ERP action for retry code.
+ *
+ * PARAMETER
+ *   sense		sense data of the actual error
+ *   erp		pointer to the currently created ERP
+ *
+ * RETURN VALUES
+ *   erp		NEW ERP pointer
+ *
+ */
+static struct dasd_ccw_req *
+dasd_3990_erp_compound_code(struct dasd_ccw_req * erp, char *sense)
+{
+
+	if (sense[25] & DASD_SENSE_BIT_2) {
+
+		switch (sense[28]) {
+		case 0x17:
+			/* issue a Diagnostic Control command with an
+			 * Inhibit Write subcommand and controller modifier */
+			erp = dasd_3990_erp_DCTL(erp, 0x20);
+			break;
+
+		case 0x25:
+			/* wait for 5 seconds and retry again */
+			erp->retries = 1;
+
+			dasd_3990_erp_block_queue(erp, 5 * HZ);
+			break;
+
+		default:
+			/* should not happen - continue */
+			break;
+		}
+	}
+
+	erp->function = dasd_3990_erp_compound_code;
+
+	return erp;
+
+}				/* end dasd_3990_erp_compound_code */
+
+/*
+ * DASD_3990_ERP_COMPOUND_CONFIG
+ *
+ * DESCRIPTION
+ *   Handles the compound ERP action for a configuration
+ *   dependent error.
+ *   Note: duplex handling is not implemented (yet).
+ *
+ * PARAMETER
+ *   sense		sense data of the actual error
+ *   erp		pointer to the currently created ERP
+ *
+ * RETURN VALUES
+ *   erp		modified ERP pointer
+ *
+ */
+static void
+dasd_3990_erp_compound_config(struct dasd_ccw_req * erp, char *sense)
+{
+
+	if ((sense[25] & DASD_SENSE_BIT_1) && (sense[26] & DASD_SENSE_BIT_2)) {
+
+		/* set to suspended duplex state then restart
+		   internal error 05 - Set device to suspended duplex state
+		   should be done */
+		struct dasd_device *device = erp->startdev;
+		dev_err(&device->cdev->dev,
+			"An error occurred in the DASD device driver, "
+			"reason=%s\n", "05");
+
+	}
+
+	erp->function = dasd_3990_erp_compound_config;
+
+}				/* end dasd_3990_erp_compound_config */
+
+/*
+ * DASD_3990_ERP_COMPOUND
+ *
+ * DESCRIPTION
+ *   Does the further compound program action if
+ *   compound retry was not successful.
+ *
+ * PARAMETER
+ *   sense		sense data of the actual error
+ *   erp		pointer to the current (failed) ERP
+ *
+ * RETURN VALUES
+ *   erp		(additional) ERP pointer
+ *
+ */
+static struct dasd_ccw_req *
+dasd_3990_erp_compound(struct dasd_ccw_req * erp, char *sense)
+{
+
+	if ((erp->function == dasd_3990_erp_compound_retry) &&
+	    (erp->status == DASD_CQR_NEED_ERP)) {
+
+		dasd_3990_erp_compound_path(erp, sense);
+	}
+
+	if ((erp->function == dasd_3990_erp_compound_path) &&
+	    (erp->status == DASD_CQR_NEED_ERP)) {
+
+		erp = dasd_3990_erp_compound_code(erp, sense);
+	}
+
+	if ((erp->function == dasd_3990_erp_compound_code) &&
+	    (erp->status == DASD_CQR_NEED_ERP)) {
+
+		dasd_3990_erp_compound_config(erp, sense);
+	}
+
+	/* if no compound action ERP specified, the request failed */
+	if (erp->status == DASD_CQR_NEED_ERP)
+		erp->status = DASD_CQR_FAILED;
+
+	return erp;
+
+}				/* end dasd_3990_erp_compound */
+
+/*
+ * DASD_3990_ERP_HANDLE_SIM
+ *
+ * DESCRIPTION
+ *   Inspects the SIM SENSE data and starts an appropriate action.
+ *
+ * PARAMETER
+ *   device		device the SIM sense data was received for
+ *   sense		sense data of the actual error
+ *
+ * RETURN VALUES
+ *   none
+ */
+void
+dasd_3990_erp_handle_sim(struct dasd_device *device, char *sense)
+{
+	/* print message according to log or message to operator mode */
+	if ((sense[24] & DASD_SIM_MSG_TO_OP) || (sense[1] & 0x10)) {
+		/* print SIM SRC from RefCode */
+		dev_err(&device->cdev->dev, "SIM - SRC: "
+			    "%02x%02x%02x%02x\n", sense[22],
+			    sense[23], sense[11], sense[12]);
+	} else if (sense[24] & DASD_SIM_LOG) {
+		/* print SIM SRC Refcode */
+		dev_warn(&device->cdev->dev, "log SIM - SRC: "
+			    "%02x%02x%02x%02x\n", sense[22],
+			    sense[23], sense[11], sense[12]);
+	}
+}
+
+/*
+ * DASD_3990_ERP_INSPECT_32
+ *
+ * DESCRIPTION
+ *   Does a detailed inspection of the 32 byte sense data
+ *   and sets up a related error recovery action.
+ *
+ * PARAMETER
+ *   sense		sense data of the actual error
+ *   erp		pointer to the currently created default ERP
+ *
+ * RETURN VALUES
+ *   erp_filled		pointer to the ERP
+ *
+ */
+static struct dasd_ccw_req *
+dasd_3990_erp_inspect_32(struct dasd_ccw_req * erp, char *sense)
+{
+
+	struct dasd_device *device = erp->startdev;
+
+	erp->function = dasd_3990_erp_inspect_32;
+
+	/* check for SIM sense data */
+	if ((sense[6] & DASD_SIM_SENSE) == DASD_SIM_SENSE)
+		dasd_3990_erp_handle_sim(device, sense);
+
+	if (sense[25] & DASD_SENSE_BIT_0) {
+
+		/* compound program action codes (byte25 bit 0 == '1') */
+		dasd_3990_erp_compound_retry(erp, sense);
+
+	} else {
+
+		/* single program action codes (byte25 bit 0 == '0') */
+		switch (sense[25]) {
+
+		case 0x00:	/* success - use default ERP for retries */
+			DBF_DEV_EVENT(DBF_DEBUG, device, "%s",
+				    "ERP called for successful request"
+				    " - just retry");
+			break;
+
+		case 0x01:	/* fatal error */
+			dev_err(&device->cdev->dev,
+				    "ERP failed for the DASD\n");
+
+			erp = dasd_3990_erp_cleanup(erp, DASD_CQR_FAILED);
+			break;
+
+		case 0x02:	/* intervention required */
+		case 0x03:	/* intervention required during dual copy */
+			erp = dasd_3990_erp_int_req(erp);
+			break;
+
+		case 0x0F:  /* length mismatch during update write command
+			       internal error 08 - update write command error */
+			dev_err(&device->cdev->dev, "An error occurred in the "
+				"DASD device driver, reason=%s\n", "08");
+
+			erp = dasd_3990_erp_cleanup(erp, DASD_CQR_FAILED);
+			break;
+
+		case 0x10:  /* logging required for other channel program */
+			erp = dasd_3990_erp_action_10_32(erp, sense);
+			break;
+
+		case 0x15:	/* next track outside defined extent
+				   internal error 07 - The next track is not
+				   within the defined storage extent */
+			dev_err(&device->cdev->dev,
+				"An error occurred in the DASD device driver, "
+				"reason=%s\n", "07");
+
+			erp = dasd_3990_erp_cleanup(erp, DASD_CQR_FAILED);
+			break;
+
+		case 0x1B:	/* unexpected condition during write */
+
+			erp = dasd_3990_erp_action_1B_32(erp, sense);
+			break;
+
+		case 0x1C:	/* invalid data */
+			dev_emerg(&device->cdev->dev,
+				    "Data recovered during retry with PCI "
+				    "fetch mode active\n");
+
+			/* not possible to handle this situation in Linux */
+			panic
+			    ("Invalid data - No way to inform application "
+			     "about the possibly incorrect data");
+			break;
+
+		case 0x1D:	/* state-change pending */
+			DBF_DEV_EVENT(DBF_WARNING, device, "%s",
+				    "A State change pending condition exists "
+				    "for the subsystem or device");
+
+			erp = dasd_3990_erp_action_4(erp, sense);
+			break;
+
+		case 0x1E:	/* busy */
+			DBF_DEV_EVENT(DBF_WARNING, device, "%s",
+				    "Busy condition exists "
+				    "for the subsystem or device");
+			erp = dasd_3990_erp_action_4(erp, sense);
+			break;
+
+		default:	/* all other errors - default erp  */
+			break;
+		}
+	}
+
+	return erp;
+
+}				/* end dasd_3990_erp_inspect_32 */
+
+static void dasd_3990_erp_disable_path(struct dasd_device *device, __u8 lpum)
+{
+	int pos = pathmask_to_pos(lpum);
+
+	if (!(device->features & DASD_FEATURE_PATH_AUTODISABLE)) {
+		dev_err(&device->cdev->dev,
+			"Path %x.%02x (pathmask %02x) is operational despite excessive IFCCs\n",
+			device->path[pos].cssid, device->path[pos].chpid, lpum);
+		goto out;
+	}
+
+	/* no remaining path, cannot disable */
+	if (!(dasd_path_get_opm(device) & ~lpum)) {
+		dev_err(&device->cdev->dev,
+			"Last path %x.%02x (pathmask %02x) is operational despite excessive IFCCs\n",
+			device->path[pos].cssid, device->path[pos].chpid, lpum);
+		goto out;
+	}
+
+	dev_err(&device->cdev->dev,
+		"Path %x.%02x (pathmask %02x) is disabled - IFCC threshold exceeded\n",
+		device->path[pos].cssid, device->path[pos].chpid, lpum);
+	dasd_path_remove_opm(device, lpum);
+	dasd_path_add_ifccpm(device, lpum);
+
+out:
+	device->path[pos].errorclk = 0;
+	atomic_set(&device->path[pos].error_count, 0);
+}
+
+static void dasd_3990_erp_account_error(struct dasd_ccw_req *erp)
+{
+	struct dasd_device *device = erp->startdev;
+	__u8 lpum = erp->refers->irb.esw.esw1.lpum;
+	int pos = pathmask_to_pos(lpum);
+	unsigned long clk;
+
+	if (!device->path_thrhld)
+		return;
+
+	clk = get_tod_clock();
+	/*
+	 * check if the last error occurred longer ago than the timeout;
+	 * if so, reset the error state
+	 */
+	if ((tod_to_ns(clk - device->path[pos].errorclk) / NSEC_PER_SEC)
+	    >= device->path_interval) {
+		atomic_set(&device->path[pos].error_count, 0);
+		device->path[pos].errorclk = 0;
+	}
+	atomic_inc(&device->path[pos].error_count);
+	device->path[pos].errorclk = clk;
+	/* threshold exceeded, disable path if possible */
+	if (atomic_read(&device->path[pos].error_count) >=
+	    device->path_thrhld)
+		dasd_3990_erp_disable_path(device, lpum);
+}
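+
+/*
+ * Editorial sketch of the accounting policy implemented above: a path is
+ * disabled once path_thrhld IFCC errors have accumulated, where the error
+ * counter is reset whenever more than path_interval seconds have passed
+ * since the previous error on that path. In other words, only bursts of
+ * closely spaced IFCCs trigger dasd_3990_erp_disable_path.
+ */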
+
+/*
+ *****************************************************************************
+ * main ERP control functions (24 and 32 byte sense)
+ *****************************************************************************
+ */
+
+/*
+ * DASD_3990_ERP_CONTROL_CHECK
+ *
+ * DESCRIPTION
+ *   Does a generic inspection if a control check occurred and sets up
+ *   the related error recovery procedure
+ *
+ * PARAMETER
+ *   erp		pointer to the currently created default ERP
+ *
+ * RETURN VALUES
+ *   erp_filled		pointer to the erp
+ */
+
+static struct dasd_ccw_req *
+dasd_3990_erp_control_check(struct dasd_ccw_req *erp)
+{
+	struct dasd_device *device = erp->startdev;
+
+	if (scsw_cstat(&erp->refers->irb.scsw) & (SCHN_STAT_INTF_CTRL_CHK
+					   | SCHN_STAT_CHN_CTRL_CHK)) {
+		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
+			    "channel or interface control check");
+		dasd_3990_erp_account_error(erp);
+		erp = dasd_3990_erp_action_4(erp, NULL);
+	}
+	return erp;
+}
+
+/*
+ * DASD_3990_ERP_INSPECT
+ *
+ * DESCRIPTION
+ *   Does a detailed inspection for sense data by calling either
+ *   the 24-byte or the 32-byte inspection routine.
+ *
+ * PARAMETER
+ *   erp		pointer to the currently created default ERP
+ * RETURN VALUES
+ *   erp_new		contents were possibly modified
+ */
+static struct dasd_ccw_req *
+dasd_3990_erp_inspect(struct dasd_ccw_req *erp)
+{
+
+	struct dasd_ccw_req *erp_new = NULL;
+	char *sense;
+
+	/* if this problem occurred on an alias device, retry on the base */
+	erp_new = dasd_3990_erp_inspect_alias(erp);
+	if (erp_new)
+		return erp_new;
+
+	/* sense data are located in the refers record of the
+	 * already set up new ERP !
+	 * check if concurrent sense is available
+	 */
+	sense = dasd_get_sense(&erp->refers->irb);
+	if (!sense)
+		erp_new = dasd_3990_erp_control_check(erp);
+	/* distinguish between 24 and 32 byte sense data */
+	else if (sense[27] & DASD_SENSE_BIT_0) {
+
+		/* inspect the 24 byte sense data */
+		erp_new = dasd_3990_erp_inspect_24(erp, sense);
+
+	} else {
+
+		/* inspect the 32 byte sense data */
+		erp_new = dasd_3990_erp_inspect_32(erp, sense);
+
+	}	/* end distinguish between 24 and 32 byte sense data */
+
+	return erp_new;
+}
+
+/*
+ * DASD_3990_ERP_ADD_ERP
+ *
+ * DESCRIPTION
+ *   This function adds an additional request block (ERP) to the head of
+ *   the given cqr (or erp).
+ *   For a command mode cqr the erp is initialized as a default erp
+ *   (retry TIC).
+ *   For transport mode we make a copy of the original TCW (points to
+ *   the original TCCB, TIDALs, etc.) but give it a fresh
+ *   TSB so the original sense data will not be changed.
+ *
+ * PARAMETER
+ *   cqr		head of the current ERP-chain (or single cqr if
+ *			first error)
+ * RETURN VALUES
+ *   erp		pointer to new ERP-chain head
+ */
+static struct dasd_ccw_req *dasd_3990_erp_add_erp(struct dasd_ccw_req *cqr)
+{
+
+	struct dasd_device *device = cqr->startdev;
+	struct ccw1 *ccw;
+	struct dasd_ccw_req *erp;
+	int cplength, datasize;
+	struct tcw *tcw;
+	struct tsb *tsb;
+
+	if (cqr->cpmode == 1) {
+		cplength = 0;
+		/* TCW needs to be 64 byte aligned, so leave enough room */
+		datasize = 64 + sizeof(struct tcw) + sizeof(struct tsb);
+	} else {
+		cplength = 2;
+		datasize = 0;
+	}
+
+	/* allocate additional request block */
+	erp = dasd_alloc_erp_request((char *) &cqr->magic,
+				     cplength, datasize, device);
+	if (IS_ERR(erp)) {
+		if (cqr->retries <= 0) {
+			DBF_DEV_EVENT(DBF_ERR, device, "%s",
+				    "Unable to allocate ERP request");
+			cqr->status = DASD_CQR_FAILED;
+			cqr->stopclk = get_tod_clock();
+		} else {
+			DBF_DEV_EVENT(DBF_ERR, device,
+				    "Unable to allocate ERP request "
+				    "(%i retries left)",
+				    cqr->retries);
+			dasd_block_set_timer(device->block, (HZ << 3));
+		}
+		return erp;
+	}
+
+	ccw = cqr->cpaddr;
+	if (cqr->cpmode == 1) {
+		/* make a shallow copy of the original tcw but set new tsb */
+		erp->cpmode = 1;
+		erp->cpaddr = PTR_ALIGN(erp->data, 64);
+		tcw = erp->cpaddr;
+		tsb = (struct tsb *) &tcw[1];
+		*tcw = *((struct tcw *)cqr->cpaddr);
+		tcw->tsb = (long)tsb;
+	} else if (ccw->cmd_code == DASD_ECKD_CCW_PSF) {
+		/* PSF cannot be chained from NOOP/TIC */
+		erp->cpaddr = cqr->cpaddr;
+	} else {
+		/* initialize request with default TIC to current ERP/CQR */
+		ccw = erp->cpaddr;
+		ccw->cmd_code = CCW_CMD_NOOP;
+		ccw->flags = CCW_FLAG_CC;
+		ccw++;
+		ccw->cmd_code = CCW_CMD_TIC;
+		ccw->cda      = (long)(cqr->cpaddr);
+	}
+
+	erp->flags = cqr->flags;
+	erp->function = dasd_3990_erp_add_erp;
+	erp->refers   = cqr;
+	erp->startdev = device;
+	erp->memdev   = device;
+	erp->block    = cqr->block;
+	erp->magic    = cqr->magic;
+	erp->expires  = cqr->expires;
+	erp->retries  = 256;
+	erp->buildclk = get_tod_clock();
+	erp->status = DASD_CQR_FILLED;
+
+	return erp;
+}
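+
+/*
+ * Editorial note on the command mode default ERP built above: the two CCW
+ * channel program is
+ *
+ *   NOOP (command chained) -> TIC -> original cqr->cpaddr
+ *
+ * so executing the ERP simply re-drives the original channel program; the
+ * retry counter of 256 bounds how often this default retry may run.
+ */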
+
+/*
+ * DASD_3990_ERP_ADDITIONAL_ERP
+ *
+ * DESCRIPTION
+ *   An additional ERP is needed to handle the current error.
+ *   Add a new ERP to the head of the ERP-chain; the specific ERP
+ *   processing is determined based on the sense data.
+ *
+ * PARAMETER
+ *   cqr		head of the current ERP-chain (or single cqr if
+ *			first error)
+ *
+ * RETURN VALUES
+ *   erp		pointer to new ERP-chain head
+ */
+static struct dasd_ccw_req *
+dasd_3990_erp_additional_erp(struct dasd_ccw_req * cqr)
+{
+
+	struct dasd_ccw_req *erp = NULL;
+
+	/* add erp and initialize with default TIC */
+	erp = dasd_3990_erp_add_erp(cqr);
+
+	if (IS_ERR(erp))
+		return erp;
+
+	/* inspect sense, determine specific ERP if possible */
+	if (erp != cqr) {
+
+		erp = dasd_3990_erp_inspect(erp);
+	}
+
+	return erp;
+
+}				/* end dasd_3990_erp_additional_erp */
+
+/*
+ * DASD_3990_ERP_ERROR_MATCH
+ *
+ * DESCRIPTION
+ *   Check if the device status of the two given cqrs is the same.
+ *   This means that the failed CCW and the relevant sense data
+ *   must match.
+ *   I don't distinguish between 24 and 32 byte sense because in case of
+ *   24 byte sense bytes 25 and 27 are set as well.
+ *
+ * PARAMETER
+ *   cqr1		first cqr, which will be compared with the
+ *   cqr2		second cqr.
+ *
+ * RETURN VALUES
+ *   match		'boolean' for match found
+ *			returns 1 if match found, otherwise 0.
+ */
+static int dasd_3990_erp_error_match(struct dasd_ccw_req *cqr1,
+				     struct dasd_ccw_req *cqr2)
+{
+	char *sense1, *sense2;
+
+	if (cqr1->startdev != cqr2->startdev)
+		return 0;
+
+	sense1 = dasd_get_sense(&cqr1->irb);
+	sense2 = dasd_get_sense(&cqr2->irb);
+
+	/* one request has sense data, the other not -> no match, return 0 */
+	if (!sense1 != !sense2)
+		return 0;
+	/* no sense data in both cases -> check cstat for IFCC */
+	if (!sense1 && !sense2)	{
+		if ((scsw_cstat(&cqr1->irb.scsw) & (SCHN_STAT_INTF_CTRL_CHK |
+						    SCHN_STAT_CHN_CTRL_CHK)) ==
+		    (scsw_cstat(&cqr2->irb.scsw) & (SCHN_STAT_INTF_CTRL_CHK |
+						    SCHN_STAT_CHN_CTRL_CHK)))
+			return 1; /* match with ifcc*/
+	}
+	/* check sense data; byte 0-2,25,27 */
+	if (!(sense1 && sense2 &&
+	      (memcmp(sense1, sense2, 3) == 0) &&
+	      (sense1[27] == sense2[27]) &&
+	      (sense1[25] == sense2[25]))) {
+
+		return 0;	/* sense doesn't match */
+	}
+
+	return 1;		/* match */
+
+}				/* end dasd_3990_erp_error_match */
+
+/*
+ * DASD_3990_ERP_IN_ERP
+ *
+ * DESCRIPTION
+ *   check if the current error already happened before.
+ *   quick exit if current cqr is not an ERP (cqr->refers=NULL)
+ *
+ * PARAMETER
+ *   cqr		failed cqr (either original cqr or already an erp)
+ *
+ * RETURN VALUES
+ *   erp		erp-pointer to the already defined error
+ *			recovery procedure OR
+ *			NULL if a 'new' error occurred.
+ */
+static struct dasd_ccw_req *
+dasd_3990_erp_in_erp(struct dasd_ccw_req *cqr)
+{
+
+	struct dasd_ccw_req *erp_head = cqr,	/* save erp chain head */
+	*erp_match = NULL;	/* save possible matching erp */
+	int match = 0;		/* 'boolean' for matching error found */
+
+	if (cqr->refers == NULL) {	/* return if not in erp */
+		return NULL;
+	}
+
+	/* check the erp/cqr chain for current error */
+	do {
+		match = dasd_3990_erp_error_match(erp_head, cqr->refers);
+		erp_match = cqr;	/* save possible matching erp  */
+		cqr = cqr->refers;	/* check next erp/cqr in queue */
+
+	} while ((cqr->refers != NULL) && (!match));
+
+	if (!match) {
+		return NULL;	/* no match was found */
+	}
+
+	return erp_match;	/* return address of matching erp */
+
+}				/* END dasd_3990_erp_in_erp */
+
+/*
+ * DASD_3990_ERP_FURTHER_ERP (24 & 32 byte sense)
+ *
+ * DESCRIPTION
+ *   No retry is left for the current ERP. Check what has to be done
+ *   with the ERP.
+ *     - do further defined ERP action or
+ *     - wait for interrupt or
+ *     - exit with permanent error
+ *
+ * PARAMETER
+ *   erp		ERP which is in progress with no retry left
+ *
+ * RETURN VALUES
+ *   erp		modified/additional ERP
+ */
+static struct dasd_ccw_req *
+dasd_3990_erp_further_erp(struct dasd_ccw_req *erp)
+{
+
+	struct dasd_device *device = erp->startdev;
+	char *sense = dasd_get_sense(&erp->irb);
+
+	/* check for 24 byte sense ERP */
+	if ((erp->function == dasd_3990_erp_bus_out) ||
+	    (erp->function == dasd_3990_erp_action_1) ||
+	    (erp->function == dasd_3990_erp_action_4)) {
+
+		erp = dasd_3990_erp_action_1(erp);
+
+	} else if (erp->function == dasd_3990_erp_action_1_sec) {
+		erp = dasd_3990_erp_action_1_sec(erp);
+	} else if (erp->function == dasd_3990_erp_action_5) {
+
+		/* retries have not been successful */
+		/* prepare erp for retry on different channel path */
+		erp = dasd_3990_erp_action_1(erp);
+
+		if (sense && !(sense[2] & DASD_SENSE_BIT_0)) {
+
+			/* issue a Diagnostic Control command with an
+			 * Inhibit Write subcommand */
+
+			switch (sense[25]) {
+			case 0x17:
+			case 0x57:{	/* controller */
+					erp = dasd_3990_erp_DCTL(erp, 0x20);
+					break;
+				}
+			case 0x18:
+			case 0x58:{	/* channel path */
+					erp = dasd_3990_erp_DCTL(erp, 0x40);
+					break;
+				}
+			case 0x19:
+			case 0x59:{	/* storage director */
+					erp = dasd_3990_erp_DCTL(erp, 0x80);
+					break;
+				}
+			default:
+				DBF_DEV_EVENT(DBF_WARNING, device,
+					    "invalid subcommand modifier 0x%x "
+					    "for Diagnostic Control Command",
+					    sense[25]);
+			}
+		}
+
+		/* check for 32 byte sense ERP */
+	} else if (sense &&
+		   ((erp->function == dasd_3990_erp_compound_retry) ||
+		    (erp->function == dasd_3990_erp_compound_path) ||
+		    (erp->function == dasd_3990_erp_compound_code) ||
+		    (erp->function == dasd_3990_erp_compound_config))) {
+
+		erp = dasd_3990_erp_compound(erp, sense);
+
+	} else {
+		/*
+		 * No retry left and no additional special handling
+		 * necessary
+		 */
+		dev_err(&device->cdev->dev,
+			"ERP %p has run out of retries and failed\n", erp);
+
+		erp->status = DASD_CQR_FAILED;
+	}
+
+	return erp;
+
+}				/* end dasd_3990_erp_further_erp */
+
+/*
+ * DASD_3990_ERP_HANDLE_MATCH_ERP
+ *
+ * DESCRIPTION
+ *   An error occurred again and an ERP has been detected which is already
+ *   used to handle this error (e.g. retries).
+ *   All prior ERPs are assumed to be successful and therefore removed
+ *   from queue.
+ *   If retry counter of matching erp is already 0, it is checked if further
+ *   action is needed (besides retry) or if the ERP has failed.
+ *
+ * PARAMETER
+ *   erp_head		first ERP in ERP-chain
+ *   erp		ERP that handles the actual error.
+ *			(matching erp)
+ *
+ * RETURN VALUES
+ *   erp		modified/additional ERP
+ */
+static struct dasd_ccw_req *
+dasd_3990_erp_handle_match_erp(struct dasd_ccw_req *erp_head,
+			       struct dasd_ccw_req *erp)
+{
+
+	struct dasd_device *device = erp_head->startdev;
+	struct dasd_ccw_req *erp_done = erp_head;	/* finished req */
+	struct dasd_ccw_req *erp_free = NULL;	/* req to be freed */
+
+	/* loop over successful ERPs and remove them from chanq */
+	while (erp_done != erp) {
+
+		if (erp_done == NULL)	/* end of chain reached */
+			panic(PRINTK_HEADER "Programming error in ERP! The "
+			      "original request was lost\n");
+
+		/* remove the request from the device queue */
+		list_del(&erp_done->blocklist);
+
+		erp_free = erp_done;
+		erp_done = erp_done->refers;
+
+		/* free the finished erp request */
+		dasd_free_erp_request(erp_free, erp_free->memdev);
+
+	}			/* end while */
+
+	if (erp->retries > 0) {
+
+		char *sense = dasd_get_sense(&erp->refers->irb);
+
+		/* check for special retries */
+		if (sense && erp->function == dasd_3990_erp_action_4) {
+
+			erp = dasd_3990_erp_action_4(erp, sense);
+
+		} else if (sense &&
+			   erp->function == dasd_3990_erp_action_1B_32) {
+
+			erp = dasd_3990_update_1B(erp, sense);
+
+		} else if (sense && erp->function == dasd_3990_erp_int_req) {
+
+			erp = dasd_3990_erp_int_req(erp);
+
+		} else {
+			/* simple retry	  */
+			DBF_DEV_EVENT(DBF_DEBUG, device,
+				    "%i retries left for erp %p",
+				    erp->retries, erp);
+
+			/* handle the request again... */
+			erp->status = DASD_CQR_FILLED;
+		}
+
+	} else {
+		/* no retry left - check for further necessary action	 */
+		/* if no further actions, handle rest as permanent error */
+		erp = dasd_3990_erp_further_erp(erp);
+	}
+
+	return erp;
+
+}				/* end dasd_3990_erp_handle_match_erp */
+
+/*
+ * DASD_3990_ERP_ACTION
+ *
+ * DESCRIPTION
+ *   control routine for 3990 erp actions.
+ *   Has to be called with the queue lock (namely the s390_irq_lock) acquired.
+ *
+ * PARAMETER
+ *   cqr		failed cqr (either original cqr or already an erp)
+ *
+ * RETURN VALUES
+ *   erp		erp-pointer to the head of the ERP action chain.
+ *			This means:
+ *			 - either a ptr to an additional ERP cqr or
+ *			 - the original given cqr (whose status might
+ *			   be modified)
+ */
+struct dasd_ccw_req *
+dasd_3990_erp_action(struct dasd_ccw_req * cqr)
+{
+	struct dasd_ccw_req *erp = NULL;
+	struct dasd_device *device = cqr->startdev;
+	struct dasd_ccw_req *temp_erp = NULL;
+
+	if (device->features & DASD_FEATURE_ERPLOG) {
+		/* print current erp_chain */
+		dev_err(&device->cdev->dev,
+			    "ERP chain at BEGINNING of ERP-ACTION\n");
+		for (temp_erp = cqr;
+		     temp_erp != NULL; temp_erp = temp_erp->refers) {
+
+			dev_err(&device->cdev->dev,
+				    "ERP %p (%02x) refers to %p\n",
+				    temp_erp, temp_erp->status,
+				    temp_erp->refers);
+		}
+	}
+
+	/* double-check if current erp/cqr was successful */
+	if ((scsw_cstat(&cqr->irb.scsw) == 0x00) &&
+	    (scsw_dstat(&cqr->irb.scsw) ==
+	     (DEV_STAT_CHN_END | DEV_STAT_DEV_END))) {
+
+		DBF_DEV_EVENT(DBF_DEBUG, device,
+			    "ERP called for successful request %p"
+			    " - NO ERP necessary", cqr);
+
+		cqr->status = DASD_CQR_DONE;
+
+		return cqr;
+	}
+
+	/* check if error happened before */
+	erp = dasd_3990_erp_in_erp(cqr);
+
+	if (erp == NULL) {
+		/* no matching erp found - set up erp */
+		erp = dasd_3990_erp_additional_erp(cqr);
+		if (IS_ERR(erp))
+			return erp;
+	} else {
+		/* matching erp found - set all leading erp's to DONE */
+		erp = dasd_3990_erp_handle_match_erp(cqr, erp);
+	}
+
+	/*
+	 * For path verification work we need to stick with the path that was
+	 * originally chosen so that the per path configuration data is
+	 * assigned correctly.
+	 */
+	if (test_bit(DASD_CQR_VERIFY_PATH, &erp->flags) && cqr->lpm) {
+		erp->lpm = cqr->lpm;
+	}
+
+	if (device->features & DASD_FEATURE_ERPLOG) {
+		/* print current erp_chain */
+		dev_err(&device->cdev->dev,
+			    "ERP chain at END of ERP-ACTION\n");
+		for (temp_erp = erp;
+		     temp_erp != NULL; temp_erp = temp_erp->refers) {
+
+			dev_err(&device->cdev->dev,
+				    "ERP %p (%02x) refers to %p\n",
+				    temp_erp, temp_erp->status,
+				    temp_erp->refers);
+		}
+	}
+
+	/* enqueue ERP request if it's a new one */
+	if (list_empty(&erp->blocklist)) {
+		cqr->status = DASD_CQR_IN_ERP;
+		/* add erp request before the cqr */
+		list_add_tail(&erp->blocklist, &cqr->blocklist);
+	}
+
+	return erp;
+
+}				/* end dasd_3990_erp_action */
diff --git a/drivers/s390/block/dasd_alias.c b/drivers/s390/block/dasd_alias.c
new file mode 100644
index 0000000..b9ce93e
--- /dev/null
+++ b/drivers/s390/block/dasd_alias.c
@@ -0,0 +1,948 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * PAV alias management for the DASD ECKD discipline
+ *
+ * Copyright IBM Corp. 2007
+ * Author(s): Stefan Weinhuber <wein@de.ibm.com>
+ */
+
+#define KMSG_COMPONENT "dasd-eckd"
+
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <asm/ebcdic.h>
+#include "dasd_int.h"
+#include "dasd_eckd.h"
+
+#ifdef PRINTK_HEADER
+#undef PRINTK_HEADER
+#endif				/* PRINTK_HEADER */
+#define PRINTK_HEADER "dasd(eckd):"
+
+
+/*
+ * General concept of alias management:
+ * - PAV and DASD alias management is specific to the eckd discipline.
+ * - A device is connected to an lcu as long as the device exists.
+ *   dasd_alias_make_device_known_to_lcu will be called when the
+ *   device is checked by the eckd discipline and
+ *   dasd_alias_disconnect_device_from_lcu will be called
+ *   before the device is deleted.
+ * - The dasd_alias_add_device / dasd_alias_remove_device
+ *   functions mark the point when a device is 'ready for service'.
+ * - A summary unit check is a rare occasion, but it is mandatory to
+ *   support it. It requires some complex recovery actions before the
+ *   devices can be used again (see dasd_alias_handle_summary_unit_check).
+ * - dasd_alias_get_start_dev will find an alias device that can be used
+ *   instead of the base device and does some (very simple) load balancing.
+ *   This is the function that gets called for each I/O, so when improving
+ *   something, this function should get faster or better; the rest just
+ *   has to be correct.
+ */
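+
+/*
+ * Editorial sketch (derived from the code below, not part of the original
+ * source) of the resulting data structure:
+ *
+ *   aliastree.serverlist
+ *     alias_server (one per storage server: uid vendor + serial)
+ *       server->lculist
+ *         alias_lcu (one per logical control unit: uid ssid)
+ *           lcu->grouplist
+ *             alias_pav_group (one per base unit address for base PAV,
+ *                              a single group for hyper PAV)
+ *               group->baselist / group->aliaslist (dasd_device entries)
+ */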
+
+
+static void summary_unit_check_handling_work(struct work_struct *);
+static void lcu_update_work(struct work_struct *);
+static int _schedule_lcu_update(struct alias_lcu *, struct dasd_device *);
+
+static struct alias_root aliastree = {
+	.serverlist = LIST_HEAD_INIT(aliastree.serverlist),
+	.lock = __SPIN_LOCK_UNLOCKED(aliastree.lock),
+};
+
+static struct alias_server *_find_server(struct dasd_uid *uid)
+{
+	struct alias_server *pos;
+	list_for_each_entry(pos, &aliastree.serverlist, server) {
+		if (!strncmp(pos->uid.vendor, uid->vendor,
+			     sizeof(uid->vendor))
+		    && !strncmp(pos->uid.serial, uid->serial,
+				sizeof(uid->serial)))
+			return pos;
+	}
+	return NULL;
+}
+
+static struct alias_lcu *_find_lcu(struct alias_server *server,
+				   struct dasd_uid *uid)
+{
+	struct alias_lcu *pos;
+	list_for_each_entry(pos, &server->lculist, lcu) {
+		if (pos->uid.ssid == uid->ssid)
+			return pos;
+	}
+	return NULL;
+}
+
+static struct alias_pav_group *_find_group(struct alias_lcu *lcu,
+					   struct dasd_uid *uid)
+{
+	struct alias_pav_group *pos;
+	__u8 search_unit_addr;
+
+	/* for hyper pav there is only one group */
+	if (lcu->pav == HYPER_PAV) {
+		if (list_empty(&lcu->grouplist))
+			return NULL;
+		else
+			return list_first_entry(&lcu->grouplist,
+						struct alias_pav_group, group);
+	}
+
+	/* for base pav we have to find the group that matches the base */
+	if (uid->type == UA_BASE_DEVICE)
+		search_unit_addr = uid->real_unit_addr;
+	else
+		search_unit_addr = uid->base_unit_addr;
+	list_for_each_entry(pos, &lcu->grouplist, group) {
+		if (pos->uid.base_unit_addr == search_unit_addr &&
+		    !strncmp(pos->uid.vduit, uid->vduit, sizeof(uid->vduit)))
+			return pos;
+	}
+	return NULL;
+}
+
+static struct alias_server *_allocate_server(struct dasd_uid *uid)
+{
+	struct alias_server *server;
+
+	server = kzalloc(sizeof(*server), GFP_KERNEL);
+	if (!server)
+		return ERR_PTR(-ENOMEM);
+	memcpy(server->uid.vendor, uid->vendor, sizeof(uid->vendor));
+	memcpy(server->uid.serial, uid->serial, sizeof(uid->serial));
+	INIT_LIST_HEAD(&server->server);
+	INIT_LIST_HEAD(&server->lculist);
+	return server;
+}
+
+static void _free_server(struct alias_server *server)
+{
+	kfree(server);
+}
+
+static struct alias_lcu *_allocate_lcu(struct dasd_uid *uid)
+{
+	struct alias_lcu *lcu;
+
+	lcu = kzalloc(sizeof(*lcu), GFP_KERNEL);
+	if (!lcu)
+		return ERR_PTR(-ENOMEM);
+	lcu->uac = kzalloc(sizeof(*(lcu->uac)), GFP_KERNEL | GFP_DMA);
+	if (!lcu->uac)
+		goto out_err1;
+	lcu->rsu_cqr = kzalloc(sizeof(*lcu->rsu_cqr), GFP_KERNEL | GFP_DMA);
+	if (!lcu->rsu_cqr)
+		goto out_err2;
+	lcu->rsu_cqr->cpaddr = kzalloc(sizeof(struct ccw1),
+				       GFP_KERNEL | GFP_DMA);
+	if (!lcu->rsu_cqr->cpaddr)
+		goto out_err3;
+	lcu->rsu_cqr->data = kzalloc(16, GFP_KERNEL | GFP_DMA);
+	if (!lcu->rsu_cqr->data)
+		goto out_err4;
+
+	memcpy(lcu->uid.vendor, uid->vendor, sizeof(uid->vendor));
+	memcpy(lcu->uid.serial, uid->serial, sizeof(uid->serial));
+	lcu->uid.ssid = uid->ssid;
+	lcu->pav = NO_PAV;
+	lcu->flags = NEED_UAC_UPDATE | UPDATE_PENDING;
+	INIT_LIST_HEAD(&lcu->lcu);
+	INIT_LIST_HEAD(&lcu->inactive_devices);
+	INIT_LIST_HEAD(&lcu->active_devices);
+	INIT_LIST_HEAD(&lcu->grouplist);
+	INIT_WORK(&lcu->suc_data.worker, summary_unit_check_handling_work);
+	INIT_DELAYED_WORK(&lcu->ruac_data.dwork, lcu_update_work);
+	spin_lock_init(&lcu->lock);
+	init_completion(&lcu->lcu_setup);
+	return lcu;
+
+out_err4:
+	kfree(lcu->rsu_cqr->cpaddr);
+out_err3:
+	kfree(lcu->rsu_cqr);
+out_err2:
+	kfree(lcu->uac);
+out_err1:
+	kfree(lcu);
+	return ERR_PTR(-ENOMEM);
+}
+
+static void _free_lcu(struct alias_lcu *lcu)
+{
+	kfree(lcu->rsu_cqr->data);
+	kfree(lcu->rsu_cqr->cpaddr);
+	kfree(lcu->rsu_cqr);
+	kfree(lcu->uac);
+	kfree(lcu);
+}
+
+/*
+ * This is the function that will allocate all the server and lcu data,
+ * so this function must be called first for a new device.
+ * The return value is 0 on success, whether the lcu was already known
+ * before or had to be created here.
+ * A negative return code indicates that something went wrong (e.g. -ENOMEM).
+ */
+int dasd_alias_make_device_known_to_lcu(struct dasd_device *device)
+{
+	struct dasd_eckd_private *private = device->private;
+	unsigned long flags;
+	struct alias_server *server, *newserver;
+	struct alias_lcu *lcu, *newlcu;
+	struct dasd_uid uid;
+
+	device->discipline->get_uid(device, &uid);
+	spin_lock_irqsave(&aliastree.lock, flags);
+	server = _find_server(&uid);
+	if (!server) {
+		spin_unlock_irqrestore(&aliastree.lock, flags);
+		newserver = _allocate_server(&uid);
+		if (IS_ERR(newserver))
+			return PTR_ERR(newserver);
+		spin_lock_irqsave(&aliastree.lock, flags);
+		server = _find_server(&uid);
+		if (!server) {
+			list_add(&newserver->server, &aliastree.serverlist);
+			server = newserver;
+		} else {
+			/* someone was faster */
+			_free_server(newserver);
+		}
+	}
+
+	lcu = _find_lcu(server, &uid);
+	if (!lcu) {
+		spin_unlock_irqrestore(&aliastree.lock, flags);
+		newlcu = _allocate_lcu(&uid);
+		if (IS_ERR(newlcu))
+			return PTR_ERR(newlcu);
+		spin_lock_irqsave(&aliastree.lock, flags);
+		lcu = _find_lcu(server, &uid);
+		if (!lcu) {
+			list_add(&newlcu->lcu, &server->lculist);
+			lcu = newlcu;
+		} else {
+			/* someone was faster */
+			_free_lcu(newlcu);
+		}
+	}
+	spin_lock(&lcu->lock);
+	list_add(&device->alias_list, &lcu->inactive_devices);
+	private->lcu = lcu;
+	spin_unlock(&lcu->lock);
+	spin_unlock_irqrestore(&aliastree.lock, flags);
+
+	return 0;
+}
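+
+/*
+ * Editorial note on the allocate-then-recheck pattern above: the server
+ * and lcu structures are allocated with GFP_KERNEL, which may sleep, so
+ * aliastree.lock must be dropped around the allocation. Because another
+ * caller may have added the same server or lcu in the meantime, the
+ * lookup is repeated after the lock is reacquired and the freshly
+ * allocated structure is freed if "someone was faster".
+ */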
+
+/*
+ * This function removes a device from the scope of alias management.
+ * The complicated part is to make sure that it is not in use by
+ * any of the workers. If necessary cancel the work.
+ */
+void dasd_alias_disconnect_device_from_lcu(struct dasd_device *device)
+{
+	struct dasd_eckd_private *private = device->private;
+	unsigned long flags;
+	struct alias_lcu *lcu;
+	struct alias_server *server;
+	int was_pending;
+	struct dasd_uid uid;
+
+	lcu = private->lcu;
+	/* nothing to do if already disconnected */
+	if (!lcu)
+		return;
+	device->discipline->get_uid(device, &uid);
+	spin_lock_irqsave(&lcu->lock, flags);
+	list_del_init(&device->alias_list);
+	/* make sure that the workers don't use this device */
+	if (device == lcu->suc_data.device) {
+		spin_unlock_irqrestore(&lcu->lock, flags);
+		cancel_work_sync(&lcu->suc_data.worker);
+		spin_lock_irqsave(&lcu->lock, flags);
+		if (device == lcu->suc_data.device) {
+			dasd_put_device(device);
+			lcu->suc_data.device = NULL;
+		}
+	}
+	was_pending = 0;
+	if (device == lcu->ruac_data.device) {
+		spin_unlock_irqrestore(&lcu->lock, flags);
+		was_pending = 1;
+		cancel_delayed_work_sync(&lcu->ruac_data.dwork);
+		spin_lock_irqsave(&lcu->lock, flags);
+		if (device == lcu->ruac_data.device) {
+			dasd_put_device(device);
+			lcu->ruac_data.device = NULL;
+		}
+	}
+	private->lcu = NULL;
+	spin_unlock_irqrestore(&lcu->lock, flags);
+
+	spin_lock_irqsave(&aliastree.lock, flags);
+	spin_lock(&lcu->lock);
+	if (list_empty(&lcu->grouplist) &&
+	    list_empty(&lcu->active_devices) &&
+	    list_empty(&lcu->inactive_devices)) {
+		list_del(&lcu->lcu);
+		spin_unlock(&lcu->lock);
+		_free_lcu(lcu);
+		lcu = NULL;
+	} else {
+		if (was_pending)
+			_schedule_lcu_update(lcu, NULL);
+		spin_unlock(&lcu->lock);
+	}
+	server = _find_server(&uid);
+	if (server && list_empty(&server->lculist)) {
+		list_del(&server->server);
+		_free_server(server);
+	}
+	spin_unlock_irqrestore(&aliastree.lock, flags);
+}
+
+/*
+ * This function assumes that the unit address configuration stored
+ * in the lcu is up to date and will update the device uid before
+ * adding it to a pav group.
+ */
+
+static int _add_device_to_lcu(struct alias_lcu *lcu,
+			      struct dasd_device *device,
+			      struct dasd_device *pos)
+{
+
+	struct dasd_eckd_private *private = device->private;
+	struct alias_pav_group *group;
+	struct dasd_uid uid;
+
+	spin_lock(get_ccwdev_lock(device->cdev));
+	private->uid.type = lcu->uac->unit[private->uid.real_unit_addr].ua_type;
+	private->uid.base_unit_addr =
+		lcu->uac->unit[private->uid.real_unit_addr].base_ua;
+	uid = private->uid;
+	spin_unlock(get_ccwdev_lock(device->cdev));
+	/* if we have no PAV anyway, we don't need to bother with PAV groups */
+	if (lcu->pav == NO_PAV) {
+		list_move(&device->alias_list, &lcu->active_devices);
+		return 0;
+	}
+	group = _find_group(lcu, &uid);
+	if (!group) {
+		group = kzalloc(sizeof(*group), GFP_ATOMIC);
+		if (!group)
+			return -ENOMEM;
+		memcpy(group->uid.vendor, uid.vendor, sizeof(uid.vendor));
+		memcpy(group->uid.serial, uid.serial, sizeof(uid.serial));
+		group->uid.ssid = uid.ssid;
+		if (uid.type == UA_BASE_DEVICE)
+			group->uid.base_unit_addr = uid.real_unit_addr;
+		else
+			group->uid.base_unit_addr = uid.base_unit_addr;
+		memcpy(group->uid.vduit, uid.vduit, sizeof(uid.vduit));
+		INIT_LIST_HEAD(&group->group);
+		INIT_LIST_HEAD(&group->baselist);
+		INIT_LIST_HEAD(&group->aliaslist);
+		list_add(&group->group, &lcu->grouplist);
+	}
+	if (uid.type == UA_BASE_DEVICE)
+		list_move(&device->alias_list, &group->baselist);
+	else
+		list_move(&device->alias_list, &group->aliaslist);
+	private->pavgroup = group;
+	return 0;
+}
+
+static void _remove_device_from_lcu(struct alias_lcu *lcu,
+				    struct dasd_device *device)
+{
+	struct dasd_eckd_private *private = device->private;
+	struct alias_pav_group *group;
+
+	list_move(&device->alias_list, &lcu->inactive_devices);
+	group = private->pavgroup;
+	if (!group)
+		return;
+	private->pavgroup = NULL;
+	if (list_empty(&group->baselist) && list_empty(&group->aliaslist)) {
+		list_del(&group->group);
+		kfree(group);
+		return;
+	}
+	if (group->next == device)
+		group->next = NULL;
+}
+
+static int
+suborder_not_supported(struct dasd_ccw_req *cqr)
+{
+	char *sense;
+	char reason;
+	char msg_format;
+	char msg_no;
+
+	sense = dasd_get_sense(&cqr->irb);
+	if (!sense)
+		return 0;
+
+	reason = sense[0];
+	msg_format = (sense[7] & 0xF0);
+	msg_no = (sense[7] & 0x0F);
+
+	/* command reject, Format 0 MSG 4 - invalid parameter */
+	if ((reason == 0x80) && (msg_format == 0x00) && (msg_no == 0x04))
+		return 1;
+
+	return 0;
+}
+
+static int read_unit_address_configuration(struct dasd_device *device,
+					   struct alias_lcu *lcu)
+{
+	struct dasd_psf_prssd_data *prssdp;
+	struct dasd_ccw_req *cqr;
+	struct ccw1 *ccw;
+	int rc;
+	unsigned long flags;
+
+	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */	+ 1 /* RSSD */,
+				   (sizeof(struct dasd_psf_prssd_data)),
+				   device, NULL);
+	if (IS_ERR(cqr))
+		return PTR_ERR(cqr);
+	cqr->startdev = device;
+	cqr->memdev = device;
+	clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
+	cqr->retries = 10;
+	cqr->expires = 20 * HZ;
+
+	/* Prepare for Read Subsystem Data */
+	prssdp = (struct dasd_psf_prssd_data *) cqr->data;
+	memset(prssdp, 0, sizeof(struct dasd_psf_prssd_data));
+	prssdp->order = PSF_ORDER_PRSSD;
+	prssdp->suborder = 0x0e;	/* Read unit address configuration */
+	/* all other bytes of prssdp must be zero */
+
+	ccw = cqr->cpaddr;
+	ccw->cmd_code = DASD_ECKD_CCW_PSF;
+	ccw->count = sizeof(struct dasd_psf_prssd_data);
+	ccw->flags |= CCW_FLAG_CC;
+	ccw->cda = (__u32)(addr_t) prssdp;
+
+	/* Read Subsystem Data - unit address configuration */
+	memset(lcu->uac, 0, sizeof(*(lcu->uac)));
+
+	ccw++;
+	ccw->cmd_code = DASD_ECKD_CCW_RSSD;
+	ccw->count = sizeof(*(lcu->uac));
+	ccw->cda = (__u32)(addr_t) lcu->uac;
+
+	cqr->buildclk = get_tod_clock();
+	cqr->status = DASD_CQR_FILLED;
+
+	/* need to unset flag here to detect race with summary unit check */
+	spin_lock_irqsave(&lcu->lock, flags);
+	lcu->flags &= ~NEED_UAC_UPDATE;
+	spin_unlock_irqrestore(&lcu->lock, flags);
+
+	do {
+		rc = dasd_sleep_on(cqr);
+		if (rc && suborder_not_supported(cqr))
+			return -EOPNOTSUPP;
+	} while (rc && (cqr->retries > 0));
+	if (rc) {
+		spin_lock_irqsave(&lcu->lock, flags);
+		lcu->flags |= NEED_UAC_UPDATE;
+		spin_unlock_irqrestore(&lcu->lock, flags);
+	}
+	dasd_sfree_request(cqr, cqr->memdev);
+	return rc;
+}
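+
+/*
+ * Editorial sketch of the channel program built above (not part of the
+ * original source):
+ *
+ *   CCW 1: PSF  - write the Perform Subsystem Function/PRSSD order
+ *                 (suborder 0x0e, read unit address configuration)
+ *   CCW 2: RSSD - read the resulting data into lcu->uac
+ *
+ * The two CCWs are command chained via CCW_FLAG_CC on the first CCW.
+ */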
+
+static int _lcu_update(struct dasd_device *refdev, struct alias_lcu *lcu)
+{
+	unsigned long flags;
+	struct alias_pav_group *pavgroup, *tempgroup;
+	struct dasd_device *device, *tempdev;
+	int i, rc;
+	struct dasd_eckd_private *private;
+
+	spin_lock_irqsave(&lcu->lock, flags);
+	list_for_each_entry_safe(pavgroup, tempgroup, &lcu->grouplist, group) {
+		list_for_each_entry_safe(device, tempdev, &pavgroup->baselist,
+					 alias_list) {
+			list_move(&device->alias_list, &lcu->active_devices);
+			private = device->private;
+			private->pavgroup = NULL;
+		}
+		list_for_each_entry_safe(device, tempdev, &pavgroup->aliaslist,
+					 alias_list) {
+			list_move(&device->alias_list, &lcu->active_devices);
+			private = device->private;
+			private->pavgroup = NULL;
+		}
+		list_del(&pavgroup->group);
+		kfree(pavgroup);
+	}
+	spin_unlock_irqrestore(&lcu->lock, flags);
+
+	rc = read_unit_address_configuration(refdev, lcu);
+	if (rc)
+		return rc;
+
+	spin_lock_irqsave(&lcu->lock, flags);
+	lcu->pav = NO_PAV;
+	for (i = 0; i < MAX_DEVICES_PER_LCU; ++i) {
+		switch (lcu->uac->unit[i].ua_type) {
+		case UA_BASE_PAV_ALIAS:
+			lcu->pav = BASE_PAV;
+			break;
+		case UA_HYPER_PAV_ALIAS:
+			lcu->pav = HYPER_PAV;
+			break;
+		}
+		if (lcu->pav != NO_PAV)
+			break;
+	}
+
+	list_for_each_entry_safe(device, tempdev, &lcu->active_devices,
+				 alias_list) {
+		_add_device_to_lcu(lcu, device, refdev);
+	}
+	spin_unlock_irqrestore(&lcu->lock, flags);
+	return 0;
+}
+
+static void lcu_update_work(struct work_struct *work)
+{
+	struct alias_lcu *lcu;
+	struct read_uac_work_data *ruac_data;
+	struct dasd_device *device;
+	unsigned long flags;
+	int rc;
+
+	ruac_data = container_of(work, struct read_uac_work_data, dwork.work);
+	lcu = container_of(ruac_data, struct alias_lcu, ruac_data);
+	device = ruac_data->device;
+	rc = _lcu_update(device, lcu);
+	/*
+	 * Need to check flags again, as there could have been another
+	 * prepare_update or a new device while we were still
+	 * processing the data
+	 */
+	spin_lock_irqsave(&lcu->lock, flags);
+	if ((rc && (rc != -EOPNOTSUPP)) || (lcu->flags & NEED_UAC_UPDATE)) {
+		DBF_DEV_EVENT(DBF_WARNING, device, "could not update"
+			    " alias data in lcu (rc = %d), retry later", rc);
+		if (!schedule_delayed_work(&lcu->ruac_data.dwork, 30*HZ))
+			dasd_put_device(device);
+	} else {
+		dasd_put_device(device);
+		lcu->ruac_data.device = NULL;
+		lcu->flags &= ~UPDATE_PENDING;
+	}
+	spin_unlock_irqrestore(&lcu->lock, flags);
+}
+
+static int _schedule_lcu_update(struct alias_lcu *lcu,
+				struct dasd_device *device)
+{
+	struct dasd_device *usedev = NULL;
+	struct alias_pav_group *group;
+
+	lcu->flags |= NEED_UAC_UPDATE;
+	if (lcu->ruac_data.device) {
+		/* already scheduled or running */
+		return 0;
+	}
+	if (device && !list_empty(&device->alias_list))
+		usedev = device;
+
+	if (!usedev && !list_empty(&lcu->grouplist)) {
+		group = list_first_entry(&lcu->grouplist,
+					 struct alias_pav_group, group);
+		if (!list_empty(&group->baselist))
+			usedev = list_first_entry(&group->baselist,
+						  struct dasd_device,
+						  alias_list);
+		else if (!list_empty(&group->aliaslist))
+			usedev = list_first_entry(&group->aliaslist,
+						  struct dasd_device,
+						  alias_list);
+	}
+	if (!usedev && !list_empty(&lcu->active_devices)) {
+		usedev = list_first_entry(&lcu->active_devices,
+					  struct dasd_device, alias_list);
+	}
+	/*
+	 * if we haven't found a proper device yet, give up for now; the next
+	 * device that is set active will trigger an lcu update
+	 */
+	if (!usedev)
+		return -EINVAL;
+	dasd_get_device(usedev);
+	lcu->ruac_data.device = usedev;
+	if (!schedule_delayed_work(&lcu->ruac_data.dwork, 0))
+		dasd_put_device(usedev);
+	return 0;
+}
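+
+/*
+ * Editorial note: the update channel program has to be started on some
+ * device of this lcu, so _schedule_lcu_update picks one in the order
+ * given device -> first base device of the first pav group -> first
+ * alias device of that group -> first device on the active list, and
+ * gives up (for now) if none is available.
+ */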
+
+int dasd_alias_add_device(struct dasd_device *device)
+{
+	struct dasd_eckd_private *private = device->private;
+	__u8 uaddr = private->uid.real_unit_addr;
+	struct alias_lcu *lcu = private->lcu;
+	unsigned long flags;
+	int rc;
+
+	rc = 0;
+	spin_lock_irqsave(&lcu->lock, flags);
+	/*
+	 * Check if device and lcu type differ. If so, the uac data may be
+	 * outdated and needs to be updated.
+	 */
+	if (private->uid.type !=  lcu->uac->unit[uaddr].ua_type) {
+		lcu->flags |= UPDATE_PENDING;
+		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
+			      "uid type mismatch - trigger rescan");
+	}
+	if (!(lcu->flags & UPDATE_PENDING)) {
+		rc = _add_device_to_lcu(lcu, device, device);
+		if (rc)
+			lcu->flags |= UPDATE_PENDING;
+	}
+	if (lcu->flags & UPDATE_PENDING) {
+		list_move(&device->alias_list, &lcu->active_devices);
+		_schedule_lcu_update(lcu, device);
+	}
+	spin_unlock_irqrestore(&lcu->lock, flags);
+	return rc;
+}
+
+int dasd_alias_update_add_device(struct dasd_device *device)
+{
+	struct dasd_eckd_private *private = device->private;
+
+	private->lcu->flags |= UPDATE_PENDING;
+	return dasd_alias_add_device(device);
+}
+
+int dasd_alias_remove_device(struct dasd_device *device)
+{
+	struct dasd_eckd_private *private = device->private;
+	struct alias_lcu *lcu = private->lcu;
+	unsigned long flags;
+
+	/* nothing to do if already removed */
+	if (!lcu)
+		return 0;
+	spin_lock_irqsave(&lcu->lock, flags);
+	_remove_device_from_lcu(lcu, device);
+	spin_unlock_irqrestore(&lcu->lock, flags);
+	return 0;
+}
+
+struct dasd_device *dasd_alias_get_start_dev(struct dasd_device *base_device)
+{
+	struct dasd_eckd_private *alias_priv, *private = base_device->private;
+	struct alias_pav_group *group = private->pavgroup;
+	struct alias_lcu *lcu = private->lcu;
+	struct dasd_device *alias_device;
+	unsigned long flags;
+
+	if (!group || !lcu)
+		return NULL;
+	if (lcu->pav == NO_PAV ||
+	    lcu->flags & (NEED_UAC_UPDATE | UPDATE_PENDING))
+		return NULL;
+	if (unlikely(!(private->features.feature[8] & 0x01))) {
+		/*
+		 * PAV is enabled but prefix is not, very unlikely;
+		 * seems to be a lost pathgroup.
+		 * Use the base device to do IO.
+		 */
+		DBF_DEV_EVENT(DBF_ERR, base_device, "%s",
+			      "Prefix not enabled with PAV enabled\n");
+		return NULL;
+	}
+
+	spin_lock_irqsave(&lcu->lock, flags);
+	alias_device = group->next;
+	if (!alias_device) {
+		if (list_empty(&group->aliaslist)) {
+			spin_unlock_irqrestore(&lcu->lock, flags);
+			return NULL;
+		} else {
+			alias_device = list_first_entry(&group->aliaslist,
+							struct dasd_device,
+							alias_list);
+		}
+	}
+	if (list_is_last(&alias_device->alias_list, &group->aliaslist))
+		group->next = list_first_entry(&group->aliaslist,
+					       struct dasd_device, alias_list);
+	else
+		group->next = list_first_entry(&alias_device->alias_list,
+					       struct dasd_device, alias_list);
+	spin_unlock_irqrestore(&lcu->lock, flags);
+	alias_priv = alias_device->private;
+	if ((alias_priv->count < private->count) && !alias_device->stopped &&
+	    !test_bit(DASD_FLAG_OFFLINE, &alias_device->flags))
+		return alias_device;
+	else
+		return NULL;
+}
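+
+/*
+ * Editorial note on the load balancing above: group->next implements a
+ * simple round robin over the group's alias list, and the chosen alias
+ * is only returned if it currently has fewer open requests than the
+ * base device (alias_priv->count < private->count) and is neither
+ * stopped nor offline; otherwise NULL is returned so that the caller
+ * falls back to the base device.
+ */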
+
+/*
+ * Summary unit check handling depends on the way alias devices
+ * are handled, so it is done here rather than in dasd_eckd.c
+ */
+static int reset_summary_unit_check(struct alias_lcu *lcu,
+				    struct dasd_device *device,
+				    char reason)
+{
+	struct dasd_ccw_req *cqr;
+	int rc = 0;
+	struct ccw1 *ccw;
+
+	cqr = lcu->rsu_cqr;
+	memcpy((char *) &cqr->magic, "ECKD", 4);
+	ASCEBC((char *) &cqr->magic, 4);
+	ccw = cqr->cpaddr;
+	ccw->cmd_code = DASD_ECKD_CCW_RSCK;
+	ccw->flags = CCW_FLAG_SLI;
+	ccw->count = 16;
+	ccw->cda = (__u32)(addr_t) cqr->data;
+	((char *)cqr->data)[0] = reason;
+
+	clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
+	cqr->retries = 255;	/* set retry counter to enable basic ERP */
+	cqr->startdev = device;
+	cqr->memdev = device;
+	cqr->block = NULL;
+	cqr->expires = 5 * HZ;
+	cqr->buildclk = get_tod_clock();
+	cqr->status = DASD_CQR_FILLED;
+
+	rc = dasd_sleep_on_immediatly(cqr);
+	return rc;
+}
+
+static void _restart_all_base_devices_on_lcu(struct alias_lcu *lcu)
+{
+	struct alias_pav_group *pavgroup;
+	struct dasd_device *device;
+	struct dasd_eckd_private *private;
+
+	/* active and inactive lists can contain alias as well as base devices */
+	list_for_each_entry(device, &lcu->active_devices, alias_list) {
+		private = device->private;
+		if (private->uid.type != UA_BASE_DEVICE)
+			continue;
+		dasd_schedule_block_bh(device->block);
+		dasd_schedule_device_bh(device);
+	}
+	list_for_each_entry(device, &lcu->inactive_devices, alias_list) {
+		private = device->private;
+		if (private->uid.type != UA_BASE_DEVICE)
+			continue;
+		dasd_schedule_block_bh(device->block);
+		dasd_schedule_device_bh(device);
+	}
+	list_for_each_entry(pavgroup, &lcu->grouplist, group) {
+		list_for_each_entry(device, &pavgroup->baselist, alias_list) {
+			dasd_schedule_block_bh(device->block);
+			dasd_schedule_device_bh(device);
+		}
+	}
+}
+
+static void flush_all_alias_devices_on_lcu(struct alias_lcu *lcu)
+{
+	struct alias_pav_group *pavgroup;
+	struct dasd_device *device, *temp;
+	struct dasd_eckd_private *private;
+	unsigned long flags;
+	LIST_HEAD(active);
+
+	/*
+	 * The problem here is that dasd_flush_device_queue may wait
+	 * for the termination of a request to complete. We can't keep
+	 * the lcu lock during that time, so we must assume that
+	 * the lists may have changed.
+	 * Idea: first gather all active alias devices in a separate list,
+	 * then flush the first element of this list unlocked, and afterwards
+	 * check if it is still on the list before moving it to the
+	 * active_devices list.
+	 */
+
+	spin_lock_irqsave(&lcu->lock, flags);
+	list_for_each_entry_safe(device, temp, &lcu->active_devices,
+				 alias_list) {
+		private = device->private;
+		if (private->uid.type == UA_BASE_DEVICE)
+			continue;
+		list_move(&device->alias_list, &active);
+	}
+
+	list_for_each_entry(pavgroup, &lcu->grouplist, group) {
+		list_splice_init(&pavgroup->aliaslist, &active);
+	}
+	while (!list_empty(&active)) {
+		device = list_first_entry(&active, struct dasd_device,
+					  alias_list);
+		spin_unlock_irqrestore(&lcu->lock, flags);
+		dasd_flush_device_queue(device);
+		spin_lock_irqsave(&lcu->lock, flags);
+		/*
+		 * only move device around if it wasn't moved away while we
+		 * were waiting for the flush
+		 */
+		if (device == list_first_entry(&active,
+					       struct dasd_device, alias_list)) {
+			list_move(&device->alias_list, &lcu->active_devices);
+			private = device->private;
+			private->pavgroup = NULL;
+		}
+	}
+	spin_unlock_irqrestore(&lcu->lock, flags);
+}
+
+static void _stop_all_devices_on_lcu(struct alias_lcu *lcu)
+{
+	struct alias_pav_group *pavgroup;
+	struct dasd_device *device;
+
+	list_for_each_entry(device, &lcu->active_devices, alias_list) {
+		spin_lock(get_ccwdev_lock(device->cdev));
+		dasd_device_set_stop_bits(device, DASD_STOPPED_SU);
+		spin_unlock(get_ccwdev_lock(device->cdev));
+	}
+	list_for_each_entry(device, &lcu->inactive_devices, alias_list) {
+		spin_lock(get_ccwdev_lock(device->cdev));
+		dasd_device_set_stop_bits(device, DASD_STOPPED_SU);
+		spin_unlock(get_ccwdev_lock(device->cdev));
+	}
+	list_for_each_entry(pavgroup, &lcu->grouplist, group) {
+		list_for_each_entry(device, &pavgroup->baselist, alias_list) {
+			spin_lock(get_ccwdev_lock(device->cdev));
+			dasd_device_set_stop_bits(device, DASD_STOPPED_SU);
+			spin_unlock(get_ccwdev_lock(device->cdev));
+		}
+		list_for_each_entry(device, &pavgroup->aliaslist, alias_list) {
+			spin_lock(get_ccwdev_lock(device->cdev));
+			dasd_device_set_stop_bits(device, DASD_STOPPED_SU);
+			spin_unlock(get_ccwdev_lock(device->cdev));
+		}
+	}
+}
+
+static void _unstop_all_devices_on_lcu(struct alias_lcu *lcu)
+{
+	struct alias_pav_group *pavgroup;
+	struct dasd_device *device;
+
+	list_for_each_entry(device, &lcu->active_devices, alias_list) {
+		spin_lock(get_ccwdev_lock(device->cdev));
+		dasd_device_remove_stop_bits(device, DASD_STOPPED_SU);
+		spin_unlock(get_ccwdev_lock(device->cdev));
+	}
+	list_for_each_entry(device, &lcu->inactive_devices, alias_list) {
+		spin_lock(get_ccwdev_lock(device->cdev));
+		dasd_device_remove_stop_bits(device, DASD_STOPPED_SU);
+		spin_unlock(get_ccwdev_lock(device->cdev));
+	}
+	list_for_each_entry(pavgroup, &lcu->grouplist, group) {
+		list_for_each_entry(device, &pavgroup->baselist, alias_list) {
+			spin_lock(get_ccwdev_lock(device->cdev));
+			dasd_device_remove_stop_bits(device, DASD_STOPPED_SU);
+			spin_unlock(get_ccwdev_lock(device->cdev));
+		}
+		list_for_each_entry(device, &pavgroup->aliaslist, alias_list) {
+			spin_lock(get_ccwdev_lock(device->cdev));
+			dasd_device_remove_stop_bits(device, DASD_STOPPED_SU);
+			spin_unlock(get_ccwdev_lock(device->cdev));
+		}
+	}
+}
+
+static void summary_unit_check_handling_work(struct work_struct *work)
+{
+	struct alias_lcu *lcu;
+	struct summary_unit_check_work_data *suc_data;
+	unsigned long flags;
+	struct dasd_device *device;
+
+	suc_data = container_of(work, struct summary_unit_check_work_data,
+				worker);
+	lcu = container_of(suc_data, struct alias_lcu, suc_data);
+	device = suc_data->device;
+
+	/* 1. flush alias devices */
+	flush_all_alias_devices_on_lcu(lcu);
+
+	/* 2. reset summary unit check */
+	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
+	dasd_device_remove_stop_bits(device,
+				     (DASD_STOPPED_SU | DASD_STOPPED_PENDING));
+	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
+	reset_summary_unit_check(lcu, device, suc_data->reason);
+
+	spin_lock_irqsave(&lcu->lock, flags);
+	_unstop_all_devices_on_lcu(lcu);
+	_restart_all_base_devices_on_lcu(lcu);
+	/* 3. read new alias configuration */
+	_schedule_lcu_update(lcu, device);
+	lcu->suc_data.device = NULL;
+	dasd_put_device(device);
+	spin_unlock_irqrestore(&lcu->lock, flags);
+}
+
+void dasd_alias_handle_summary_unit_check(struct work_struct *work)
+{
+	struct dasd_device *device = container_of(work, struct dasd_device,
+						  suc_work);
+	struct dasd_eckd_private *private = device->private;
+	struct alias_lcu *lcu;
+	unsigned long flags;
+
+	lcu = private->lcu;
+	if (!lcu) {
+		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
+			    "device not ready to handle summary"
+			    " unit check (no lcu structure)");
+		goto out;
+	}
+	spin_lock_irqsave(&lcu->lock, flags);
+	/* If this device is about to be removed just return and wait for
+	 * the next interrupt on a different device
+	 */
+	if (list_empty(&device->alias_list)) {
+		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
+			    "device is in offline processing,"
+			    " don't do summary unit check handling");
+		goto out_unlock;
+	}
+	if (lcu->suc_data.device) {
+		/* already scheduled or running */
+		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
+			    "previous instance of summary unit check worker"
+			    " still pending");
+		goto out_unlock;
+	}
+	_stop_all_devices_on_lcu(lcu);
+	/* prepare for lcu_update */
+	lcu->flags |= NEED_UAC_UPDATE | UPDATE_PENDING;
+	lcu->suc_data.reason = private->suc_reason;
+	lcu->suc_data.device = device;
+	dasd_get_device(device);
+	if (!schedule_work(&lcu->suc_data.worker))
+		dasd_put_device(device);
+out_unlock:
+	spin_unlock_irqrestore(&lcu->lock, flags);
+out:
+	clear_bit(DASD_FLAG_SUC, &device->flags);
+	dasd_put_device(device);
+}
diff --git a/drivers/s390/block/dasd_devmap.c b/drivers/s390/block/dasd_devmap.c
new file mode 100644
index 0000000..fab35c6
--- /dev/null
+++ b/drivers/s390/block/dasd_devmap.c
@@ -0,0 +1,1747 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
+ *		    Horst Hummel <Horst.Hummel@de.ibm.com>
+ *		    Carsten Otte <Cotte@de.ibm.com>
+ *		    Martin Schwidefsky <schwidefsky@de.ibm.com>
+ * Bugreports.to..: <Linux390@de.ibm.com>
+ * Copyright IBM Corp. 1999,2001
+ *
+ * Device mapping and dasd= parameter parsing functions. All devmap
+ * functions may not be called from interrupt context. In particular
+ * dasd_get_device is a no-no from interrupt context.
+ *
+ */
+
+#define KMSG_COMPONENT "dasd"
+
+#include <linux/ctype.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+
+#include <asm/debug.h>
+#include <linux/uaccess.h>
+#include <asm/ipl.h>
+
+/* This is ugly... */
+#define PRINTK_HEADER "dasd_devmap:"
+#define DASD_BUS_ID_SIZE 20
+#define DASD_MAX_PARAMS 256
+
+#include "dasd_int.h"
+
+struct kmem_cache *dasd_page_cache;
+EXPORT_SYMBOL_GPL(dasd_page_cache);
+
+/*
+ * dasd_devmap_t is used to store the features and the relation
+ * between device number and device index. To find a dasd_devmap_t
+ * that corresponds to a device number of a device index each
+ * dasd_devmap_t is added to two linked lists, one to search by
+ * the device number and one to search by the device index. As
+ * soon as big minor numbers are available the device index list
+ * can be removed since the device number will then be identical
+ * to the device index.
+ */
+struct dasd_devmap {
+	struct list_head list;
+	char bus_id[DASD_BUS_ID_SIZE];
+	unsigned int devindex;
+	unsigned short features;
+	struct dasd_device *device;
+};
+
+/*
+ * Parameter parsing functions for dasd= parameter. The syntax is:
+ *   <devno>		: (0x)?[0-9a-fA-F]+
+ *   <busid>		: [0-9a-f]\.[0-9a-f]\.(0x)?[0-9a-fA-F]+
+ *   <feature>		: ro
+ *   <feature_list>	: \(<feature>(:<feature>)*\)
+ *   <devno-range>	: <devno>(-<devno>)?<feature_list>?
+ *   <busid-range>	: <busid>(-<busid>)?<feature_list>?
+ *   <devices>		: <devno-range>|<busid-range>
+ *   <dasd_module>	: dasd_diag_mod|dasd_eckd_mod|dasd_fba_mod
+ *
+ *   <dasd>		: autodetect|probeonly|<devices>(,<devices>)*
+ */
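+
+/*
+ * Illustrative dasd= strings matching the grammar above (device numbers
+ * are arbitrary examples):
+ *   dasd=autodetect
+ *   dasd=0.0.1234-0.0.1237(ro:failfast),0.0.abcd(diag)
+ *   dasd=4711
+ */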
+
+int dasd_probeonly = 0;		/* is true when probeonly mode is active */
+int dasd_autodetect = 0;	/* is true when autodetection is active */
+int dasd_nopav = 0;		/* is true when PAV is disabled */
+EXPORT_SYMBOL_GPL(dasd_nopav);
+int dasd_nofcx;			/* disable High Performance Ficon */
+EXPORT_SYMBOL_GPL(dasd_nofcx);
+
+/*
+ * char *dasd[] is intended to hold the ranges supplied by the dasd= statement
+ * it is named 'dasd' to directly be filled by insmod with the comma separated
+ * strings when running as a module.
+ */
+static char *dasd[DASD_MAX_PARAMS];
+module_param_array(dasd, charp, NULL, S_IRUGO);
+
+/*
+ * Single spinlock to protect devmap and servermap structures and lists.
+ */
+static DEFINE_SPINLOCK(dasd_devmap_lock);
+
+/*
+ * Hash lists for devmap structures.
+ */
+static struct list_head dasd_hashlists[256];
+int dasd_max_devindex;
+
+static struct dasd_devmap *dasd_add_busid(const char *, int);
+
+static inline int
+dasd_hash_busid(const char *bus_id)
+{
+	int hash, i;
+
+	hash = 0;
+	for (i = 0; (i < DASD_BUS_ID_SIZE) && *bus_id; i++, bus_id++)
+		hash += *bus_id;
+	return hash & 0xff;
+}
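+
+/*
+ * Worked example: for bus_id "0.0.1234" the byte sum is
+ * 2 * 0x30 + 2 * 0x2e + 0x31 + 0x32 + 0x33 + 0x34 = 0x186,
+ * so the entry lands in hash bucket 0x186 & 0xff = 0x86.
+ */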
+
+#ifndef MODULE
+static int __init dasd_call_setup(char *opt)
+{
+	static int i __initdata;
+	char *tmp;
+
+	while (i < DASD_MAX_PARAMS) {
+		tmp = strsep(&opt, ",");
+		if (!tmp)
+			break;
+
+		dasd[i++] = tmp;
+	}
+
+	return 1;
+}
+
+__setup ("dasd=", dasd_call_setup);
+#endif	/* #ifndef MODULE */
+
+#define	DASD_IPLDEV	"ipldev"
+
+/*
+ * Read a device busid/devno from a string.
+ */
+static int __init dasd_busid(char *str, int *id0, int *id1, int *devno)
+{
+	unsigned int val;
+	char *tok;
+
+	/* Interpret ipldev busid */
+	if (strncmp(DASD_IPLDEV, str, strlen(DASD_IPLDEV)) == 0) {
+		if (ipl_info.type != IPL_TYPE_CCW) {
+			pr_err("The IPL device is not a CCW device\n");
+			return -EINVAL;
+		}
+		*id0 = 0;
+		*id1 = ipl_info.data.ccw.dev_id.ssid;
+		*devno = ipl_info.data.ccw.dev_id.devno;
+
+		return 0;
+	}
+
+	/* Old style 0xXXXX or XXXX */
+	if (!kstrtouint(str, 16, &val)) {
+		*id0 = *id1 = 0;
+		if (val > 0xffff)
+			return -EINVAL;
+		*devno = val;
+		return 0;
+	}
+
+	/* New style x.y.z busid */
+	tok = strsep(&str, ".");
+	if (kstrtouint(tok, 16, &val) || val > 0xff)
+		return -EINVAL;
+	*id0 = val;
+
+	tok = strsep(&str, ".");
+	if (kstrtouint(tok, 16, &val) || val > 0xff)
+		return -EINVAL;
+	*id1 = val;
+
+	tok = strsep(&str, ".");
+	if (kstrtouint(tok, 16, &val) || val > 0xffff)
+		return -EINVAL;
+	*devno = val;
+
+	return 0;
+}
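+
+/*
+ * For example, "0.1.4711" yields id0=0, id1=1 and devno=0x4711, while
+ * the old style "4711" (or "0x4711") yields id0=id1=0 and devno=0x4711.
+ * "ipldev" resolves to the CCW device the system was IPLed from.
+ */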
+
+/*
+ * Read colon separated list of dasd features.
+ */
+static int __init dasd_feature_list(char *str)
+{
+	int features, len, rc;
+
+	features = 0;
+	rc = 0;
+
+	if (!str)
+		return DASD_FEATURE_DEFAULT;
+
+	while (1) {
+		for (len = 0;
+		     str[len] && str[len] != ':' && str[len] != ')'; len++);
+		if (len == 2 && !strncmp(str, "ro", 2))
+			features |= DASD_FEATURE_READONLY;
+		else if (len == 4 && !strncmp(str, "diag", 4))
+			features |= DASD_FEATURE_USEDIAG;
+		else if (len == 3 && !strncmp(str, "raw", 3))
+			features |= DASD_FEATURE_USERAW;
+		else if (len == 6 && !strncmp(str, "erplog", 6))
+			features |= DASD_FEATURE_ERPLOG;
+		else if (len == 8 && !strncmp(str, "failfast", 8))
+			features |= DASD_FEATURE_FAILFAST;
+		else {
+			pr_warn("%.*s is not a supported device option\n",
+				len, str);
+			rc = -EINVAL;
+		}
+		str += len;
+		if (*str != ':')
+			break;
+		str++;
+	}
+
+	return rc ? : features;
+}
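+
+/*
+ * For example, "ro:failfast" yields
+ * DASD_FEATURE_READONLY | DASD_FEATURE_FAILFAST, a NULL string yields
+ * DASD_FEATURE_DEFAULT, and any unknown option makes the list invalid.
+ */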
+
+/*
+ * Try to match the first element of the comma separated parse string
+ * with one of the known keywords. If a keyword is found, take the
+ * appropriate action and return zero. If the first element could not
+ * be matched to any keyword then return an error code.
+ */
+static int __init dasd_parse_keyword(char *keyword)
+{
+	int length = strlen(keyword);
+
+	if (strncmp("autodetect", keyword, length) == 0) {
+		dasd_autodetect = 1;
+		pr_info("The autodetection mode has been activated\n");
+		return 0;
+	}
+	if (strncmp("probeonly", keyword, length) == 0) {
+		dasd_probeonly = 1;
+		pr_info("The probeonly mode has been activated\n");
+		return 0;
+	}
+	if (strncmp("nopav", keyword, length) == 0) {
+		if (MACHINE_IS_VM)
+			pr_info("'nopav' is not supported on z/VM\n");
+		else {
+			dasd_nopav = 1;
+			pr_info("PAV support has be deactivated\n");
+		}
+		return 0;
+	}
+	if (strncmp("nofcx", keyword, length) == 0) {
+		dasd_nofcx = 1;
+		pr_info("High Performance FICON support has been "
+			"deactivated\n");
+		return 0;
+	}
+	if (strncmp("fixedbuffers", keyword, length) == 0) {
+		if (dasd_page_cache)
+			return 0;
+		dasd_page_cache =
+			kmem_cache_create("dasd_page_cache", PAGE_SIZE,
+					  PAGE_SIZE, SLAB_CACHE_DMA,
+					  NULL);
+		if (!dasd_page_cache)
+			DBF_EVENT(DBF_WARNING, "%s", "Failed to create slab, "
+				"fixed buffer mode disabled.");
+		else
+			DBF_EVENT(DBF_INFO, "%s",
+				 "turning on fixed buffer mode");
+		return 0;
+	}
+
+	return -EINVAL;
+}
+
+/*
+ * Split a string of a device range into its pieces and return the from, to, and
+ * feature parts separately.
+ * e.g.:
+ * 0.0.1234-0.0.5678(ro:erplog) -> from: 0.0.1234 to: 0.0.5678 features: ro:erplog
+ * 0.0.8765(raw) -> from: 0.0.8765 to: null features: raw
+ * 0x4321 -> from: 0x4321 to: null features: null
+ */
+static int __init dasd_evaluate_range_param(char *range, char **from_str,
+					    char **to_str, char **features_str)
+{
+	int rc = 0;
+
+	/* Do we have a range or a single device? */
+	if (strchr(range, '-')) {
+		*from_str = strsep(&range, "-");
+		*to_str = strsep(&range, "(");
+		*features_str = strsep(&range, ")");
+	} else {
+		*from_str = strsep(&range, "(");
+		*features_str = strsep(&range, ")");
+	}
+
+	if (*features_str && !range) {
+		pr_warn("A closing parenthesis ')' is missing in the dasd= parameter\n");
+		rc = -EINVAL;
+	}
+
+	return rc;
+}
+
+/*
+ * Try to interpret the range string as a device number or a range of devices.
+ * If the interpretation is successful, create the matching dasd_devmap entries.
+ * If interpretation fails or in case of an error, return an error code.
+ */
+static int __init dasd_parse_range(const char *range)
+{
+	struct dasd_devmap *devmap;
+	int from, from_id0, from_id1;
+	int to, to_id0, to_id1;
+	int features;
+	char bus_id[DASD_BUS_ID_SIZE + 1];
+	char *features_str = NULL;
+	char *from_str = NULL;
+	char *to_str = NULL;
+	int rc = 0;
+	char *tmp;
+
+	tmp = kstrdup(range, GFP_KERNEL);
+	if (!tmp)
+		return -ENOMEM;
+
+	if (dasd_evaluate_range_param(tmp, &from_str, &to_str, &features_str)) {
+		rc = -EINVAL;
+		goto out;
+	}
+
+	if (dasd_busid(from_str, &from_id0, &from_id1, &from)) {
+		rc = -EINVAL;
+		goto out;
+	}
+
+	to = from;
+	to_id0 = from_id0;
+	to_id1 = from_id1;
+	if (to_str) {
+		if (dasd_busid(to_str, &to_id0, &to_id1, &to)) {
+			rc = -EINVAL;
+			goto out;
+		}
+		if (from_id0 != to_id0 || from_id1 != to_id1 || from > to) {
+			pr_err("%s is not a valid device range\n", range);
+			rc = -EINVAL;
+			goto out;
+		}
+	}
+
+	features = dasd_feature_list(features_str);
+	if (features < 0) {
+		rc = -EINVAL;
+		goto out;
+	}
+	/* each device in dasd= parameter should be set initially online */
+	features |= DASD_FEATURE_INITIAL_ONLINE;
+	while (from <= to) {
+		sprintf(bus_id, "%01x.%01x.%04x", from_id0, from_id1, from++);
+		devmap = dasd_add_busid(bus_id, features);
+		if (IS_ERR(devmap)) {
+			rc = PTR_ERR(devmap);
+			goto out;
+		}
+	}
+
+out:
+	kfree(tmp);
+
+	return rc;
+}
+
+/*
+ * Parse parameters stored in dasd[]
+ * The 'dasd=...' parameter allows specifying a comma separated list of
+ * keywords and device ranges. The parameters in that list will be stored as
+ * separate elements in dasd[].
+ */
+int __init dasd_parse(void)
+{
+	int rc, i;
+	char *cur;
+
+	rc = 0;
+	for (i = 0; i < DASD_MAX_PARAMS; i++) {
+		cur = dasd[i];
+		if (!cur)
+			break;
+		if (*cur == '\0')
+			continue;
+
+		rc = dasd_parse_keyword(cur);
+		if (rc)
+			rc = dasd_parse_range(cur);
+
+		if (rc)
+			break;
+	}
+
+	return rc;
+}
+
+/*
+ * Add a devmap for the device specified by busid. It is possible that
+ * the devmap already exists (dasd= parameter). The order of the devices
+ * added through this function will define the kdevs for the individual
+ * devices.
+ */
+static struct dasd_devmap *
+dasd_add_busid(const char *bus_id, int features)
+{
+	struct dasd_devmap *devmap, *new, *tmp;
+	int hash;
+
+	new = kzalloc(sizeof(struct dasd_devmap), GFP_KERNEL);
+	if (!new)
+		return ERR_PTR(-ENOMEM);
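+	/*
+	 * The new entry is allocated with GFP_KERNEL before taking
+	 * dasd_devmap_lock, because the allocation may sleep, which is
+	 * not allowed under the spinlock. If the bus_id turns out to be
+	 * known already, "new" remains unused and is freed again after
+	 * the unlock below.
+	 */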
+	spin_lock(&dasd_devmap_lock);
+	devmap = NULL;
+	hash = dasd_hash_busid(bus_id);
+	list_for_each_entry(tmp, &dasd_hashlists[hash], list)
+		if (strncmp(tmp->bus_id, bus_id, DASD_BUS_ID_SIZE) == 0) {
+			devmap = tmp;
+			break;
+		}
+	if (!devmap) {
+		/* This bus_id is new. */
+		new->devindex = dasd_max_devindex++;
+		strlcpy(new->bus_id, bus_id, DASD_BUS_ID_SIZE);
+		new->features = features;
+		new->device = NULL;
+		list_add(&new->list, &dasd_hashlists[hash]);
+		devmap = new;
+		new = NULL;
+	}
+	spin_unlock(&dasd_devmap_lock);
+	kfree(new);
+	return devmap;
+}
+
+/*
+ * Find devmap for device with given bus_id.
+ */
+static struct dasd_devmap *
+dasd_find_busid(const char *bus_id)
+{
+	struct dasd_devmap *devmap, *tmp;
+	int hash;
+
+	spin_lock(&dasd_devmap_lock);
+	devmap = ERR_PTR(-ENODEV);
+	hash = dasd_hash_busid(bus_id);
+	list_for_each_entry(tmp, &dasd_hashlists[hash], list) {
+		if (strncmp(tmp->bus_id, bus_id, DASD_BUS_ID_SIZE) == 0) {
+			devmap = tmp;
+			break;
+		}
+	}
+	spin_unlock(&dasd_devmap_lock);
+	return devmap;
+}
+
+/*
+ * Check if busid has been added to the list of dasd ranges.
+ */
+int
+dasd_busid_known(const char *bus_id)
+{
+	return IS_ERR(dasd_find_busid(bus_id)) ? -ENOENT : 0;
+}
+
+/*
+ * Forget all about the device numbers added so far.
+ * This may only be called at module unload or system shutdown.
+ */
+static void
+dasd_forget_ranges(void)
+{
+	struct dasd_devmap *devmap, *n;
+	int i;
+
+	spin_lock(&dasd_devmap_lock);
+	for (i = 0; i < 256; i++) {
+		list_for_each_entry_safe(devmap, n, &dasd_hashlists[i], list) {
+			BUG_ON(devmap->device != NULL);
+			list_del(&devmap->list);
+			kfree(devmap);
+		}
+	}
+	spin_unlock(&dasd_devmap_lock);
+}
+
+/*
+ * Find the device struct by its device index.
+ */
+struct dasd_device *
+dasd_device_from_devindex(int devindex)
+{
+	struct dasd_devmap *devmap, *tmp;
+	struct dasd_device *device;
+	int i;
+
+	spin_lock(&dasd_devmap_lock);
+	devmap = NULL;
+	for (i = 0; (i < 256) && !devmap; i++)
+		list_for_each_entry(tmp, &dasd_hashlists[i], list)
+			if (tmp->devindex == devindex) {
+				/* Found the devmap for the device. */
+				devmap = tmp;
+				break;
+			}
+	if (devmap && devmap->device) {
+		device = devmap->device;
+		dasd_get_device(device);
+	} else
+		device = ERR_PTR(-ENODEV);
+	spin_unlock(&dasd_devmap_lock);
+	return device;
+}
+
+/*
+ * Return devmap for cdev. If no devmap exists yet, create one and
+ * connect it to the cdev.
+ */
+static struct dasd_devmap *
+dasd_devmap_from_cdev(struct ccw_device *cdev)
+{
+	struct dasd_devmap *devmap;
+
+	devmap = dasd_find_busid(dev_name(&cdev->dev));
+	if (IS_ERR(devmap))
+		devmap = dasd_add_busid(dev_name(&cdev->dev),
+					DASD_FEATURE_DEFAULT);
+	return devmap;
+}
+
+/*
+ * Create a dasd device structure for cdev.
+ */
+struct dasd_device *
+dasd_create_device(struct ccw_device *cdev)
+{
+	struct dasd_devmap *devmap;
+	struct dasd_device *device;
+	unsigned long flags;
+	int rc;
+
+	devmap = dasd_devmap_from_cdev(cdev);
+	if (IS_ERR(devmap))
+		return (void *) devmap;
+
+	device = dasd_alloc_device();
+	if (IS_ERR(device))
+		return device;
+	atomic_set(&device->ref_count, 3);
+
+	spin_lock(&dasd_devmap_lock);
+	if (!devmap->device) {
+		devmap->device = device;
+		device->devindex = devmap->devindex;
+		device->features = devmap->features;
+		get_device(&cdev->dev);
+		device->cdev = cdev;
+		rc = 0;
+	} else
+		/* Someone else was faster. */
+		rc = -EBUSY;
+	spin_unlock(&dasd_devmap_lock);
+
+	if (rc) {
+		dasd_free_device(device);
+		return ERR_PTR(rc);
+	}
+
+	spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
+	dev_set_drvdata(&cdev->dev, device);
+	spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
+
+	return device;
+}
+
+/*
+ * Wait queue for dasd_delete_device waits.
+ */
+static DECLARE_WAIT_QUEUE_HEAD(dasd_delete_wq);
+
+/*
+ * Remove a dasd device structure. The passed reference
+ * is dropped.
+ */
+void
+dasd_delete_device(struct dasd_device *device)
+{
+	struct ccw_device *cdev;
+	struct dasd_devmap *devmap;
+	unsigned long flags;
+
+	/* First remove device pointer from devmap. */
+	devmap = dasd_find_busid(dev_name(&device->cdev->dev));
+	BUG_ON(IS_ERR(devmap));
+	spin_lock(&dasd_devmap_lock);
+	if (devmap->device != device) {
+		spin_unlock(&dasd_devmap_lock);
+		dasd_put_device(device);
+		return;
+	}
+	devmap->device = NULL;
+	spin_unlock(&dasd_devmap_lock);
+
+	/* Disconnect dasd_device structure from ccw_device structure. */
+	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
+	dev_set_drvdata(&device->cdev->dev, NULL);
+	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
+
+	/*
+	 * Drop ref_count by 3, one for the devmap reference, one for
+	 * the cdev reference and one for the passed reference.
+	 */
+	atomic_sub(3, &device->ref_count);
+
+	/* Wait for reference counter to drop to zero. */
+	wait_event(dasd_delete_wq, atomic_read(&device->ref_count) == 0);
+
+	dasd_generic_free_discipline(device);
+	/* Disconnect dasd_device structure from ccw_device structure. */
+	cdev = device->cdev;
+	device->cdev = NULL;
+
+	/* Put ccw_device structure. */
+	put_device(&cdev->dev);
+
+	/* Now the device structure can be freed. */
+	dasd_free_device(device);
+}
+
+/*
+ * Reference counter dropped to zero. Wake up waiter
+ * in dasd_delete_device.
+ */
+void
+dasd_put_device_wake(struct dasd_device *device)
+{
+	wake_up(&dasd_delete_wq);
+}
+EXPORT_SYMBOL_GPL(dasd_put_device_wake);
+
+/*
+ * Return dasd_device structure associated with cdev.
+ * This function needs to be called with the ccw device
+ * lock held. It can be used from interrupt context.
+ */
+struct dasd_device *
+dasd_device_from_cdev_locked(struct ccw_device *cdev)
+{
+	struct dasd_device *device = dev_get_drvdata(&cdev->dev);
+
+	if (!device)
+		return ERR_PTR(-ENODEV);
+	dasd_get_device(device);
+	return device;
+}
+
+/*
+ * Return dasd_device structure associated with cdev.
+ */
+struct dasd_device *
+dasd_device_from_cdev(struct ccw_device *cdev)
+{
+	struct dasd_device *device;
+	unsigned long flags;
+
+	spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
+	device = dasd_device_from_cdev_locked(cdev);
+	spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
+	return device;
+}
+
+void dasd_add_link_to_gendisk(struct gendisk *gdp, struct dasd_device *device)
+{
+	struct dasd_devmap *devmap;
+
+	devmap = dasd_find_busid(dev_name(&device->cdev->dev));
+	if (IS_ERR(devmap))
+		return;
+	spin_lock(&dasd_devmap_lock);
+	gdp->private_data = devmap;
+	spin_unlock(&dasd_devmap_lock);
+}
+
+struct dasd_device *dasd_device_from_gendisk(struct gendisk *gdp)
+{
+	struct dasd_device *device;
+	struct dasd_devmap *devmap;
+
+	if (!gdp->private_data)
+		return NULL;
+	device = NULL;
+	spin_lock(&dasd_devmap_lock);
+	devmap = gdp->private_data;
+	if (devmap && devmap->device) {
+		device = devmap->device;
+		dasd_get_device(device);
+	}
+	spin_unlock(&dasd_devmap_lock);
+	return device;
+}
+
+/*
+ * SECTION: files in sysfs
+ */
+
+/*
+ * failfast controls the behaviour if no path is available
+ */
+static ssize_t dasd_ff_show(struct device *dev, struct device_attribute *attr,
+			    char *buf)
+{
+	struct dasd_devmap *devmap;
+	int ff_flag;
+
+	devmap = dasd_find_busid(dev_name(dev));
+	if (!IS_ERR(devmap))
+		ff_flag = (devmap->features & DASD_FEATURE_FAILFAST) != 0;
+	else
+		ff_flag = (DASD_FEATURE_DEFAULT & DASD_FEATURE_FAILFAST) != 0;
+	return snprintf(buf, PAGE_SIZE, ff_flag ? "1\n" : "0\n");
+}
+
+static ssize_t dasd_ff_store(struct device *dev, struct device_attribute *attr,
+	      const char *buf, size_t count)
+{
+	unsigned int val;
+	int rc;
+
+	if (kstrtouint(buf, 0, &val) || val > 1)
+		return -EINVAL;
+
+	rc = dasd_set_feature(to_ccwdev(dev), DASD_FEATURE_FAILFAST, val);
+
+	return rc ? : count;
+}
+
+static DEVICE_ATTR(failfast, 0644, dasd_ff_show, dasd_ff_store);
+
+/*
+ * readonly controls the readonly status of a dasd
+ */
+static ssize_t
+dasd_ro_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	struct dasd_devmap *devmap;
+	struct dasd_device *device;
+	int ro_flag = 0;
+
+	devmap = dasd_find_busid(dev_name(dev));
+	if (IS_ERR(devmap))
+		goto out;
+
+	ro_flag = !!(devmap->features & DASD_FEATURE_READONLY);
+
+	spin_lock(&dasd_devmap_lock);
+	device = devmap->device;
+	if (device)
+		ro_flag |= test_bit(DASD_FLAG_DEVICE_RO, &device->flags);
+	spin_unlock(&dasd_devmap_lock);
+
+out:
+	return snprintf(buf, PAGE_SIZE, ro_flag ? "1\n" : "0\n");
+}
+
+static ssize_t
+dasd_ro_store(struct device *dev, struct device_attribute *attr,
+	      const char *buf, size_t count)
+{
+	struct ccw_device *cdev = to_ccwdev(dev);
+	struct dasd_device *device;
+	unsigned long flags;
+	unsigned int val;
+	int rc;
+
+	if (kstrtouint(buf, 0, &val) || val > 1)
+		return -EINVAL;
+
+	rc = dasd_set_feature(cdev, DASD_FEATURE_READONLY, val);
+	if (rc)
+		return rc;
+
+	device = dasd_device_from_cdev(cdev);
+	if (IS_ERR(device))
+		return count;
+
+	spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
+	val = val || test_bit(DASD_FLAG_DEVICE_RO, &device->flags);
+
+	if (!device->block || !device->block->gdp ||
+	    test_bit(DASD_FLAG_OFFLINE, &device->flags)) {
+		spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
+		goto out;
+	}
+	/* Increase open_count to avoid losing the block device */
+	atomic_inc(&device->block->open_count);
+	spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
+
+	set_disk_ro(device->block->gdp, val);
+	atomic_dec(&device->block->open_count);
+
+out:
+	dasd_put_device(device);
+
+	return count;
+}
+
+static DEVICE_ATTR(readonly, 0644, dasd_ro_show, dasd_ro_store);
+/*
+ * erplog controls the logging of ERP related data
+ * (e.g. failing channel programs).
+ */
+static ssize_t
+dasd_erplog_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	struct dasd_devmap *devmap;
+	int erplog;
+
+	devmap = dasd_find_busid(dev_name(dev));
+	if (!IS_ERR(devmap))
+		erplog = (devmap->features & DASD_FEATURE_ERPLOG) != 0;
+	else
+		erplog = (DASD_FEATURE_DEFAULT & DASD_FEATURE_ERPLOG) != 0;
+	return snprintf(buf, PAGE_SIZE, erplog ? "1\n" : "0\n");
+}
+
+static ssize_t
+dasd_erplog_store(struct device *dev, struct device_attribute *attr,
+	      const char *buf, size_t count)
+{
+	unsigned int val;
+	int rc;
+
+	if (kstrtouint(buf, 0, &val) || val > 1)
+		return -EINVAL;
+
+	rc = dasd_set_feature(to_ccwdev(dev), DASD_FEATURE_ERPLOG, val);
+
+	return rc ? : count;
+}
+
+static DEVICE_ATTR(erplog, 0644, dasd_erplog_show, dasd_erplog_store);
+
+/*
+ * use_diag controls whether the driver should use diag rather than ssch
+ * to talk to the device
+ */
+static ssize_t
+dasd_use_diag_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	struct dasd_devmap *devmap;
+	int use_diag;
+
+	devmap = dasd_find_busid(dev_name(dev));
+	if (!IS_ERR(devmap))
+		use_diag = (devmap->features & DASD_FEATURE_USEDIAG) != 0;
+	else
+		use_diag = (DASD_FEATURE_DEFAULT & DASD_FEATURE_USEDIAG) != 0;
+	return sprintf(buf, use_diag ? "1\n" : "0\n");
+}
+
+static ssize_t
+dasd_use_diag_store(struct device *dev, struct device_attribute *attr,
+		    const char *buf, size_t count)
+{
+	struct dasd_devmap *devmap;
+	unsigned int val;
+	ssize_t rc;
+
+	devmap = dasd_devmap_from_cdev(to_ccwdev(dev));
+	if (IS_ERR(devmap))
+		return PTR_ERR(devmap);
+
+	if (kstrtouint(buf, 0, &val) || val > 1)
+		return -EINVAL;
+
+	spin_lock(&dasd_devmap_lock);
+	/* Changing diag discipline flag is only allowed in offline state. */
+	rc = count;
+	if (!devmap->device && !(devmap->features & DASD_FEATURE_USERAW)) {
+		if (val)
+			devmap->features |= DASD_FEATURE_USEDIAG;
+		else
+			devmap->features &= ~DASD_FEATURE_USEDIAG;
+	} else
+		rc = -EPERM;
+	spin_unlock(&dasd_devmap_lock);
+	return rc;
+}
+
+static DEVICE_ATTR(use_diag, 0644, dasd_use_diag_show, dasd_use_diag_store);
+
+/*
+ * use_raw controls whether the driver should give access to raw eckd data or
+ * operate in standard mode
+ */
+static ssize_t
+dasd_use_raw_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	struct dasd_devmap *devmap;
+	int use_raw;
+
+	devmap = dasd_find_busid(dev_name(dev));
+	if (!IS_ERR(devmap))
+		use_raw = (devmap->features & DASD_FEATURE_USERAW) != 0;
+	else
+		use_raw = (DASD_FEATURE_DEFAULT & DASD_FEATURE_USERAW) != 0;
+	return sprintf(buf, use_raw ? "1\n" : "0\n");
+}
+
+static ssize_t
+dasd_use_raw_store(struct device *dev, struct device_attribute *attr,
+		    const char *buf, size_t count)
+{
+	struct dasd_devmap *devmap;
+	ssize_t rc;
+	unsigned long val;
+
+	devmap = dasd_devmap_from_cdev(to_ccwdev(dev));
+	if (IS_ERR(devmap))
+		return PTR_ERR(devmap);
+
+	if ((kstrtoul(buf, 10, &val) != 0) || val > 1)
+		return -EINVAL;
+
+	spin_lock(&dasd_devmap_lock);
+	/* Changing the raw track access flag is only allowed in offline state. */
+	rc = count;
+	if (!devmap->device && !(devmap->features & DASD_FEATURE_USEDIAG)) {
+		if (val)
+			devmap->features |= DASD_FEATURE_USERAW;
+		else
+			devmap->features &= ~DASD_FEATURE_USERAW;
+	} else
+		rc = -EPERM;
+	spin_unlock(&dasd_devmap_lock);
+	return rc;
+}
+
+static DEVICE_ATTR(raw_track_access, 0644, dasd_use_raw_show,
+		   dasd_use_raw_store);
+
+static ssize_t
+dasd_safe_offline_store(struct device *dev, struct device_attribute *attr,
+			const char *buf, size_t count)
+{
+	struct ccw_device *cdev = to_ccwdev(dev);
+	struct dasd_device *device;
+	unsigned long flags;
+	int rc;
+
+	spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
+	device = dasd_device_from_cdev_locked(cdev);
+	if (IS_ERR(device)) {
+		rc = PTR_ERR(device);
+		spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
+		goto out;
+	}
+
+	if (test_bit(DASD_FLAG_OFFLINE, &device->flags) ||
+	    test_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) {
+		/* Already doing offline processing */
+		dasd_put_device(device);
+		spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
+		rc = -EBUSY;
+		goto out;
+	}
+
+	set_bit(DASD_FLAG_SAFE_OFFLINE, &device->flags);
+	dasd_put_device(device);
+	spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
+
+	rc = ccw_device_set_offline(cdev);
+
+out:
+	return rc ? rc : count;
+}
+
+static DEVICE_ATTR(safe_offline, 0200, NULL, dasd_safe_offline_store);
+
+static ssize_t
+dasd_access_show(struct device *dev, struct device_attribute *attr,
+		 char *buf)
+{
+	struct ccw_device *cdev = to_ccwdev(dev);
+	struct dasd_device *device;
+	int count;
+
+	device = dasd_device_from_cdev(cdev);
+	if (IS_ERR(device))
+		return PTR_ERR(device);
+
+	if (!device->discipline)
+		count = -ENODEV;
+	else if (!device->discipline->host_access_count)
+		count = -EOPNOTSUPP;
+	else
+		count = device->discipline->host_access_count(device);
+
+	dasd_put_device(device);
+	if (count < 0)
+		return count;
+
+	return sprintf(buf, "%d\n", count);
+}
+
+static DEVICE_ATTR(host_access_count, 0444, dasd_access_show, NULL);
+
+static ssize_t
+dasd_discipline_show(struct device *dev, struct device_attribute *attr,
+		     char *buf)
+{
+	struct dasd_device *device;
+	ssize_t len;
+
+	device = dasd_device_from_cdev(to_ccwdev(dev));
+	if (IS_ERR(device))
+		goto out;
+	else if (!device->discipline) {
+		dasd_put_device(device);
+		goto out;
+	} else {
+		len = snprintf(buf, PAGE_SIZE, "%s\n",
+			       device->discipline->name);
+		dasd_put_device(device);
+		return len;
+	}
+out:
+	len = snprintf(buf, PAGE_SIZE, "none\n");
+	return len;
+}
+
+static DEVICE_ATTR(discipline, 0444, dasd_discipline_show, NULL);
+
+static ssize_t
+dasd_device_status_show(struct device *dev, struct device_attribute *attr,
+		     char *buf)
+{
+	struct dasd_device *device;
+	ssize_t len;
+
+	device = dasd_device_from_cdev(to_ccwdev(dev));
+	if (!IS_ERR(device)) {
+		switch (device->state) {
+		case DASD_STATE_NEW:
+			len = snprintf(buf, PAGE_SIZE, "new\n");
+			break;
+		case DASD_STATE_KNOWN:
+			len = snprintf(buf, PAGE_SIZE, "detected\n");
+			break;
+		case DASD_STATE_BASIC:
+			len = snprintf(buf, PAGE_SIZE, "basic\n");
+			break;
+		case DASD_STATE_UNFMT:
+			len = snprintf(buf, PAGE_SIZE, "unformatted\n");
+			break;
+		case DASD_STATE_READY:
+			len = snprintf(buf, PAGE_SIZE, "ready\n");
+			break;
+		case DASD_STATE_ONLINE:
+			len = snprintf(buf, PAGE_SIZE, "online\n");
+			break;
+		default:
+			len = snprintf(buf, PAGE_SIZE, "no stat\n");
+			break;
+		}
+		dasd_put_device(device);
+	} else
+		len = snprintf(buf, PAGE_SIZE, "unknown\n");
+	return len;
+}
+
+static DEVICE_ATTR(status, 0444, dasd_device_status_show, NULL);
+
+static ssize_t dasd_alias_show(struct device *dev,
+			       struct device_attribute *attr, char *buf)
+{
+	struct dasd_device *device;
+	struct dasd_uid uid;
+
+	device = dasd_device_from_cdev(to_ccwdev(dev));
+	if (IS_ERR(device))
+		return sprintf(buf, "0\n");
+
+	if (device->discipline && device->discipline->get_uid &&
+	    !device->discipline->get_uid(device, &uid)) {
+		if (uid.type == UA_BASE_PAV_ALIAS ||
+		    uid.type == UA_HYPER_PAV_ALIAS) {
+			dasd_put_device(device);
+			return sprintf(buf, "1\n");
+		}
+	}
+	dasd_put_device(device);
+
+	return sprintf(buf, "0\n");
+}
+
+static DEVICE_ATTR(alias, 0444, dasd_alias_show, NULL);
+
+static ssize_t dasd_vendor_show(struct device *dev,
+				struct device_attribute *attr, char *buf)
+{
+	struct dasd_device *device;
+	struct dasd_uid uid;
+	char *vendor;
+
+	device = dasd_device_from_cdev(to_ccwdev(dev));
+	vendor = "";
+	if (IS_ERR(device))
+		return snprintf(buf, PAGE_SIZE, "%s\n", vendor);
+
+	if (device->discipline && device->discipline->get_uid &&
+	    !device->discipline->get_uid(device, &uid))
+			vendor = uid.vendor;
+
+	dasd_put_device(device);
+
+	return snprintf(buf, PAGE_SIZE, "%s\n", vendor);
+}
+
+static DEVICE_ATTR(vendor, 0444, dasd_vendor_show, NULL);
+
+#define UID_STRLEN ( /* vendor */ 3 + 1 + /* serial    */ 14 + 1 +\
+		     /* SSID   */ 4 + 1 + /* unit addr */ 2 + 1 +\
+		     /* vduit */ 32 + 1)
+
+static ssize_t
+dasd_uid_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	struct dasd_device *device;
+	struct dasd_uid uid;
+	char uid_string[UID_STRLEN];
+	char ua_string[3];
+
+	device = dasd_device_from_cdev(to_ccwdev(dev));
+	uid_string[0] = 0;
+	if (IS_ERR(device))
+		return snprintf(buf, PAGE_SIZE, "%s\n", uid_string);
+
+	if (device->discipline && device->discipline->get_uid &&
+	    !device->discipline->get_uid(device, &uid)) {
+		switch (uid.type) {
+		case UA_BASE_DEVICE:
+			snprintf(ua_string, sizeof(ua_string), "%02x",
+				 uid.real_unit_addr);
+			break;
+		case UA_BASE_PAV_ALIAS:
+			snprintf(ua_string, sizeof(ua_string), "%02x",
+				 uid.base_unit_addr);
+			break;
+		case UA_HYPER_PAV_ALIAS:
+			snprintf(ua_string, sizeof(ua_string), "xx");
+			break;
+		default:
+			/* should not happen, treat like base device */
+			snprintf(ua_string, sizeof(ua_string), "%02x",
+				 uid.real_unit_addr);
+			break;
+		}
+
+		if (strlen(uid.vduit) > 0)
+			snprintf(uid_string, sizeof(uid_string),
+				 "%s.%s.%04x.%s.%s",
+				 uid.vendor, uid.serial, uid.ssid, ua_string,
+				 uid.vduit);
+		else
+			snprintf(uid_string, sizeof(uid_string),
+				 "%s.%s.%04x.%s",
+				 uid.vendor, uid.serial, uid.ssid, ua_string);
+	}
+	dasd_put_device(device);
+
+	return snprintf(buf, PAGE_SIZE, "%s\n", uid_string);
+}
+static DEVICE_ATTR(uid, 0444, dasd_uid_show, NULL);
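+
+/*
+ * The resulting uid has the form vendor.serial.SSID.unitaddr[.vduit],
+ * e.g. "IBM.750000000ABC01.1500.2a" (values illustrative); when the
+ * device reports a vduit (e.g. on z/VM), it is appended as a fifth
+ * component.
+ */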
+
+/*
+ * extended error-reporting
+ */
+static ssize_t
+dasd_eer_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	struct dasd_devmap *devmap;
+	int eer_flag;
+
+	devmap = dasd_find_busid(dev_name(dev));
+	if (!IS_ERR(devmap) && devmap->device)
+		eer_flag = dasd_eer_enabled(devmap->device);
+	else
+		eer_flag = 0;
+	return snprintf(buf, PAGE_SIZE, eer_flag ? "1\n" : "0\n");
+}
+
+static ssize_t
+dasd_eer_store(struct device *dev, struct device_attribute *attr,
+	       const char *buf, size_t count)
+{
+	struct dasd_device *device;
+	unsigned int val;
+	int rc = 0;
+
+	device = dasd_device_from_cdev(to_ccwdev(dev));
+	if (IS_ERR(device))
+		return PTR_ERR(device);
+
+	if (kstrtouint(buf, 0, &val) || val > 1) {
+		dasd_put_device(device);
+		return -EINVAL;
+	}
+
+	if (val)
+		rc = dasd_eer_enable(device);
+	else
+		dasd_eer_disable(device);
+
+	dasd_put_device(device);
+
+	return rc ? : count;
+}
+
+static DEVICE_ATTR(eer_enabled, 0644, dasd_eer_show, dasd_eer_store);
+
+/*
+ * expiration time for default requests
+ */
+static ssize_t
+dasd_expires_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	struct dasd_device *device;
+	int len;
+
+	device = dasd_device_from_cdev(to_ccwdev(dev));
+	if (IS_ERR(device))
+		return -ENODEV;
+	len = snprintf(buf, PAGE_SIZE, "%lu\n", device->default_expires);
+	dasd_put_device(device);
+	return len;
+}
+
+static ssize_t
+dasd_expires_store(struct device *dev, struct device_attribute *attr,
+	       const char *buf, size_t count)
+{
+	struct dasd_device *device;
+	unsigned long val;
+
+	device = dasd_device_from_cdev(to_ccwdev(dev));
+	if (IS_ERR(device))
+		return -ENODEV;
+
+	if ((kstrtoul(buf, 10, &val) != 0) ||
+	    (val > DASD_EXPIRES_MAX) || val == 0) {
+		dasd_put_device(device);
+		return -EINVAL;
+	}
+
+	if (val)
+		device->default_expires = val;
+
+	dasd_put_device(device);
+	return count;
+}
+
+static DEVICE_ATTR(expires, 0644, dasd_expires_show, dasd_expires_store);
+
+static ssize_t
+dasd_retries_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	struct dasd_device *device;
+	int len;
+
+	device = dasd_device_from_cdev(to_ccwdev(dev));
+	if (IS_ERR(device))
+		return -ENODEV;
+	len = snprintf(buf, PAGE_SIZE, "%lu\n", device->default_retries);
+	dasd_put_device(device);
+	return len;
+}
+
+static ssize_t
+dasd_retries_store(struct device *dev, struct device_attribute *attr,
+		   const char *buf, size_t count)
+{
+	struct dasd_device *device;
+	unsigned long val;
+
+	device = dasd_device_from_cdev(to_ccwdev(dev));
+	if (IS_ERR(device))
+		return -ENODEV;
+
+	if ((kstrtoul(buf, 10, &val) != 0) ||
+	    (val > DASD_RETRIES_MAX)) {
+		dasd_put_device(device);
+		return -EINVAL;
+	}
+
+	if (val)
+		device->default_retries = val;
+
+	dasd_put_device(device);
+	return count;
+}
+
+static DEVICE_ATTR(retries, 0644, dasd_retries_show, dasd_retries_store);
+
+static ssize_t
+dasd_timeout_show(struct device *dev, struct device_attribute *attr,
+		  char *buf)
+{
+	struct dasd_device *device;
+	int len;
+
+	device = dasd_device_from_cdev(to_ccwdev(dev));
+	if (IS_ERR(device))
+		return -ENODEV;
+	len = snprintf(buf, PAGE_SIZE, "%lu\n", device->blk_timeout);
+	dasd_put_device(device);
+	return len;
+}
+
+static ssize_t
+dasd_timeout_store(struct device *dev, struct device_attribute *attr,
+		   const char *buf, size_t count)
+{
+	struct dasd_device *device;
+	struct request_queue *q;
+	unsigned long val;
+
+	device = dasd_device_from_cdev(to_ccwdev(dev));
+	if (IS_ERR(device))
+		return -ENODEV;
+	if (!device->block) {
+		dasd_put_device(device);
+		return -ENODEV;
+	}
+
+	if ((kstrtoul(buf, 10, &val) != 0) ||
+	    val > UINT_MAX / HZ) {
+		dasd_put_device(device);
+		return -EINVAL;
+	}
+	q = device->block->request_queue;
+	if (!q) {
+		dasd_put_device(device);
+		return -ENODEV;
+	}
+
+	device->blk_timeout = val;
+
+	blk_queue_rq_timeout(q, device->blk_timeout * HZ);
+
+	dasd_put_device(device);
+	return count;
+}
+
+static DEVICE_ATTR(timeout, 0644,
+		   dasd_timeout_show, dasd_timeout_store);
+
+
+static ssize_t
+dasd_path_reset_store(struct device *dev, struct device_attribute *attr,
+		      const char *buf, size_t count)
+{
+	struct dasd_device *device;
+	unsigned int val;
+
+	device = dasd_device_from_cdev(to_ccwdev(dev));
+	if (IS_ERR(device))
+		return -ENODEV;
+
+	if ((kstrtouint(buf, 16, &val) != 0) || val > 0xff)
+		val = 0;
+
+	if (device->discipline && device->discipline->reset_path)
+		device->discipline->reset_path(device, (__u8) val);
+
+	dasd_put_device(device);
+	return count;
+}
+
+static DEVICE_ATTR(path_reset, 0200, NULL, dasd_path_reset_store);
+
+static ssize_t dasd_hpf_show(struct device *dev, struct device_attribute *attr,
+			     char *buf)
+{
+	struct dasd_device *device;
+	int hpf;
+
+	device = dasd_device_from_cdev(to_ccwdev(dev));
+	if (IS_ERR(device))
+		return -ENODEV;
+	if (!device->discipline || !device->discipline->hpf_enabled) {
+		dasd_put_device(device);
+		return snprintf(buf, PAGE_SIZE, "%d\n", dasd_nofcx);
+	}
+	hpf = device->discipline->hpf_enabled(device);
+	dasd_put_device(device);
+	return snprintf(buf, PAGE_SIZE, "%d\n", hpf);
+}
+
+static DEVICE_ATTR(hpf, 0444, dasd_hpf_show, NULL);
+
+static ssize_t dasd_reservation_policy_show(struct device *dev,
+					    struct device_attribute *attr,
+					    char *buf)
+{
+	struct dasd_devmap *devmap;
+	int rc = 0;
+
+	devmap = dasd_find_busid(dev_name(dev));
+	if (IS_ERR(devmap)) {
+		rc = snprintf(buf, PAGE_SIZE, "ignore\n");
+	} else {
+		spin_lock(&dasd_devmap_lock);
+		if (devmap->features & DASD_FEATURE_FAILONSLCK)
+			rc = snprintf(buf, PAGE_SIZE, "fail\n");
+		else
+			rc = snprintf(buf, PAGE_SIZE, "ignore\n");
+		spin_unlock(&dasd_devmap_lock);
+	}
+	return rc;
+}
+
+static ssize_t dasd_reservation_policy_store(struct device *dev,
+					     struct device_attribute *attr,
+					     const char *buf, size_t count)
+{
+	struct ccw_device *cdev = to_ccwdev(dev);
+	int rc;
+
+	if (sysfs_streq("ignore", buf))
+		rc = dasd_set_feature(cdev, DASD_FEATURE_FAILONSLCK, 0);
+	else if (sysfs_streq("fail", buf))
+		rc = dasd_set_feature(cdev, DASD_FEATURE_FAILONSLCK, 1);
+	else
+		rc = -EINVAL;
+
+	return rc ? : count;
+}
+
+static DEVICE_ATTR(reservation_policy, 0644,
+		   dasd_reservation_policy_show, dasd_reservation_policy_store);
+
+static ssize_t dasd_reservation_state_show(struct device *dev,
+					   struct device_attribute *attr,
+					   char *buf)
+{
+	struct dasd_device *device;
+	int rc = 0;
+
+	device = dasd_device_from_cdev(to_ccwdev(dev));
+	if (IS_ERR(device))
+		return snprintf(buf, PAGE_SIZE, "none\n");
+
+	if (test_bit(DASD_FLAG_IS_RESERVED, &device->flags))
+		rc = snprintf(buf, PAGE_SIZE, "reserved\n");
+	else if (test_bit(DASD_FLAG_LOCK_STOLEN, &device->flags))
+		rc = snprintf(buf, PAGE_SIZE, "lost\n");
+	else
+		rc = snprintf(buf, PAGE_SIZE, "none\n");
+	dasd_put_device(device);
+	return rc;
+}
+
+static ssize_t dasd_reservation_state_store(struct device *dev,
+					    struct device_attribute *attr,
+					    const char *buf, size_t count)
+{
+	struct dasd_device *device;
+	int rc = 0;
+
+	device = dasd_device_from_cdev(to_ccwdev(dev));
+	if (IS_ERR(device))
+		return -ENODEV;
+	if (sysfs_streq("reset", buf))
+		clear_bit(DASD_FLAG_LOCK_STOLEN, &device->flags);
+	else
+		rc = -EINVAL;
+	dasd_put_device(device);
+
+	if (rc)
+		return rc;
+	else
+		return count;
+}
+
+static DEVICE_ATTR(last_known_reservation_state, 0644,
+		   dasd_reservation_state_show, dasd_reservation_state_store);
+
+static ssize_t dasd_pm_show(struct device *dev,
+			      struct device_attribute *attr, char *buf)
+{
+	struct dasd_device *device;
+	u8 opm, nppm, cablepm, cuirpm, hpfpm, ifccpm;
+
+	device = dasd_device_from_cdev(to_ccwdev(dev));
+	if (IS_ERR(device))
+		return sprintf(buf, "0\n");
+
+	opm = dasd_path_get_opm(device);
+	nppm = dasd_path_get_nppm(device);
+	cablepm = dasd_path_get_cablepm(device);
+	cuirpm = dasd_path_get_cuirpm(device);
+	hpfpm = dasd_path_get_hpfpm(device);
+	ifccpm = dasd_path_get_ifccpm(device);
+	dasd_put_device(device);
+
+	return sprintf(buf, "%02x %02x %02x %02x %02x %02x\n", opm, nppm,
+		       cablepm, cuirpm, hpfpm, ifccpm);
+}
+
+static DEVICE_ATTR(path_masks, 0444, dasd_pm_show, NULL);
+
+/*
+ * threshold value for IFCC/CCC errors
+ */
+static ssize_t
+dasd_path_threshold_show(struct device *dev,
+			  struct device_attribute *attr, char *buf)
+{
+	struct dasd_device *device;
+	int len;
+
+	device = dasd_device_from_cdev(to_ccwdev(dev));
+	if (IS_ERR(device))
+		return -ENODEV;
+	len = snprintf(buf, PAGE_SIZE, "%lu\n", device->path_thrhld);
+	dasd_put_device(device);
+	return len;
+}
+
+static ssize_t
+dasd_path_threshold_store(struct device *dev, struct device_attribute *attr,
+			   const char *buf, size_t count)
+{
+	struct dasd_device *device;
+	unsigned long flags;
+	unsigned long val;
+
+	device = dasd_device_from_cdev(to_ccwdev(dev));
+	if (IS_ERR(device))
+		return -ENODEV;
+
+	if (kstrtoul(buf, 10, &val) != 0 || val > DASD_THRHLD_MAX) {
+		dasd_put_device(device);
+		return -EINVAL;
+	}
+	spin_lock_irqsave(get_ccwdev_lock(to_ccwdev(dev)), flags);
+	device->path_thrhld = val;
+	spin_unlock_irqrestore(get_ccwdev_lock(to_ccwdev(dev)), flags);
+	dasd_put_device(device);
+	return count;
+}
+static DEVICE_ATTR(path_threshold, 0644, dasd_path_threshold_show,
+		   dasd_path_threshold_store);
+
+/*
+ * configure whether a path is disabled after the IFCC/CCC error
+ * threshold is exceeded
+ */
+static ssize_t
+dasd_path_autodisable_show(struct device *dev,
+				   struct device_attribute *attr, char *buf)
+{
+	struct dasd_devmap *devmap;
+	int flag;
+
+	devmap = dasd_find_busid(dev_name(dev));
+	if (!IS_ERR(devmap))
+		flag = (devmap->features & DASD_FEATURE_PATH_AUTODISABLE) != 0;
+	else
+		flag = (DASD_FEATURE_DEFAULT &
+			DASD_FEATURE_PATH_AUTODISABLE) != 0;
+	return snprintf(buf, PAGE_SIZE, flag ? "1\n" : "0\n");
+}
+
+static ssize_t
+dasd_path_autodisable_store(struct device *dev,
+				    struct device_attribute *attr,
+				    const char *buf, size_t count)
+{
+	unsigned int val;
+	int rc;
+
+	if (kstrtouint(buf, 0, &val) || val > 1)
+		return -EINVAL;
+
+	rc = dasd_set_feature(to_ccwdev(dev),
+			      DASD_FEATURE_PATH_AUTODISABLE, val);
+
+	return rc ? : count;
+}
+
+static DEVICE_ATTR(path_autodisable, 0644,
+		   dasd_path_autodisable_show,
+		   dasd_path_autodisable_store);
+/*
+ * interval for IFCC/CCC checks,
+ * i.e. the time without any IFCC/CCC error before the error counter
+ * gets reset
+ */
+static ssize_t
+dasd_path_interval_show(struct device *dev,
+			struct device_attribute *attr, char *buf)
+{
+	struct dasd_device *device;
+	int len;
+
+	device = dasd_device_from_cdev(to_ccwdev(dev));
+	if (IS_ERR(device))
+		return -ENODEV;
+	len = snprintf(buf, PAGE_SIZE, "%lu\n", device->path_interval);
+	dasd_put_device(device);
+	return len;
+}
+
+static ssize_t
+dasd_path_interval_store(struct device *dev, struct device_attribute *attr,
+	       const char *buf, size_t count)
+{
+	struct dasd_device *device;
+	unsigned long flags;
+	unsigned long val;
+
+	device = dasd_device_from_cdev(to_ccwdev(dev));
+	if (IS_ERR(device))
+		return -ENODEV;
+
+	if ((kstrtoul(buf, 10, &val) != 0) ||
+	    (val > DASD_INTERVAL_MAX) || val == 0) {
+		dasd_put_device(device);
+		return -EINVAL;
+	}
+	spin_lock_irqsave(get_ccwdev_lock(to_ccwdev(dev)), flags);
+	if (val)
+		device->path_interval = val;
+	spin_unlock_irqrestore(get_ccwdev_lock(to_ccwdev(dev)), flags);
+	dasd_put_device(device);
+	return count;
+}
+
+static DEVICE_ATTR(path_interval, 0644, dasd_path_interval_show,
+		   dasd_path_interval_store);
+
+
+static struct attribute * dasd_attrs[] = {
+	&dev_attr_readonly.attr,
+	&dev_attr_discipline.attr,
+	&dev_attr_status.attr,
+	&dev_attr_alias.attr,
+	&dev_attr_vendor.attr,
+	&dev_attr_uid.attr,
+	&dev_attr_use_diag.attr,
+	&dev_attr_raw_track_access.attr,
+	&dev_attr_eer_enabled.attr,
+	&dev_attr_erplog.attr,
+	&dev_attr_failfast.attr,
+	&dev_attr_expires.attr,
+	&dev_attr_retries.attr,
+	&dev_attr_timeout.attr,
+	&dev_attr_reservation_policy.attr,
+	&dev_attr_last_known_reservation_state.attr,
+	&dev_attr_safe_offline.attr,
+	&dev_attr_host_access_count.attr,
+	&dev_attr_path_masks.attr,
+	&dev_attr_path_threshold.attr,
+	&dev_attr_path_autodisable.attr,
+	&dev_attr_path_interval.attr,
+	&dev_attr_path_reset.attr,
+	&dev_attr_hpf.attr,
+	NULL,
+};
+
+static const struct attribute_group dasd_attr_group = {
+	.attrs = dasd_attrs,
+};
+
+/*
+ * Return value of the specified feature.
+ */
+int
+dasd_get_feature(struct ccw_device *cdev, int feature)
+{
+	struct dasd_devmap *devmap;
+
+	devmap = dasd_find_busid(dev_name(&cdev->dev));
+	if (IS_ERR(devmap))
+		return PTR_ERR(devmap);
+
+	return ((devmap->features & feature) != 0);
+}
+
+/*
+ * Set / reset given feature.
+ * Flag indicates whether to set (!=0) or reset (=0) the feature.
+ */
+int
+dasd_set_feature(struct ccw_device *cdev, int feature, int flag)
+{
+	struct dasd_devmap *devmap;
+
+	devmap = dasd_devmap_from_cdev(cdev);
+	if (IS_ERR(devmap))
+		return PTR_ERR(devmap);
+
+	spin_lock(&dasd_devmap_lock);
+	if (flag)
+		devmap->features |= feature;
+	else
+		devmap->features &= ~feature;
+	if (devmap->device)
+		devmap->device->features = devmap->features;
+	spin_unlock(&dasd_devmap_lock);
+	return 0;
+}
+EXPORT_SYMBOL(dasd_set_feature);
+
+
+int
+dasd_add_sysfs_files(struct ccw_device *cdev)
+{
+	return sysfs_create_group(&cdev->dev.kobj, &dasd_attr_group);
+}
+
+void
+dasd_remove_sysfs_files(struct ccw_device *cdev)
+{
+	sysfs_remove_group(&cdev->dev.kobj, &dasd_attr_group);
+}
+
+
+int
+dasd_devmap_init(void)
+{
+	int i;
+
+	/* Initialize devmap structures. */
+	dasd_max_devindex = 0;
+	for (i = 0; i < 256; i++)
+		INIT_LIST_HEAD(&dasd_hashlists[i]);
+	return 0;
+}
+
+void
+dasd_devmap_exit(void)
+{
+	dasd_forget_ranges();
+}
diff --git a/drivers/s390/block/dasd_diag.c b/drivers/s390/block/dasd_diag.c
new file mode 100644
index 0000000..e1fe024
--- /dev/null
+++ b/drivers/s390/block/dasd_diag.c
@@ -0,0 +1,662 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
+ * Based on.......: linux/drivers/s390/block/mdisk.c
+ * ...............: by Hartmunt Penner <hpenner@de.ibm.com>
+ * Bugreports.to..: <Linux390@de.ibm.com>
+ * Copyright IBM Corp. 1999, 2000
+ *
+ */
+
+#define KMSG_COMPONENT "dasd"
+
+#include <linux/kernel_stat.h>
+#include <linux/stddef.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/hdreg.h>
+#include <linux/bio.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/jiffies.h>
+
+#include <asm/dasd.h>
+#include <asm/debug.h>
+#include <asm/diag.h>
+#include <asm/ebcdic.h>
+#include <asm/io.h>
+#include <asm/irq.h>
+#include <asm/vtoc.h>
+
+#include "dasd_int.h"
+#include "dasd_diag.h"
+
+#define PRINTK_HEADER "dasd(diag):"
+
+MODULE_LICENSE("GPL");
+
+/* The maximum number of blocks per request (max_blocks) is dependent on the
+ * amount of storage that is available in the static I/O buffer for each
+ * device. Currently each device gets 2 pages. We want to fit two requests
+ * into the available memory so that we can immediately start the next if one
+ * finishes. */
+#define DIAG_MAX_BLOCKS	(((2 * PAGE_SIZE - sizeof(struct dasd_ccw_req) - \
+			   sizeof(struct dasd_diag_req)) / \
+		           sizeof(struct dasd_diag_bio)) / 2)
+#define DIAG_MAX_RETRIES	32
+#define DIAG_TIMEOUT		50
+
+static struct dasd_discipline dasd_diag_discipline;
+
+struct dasd_diag_private {
+	struct dasd_diag_characteristics rdc_data;
+	struct dasd_diag_rw_io iob;
+	struct dasd_diag_init_io iib;
+	blocknum_t pt_block;
+	struct ccw_dev_id dev_id;
+};
+
+struct dasd_diag_req {
+	unsigned int block_count;
+	struct dasd_diag_bio bio[0];
+};
+
+static const u8 DASD_DIAG_CMS1[] = { 0xc3, 0xd4, 0xe2, 0xf1 };/* EBCDIC CMS1 */
+
+/* Perform DIAG250 call with block I/O parameter list iob (input and output)
+ * and function code cmd.
+ * In case of an exception return 3. Otherwise return result of bitwise OR of
+ * resulting condition code and DIAG return code. */
+static inline int __dia250(void *iob, int cmd)
+{
+	register unsigned long reg2 asm ("2") = (unsigned long) iob;
+	typedef union {
+		struct dasd_diag_init_io init_io;
+		struct dasd_diag_rw_io rw_io;
+	} addr_type;
+	int rc;
+
+	rc = 3;
+	asm volatile(
+		"	diag	2,%2,0x250\n"
+		"0:	ipm	%0\n"
+		"	srl	%0,28\n"
+		"	or	%0,3\n"
+		"1:\n"
+		EX_TABLE(0b,1b)
+		: "+d" (rc), "=m" (*(addr_type *) iob)
+		: "d" (cmd), "d" (reg2), "m" (*(addr_type *) iob)
+		: "3", "cc");
+	return rc;
+}
+
+static inline int dia250(void *iob, int cmd)
+{
+	diag_stat_inc(DIAG_STAT_X250);
+	return __dia250(iob, cmd);
+}
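+
+/*
+ * Reading the combined return value: a program exception leaves the
+ * preset 3 in place via the exception table entry, otherwise the
+ * condition code is OR'ed with the DIAG return code from register 3.
+ * The callers below rely on 0 ("synchronous I/O done") and 8
+ * ("asynchronous I/O started") among others.
+ */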
+
+/* Initialize block I/O to DIAG device using the specified blocksize and
+ * block offset. On success, return zero and set end_block to contain the
+ * number of blocks on the device minus the specified offset. Return non-zero
+ * otherwise. */
+static inline int
+mdsk_init_io(struct dasd_device *device, unsigned int blocksize,
+	     blocknum_t offset, blocknum_t *end_block)
+{
+	struct dasd_diag_private *private = device->private;
+	struct dasd_diag_init_io *iib = &private->iib;
+	int rc;
+
+	memset(iib, 0, sizeof (struct dasd_diag_init_io));
+
+	iib->dev_nr = private->dev_id.devno;
+	iib->block_size = blocksize;
+	iib->offset = offset;
+	iib->flaga = DASD_DIAG_FLAGA_DEFAULT;
+
+	rc = dia250(iib, INIT_BIO);
+
+	if ((rc & 3) == 0 && end_block)
+		*end_block = iib->end_block;
+
+	return rc;
+}
+
+/* Remove block I/O environment for device. Return zero on success, non-zero
+ * otherwise. */
+static inline int
+mdsk_term_io(struct dasd_device * device)
+{
+	struct dasd_diag_private *private = device->private;
+	struct dasd_diag_init_io *iib = &private->iib;
+	int rc;
+
+	memset(iib, 0, sizeof (struct dasd_diag_init_io));
+	iib->dev_nr = private->dev_id.devno;
+	rc = dia250(iib, TERM_BIO);
+	return rc;
+}
+
+/* Error recovery for failed DIAG requests - try to reestablish the DIAG
+ * environment. */
+static void
+dasd_diag_erp(struct dasd_device *device)
+{
+	int rc;
+
+	mdsk_term_io(device);
+	rc = mdsk_init_io(device, device->block->bp_block, 0, NULL);
+	if (rc == 4) {
+		if (!(test_and_set_bit(DASD_FLAG_DEVICE_RO, &device->flags)))
+			pr_warn("%s: The access mode of a DIAG device changed to read-only\n",
+				dev_name(&device->cdev->dev));
+		rc = 0;
+	}
+	if (rc)
+		pr_warn("%s: DIAG ERP failed with rc=%d\n",
+			dev_name(&device->cdev->dev), rc);
+}
+
+/* Start a given request at the device. Return zero on success, non-zero
+ * otherwise. */
+static int
+dasd_start_diag(struct dasd_ccw_req * cqr)
+{
+	struct dasd_device *device;
+	struct dasd_diag_private *private;
+	struct dasd_diag_req *dreq;
+	int rc;
+
+	device = cqr->startdev;
+	if (cqr->retries < 0) {
+		DBF_DEV_EVENT(DBF_ERR, device, "DIAG start_IO: request %p "
+			    "- no retry left)", cqr);
+		cqr->status = DASD_CQR_ERROR;
+		return -EIO;
+	}
+	private = device->private;
+	dreq = cqr->data;
+
+	private->iob.dev_nr = private->dev_id.devno;
+	private->iob.key = 0;
+	private->iob.flags = DASD_DIAG_RWFLAG_ASYNC;
+	private->iob.block_count = dreq->block_count;
+	private->iob.interrupt_params = (addr_t) cqr;
+	private->iob.bio_list = dreq->bio;
+	private->iob.flaga = DASD_DIAG_FLAGA_DEFAULT;
+
+	cqr->startclk = get_tod_clock();
+	cqr->starttime = jiffies;
+	cqr->retries--;
+
+	rc = dia250(&private->iob, RW_BIO);
+	switch (rc) {
+	case 0: /* Synchronous I/O finished successfully */
+		cqr->stopclk = get_tod_clock();
+		cqr->status = DASD_CQR_SUCCESS;
+		/* Indicate to calling function that only a dasd_schedule_bh()
+		   and no timer is needed */
+		rc = -EACCES;
+		break;
+	case 8: /* Asynchronous I/O was started */
+		cqr->status = DASD_CQR_IN_IO;
+		rc = 0;
+		break;
+	default: /* Error condition */
+		cqr->status = DASD_CQR_QUEUED;
+		DBF_DEV_EVENT(DBF_WARNING, device, "dia250 returned rc=%d", rc);
+		dasd_diag_erp(device);
+		rc = -EIO;
+		break;
+	}
+	cqr->intrc = rc;
+	return rc;
+}
+
+/* Terminate given request at the device. */
+static int
+dasd_diag_term_IO(struct dasd_ccw_req * cqr)
+{
+	struct dasd_device *device;
+
+	device = cqr->startdev;
+	mdsk_term_io(device);
+	mdsk_init_io(device, device->block->bp_block, 0, NULL);
+	cqr->status = DASD_CQR_CLEAR_PENDING;
+	cqr->stopclk = get_tod_clock();
+	dasd_schedule_device_bh(device);
+	return 0;
+}
+
+/* Handle external interruption. */
+static void dasd_ext_handler(struct ext_code ext_code,
+			     unsigned int param32, unsigned long param64)
+{
+	struct dasd_ccw_req *cqr, *next;
+	struct dasd_device *device;
+	unsigned long expires;
+	unsigned long flags;
+	addr_t ip;
+	int rc;
+
+	switch (ext_code.subcode >> 8) {
+	case DASD_DIAG_CODE_31BIT:
+		ip = (addr_t) param32;
+		break;
+	case DASD_DIAG_CODE_64BIT:
+		ip = (addr_t) param64;
+		break;
+	default:
+		return;
+	}
+	inc_irq_stat(IRQEXT_DSD);
+	if (!ip) {		/* no intparm: unsolicited interrupt */
+		DBF_EVENT(DBF_NOTICE, "%s", "caught unsolicited "
+			      "interrupt");
+		return;
+	}
+	cqr = (struct dasd_ccw_req *) ip;
+	device = (struct dasd_device *) cqr->startdev;
+	if (strncmp(device->discipline->ebcname, (char *) &cqr->magic, 4)) {
+		DBF_DEV_EVENT(DBF_WARNING, device,
+			    " magic number of dasd_ccw_req 0x%08X doesn't"
+			    " match discipline 0x%08X",
+			    cqr->magic, *(int *) (&device->discipline->name));
+		return;
+	}
+
+	/* get irq lock to modify request queue */
+	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
+
+	/* Check for a pending clear operation */
+	if (cqr->status == DASD_CQR_CLEAR_PENDING) {
+		cqr->status = DASD_CQR_CLEARED;
+		dasd_device_clear_timer(device);
+		dasd_schedule_device_bh(device);
+		spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
+		return;
+	}
+
+	cqr->stopclk = get_tod_clock();
+
+	expires = 0;
+	if ((ext_code.subcode & 0xff) == 0) {
+		cqr->status = DASD_CQR_SUCCESS;
+		/* Start first request on queue if possible -> fast_io. */
+		if (!list_empty(&device->ccw_queue)) {
+			next = list_entry(device->ccw_queue.next,
+					  struct dasd_ccw_req, devlist);
+			if (next->status == DASD_CQR_QUEUED) {
+				rc = dasd_start_diag(next);
+				if (rc == 0)
+					expires = next->expires;
+			}
+		}
+	} else {
+		cqr->status = DASD_CQR_QUEUED;
+		DBF_DEV_EVENT(DBF_DEBUG, device, "interrupt status for "
+			      "request %p was %d (%d retries left)", cqr,
+			      ext_code.subcode & 0xff, cqr->retries);
+		dasd_diag_erp(device);
+	}
+
+	if (expires != 0)
+		dasd_device_set_timer(device, expires);
+	else
+		dasd_device_clear_timer(device);
+	dasd_schedule_device_bh(device);
+
+	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
+}
+
+/* Check whether device can be controlled by DIAG discipline. Return zero on
+ * success, non-zero otherwise. */
+static int
+dasd_diag_check_device(struct dasd_device *device)
+{
+	struct dasd_diag_private *private = device->private;
+	struct dasd_diag_characteristics *rdc_data;
+	struct vtoc_cms_label *label;
+	struct dasd_block *block;
+	struct dasd_diag_bio bio;
+	unsigned int sb, bsize;
+	blocknum_t end_block;
+	int rc;
+
+	if (private == NULL) {
+		private = kzalloc(sizeof(*private), GFP_KERNEL);
+		if (private == NULL) {
+			DBF_DEV_EVENT(DBF_WARNING, device, "%s",
+				"Allocating memory for private DASD data "
+				      "failed\n");
+			return -ENOMEM;
+		}
+		ccw_device_get_id(device->cdev, &private->dev_id);
+		device->private = private;
+	}
+	block = dasd_alloc_block();
+	if (IS_ERR(block)) {
+		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
+			    "could not allocate dasd block structure");
+		device->private = NULL;
+		kfree(private);
+		return PTR_ERR(block);
+	}
+	device->block = block;
+	block->base = device;
+
+	/* Read Device Characteristics */
+	rdc_data = &private->rdc_data;
+	rdc_data->dev_nr = private->dev_id.devno;
+	rdc_data->rdc_len = sizeof (struct dasd_diag_characteristics);
+
+	rc = diag210((struct diag210 *) rdc_data);
+	if (rc) {
+		DBF_DEV_EVENT(DBF_WARNING, device, "failed to retrieve device "
+			    "information (rc=%d)", rc);
+		rc = -EOPNOTSUPP;
+		goto out;
+	}
+
+	device->default_expires = DIAG_TIMEOUT;
+	device->default_retries = DIAG_MAX_RETRIES;
+
+	/* Figure out position of label block */
+	switch (private->rdc_data.vdev_class) {
+	case DEV_CLASS_FBA:
+		private->pt_block = 1;
+		break;
+	case DEV_CLASS_ECKD:
+		private->pt_block = 2;
+		break;
+	default:
+		pr_warn("%s: Device type %d is not supported in DIAG mode\n",
+			dev_name(&device->cdev->dev),
+			private->rdc_data.vdev_class);
+		rc = -EOPNOTSUPP;
+		goto out;
+	}
+
+	DBF_DEV_EVENT(DBF_INFO, device,
+		      "%04X: %04X on real %04X/%02X",
+		      rdc_data->dev_nr,
+		      rdc_data->vdev_type,
+		      rdc_data->rdev_type, rdc_data->rdev_model);
+
+	/* terminate all outstanding operations */
+	mdsk_term_io(device);
+
+	/* figure out blocksize of device */
+	label = (struct vtoc_cms_label *) get_zeroed_page(GFP_KERNEL);
+	if (label == NULL)  {
+		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
+			    "No memory to allocate initialization request");
+		rc = -ENOMEM;
+		goto out;
+	}
+	rc = 0;
+	end_block = 0;
+	/* try all sizes - needed for ECKD devices */
+	for (bsize = 512; bsize <= PAGE_SIZE; bsize <<= 1) {
+		mdsk_init_io(device, bsize, 0, &end_block);
+		memset(&bio, 0, sizeof (struct dasd_diag_bio));
+		bio.type = MDSK_READ_REQ;
+		bio.block_number = private->pt_block + 1;
+		bio.buffer = label;
+		memset(&private->iob, 0, sizeof (struct dasd_diag_rw_io));
+		private->iob.dev_nr = rdc_data->dev_nr;
+		private->iob.key = 0;
+		private->iob.flags = 0;	/* do synchronous io */
+		private->iob.block_count = 1;
+		private->iob.interrupt_params = 0;
+		private->iob.bio_list = &bio;
+		private->iob.flaga = DASD_DIAG_FLAGA_DEFAULT;
+		rc = dia250(&private->iob, RW_BIO);
+		if (rc == 3) {
+			pr_warn("%s: A 64-bit DIAG call failed\n",
+				dev_name(&device->cdev->dev));
+			rc = -EOPNOTSUPP;
+			goto out_label;
+		}
+		mdsk_term_io(device);
+		if (rc == 0)
+			break;
+	}
+	if (bsize > PAGE_SIZE) {
+		pr_warn("%s: Accessing the DASD failed because of an incorrect format (rc=%d)\n",
+			dev_name(&device->cdev->dev), rc);
+		rc = -EIO;
+		goto out_label;
+	}
+	/* check for label block */
+	if (memcmp(label->label_id, DASD_DIAG_CMS1,
+		  sizeof(DASD_DIAG_CMS1)) == 0) {
+		/* get formatted blocksize from label block */
+		bsize = (unsigned int) label->block_size;
+		block->blocks = (unsigned long) label->block_count;
+	} else
+		block->blocks = end_block;
+	block->bp_block = bsize;
+	block->s2b_shift = 0;	/* bits to shift 512 to get a block */
+	for (sb = 512; sb < bsize; sb = sb << 1)
+		block->s2b_shift++;
+	rc = mdsk_init_io(device, block->bp_block, 0, NULL);
+	if (rc && (rc != 4)) {
+		pr_warn("%s: DIAG initialization failed with rc=%d\n",
+			dev_name(&device->cdev->dev), rc);
+		rc = -EIO;
+	} else {
+		if (rc == 4)
+			set_bit(DASD_FLAG_DEVICE_RO, &device->flags);
+		pr_info("%s: New DASD with %ld byte/block, total size %ld "
+			"KB%s\n", dev_name(&device->cdev->dev),
+			(unsigned long) block->bp_block,
+			(unsigned long) (block->blocks <<
+					 block->s2b_shift) >> 1,
+			(rc == 4) ? ", read-only device" : "");
+		rc = 0;
+	}
+out_label:
+	free_page((long) label);
+out:
+	if (rc) {
+		device->block = NULL;
+		dasd_free_block(block);
+		device->private = NULL;
+		kfree(private);
+	}
+	return rc;
+}
+
+/* Fill in virtual disk geometry for device. Return zero on success, non-zero
+ * otherwise. */
+static int
+dasd_diag_fill_geometry(struct dasd_block *block, struct hd_geometry *geo)
+{
+	if (dasd_check_blocksize(block->bp_block) != 0)
+		return -EINVAL;
+	geo->cylinders = (block->blocks << block->s2b_shift) >> 10;
+	geo->heads = 16;
+	geo->sectors = 128 >> block->s2b_shift;
+	return 0;
+}
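+
+/*
+ * Example: with 4 KiB blocks s2b_shift is 3, so the geometry reports
+ * 16 heads, 128 >> 3 = 16 sectors per track, and one cylinder per
+ * (blocks << 3) >> 10 = 1024 sectors of 512 bytes. The values are
+ * synthetic and merely give geometry queries a plausible CHS view.
+ */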
+
+static dasd_erp_fn_t
+dasd_diag_erp_action(struct dasd_ccw_req * cqr)
+{
+	return dasd_default_erp_action;
+}
+
+static dasd_erp_fn_t
+dasd_diag_erp_postaction(struct dasd_ccw_req * cqr)
+{
+	return dasd_default_erp_postaction;
+}
+
+/* Create DASD request from block device request. Return pointer to new
+ * request on success, ERR_PTR otherwise. */
+static struct dasd_ccw_req *dasd_diag_build_cp(struct dasd_device *memdev,
+					       struct dasd_block *block,
+					       struct request *req)
+{
+	struct dasd_ccw_req *cqr;
+	struct dasd_diag_req *dreq;
+	struct dasd_diag_bio *dbio;
+	struct req_iterator iter;
+	struct bio_vec bv;
+	char *dst;
+	unsigned int count, datasize;
+	sector_t recid, first_rec, last_rec;
+	unsigned int blksize, off;
+	unsigned char rw_cmd;
+
+	if (rq_data_dir(req) == READ)
+		rw_cmd = MDSK_READ_REQ;
+	else if (rq_data_dir(req) == WRITE)
+		rw_cmd = MDSK_WRITE_REQ;
+	else
+		return ERR_PTR(-EINVAL);
+	blksize = block->bp_block;
+	/* Calculate record id of first and last block. */
+	first_rec = blk_rq_pos(req) >> block->s2b_shift;
+	last_rec =
+		(blk_rq_pos(req) + blk_rq_sectors(req) - 1) >> block->s2b_shift;
+	/* Check struct bio and count the number of blocks for the request. */
+	count = 0;
+	rq_for_each_segment(bv, req, iter) {
+		if (bv.bv_len & (blksize - 1))
+			/* DIAG I/O works in full blocks only. */
+			return ERR_PTR(-EINVAL);
+		count += bv.bv_len >> (block->s2b_shift + 9);
+	}
+	/* Paranoia. */
+	if (count != last_rec - first_rec + 1)
+		return ERR_PTR(-EINVAL);
+	/* Build the request */
+	datasize = sizeof(struct dasd_diag_req) +
+		count*sizeof(struct dasd_diag_bio);
+	cqr = dasd_smalloc_request(DASD_DIAG_MAGIC, 0, datasize, memdev,
+				   blk_mq_rq_to_pdu(req));
+	if (IS_ERR(cqr))
+		return cqr;
+
+	dreq = (struct dasd_diag_req *) cqr->data;
+	dreq->block_count = count;
+	dbio = dreq->bio;
+	recid = first_rec;
+	rq_for_each_segment(bv, req, iter) {
+		dst = page_address(bv.bv_page) + bv.bv_offset;
+		for (off = 0; off < bv.bv_len; off += blksize) {
+			memset(dbio, 0, sizeof (struct dasd_diag_bio));
+			dbio->type = rw_cmd;
+			dbio->block_number = recid + 1;
+			dbio->buffer = dst;
+			dbio++;
+			dst += blksize;
+			recid++;
+		}
+	}
+	cqr->retries = memdev->default_retries;
+	cqr->buildclk = get_tod_clock();
+	if (blk_noretry_request(req) ||
+	    block->base->features & DASD_FEATURE_FAILFAST)
+		set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
+	cqr->startdev = memdev;
+	cqr->memdev = memdev;
+	cqr->block = block;
+	cqr->expires = memdev->default_expires * HZ;
+	cqr->status = DASD_CQR_FILLED;
+	return cqr;
+}
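+
+/*
+ * Illustration of dasd_diag_build_cp(): a 16KB read starting at
+ * 512-byte sector 64 on a device with 4KB blocks (s2b_shift = 3) gives
+ * first_rec = 64 >> 3 = 8, last_rec = (64 + 32 - 1) >> 3 = 11 and
+ * count = 4, i.e. four dasd_diag_bio entries carrying the one-based
+ * DIAG block numbers 9 to 12.
+ */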
+
+/* Release DASD request. Return non-zero if request was successful, zero
+ * otherwise. */
+static int
+dasd_diag_free_cp(struct dasd_ccw_req *cqr, struct request *req)
+{
+	int status;
+
+	status = cqr->status == DASD_CQR_DONE;
+	dasd_sfree_request(cqr, cqr->memdev);
+	return status;
+}
+
+static void dasd_diag_handle_terminated_request(struct dasd_ccw_req *cqr)
+{
+	if (cqr->retries < 0)
+		cqr->status = DASD_CQR_FAILED;
+	else
+		cqr->status = DASD_CQR_FILLED;
+}
+
+/* Fill in IOCTL data for device. */
+static int
+dasd_diag_fill_info(struct dasd_device * device,
+		    struct dasd_information2_t * info)
+{
+	struct dasd_diag_private *private = device->private;
+
+	info->label_block = (unsigned int) private->pt_block;
+	info->FBA_layout = 1;
+	info->format = DASD_FORMAT_LDL;
+	info->characteristics_size = sizeof(private->rdc_data);
+	memcpy(info->characteristics, &private->rdc_data,
+	       sizeof(private->rdc_data));
+	info->confdata_size = 0;
+	return 0;
+}
+
+static void
+dasd_diag_dump_sense(struct dasd_device *device, struct dasd_ccw_req * req,
+		     struct irb *stat)
+{
+	DBF_DEV_EVENT(DBF_WARNING, device, "%s",
+		    "dump sense not available for DIAG data");
+}
+
+static struct dasd_discipline dasd_diag_discipline = {
+	.owner = THIS_MODULE,
+	.name = "DIAG",
+	.ebcname = "DIAG",
+	.max_blocks = DIAG_MAX_BLOCKS,
+	.check_device = dasd_diag_check_device,
+	.verify_path = dasd_generic_verify_path,
+	.fill_geometry = dasd_diag_fill_geometry,
+	.start_IO = dasd_start_diag,
+	.term_IO = dasd_diag_term_IO,
+	.handle_terminated_request = dasd_diag_handle_terminated_request,
+	.erp_action = dasd_diag_erp_action,
+	.erp_postaction = dasd_diag_erp_postaction,
+	.build_cp = dasd_diag_build_cp,
+	.free_cp = dasd_diag_free_cp,
+	.dump_sense = dasd_diag_dump_sense,
+	.fill_info = dasd_diag_fill_info,
+};
+
+static int __init
+dasd_diag_init(void)
+{
+	if (!MACHINE_IS_VM) {
+		pr_info("Discipline %s cannot be used without z/VM\n",
+			dasd_diag_discipline.name);
+		return -ENODEV;
+	}
+	ASCEBC(dasd_diag_discipline.ebcname, 4);
+
+	irq_subclass_register(IRQ_SUBCLASS_SERVICE_SIGNAL);
+	register_external_irq(EXT_IRQ_CP_SERVICE, dasd_ext_handler);
+	dasd_diag_discipline_pointer = &dasd_diag_discipline;
+	return 0;
+}
+
+static void __exit
+dasd_diag_cleanup(void)
+{
+	unregister_external_irq(EXT_IRQ_CP_SERVICE, dasd_ext_handler);
+	irq_subclass_unregister(IRQ_SUBCLASS_SERVICE_SIGNAL);
+	dasd_diag_discipline_pointer = NULL;
+}
+
+module_init(dasd_diag_init);
+module_exit(dasd_diag_cleanup);
diff --git a/drivers/s390/block/dasd_diag.h b/drivers/s390/block/dasd_diag.h
new file mode 100644
index 0000000..405b6fe
--- /dev/null
+++ b/drivers/s390/block/dasd_diag.h
@@ -0,0 +1,81 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
+ * Based on.......: linux/drivers/s390/block/mdisk.h
+ * ...............: by Hartmut Penner <hpenner@de.ibm.com>
+ * Bugreports.to..: <Linux390@de.ibm.com>
+ * Copyright IBM Corp. 1999, 2000
+ *
+ */
+
+#define MDSK_WRITE_REQ 0x01
+#define MDSK_READ_REQ  0x02
+
+#define INIT_BIO	0x00
+#define RW_BIO		0x01
+#define TERM_BIO	0x02
+
+#define DEV_CLASS_FBA	0x01
+#define DEV_CLASS_ECKD	0x04
+
+#define DASD_DIAG_CODE_31BIT		0x03
+#define DASD_DIAG_CODE_64BIT		0x07
+
+#define DASD_DIAG_RWFLAG_ASYNC		0x02
+#define DASD_DIAG_RWFLAG_NOCACHE	0x01
+
+#define DASD_DIAG_FLAGA_FORMAT_64BIT	0x80
+
+struct dasd_diag_characteristics {
+	u16 dev_nr;
+	u16 rdc_len;
+	u8 vdev_class;
+	u8 vdev_type;
+	u8 vdev_status;
+	u8 vdev_flags;
+	u8 rdev_class;
+	u8 rdev_type;
+	u8 rdev_model;
+	u8 rdev_features;
+} __attribute__ ((packed, aligned(4)));
+
+#define DASD_DIAG_FLAGA_DEFAULT		DASD_DIAG_FLAGA_FORMAT_64BIT
+
+typedef u64 blocknum_t;
+typedef s64 sblocknum_t;
+
+struct dasd_diag_bio {
+	u8 type;
+	u8 status;
+	u8 spare1[2];
+	u32 alet;
+	blocknum_t block_number;
+	void *buffer;
+} __attribute__ ((packed, aligned(8)));
+
+struct dasd_diag_init_io {
+	u16 dev_nr;
+	u8 flaga;
+	u8 spare1[21];
+	u32 block_size;
+	u8 spare2[4];
+	blocknum_t offset;
+	sblocknum_t start_block;
+	blocknum_t end_block;
+	u8  spare3[8];
+} __attribute__ ((packed, aligned(8)));
+
+struct dasd_diag_rw_io {
+	u16 dev_nr;
+	u8  flaga;
+	u8  spare1[21];
+	u8  key;
+	u8  flags;
+	u8  spare2[2];
+	u32 block_count;
+	u32 alet;
+	u8  spare3[4];
+	u64 interrupt_params;
+	struct dasd_diag_bio *bio_list;
+	u8  spare4[8];
+} __attribute__ ((packed, aligned(8)));
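+
+/*
+ * These parameter blocks are passed to the DIAGNOSE X'250' call:
+ * dasd_diag_init_io is used for the INIT_BIO and TERM_BIO
+ * subfunctions, dasd_diag_rw_io for RW_BIO, with bio_list pointing to
+ * an array of dasd_diag_bio entries, one per block to be transferred.
+ */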
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
new file mode 100644
index 0000000..4e7b55a
--- /dev/null
+++ b/drivers/s390/block/dasd_eckd.c
@@ -0,0 +1,5846 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
+ *		    Horst Hummel <Horst.Hummel@de.ibm.com>
+ *		    Carsten Otte <Cotte@de.ibm.com>
+ *		    Martin Schwidefsky <schwidefsky@de.ibm.com>
+ * Bugreports.to..: <Linux390@de.ibm.com>
+ * Copyright IBM Corp. 1999, 2009
+ * EMC Symmetrix ioctl Copyright EMC Corporation, 2008
+ * Author.........: Nigel Hislop <hislop_nigel@emc.com>
+ */
+
+#define KMSG_COMPONENT "dasd-eckd"
+
+#include <linux/stddef.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/hdreg.h>	/* HDIO_GETGEO			    */
+#include <linux/bio.h>
+#include <linux/module.h>
+#include <linux/compat.h>
+#include <linux/init.h>
+#include <linux/seq_file.h>
+
+#include <asm/css_chars.h>
+#include <asm/debug.h>
+#include <asm/idals.h>
+#include <asm/ebcdic.h>
+#include <asm/io.h>
+#include <linux/uaccess.h>
+#include <asm/cio.h>
+#include <asm/ccwdev.h>
+#include <asm/itcw.h>
+#include <asm/schid.h>
+#include <asm/chpid.h>
+
+#include "dasd_int.h"
+#include "dasd_eckd.h"
+
+#ifdef PRINTK_HEADER
+#undef PRINTK_HEADER
+#endif				/* PRINTK_HEADER */
+#define PRINTK_HEADER "dasd(eckd):"
+
+#define ECKD_C0(i) (i->home_bytes)
+#define ECKD_F(i) (i->formula)
+#define ECKD_F1(i) (ECKD_F(i)==0x01?(i->factors.f_0x01.f1):\
+		    (i->factors.f_0x02.f1))
+#define ECKD_F2(i) (ECKD_F(i)==0x01?(i->factors.f_0x01.f2):\
+		    (i->factors.f_0x02.f2))
+#define ECKD_F3(i) (ECKD_F(i)==0x01?(i->factors.f_0x01.f3):\
+		    (i->factors.f_0x02.f3))
+#define ECKD_F4(i) (ECKD_F(i)==0x02?(i->factors.f_0x02.f4):0)
+#define ECKD_F5(i) (ECKD_F(i)==0x02?(i->factors.f_0x02.f5):0)
+#define ECKD_F6(i) (i->factor6)
+#define ECKD_F7(i) (i->factor7)
+#define ECKD_F8(i) (i->factor8)
+
+/*
+ * raw track access always maps to 64k in memory
+ * so it maps to 16 blocks of 4k per track
+ */
+#define DASD_RAW_BLOCK_PER_TRACK 16
+#define DASD_RAW_BLOCKSIZE 4096
+/* 64k are 128 x 512 byte sectors  */
+#define DASD_RAW_SECTORS_PER_TRACK 128
+
+MODULE_LICENSE("GPL");
+
+static struct dasd_discipline dasd_eckd_discipline;
+
+/* The ccw bus type uses this table to find devices that it sends to
+ * dasd_eckd_probe */
+static struct ccw_device_id dasd_eckd_ids[] = {
+	{ CCW_DEVICE_DEVTYPE (0x3990, 0, 0x3390, 0), .driver_info = 0x1},
+	{ CCW_DEVICE_DEVTYPE (0x2105, 0, 0x3390, 0), .driver_info = 0x2},
+	{ CCW_DEVICE_DEVTYPE (0x3880, 0, 0x3380, 0), .driver_info = 0x3},
+	{ CCW_DEVICE_DEVTYPE (0x3990, 0, 0x3380, 0), .driver_info = 0x4},
+	{ CCW_DEVICE_DEVTYPE (0x2105, 0, 0x3380, 0), .driver_info = 0x5},
+	{ CCW_DEVICE_DEVTYPE (0x9343, 0, 0x9345, 0), .driver_info = 0x6},
+	{ CCW_DEVICE_DEVTYPE (0x2107, 0, 0x3390, 0), .driver_info = 0x7},
+	{ CCW_DEVICE_DEVTYPE (0x2107, 0, 0x3380, 0), .driver_info = 0x8},
+	{ CCW_DEVICE_DEVTYPE (0x1750, 0, 0x3390, 0), .driver_info = 0x9},
+	{ CCW_DEVICE_DEVTYPE (0x1750, 0, 0x3380, 0), .driver_info = 0xa},
+	{ /* end of list */ },
+};
+
+MODULE_DEVICE_TABLE(ccw, dasd_eckd_ids);
+
+static struct ccw_driver dasd_eckd_driver; /* see below */
+
+static void *rawpadpage;
+
+#define INIT_CQR_OK 0
+#define INIT_CQR_UNFORMATTED 1
+#define INIT_CQR_ERROR 2
+
+/* emergency request for reserve/release */
+static struct {
+	struct dasd_ccw_req cqr;
+	struct ccw1 ccw;
+	char data[32];
+} *dasd_reserve_req;
+static DEFINE_MUTEX(dasd_reserve_mutex);
+
+/* definitions for the path verification worker */
+struct path_verification_work_data {
+	struct work_struct worker;
+	struct dasd_device *device;
+	struct dasd_ccw_req cqr;
+	struct ccw1 ccw;
+	__u8 rcd_buffer[DASD_ECKD_RCD_DATA_SIZE];
+	int isglobal;
+	__u8 tbvpm;
+};
+static struct path_verification_work_data *path_verification_worker;
+static DEFINE_MUTEX(dasd_path_verification_mutex);
+
+struct check_attention_work_data {
+	struct work_struct worker;
+	struct dasd_device *device;
+	__u8 lpum;
+};
+
+static int prepare_itcw(struct itcw *, unsigned int, unsigned int, int,
+			struct dasd_device *, struct dasd_device *,
+			unsigned int, int, unsigned int, unsigned int,
+			unsigned int, unsigned int);
+
+/* Initial attempt at a probe function. This can be simplified once
+ * the other detection code is gone. */
+static int
+dasd_eckd_probe (struct ccw_device *cdev)
+{
+	int ret;
+
+	/* set ECKD specific ccw-device options */
+	ret = ccw_device_set_options(cdev, CCWDEV_ALLOW_FORCE |
+				     CCWDEV_DO_PATHGROUP | CCWDEV_DO_MULTIPATH);
+	if (ret) {
+		DBF_EVENT_DEVID(DBF_WARNING, cdev, "%s",
+				"dasd_eckd_probe: could not set "
+				"ccw-device options");
+		return ret;
+	}
+	ret = dasd_generic_probe(cdev, &dasd_eckd_discipline);
+	return ret;
+}
+
+static int
+dasd_eckd_set_online(struct ccw_device *cdev)
+{
+	return dasd_generic_set_online(cdev, &dasd_eckd_discipline);
+}
+
+static const int sizes_trk0[] = { 28, 148, 84 };
+#define LABEL_SIZE 140
+
+/* head and record addresses of count_area read in analysis ccw */
+static const int count_area_head[] = { 0, 0, 0, 0, 2 };
+static const int count_area_rec[] = { 1, 2, 3, 4, 1 };
+
+static inline unsigned int
+round_up_multiple(unsigned int no, unsigned int mult)
+{
+	int rem = no % mult;
+	return (rem ? no - rem + mult : no);
+}
+
+static inline unsigned int
+ceil_quot(unsigned int d1, unsigned int d2)
+{
+	return (d1 + (d2 - 1)) / d2;
+}
+
+static unsigned int
+recs_per_track(struct dasd_eckd_characteristics * rdc,
+	       unsigned int kl, unsigned int dl)
+{
+	int dn, kn;
+
+	switch (rdc->dev_type) {
+	case 0x3380:
+		if (kl)
+			return 1499 / (15 + 7 + ceil_quot(kl + 12, 32) +
+				       ceil_quot(dl + 12, 32));
+		else
+			return 1499 / (15 + ceil_quot(dl + 12, 32));
+	case 0x3390:
+		dn = ceil_quot(dl + 6, 232) + 1;
+		if (kl) {
+			kn = ceil_quot(kl + 6, 232) + 1;
+			return 1729 / (10 + 9 + ceil_quot(kl + 6 * kn, 34) +
+				       9 + ceil_quot(dl + 6 * dn, 34));
+		} else
+			return 1729 / (10 + 9 + ceil_quot(dl + 6 * dn, 34));
+	case 0x9345:
+		dn = ceil_quot(dl + 6, 232) + 1;
+		if (kl) {
+			kn = ceil_quot(kl + 6, 232) + 1;
+			return 1420 / (18 + 7 + ceil_quot(kl + 6 * kn, 34) +
+				       ceil_quot(dl + 6 * dn, 34));
+		} else
+			return 1420 / (18 + 7 + ceil_quot(dl + 6 * dn, 34));
+	}
+	return 0;
+}
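+
+/*
+ * Worked example: a 4096-byte data record with no key on a 3390 gives
+ * dn = ceil_quot(4102, 232) + 1 = 19, so the function returns
+ * 1729 / (10 + 9 + ceil_quot(4096 + 6 * 19, 34)) = 1729 / 143 = 12,
+ * the familiar twelve 4KB blocks per 3390 track.
+ */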
+
+static void set_ch_t(struct ch_t *geo, __u32 cyl, __u8 head)
+{
+	geo->cyl = (__u16) cyl;
+	geo->head = cyl >> 16;
+	geo->head <<= 4;
+	geo->head |= head;
+}
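+
+/*
+ * set_ch_t() stores cylinder numbers above 0xFFFF in the upper bits of
+ * the head field (large volume addressing): cyl = 0x12345, head = 0x7,
+ * for instance, yields geo->cyl = 0x2345 and
+ * geo->head = (0x1 << 4) | 0x7 = 0x17.
+ */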
+
+static int set_timestamp(struct ccw1 *ccw, struct DE_eckd_data *data,
+		     struct dasd_device *device)
+{
+	struct dasd_eckd_private *private = device->private;
+	int rc;
+
+	rc = get_phys_clock(&data->ep_sys_time);
+	/*
+	 * Ignore return code if XRC is not supported or
+	 * sync clock is switched off
+	 */
+	if ((rc && !private->rdc_data.facilities.XRC_supported) ||
+	    rc == -EOPNOTSUPP || rc == -EACCES)
+		return 0;
+
+	/* switch on System Time Stamp - needed for XRC Support */
+	data->ga_extended |= 0x08; /* switch on 'Time Stamp Valid'   */
+	data->ga_extended |= 0x02; /* switch on 'Extended Parameter' */
+
+	if (ccw) {
+		ccw->count = sizeof(struct DE_eckd_data);
+		ccw->flags |= CCW_FLAG_SLI;
+	}
+
+	return rc;
+}
+
+static int
+define_extent(struct ccw1 *ccw, struct DE_eckd_data *data, unsigned int trk,
+	      unsigned int totrk, int cmd, struct dasd_device *device,
+	      int blksize)
+{
+	struct dasd_eckd_private *private = device->private;
+	u16 heads, beghead, endhead;
+	u32 begcyl, endcyl;
+	int rc = 0;
+
+	if (ccw) {
+		ccw->cmd_code = DASD_ECKD_CCW_DEFINE_EXTENT;
+		ccw->flags = 0;
+		ccw->count = 16;
+		ccw->cda = (__u32)__pa(data);
+	}
+
+	memset(data, 0, sizeof(struct DE_eckd_data));
+	switch (cmd) {
+	case DASD_ECKD_CCW_READ_HOME_ADDRESS:
+	case DASD_ECKD_CCW_READ_RECORD_ZERO:
+	case DASD_ECKD_CCW_READ:
+	case DASD_ECKD_CCW_READ_MT:
+	case DASD_ECKD_CCW_READ_CKD:
+	case DASD_ECKD_CCW_READ_CKD_MT:
+	case DASD_ECKD_CCW_READ_KD:
+	case DASD_ECKD_CCW_READ_KD_MT:
+		data->mask.perm = 0x1;
+		data->attributes.operation = private->attrib.operation;
+		break;
+	case DASD_ECKD_CCW_READ_COUNT:
+		data->mask.perm = 0x1;
+		data->attributes.operation = DASD_BYPASS_CACHE;
+		break;
+	case DASD_ECKD_CCW_READ_TRACK:
+	case DASD_ECKD_CCW_READ_TRACK_DATA:
+		data->mask.perm = 0x1;
+		data->attributes.operation = private->attrib.operation;
+		data->blk_size = 0;
+		break;
+	case DASD_ECKD_CCW_WRITE:
+	case DASD_ECKD_CCW_WRITE_MT:
+	case DASD_ECKD_CCW_WRITE_KD:
+	case DASD_ECKD_CCW_WRITE_KD_MT:
+		data->mask.perm = 0x02;
+		data->attributes.operation = private->attrib.operation;
+		rc = set_timestamp(ccw, data, device);
+		break;
+	case DASD_ECKD_CCW_WRITE_CKD:
+	case DASD_ECKD_CCW_WRITE_CKD_MT:
+		data->attributes.operation = DASD_BYPASS_CACHE;
+		rc = set_timestamp(ccw, data, device);
+		break;
+	case DASD_ECKD_CCW_ERASE:
+	case DASD_ECKD_CCW_WRITE_HOME_ADDRESS:
+	case DASD_ECKD_CCW_WRITE_RECORD_ZERO:
+		data->mask.perm = 0x3;
+		data->mask.auth = 0x1;
+		data->attributes.operation = DASD_BYPASS_CACHE;
+		rc = set_timestamp(ccw, data, device);
+		break;
+	case DASD_ECKD_CCW_WRITE_FULL_TRACK:
+		data->mask.perm = 0x03;
+		data->attributes.operation = private->attrib.operation;
+		data->blk_size = 0;
+		break;
+	case DASD_ECKD_CCW_WRITE_TRACK_DATA:
+		data->mask.perm = 0x02;
+		data->attributes.operation = private->attrib.operation;
+		data->blk_size = blksize;
+		rc = set_timestamp(ccw, data, device);
+		break;
+	default:
+		dev_err(&device->cdev->dev,
+			"0x%x is not a known command\n", cmd);
+		break;
+	}
+
+	data->attributes.mode = 0x3;	/* ECKD */
+
+	if ((private->rdc_data.cu_type == 0x2105 ||
+	     private->rdc_data.cu_type == 0x2107 ||
+	     private->rdc_data.cu_type == 0x1750)
+	    && !(private->uses_cdl && trk < 2))
+		data->ga_extended |= 0x40; /* Regular Data Format Mode */
+
+	heads = private->rdc_data.trk_per_cyl;
+	begcyl = trk / heads;
+	beghead = trk % heads;
+	endcyl = totrk / heads;
+	endhead = totrk % heads;
+
+	/* check for sequential prestage - enhance cylinder range */
+	if (data->attributes.operation == DASD_SEQ_PRESTAGE ||
+	    data->attributes.operation == DASD_SEQ_ACCESS) {
+
+		if (endcyl + private->attrib.nr_cyl < private->real_cyl)
+			endcyl += private->attrib.nr_cyl;
+		else
+			endcyl = (private->real_cyl - 1);
+	}
+
+	set_ch_t(&data->beg_ext, begcyl, beghead);
+	set_ch_t(&data->end_ext, endcyl, endhead);
+	return rc;
+}
+
+
+static void locate_record_ext(struct ccw1 *ccw, struct LRE_eckd_data *data,
+			      unsigned int trk, unsigned int rec_on_trk,
+			      int count, int cmd, struct dasd_device *device,
+			      unsigned int reclen, unsigned int tlf)
+{
+	struct dasd_eckd_private *private = device->private;
+	int sector;
+	int dn, d;
+
+	if (ccw) {
+		ccw->cmd_code = DASD_ECKD_CCW_LOCATE_RECORD_EXT;
+		ccw->flags = 0;
+		if (cmd == DASD_ECKD_CCW_WRITE_FULL_TRACK)
+			ccw->count = 22;
+		else
+			ccw->count = 20;
+		ccw->cda = (__u32)__pa(data);
+	}
+
+	memset(data, 0, sizeof(*data));
+	sector = 0;
+	if (rec_on_trk) {
+		switch (private->rdc_data.dev_type) {
+		case 0x3390:
+			dn = ceil_quot(reclen + 6, 232);
+			d = 9 + ceil_quot(reclen + 6 * (dn + 1), 34);
+			sector = (49 + (rec_on_trk - 1) * (10 + d)) / 8;
+			break;
+		case 0x3380:
+			d = 7 + ceil_quot(reclen + 12, 32);
+			sector = (39 + (rec_on_trk - 1) * (8 + d)) / 7;
+			break;
+		}
+	}
+	data->sector = sector;
+	/* note: meaning of count depends on the operation
+	 *	 for record based I/O it's the number of records, but for
+	 *	 track based I/O it's the number of tracks
+	 */
+	data->count = count;
+	switch (cmd) {
+	case DASD_ECKD_CCW_WRITE_HOME_ADDRESS:
+		data->operation.orientation = 0x3;
+		data->operation.operation = 0x03;
+		break;
+	case DASD_ECKD_CCW_READ_HOME_ADDRESS:
+		data->operation.orientation = 0x3;
+		data->operation.operation = 0x16;
+		break;
+	case DASD_ECKD_CCW_WRITE_RECORD_ZERO:
+		data->operation.orientation = 0x1;
+		data->operation.operation = 0x03;
+		data->count++;
+		break;
+	case DASD_ECKD_CCW_READ_RECORD_ZERO:
+		data->operation.orientation = 0x3;
+		data->operation.operation = 0x16;
+		data->count++;
+		break;
+	case DASD_ECKD_CCW_WRITE:
+	case DASD_ECKD_CCW_WRITE_MT:
+	case DASD_ECKD_CCW_WRITE_KD:
+	case DASD_ECKD_CCW_WRITE_KD_MT:
+		data->auxiliary.length_valid = 0x1;
+		data->length = reclen;
+		data->operation.operation = 0x01;
+		break;
+	case DASD_ECKD_CCW_WRITE_CKD:
+	case DASD_ECKD_CCW_WRITE_CKD_MT:
+		data->auxiliary.length_valid = 0x1;
+		data->length = reclen;
+		data->operation.operation = 0x03;
+		break;
+	case DASD_ECKD_CCW_WRITE_FULL_TRACK:
+		data->operation.orientation = 0x0;
+		data->operation.operation = 0x3F;
+		data->extended_operation = 0x11;
+		data->length = 0;
+		data->extended_parameter_length = 0x02;
+		if (data->count > 8) {
+			data->extended_parameter[0] = 0xFF;
+			data->extended_parameter[1] = 0xFF;
+			data->extended_parameter[1] <<= (16 - count);
+		} else {
+			data->extended_parameter[0] = 0xFF;
+			data->extended_parameter[0] <<= (8 - count);
+			data->extended_parameter[1] = 0x00;
+		}
+		data->sector = 0xFF;
+		break;
+	case DASD_ECKD_CCW_WRITE_TRACK_DATA:
+		data->auxiliary.length_valid = 0x1;
+		data->length = reclen;	/* not tlf, as one might think */
+		data->operation.operation = 0x3F;
+		data->extended_operation = 0x23;
+		break;
+	case DASD_ECKD_CCW_READ:
+	case DASD_ECKD_CCW_READ_MT:
+	case DASD_ECKD_CCW_READ_KD:
+	case DASD_ECKD_CCW_READ_KD_MT:
+		data->auxiliary.length_valid = 0x1;
+		data->length = reclen;
+		data->operation.operation = 0x06;
+		break;
+	case DASD_ECKD_CCW_READ_CKD:
+	case DASD_ECKD_CCW_READ_CKD_MT:
+		data->auxiliary.length_valid = 0x1;
+		data->length = reclen;
+		data->operation.operation = 0x16;
+		break;
+	case DASD_ECKD_CCW_READ_COUNT:
+		data->operation.operation = 0x06;
+		break;
+	case DASD_ECKD_CCW_READ_TRACK:
+		data->operation.orientation = 0x1;
+		data->operation.operation = 0x0C;
+		data->extended_parameter_length = 0;
+		data->sector = 0xFF;
+		break;
+	case DASD_ECKD_CCW_READ_TRACK_DATA:
+		data->auxiliary.length_valid = 0x1;
+		data->length = tlf;
+		data->operation.operation = 0x0C;
+		break;
+	case DASD_ECKD_CCW_ERASE:
+		data->length = reclen;
+		data->auxiliary.length_valid = 0x1;
+		data->operation.operation = 0x0b;
+		break;
+	default:
+		DBF_DEV_EVENT(DBF_ERR, device,
+			    "fill LRE unknown opcode 0x%x", cmd);
+		BUG();
+	}
+	set_ch_t(&data->seek_addr,
+		 trk / private->rdc_data.trk_per_cyl,
+		 trk % private->rdc_data.trk_per_cyl);
+	data->search_arg.cyl = data->seek_addr.cyl;
+	data->search_arg.head = data->seek_addr.head;
+	data->search_arg.record = rec_on_trk;
+}
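+
+/*
+ * Example for the sector computation above: a 4096-byte record on a
+ * 3390 gives dn = ceil_quot(4102, 232) = 18 and
+ * d = 9 + ceil_quot(4096 + 6 * 19, 34) = 133, so record 2 begins at
+ * sector (49 + 1 * (10 + 133)) / 8 = 24.
+ */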
+
+static int prefix_LRE(struct ccw1 *ccw, struct PFX_eckd_data *pfxdata,
+		      unsigned int trk, unsigned int totrk, int cmd,
+		      struct dasd_device *basedev, struct dasd_device *startdev,
+		      unsigned int format, unsigned int rec_on_trk, int count,
+		      unsigned int blksize, unsigned int tlf)
+{
+	struct dasd_eckd_private *basepriv, *startpriv;
+	struct LRE_eckd_data *lredata;
+	struct DE_eckd_data *dedata;
+	int rc = 0;
+
+	basepriv = basedev->private;
+	startpriv = startdev->private;
+	dedata = &pfxdata->define_extent;
+	lredata = &pfxdata->locate_record;
+
+	ccw->cmd_code = DASD_ECKD_CCW_PFX;
+	ccw->flags = 0;
+	if (cmd == DASD_ECKD_CCW_WRITE_FULL_TRACK) {
+		ccw->count = sizeof(*pfxdata) + 2;
+		ccw->cda = (__u32) __pa(pfxdata);
+		memset(pfxdata, 0, sizeof(*pfxdata) + 2);
+	} else {
+		ccw->count = sizeof(*pfxdata);
+		ccw->cda = (__u32) __pa(pfxdata);
+		memset(pfxdata, 0, sizeof(*pfxdata));
+	}
+
+	/* prefix data */
+	if (format > 1) {
+		DBF_DEV_EVENT(DBF_ERR, basedev,
+			      "PFX LRE unknown format 0x%x", format);
+		BUG();
+		return -EINVAL;
+	}
+	pfxdata->format = format;
+	pfxdata->base_address = basepriv->ned->unit_addr;
+	pfxdata->base_lss = basepriv->ned->ID;
+	pfxdata->validity.define_extent = 1;
+
+	/* private uid is kept up to date, conf_data may be outdated */
+	if (startpriv->uid.type == UA_BASE_PAV_ALIAS)
+		pfxdata->validity.verify_base = 1;
+
+	if (startpriv->uid.type == UA_HYPER_PAV_ALIAS) {
+		pfxdata->validity.verify_base = 1;
+		pfxdata->validity.hyper_pav = 1;
+	}
+
+	rc = define_extent(NULL, dedata, trk, totrk, cmd, basedev, blksize);
+
+	/*
+	 * For some commands the System Time Stamp is set in the define extent
+	 * data when XRC is supported. The validity of the time stamp must be
+	 * reflected in the prefix data as well.
+	 */
+	if (dedata->ga_extended & 0x08 && dedata->ga_extended & 0x02)
+		pfxdata->validity.time_stamp = 1; /* 'Time Stamp Valid'   */
+
+	if (format == 1) {
+		locate_record_ext(NULL, lredata, trk, rec_on_trk, count, cmd,
+				  basedev, blksize, tlf);
+	}
+
+	return rc;
+}
+
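+/*
+ * prefix() is the format 0 variant of prefix_LRE(): only the define
+ * extent part of the prefix data is filled in, locate_record_ext() is
+ * not called.
+ */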
+static int prefix(struct ccw1 *ccw, struct PFX_eckd_data *pfxdata,
+		  unsigned int trk, unsigned int totrk, int cmd,
+		  struct dasd_device *basedev, struct dasd_device *startdev)
+{
+	return prefix_LRE(ccw, pfxdata, trk, totrk, cmd, basedev, startdev,
+			  0, 0, 0, 0, 0);
+}
+
+static void
+locate_record(struct ccw1 *ccw, struct LO_eckd_data *data, unsigned int trk,
+	      unsigned int rec_on_trk, int no_rec, int cmd,
+	      struct dasd_device * device, int reclen)
+{
+	struct dasd_eckd_private *private = device->private;
+	int sector;
+	int dn, d;
+
+	DBF_DEV_EVENT(DBF_INFO, device,
+		  "Locate: trk %d, rec %d, no_rec %d, cmd %d, reclen %d",
+		  trk, rec_on_trk, no_rec, cmd, reclen);
+
+	ccw->cmd_code = DASD_ECKD_CCW_LOCATE_RECORD;
+	ccw->flags = 0;
+	ccw->count = 16;
+	ccw->cda = (__u32) __pa(data);
+
+	memset(data, 0, sizeof(struct LO_eckd_data));
+	sector = 0;
+	if (rec_on_trk) {
+		switch (private->rdc_data.dev_type) {
+		case 0x3390:
+			dn = ceil_quot(reclen + 6, 232);
+			d = 9 + ceil_quot(reclen + 6 * (dn + 1), 34);
+			sector = (49 + (rec_on_trk - 1) * (10 + d)) / 8;
+			break;
+		case 0x3380:
+			d = 7 + ceil_quot(reclen + 12, 32);
+			sector = (39 + (rec_on_trk - 1) * (8 + d)) / 7;
+			break;
+		}
+	}
+	data->sector = sector;
+	data->count = no_rec;
+	switch (cmd) {
+	case DASD_ECKD_CCW_WRITE_HOME_ADDRESS:
+		data->operation.orientation = 0x3;
+		data->operation.operation = 0x03;
+		break;
+	case DASD_ECKD_CCW_READ_HOME_ADDRESS:
+		data->operation.orientation = 0x3;
+		data->operation.operation = 0x16;
+		break;
+	case DASD_ECKD_CCW_WRITE_RECORD_ZERO:
+		data->operation.orientation = 0x1;
+		data->operation.operation = 0x03;
+		data->count++;
+		break;
+	case DASD_ECKD_CCW_READ_RECORD_ZERO:
+		data->operation.orientation = 0x3;
+		data->operation.operation = 0x16;
+		data->count++;
+		break;
+	case DASD_ECKD_CCW_WRITE:
+	case DASD_ECKD_CCW_WRITE_MT:
+	case DASD_ECKD_CCW_WRITE_KD:
+	case DASD_ECKD_CCW_WRITE_KD_MT:
+		data->auxiliary.last_bytes_used = 0x1;
+		data->length = reclen;
+		data->operation.operation = 0x01;
+		break;
+	case DASD_ECKD_CCW_WRITE_CKD:
+	case DASD_ECKD_CCW_WRITE_CKD_MT:
+		data->auxiliary.last_bytes_used = 0x1;
+		data->length = reclen;
+		data->operation.operation = 0x03;
+		break;
+	case DASD_ECKD_CCW_READ:
+	case DASD_ECKD_CCW_READ_MT:
+	case DASD_ECKD_CCW_READ_KD:
+	case DASD_ECKD_CCW_READ_KD_MT:
+		data->auxiliary.last_bytes_used = 0x1;
+		data->length = reclen;
+		data->operation.operation = 0x06;
+		break;
+	case DASD_ECKD_CCW_READ_CKD:
+	case DASD_ECKD_CCW_READ_CKD_MT:
+		data->auxiliary.last_bytes_used = 0x1;
+		data->length = reclen;
+		data->operation.operation = 0x16;
+		break;
+	case DASD_ECKD_CCW_READ_COUNT:
+		data->operation.operation = 0x06;
+		break;
+	case DASD_ECKD_CCW_ERASE:
+		data->length = reclen;
+		data->auxiliary.last_bytes_used = 0x1;
+		data->operation.operation = 0x0b;
+		break;
+	default:
+		DBF_DEV_EVENT(DBF_ERR, device, "unknown locate record "
+			      "opcode 0x%x", cmd);
+	}
+	set_ch_t(&data->seek_addr,
+		 trk / private->rdc_data.trk_per_cyl,
+		 trk % private->rdc_data.trk_per_cyl);
+	data->search_arg.cyl = data->seek_addr.cyl;
+	data->search_arg.head = data->seek_addr.head;
+	data->search_arg.record = rec_on_trk;
+}
+
+/*
+ * Returns 1 if the block is one of the special blocks that needs
+ * to get read/written with the KD variant of the command.
+ * That is DASD_ECKD_READ_KD_MT instead of DASD_ECKD_READ_MT and
+ * DASD_ECKD_WRITE_KD_MT instead of DASD_ECKD_WRITE_MT.
+ * Luckily the KD variants differ only by one bit (0x08) from the
+ * normal variant. So don't wonder about code like:
+ * if (dasd_eckd_cdl_special(blk_per_trk, recid))
+ *         ccw->cmd_code |= 0x8;
+ */
+static inline int
+dasd_eckd_cdl_special(int blk_per_trk, int recid)
+{
+	if (recid < 3)
+		return 1;
+	if (recid < blk_per_trk)
+		return 0;
+	if (recid < 2 * blk_per_trk)
+		return 1;
+	return 0;
+}
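+
+/*
+ * With 12 blocks per track, for instance, this means records 0-2 on
+ * track 0 and records 12-23 (all of track 1) take the KD variant,
+ * while records 3-11 and everything from record 24 onwards use the
+ * plain commands.
+ */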
+
+/*
+ * Returns the record size for the special blocks of the cdl format.
+ * Only returns something useful if dasd_eckd_cdl_special is true
+ * for the recid.
+ */
+static inline int
+dasd_eckd_cdl_reclen(int recid)
+{
+	if (recid < 3)
+		return sizes_trk0[recid];
+	return LABEL_SIZE;
+}
+/* create unique id from private structure. */
+static void create_uid(struct dasd_eckd_private *private)
+{
+	int count;
+	struct dasd_uid *uid;
+
+	uid = &private->uid;
+	memset(uid, 0, sizeof(struct dasd_uid));
+	memcpy(uid->vendor, private->ned->HDA_manufacturer,
+	       sizeof(uid->vendor) - 1);
+	EBCASC(uid->vendor, sizeof(uid->vendor) - 1);
+	memcpy(uid->serial, private->ned->HDA_location,
+	       sizeof(uid->serial) - 1);
+	EBCASC(uid->serial, sizeof(uid->serial) - 1);
+	uid->ssid = private->gneq->subsystemID;
+	uid->real_unit_addr = private->ned->unit_addr;
+	if (private->sneq) {
+		uid->type = private->sneq->sua_flags;
+		if (uid->type == UA_BASE_PAV_ALIAS)
+			uid->base_unit_addr = private->sneq->base_unit_addr;
+	} else {
+		uid->type = UA_BASE_DEVICE;
+	}
+	if (private->vdsneq) {
+		for (count = 0; count < 16; count++) {
+			sprintf(uid->vduit+2*count, "%02x",
+				private->vdsneq->uit[count]);
+		}
+	}
+}
+
+/*
+ * Generate device unique id that specifies the physical device.
+ */
+static int dasd_eckd_generate_uid(struct dasd_device *device)
+{
+	struct dasd_eckd_private *private = device->private;
+	unsigned long flags;
+
+	if (!private)
+		return -ENODEV;
+	if (!private->ned || !private->gneq)
+		return -ENODEV;
+	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
+	create_uid(private);
+	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
+	return 0;
+}
+
+static int dasd_eckd_get_uid(struct dasd_device *device, struct dasd_uid *uid)
+{
+	struct dasd_eckd_private *private = device->private;
+	unsigned long flags;
+
+	if (private) {
+		spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
+		*uid = private->uid;
+		spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
+		return 0;
+	}
+	return -EINVAL;
+}
+
+/*
+ * compare device UID with data of a given dasd_eckd_private structure
+ * return 0 for match
+ */
+static int dasd_eckd_compare_path_uid(struct dasd_device *device,
+				      struct dasd_eckd_private *private)
+{
+	struct dasd_uid device_uid;
+
+	create_uid(private);
+	dasd_eckd_get_uid(device, &device_uid);
+
+	return memcmp(&device_uid, &private->uid, sizeof(struct dasd_uid));
+}
+
+static void dasd_eckd_fill_rcd_cqr(struct dasd_device *device,
+				   struct dasd_ccw_req *cqr,
+				   __u8 *rcd_buffer,
+				   __u8 lpm)
+{
+	struct ccw1 *ccw;
+	/*
+	 * buffer has to start with EBCDIC "V1.0" to show
+	 * support for virtual device SNEQ
+	 */
+	rcd_buffer[0] = 0xE5;
+	rcd_buffer[1] = 0xF1;
+	rcd_buffer[2] = 0x4B;
+	rcd_buffer[3] = 0xF0;
+
+	ccw = cqr->cpaddr;
+	ccw->cmd_code = DASD_ECKD_CCW_RCD;
+	ccw->flags = 0;
+	ccw->cda = (__u32)(addr_t)rcd_buffer;
+	ccw->count = DASD_ECKD_RCD_DATA_SIZE;
+	cqr->magic = DASD_ECKD_MAGIC;
+
+	cqr->startdev = device;
+	cqr->memdev = device;
+	cqr->block = NULL;
+	cqr->expires = 10*HZ;
+	cqr->lpm = lpm;
+	cqr->retries = 256;
+	cqr->buildclk = get_tod_clock();
+	cqr->status = DASD_CQR_FILLED;
+	set_bit(DASD_CQR_VERIFY_PATH, &cqr->flags);
+}
+
+/*
+ * Wakeup helper for read_conf. If the cqr is not done and needs some
+ * error recovery, the buffer has to be re-initialized with the EBCDIC
+ * "V1.0" string to show support for virtual device SNEQ.
+ */
+static void read_conf_cb(struct dasd_ccw_req *cqr, void *data)
+{
+	struct ccw1 *ccw;
+	__u8 *rcd_buffer;
+
+	if (cqr->status !=  DASD_CQR_DONE) {
+		ccw = cqr->cpaddr;
+		rcd_buffer = (__u8 *)((addr_t) ccw->cda);
+		memset(rcd_buffer, 0, DASD_ECKD_RCD_DATA_SIZE);
+
+		rcd_buffer[0] = 0xE5;
+		rcd_buffer[1] = 0xF1;
+		rcd_buffer[2] = 0x4B;
+		rcd_buffer[3] = 0xF0;
+	}
+	dasd_wakeup_cb(cqr, data);
+}
+
+static int dasd_eckd_read_conf_immediately(struct dasd_device *device,
+					   struct dasd_ccw_req *cqr,
+					   __u8 *rcd_buffer,
+					   __u8 lpm)
+{
+	struct ciw *ciw;
+	int rc;
+	/*
+	 * sanity check: scan for RCD command in extended SenseID data
+	 * some devices do not support RCD
+	 */
+	ciw = ccw_device_get_ciw(device->cdev, CIW_TYPE_RCD);
+	if (!ciw || ciw->cmd != DASD_ECKD_CCW_RCD)
+		return -EOPNOTSUPP;
+
+	dasd_eckd_fill_rcd_cqr(device, cqr, rcd_buffer, lpm);
+	clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
+	set_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags);
+	cqr->retries = 5;
+	cqr->callback = read_conf_cb;
+	rc = dasd_sleep_on_immediatly(cqr);
+	return rc;
+}
+
+static int dasd_eckd_read_conf_lpm(struct dasd_device *device,
+				   void **rcd_buffer,
+				   int *rcd_buffer_size, __u8 lpm)
+{
+	struct ciw *ciw;
+	char *rcd_buf = NULL;
+	int ret;
+	struct dasd_ccw_req *cqr;
+
+	/*
+	 * sanity check: scan for RCD command in extended SenseID data
+	 * some devices do not support RCD
+	 */
+	ciw = ccw_device_get_ciw(device->cdev, CIW_TYPE_RCD);
+	if (!ciw || ciw->cmd != DASD_ECKD_CCW_RCD) {
+		ret = -EOPNOTSUPP;
+		goto out_error;
+	}
+	rcd_buf = kzalloc(DASD_ECKD_RCD_DATA_SIZE, GFP_KERNEL | GFP_DMA);
+	if (!rcd_buf) {
+		ret = -ENOMEM;
+		goto out_error;
+	}
+	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* RCD */,
+				   0, /* use rcd_buf as data area */
+				   device, NULL);
+	if (IS_ERR(cqr)) {
+		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
+			      "Could not allocate RCD request");
+		ret = -ENOMEM;
+		goto out_error;
+	}
+	dasd_eckd_fill_rcd_cqr(device, cqr, rcd_buf, lpm);
+	cqr->callback = read_conf_cb;
+	ret = dasd_sleep_on(cqr);
+	/*
+	 * on success we update the user input parms
+	 */
+	dasd_sfree_request(cqr, cqr->memdev);
+	if (ret)
+		goto out_error;
+
+	*rcd_buffer_size = DASD_ECKD_RCD_DATA_SIZE;
+	*rcd_buffer = rcd_buf;
+	return 0;
+out_error:
+	kfree(rcd_buf);
+	*rcd_buffer = NULL;
+	*rcd_buffer_size = 0;
+	return ret;
+}
+
+static int dasd_eckd_identify_conf_parts(struct dasd_eckd_private *private)
+{
+	struct dasd_sneq *sneq;
+	int i, count;
+
+	private->ned = NULL;
+	private->sneq = NULL;
+	private->vdsneq = NULL;
+	private->gneq = NULL;
+	count = private->conf_len / sizeof(struct dasd_sneq);
+	sneq = (struct dasd_sneq *)private->conf_data;
+	for (i = 0; i < count; ++i) {
+		if (sneq->flags.identifier == 1 && sneq->format == 1)
+			private->sneq = sneq;
+		else if (sneq->flags.identifier == 1 && sneq->format == 4)
+			private->vdsneq = (struct vd_sneq *)sneq;
+		else if (sneq->flags.identifier == 2)
+			private->gneq = (struct dasd_gneq *)sneq;
+		else if (sneq->flags.identifier == 3 && sneq->res1 == 1)
+			private->ned = (struct dasd_ned *)sneq;
+		sneq++;
+	}
+	if (!private->ned || !private->gneq) {
+		private->ned = NULL;
+		private->sneq = NULL;
+		private->vdsneq = NULL;
+		private->gneq = NULL;
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static unsigned char dasd_eckd_path_access(void *conf_data, int conf_len)
+{
+	struct dasd_gneq *gneq;
+	int i, count, found;
+
+	count = conf_len / sizeof(*gneq);
+	gneq = (struct dasd_gneq *)conf_data;
+	found = 0;
+	for (i = 0; i < count; ++i) {
+		if (gneq->flags.identifier == 2) {
+			found = 1;
+			break;
+		}
+		gneq++;
+	}
+	if (found)
+		return ((char *)gneq)[18] & 0x07;
+	else
+		return 0;
+}
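+
+/*
+ * The value returned above is taken from the low order three bits of
+ * byte 18 of the general NEQ; dasd_eckd_read_conf() maps 0x02 to the
+ * non-preferred path mask and 0x03 to the preferred path mask.
+ */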
+
+static void dasd_eckd_clear_conf_data(struct dasd_device *device)
+{
+	struct dasd_eckd_private *private = device->private;
+	int i;
+
+	private->conf_data = NULL;
+	private->conf_len = 0;
+	for (i = 0; i < 8; i++) {
+		kfree(device->path[i].conf_data);
+		device->path[i].conf_data = NULL;
+		device->path[i].cssid = 0;
+		device->path[i].ssid = 0;
+		device->path[i].chpid = 0;
+	}
+}
+
+
+static int dasd_eckd_read_conf(struct dasd_device *device)
+{
+	void *conf_data;
+	int conf_len, conf_data_saved;
+	int rc, path_err, pos;
+	__u8 lpm, opm;
+	struct dasd_eckd_private *private, path_private;
+	struct dasd_uid *uid;
+	char print_path_uid[60], print_device_uid[60];
+	struct channel_path_desc_fmt0 *chp_desc;
+	struct subchannel_id sch_id;
+
+	private = device->private;
+	opm = ccw_device_get_path_mask(device->cdev);
+	ccw_device_get_schid(device->cdev, &sch_id);
+	conf_data_saved = 0;
+	path_err = 0;
+	/* get configuration data per operational path */
+	for (lpm = 0x80; lpm; lpm>>= 1) {
+		if (!(lpm & opm))
+			continue;
+		rc = dasd_eckd_read_conf_lpm(device, &conf_data,
+					     &conf_len, lpm);
+		if (rc && rc != -EOPNOTSUPP) {	/* -EOPNOTSUPP is ok */
+			DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
+					"Read configuration data returned "
+					"error %d", rc);
+			return rc;
+		}
+		if (conf_data == NULL) {
+			DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
+					"No configuration data "
+					"retrieved");
+			/* no further analysis possible */
+			dasd_path_add_opm(device, opm);
+			continue;	/* no error */
+		}
+		/* save first valid configuration data */
+		if (!conf_data_saved) {
+			/* initially clear previously stored conf_data */
+			dasd_eckd_clear_conf_data(device);
+			private->conf_data = conf_data;
+			private->conf_len = conf_len;
+			if (dasd_eckd_identify_conf_parts(private)) {
+				private->conf_data = NULL;
+				private->conf_len = 0;
+				kfree(conf_data);
+				continue;
+			}
+			pos = pathmask_to_pos(lpm);
+			/* store per path conf_data */
+			device->path[pos].conf_data = conf_data;
+			device->path[pos].cssid = sch_id.cssid;
+			device->path[pos].ssid = sch_id.ssid;
+			chp_desc = ccw_device_get_chp_desc(device->cdev, pos);
+			if (chp_desc)
+				device->path[pos].chpid = chp_desc->chpid;
+			kfree(chp_desc);
+			/*
+			 * build device UID so that other path data
+			 * can be compared to it
+			 */
+			dasd_eckd_generate_uid(device);
+			conf_data_saved++;
+		} else {
+			path_private.conf_data = conf_data;
+			path_private.conf_len = DASD_ECKD_RCD_DATA_SIZE;
+			if (dasd_eckd_identify_conf_parts(
+				    &path_private)) {
+				path_private.conf_data = NULL;
+				path_private.conf_len = 0;
+				kfree(conf_data);
+				continue;
+			}
+			if (dasd_eckd_compare_path_uid(
+				    device, &path_private)) {
+				uid = &path_private.uid;
+				if (strlen(uid->vduit) > 0)
+					snprintf(print_path_uid,
+						 sizeof(print_path_uid),
+						 "%s.%s.%04x.%02x.%s",
+						 uid->vendor, uid->serial,
+						 uid->ssid, uid->real_unit_addr,
+						 uid->vduit);
+				else
+					snprintf(print_path_uid,
+						 sizeof(print_path_uid),
+						 "%s.%s.%04x.%02x",
+						 uid->vendor, uid->serial,
+						 uid->ssid,
+						 uid->real_unit_addr);
+				uid = &private->uid;
+				if (strlen(uid->vduit) > 0)
+					snprintf(print_device_uid,
+						 sizeof(print_device_uid),
+						 "%s.%s.%04x.%02x.%s",
+						 uid->vendor, uid->serial,
+						 uid->ssid, uid->real_unit_addr,
+						 uid->vduit);
+				else
+					snprintf(print_device_uid,
+						 sizeof(print_device_uid),
+						 "%s.%s.%04x.%02x",
+						 uid->vendor, uid->serial,
+						 uid->ssid,
+						 uid->real_unit_addr);
+				dev_err(&device->cdev->dev,
+					"Not all channel paths lead to "
+					"the same device, path %02X leads to "
+					"device %s instead of %s\n", lpm,
+					print_path_uid, print_device_uid);
+				path_err = -EINVAL;
+				dasd_path_add_cablepm(device, lpm);
+				continue;
+			}
+			pos = pathmask_to_pos(lpm);
+			/* store per path conf_data */
+			device->path[pos].conf_data = conf_data;
+			device->path[pos].cssid = sch_id.cssid;
+			device->path[pos].ssid = sch_id.ssid;
+			chp_desc = ccw_device_get_chp_desc(device->cdev, pos);
+			if (chp_desc)
+				device->path[pos].chpid = chp_desc->chpid;
+			kfree(chp_desc);
+			path_private.conf_data = NULL;
+			path_private.conf_len = 0;
+		}
+		switch (dasd_eckd_path_access(conf_data, conf_len)) {
+		case 0x02:
+			dasd_path_add_nppm(device, lpm);
+			break;
+		case 0x03:
+			dasd_path_add_ppm(device, lpm);
+			break;
+		}
+		if (!dasd_path_get_opm(device)) {
+			dasd_path_set_opm(device, lpm);
+			dasd_generic_path_operational(device);
+		} else {
+			dasd_path_add_opm(device, lpm);
+		}
+	}
+
+	return path_err;
+}
+
+static u32 get_fcx_max_data(struct dasd_device *device)
+{
+	struct dasd_eckd_private *private = device->private;
+	int fcx_in_css, fcx_in_gneq, fcx_in_features;
+	int tpm, mdc;
+
+	if (dasd_nofcx)
+		return 0;
+	/* is transport mode supported? */
+	fcx_in_css = css_general_characteristics.fcx;
+	fcx_in_gneq = private->gneq->reserved2[7] & 0x04;
+	fcx_in_features = private->features.feature[40] & 0x80;
+	tpm = fcx_in_css && fcx_in_gneq && fcx_in_features;
+
+	if (!tpm)
+		return 0;
+
+	mdc = ccw_device_get_mdc(device->cdev, 0);
+	if (mdc < 0) {
+		dev_warn(&device->cdev->dev, "Detecting the maximum supported data size for zHPF requests failed\n");
+		return 0;
+	} else {
+		return (u32)mdc * FCX_MAX_DATA_FACTOR;
+	}
+}
+
+static int verify_fcx_max_data(struct dasd_device *device, __u8 lpm)
+{
+	struct dasd_eckd_private *private = device->private;
+	int mdc;
+	u32 fcx_max_data;
+
+	if (private->fcx_max_data) {
+		mdc = ccw_device_get_mdc(device->cdev, lpm);
+		if (mdc < 0) {
+			dev_warn(&device->cdev->dev,
+				 "Detecting the maximum data size for zHPF "
+				 "requests failed (rc=%d) for a new path %x\n",
+				 mdc, lpm);
+			return mdc;
+		}
+		fcx_max_data = (u32)mdc * FCX_MAX_DATA_FACTOR;
+		if (fcx_max_data < private->fcx_max_data) {
+			dev_warn(&device->cdev->dev,
+				 "The maximum data size for zHPF requests %u "
+				 "on a new path %x is below the active maximum "
+				 "%u\n", fcx_max_data, lpm,
+				 private->fcx_max_data);
+			return -EACCES;
+		}
+	}
+	return 0;
+}
+
+static int rebuild_device_uid(struct dasd_device *device,
+			      struct path_verification_work_data *data)
+{
+	struct dasd_eckd_private *private = device->private;
+	__u8 lpm, opm = dasd_path_get_opm(device);
+	int rc = -ENODEV;
+
+	for (lpm = 0x80; lpm; lpm >>= 1) {
+		if (!(lpm & opm))
+			continue;
+		memset(&data->rcd_buffer, 0, sizeof(data->rcd_buffer));
+		memset(&data->cqr, 0, sizeof(data->cqr));
+		data->cqr.cpaddr = &data->ccw;
+		rc = dasd_eckd_read_conf_immediately(device, &data->cqr,
+						     data->rcd_buffer,
+						     lpm);
+
+		if (rc) {
+			if (rc == -EOPNOTSUPP) /* -EOPNOTSUPP is ok */
+				continue;
+			DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
+					"Read configuration data "
+					"returned error %d", rc);
+			break;
+		}
+		memcpy(private->conf_data, data->rcd_buffer,
+		       DASD_ECKD_RCD_DATA_SIZE);
+		if (dasd_eckd_identify_conf_parts(private)) {
+			rc = -ENODEV;
+		} else /* first valid path is enough */
+			break;
+	}
+
+	if (!rc)
+		rc = dasd_eckd_generate_uid(device);
+
+	return rc;
+}
+
+static void do_path_verification_work(struct work_struct *work)
+{
+	struct path_verification_work_data *data;
+	struct dasd_device *device;
+	struct dasd_eckd_private path_private;
+	struct dasd_uid *uid;
+	__u8 path_rcd_buf[DASD_ECKD_RCD_DATA_SIZE];
+	__u8 lpm, opm, npm, ppm, epm, hpfpm, cablepm;
+	unsigned long flags;
+	char print_uid[60];
+	int rc;
+
+	data = container_of(work, struct path_verification_work_data, worker);
+	device = data->device;
+
+	/* delay path verification until device was resumed */
+	if (test_bit(DASD_FLAG_SUSPENDED, &device->flags)) {
+		schedule_work(work);
+		return;
+	}
+	/* check if path verification already running and delay if so */
+	if (test_and_set_bit(DASD_FLAG_PATH_VERIFY, &device->flags)) {
+		schedule_work(work);
+		return;
+	}
+	opm = 0;
+	npm = 0;
+	ppm = 0;
+	epm = 0;
+	hpfpm = 0;
+	cablepm = 0;
+
+	for (lpm = 0x80; lpm; lpm >>= 1) {
+		if (!(lpm & data->tbvpm))
+			continue;
+		memset(&data->rcd_buffer, 0, sizeof(data->rcd_buffer));
+		memset(&data->cqr, 0, sizeof(data->cqr));
+		data->cqr.cpaddr = &data->ccw;
+		rc = dasd_eckd_read_conf_immediately(device, &data->cqr,
+						     data->rcd_buffer,
+						     lpm);
+		if (!rc) {
+			switch (dasd_eckd_path_access(data->rcd_buffer,
+						      DASD_ECKD_RCD_DATA_SIZE)
+				) {
+			case 0x02:
+				npm |= lpm;
+				break;
+			case 0x03:
+				ppm |= lpm;
+				break;
+			}
+			opm |= lpm;
+		} else if (rc == -EOPNOTSUPP) {
+			DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
+					"path verification: No configuration "
+					"data retrieved");
+			opm |= lpm;
+		} else if (rc == -EAGAIN) {
+			DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
+					"path verification: device is stopped,"
+					" try again later");
+			epm |= lpm;
+		} else {
+			dev_warn(&device->cdev->dev,
+				 "Reading device feature codes failed "
+				 "(rc=%d) for new path %x\n", rc, lpm);
+			continue;
+		}
+		if (verify_fcx_max_data(device, lpm)) {
+			opm &= ~lpm;
+			npm &= ~lpm;
+			ppm &= ~lpm;
+			hpfpm |= lpm;
+			continue;
+		}
+
+		/*
+		 * save conf_data for comparison after
+		 * rebuild_device_uid may have changed
+		 * the original data
+		 */
+		memcpy(&path_rcd_buf, data->rcd_buffer,
+		       DASD_ECKD_RCD_DATA_SIZE);
+		path_private.conf_data = (void *) &path_rcd_buf;
+		path_private.conf_len = DASD_ECKD_RCD_DATA_SIZE;
+		if (dasd_eckd_identify_conf_parts(&path_private)) {
+			path_private.conf_data = NULL;
+			path_private.conf_len = 0;
+			continue;
+		}
+
+		/*
+		 * compare path UID with device UID only if at least
+		 * one valid path is left
+		 * in other case the device UID may have changed and
+		 * the first working path UID will be used as device UID
+		 */
+		if (dasd_path_get_opm(device) &&
+		    dasd_eckd_compare_path_uid(device, &path_private)) {
+			/*
+			 * the comparison was not successful
+			 * rebuild the device UID with at least one
+			 * known path in case a z/VM hyperswap command
+			 * has changed the device
+			 *
+			 * after this compare again
+			 *
+			 * if either the rebuild or the recompare fails
+			 * the path can not be used
+			 */
+			if (rebuild_device_uid(device, data) ||
+			    dasd_eckd_compare_path_uid(
+				    device, &path_private)) {
+				uid = &path_private.uid;
+				if (strlen(uid->vduit) > 0)
+					snprintf(print_uid, sizeof(print_uid),
+						 "%s.%s.%04x.%02x.%s",
+						 uid->vendor, uid->serial,
+						 uid->ssid, uid->real_unit_addr,
+						 uid->vduit);
+				else
+					snprintf(print_uid, sizeof(print_uid),
+						 "%s.%s.%04x.%02x",
+						 uid->vendor, uid->serial,
+						 uid->ssid,
+						 uid->real_unit_addr);
+				dev_err(&device->cdev->dev,
+					"The newly added channel path %02X "
+					"will not be used because it leads "
+					"to a different device %s\n",
+					lpm, print_uid);
+				opm &= ~lpm;
+				npm &= ~lpm;
+				ppm &= ~lpm;
+				cablepm |= lpm;
+				continue;
+			}
+		}
+
+		/*
+		 * There is a small chance that a path is lost again between
+		 * above path verification and the following modification of
+		 * the device opm mask. We could avoid that race here by using
+		 * yet another path mask, but we rather deal with this unlikely
+		 * situation in dasd_start_IO.
+		 */
+		spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
+		if (!dasd_path_get_opm(device) && opm) {
+			dasd_path_set_opm(device, opm);
+			dasd_generic_path_operational(device);
+		} else {
+			dasd_path_add_opm(device, opm);
+		}
+		dasd_path_add_nppm(device, npm);
+		dasd_path_add_ppm(device, ppm);
+		dasd_path_add_tbvpm(device, epm);
+		dasd_path_add_cablepm(device, cablepm);
+		dasd_path_add_nohpfpm(device, hpfpm);
+		spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
+	}
+	clear_bit(DASD_FLAG_PATH_VERIFY, &device->flags);
+	dasd_put_device(device);
+	if (data->isglobal)
+		mutex_unlock(&dasd_path_verification_mutex);
+	else
+		kfree(data);
+}
+
+static int dasd_eckd_verify_path(struct dasd_device *device, __u8 lpm)
+{
+	struct path_verification_work_data *data;
+
+	data = kmalloc(sizeof(*data), GFP_ATOMIC | GFP_DMA);
+	if (!data) {
+		if (mutex_trylock(&dasd_path_verification_mutex)) {
+			data = path_verification_worker;
+			data->isglobal = 1;
+		} else
+			return -ENOMEM;
+	} else {
+		memset(data, 0, sizeof(*data));
+		data->isglobal = 0;
+	}
+	INIT_WORK(&data->worker, do_path_verification_work);
+	dasd_get_device(device);
+	data->device = device;
+	data->tbvpm = lpm;
+	schedule_work(&data->worker);
+	return 0;
+}
+
+static void dasd_eckd_reset_path(struct dasd_device *device, __u8 pm)
+{
+	struct dasd_eckd_private *private = device->private;
+	unsigned long flags;
+
+	if (!private->fcx_max_data)
+		private->fcx_max_data = get_fcx_max_data(device);
+	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
+	dasd_path_set_tbvpm(device, pm ? : dasd_path_get_notoperpm(device));
+	dasd_schedule_device_bh(device);
+	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
+}
+
+static int dasd_eckd_read_features(struct dasd_device *device)
+{
+	struct dasd_eckd_private *private = device->private;
+	struct dasd_psf_prssd_data *prssdp;
+	struct dasd_rssd_features *features;
+	struct dasd_ccw_req *cqr;
+	struct ccw1 *ccw;
+	int rc;
+
+	memset(&private->features, 0, sizeof(struct dasd_rssd_features));
+	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ + 1 /* RSSD */,
+				   (sizeof(struct dasd_psf_prssd_data) +
+				    sizeof(struct dasd_rssd_features)),
+				   device, NULL);
+	if (IS_ERR(cqr)) {
+		DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s", "Could not "
+				"allocate initialization request");
+		return PTR_ERR(cqr);
+	}
+	cqr->startdev = device;
+	cqr->memdev = device;
+	cqr->block = NULL;
+	cqr->retries = 256;
+	cqr->expires = 10 * HZ;
+
+	/* Prepare for Read Subsystem Data */
+	prssdp = (struct dasd_psf_prssd_data *) cqr->data;
+	memset(prssdp, 0, sizeof(struct dasd_psf_prssd_data));
+	prssdp->order = PSF_ORDER_PRSSD;
+	prssdp->suborder = 0x41;	/* Read Feature Codes */
+	/* all other bytes of prssdp must be zero */
+
+	ccw = cqr->cpaddr;
+	ccw->cmd_code = DASD_ECKD_CCW_PSF;
+	ccw->count = sizeof(struct dasd_psf_prssd_data);
+	ccw->flags |= CCW_FLAG_CC;
+	ccw->cda = (__u32)(addr_t) prssdp;
+
+	/* Read Subsystem Data - feature codes */
+	features = (struct dasd_rssd_features *) (prssdp + 1);
+	memset(features, 0, sizeof(struct dasd_rssd_features));
+
+	ccw++;
+	ccw->cmd_code = DASD_ECKD_CCW_RSSD;
+	ccw->count = sizeof(struct dasd_rssd_features);
+	ccw->cda = (__u32)(addr_t) features;
+
+	cqr->buildclk = get_tod_clock();
+	cqr->status = DASD_CQR_FILLED;
+	rc = dasd_sleep_on(cqr);
+	if (rc == 0) {
+		prssdp = (struct dasd_psf_prssd_data *) cqr->data;
+		features = (struct dasd_rssd_features *) (prssdp + 1);
+		memcpy(&private->features, features,
+		       sizeof(struct dasd_rssd_features));
+	} else
+		dev_warn(&device->cdev->dev, "Reading device feature codes"
+			 " failed with rc=%d\n", rc);
+	dasd_sfree_request(cqr, cqr->memdev);
+	return rc;
+}
+
+
+/*
+ * Build CP for Perform Subsystem Function - SSC.
+ */
+static struct dasd_ccw_req *dasd_eckd_build_psf_ssc(struct dasd_device *device,
+						    int enable_pav)
+{
+	struct dasd_ccw_req *cqr;
+	struct dasd_psf_ssc_data *psf_ssc_data;
+	struct ccw1 *ccw;
+
+	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */,
+				   sizeof(struct dasd_psf_ssc_data),
+				   device, NULL);
+
+	if (IS_ERR(cqr)) {
+		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
+			   "Could not allocate PSF-SSC request");
+		return cqr;
+	}
+	psf_ssc_data = (struct dasd_psf_ssc_data *)cqr->data;
+	psf_ssc_data->order = PSF_ORDER_SSC;
+	psf_ssc_data->suborder = 0xc0;
+	if (enable_pav) {
+		psf_ssc_data->suborder |= 0x08;
+		psf_ssc_data->reserved[0] = 0x88;
+	}
+	ccw = cqr->cpaddr;
+	ccw->cmd_code = DASD_ECKD_CCW_PSF;
+	ccw->cda = (__u32)(addr_t)psf_ssc_data;
+	ccw->count = 66;
+
+	cqr->startdev = device;
+	cqr->memdev = device;
+	cqr->block = NULL;
+	cqr->retries = 256;
+	cqr->expires = 10*HZ;
+	cqr->buildclk = get_tod_clock();
+	cqr->status = DASD_CQR_FILLED;
+	return cqr;
+}
+
+/*
+ * Perform Subsystem Function.
+ * It is necessary to trigger CIO for channel revalidation since this
+ * call might change behaviour of DASD devices.
+ */
+static int
+dasd_eckd_psf_ssc(struct dasd_device *device, int enable_pav,
+		  unsigned long flags)
+{
+	struct dasd_ccw_req *cqr;
+	int rc;
+
+	cqr = dasd_eckd_build_psf_ssc(device, enable_pav);
+	if (IS_ERR(cqr))
+		return PTR_ERR(cqr);
+
+	/*
+	 * set flags, e.g. turn on failfast, to prevent blocking;
+	 * the calling function should handle failed requests
+	 */
+	cqr->flags |= flags;
+
+	rc = dasd_sleep_on(cqr);
+	if (!rc)
+		/* trigger CIO to reprobe devices */
+		css_schedule_reprobe();
+	else if (cqr->intrc == -EAGAIN)
+		rc = -EAGAIN;
+
+	dasd_sfree_request(cqr, cqr->memdev);
+	return rc;
+}
+
+/*
+ * Validate the storage server of the current device.
+ */
+static int dasd_eckd_validate_server(struct dasd_device *device,
+				     unsigned long flags)
+{
+	struct dasd_eckd_private *private = device->private;
+	int enable_pav, rc;
+
+	if (private->uid.type == UA_BASE_PAV_ALIAS ||
+	    private->uid.type == UA_HYPER_PAV_ALIAS)
+		return 0;
+	if (dasd_nopav || MACHINE_IS_VM)
+		enable_pav = 0;
+	else
+		enable_pav = 1;
+	rc = dasd_eckd_psf_ssc(device, enable_pav, flags);
+
+	/* the requested feature may not be available on the server,
+	 * therefore just report the error and go ahead */
+	DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "PSF-SSC for SSID %04x "
+			"returned rc=%d", private->uid.ssid, rc);
+	return rc;
+}
+
+/*
+ * worker to do a validate server in case of a lost pathgroup
+ */
+static void dasd_eckd_do_validate_server(struct work_struct *work)
+{
+	struct dasd_device *device = container_of(work, struct dasd_device,
+						  kick_validate);
+	unsigned long flags = 0;
+
+	set_bit(DASD_CQR_FLAGS_FAILFAST, &flags);
+	if (dasd_eckd_validate_server(device, flags)
+	    == -EAGAIN) {
+		/* schedule worker again if failed */
+		schedule_work(&device->kick_validate);
+		return;
+	}
+
+	dasd_put_device(device);
+}
+
+static void dasd_eckd_kick_validate_server(struct dasd_device *device)
+{
+	dasd_get_device(device);
+	/* exit if device not online or in offline processing */
+	if (test_bit(DASD_FLAG_OFFLINE, &device->flags) ||
+	   device->state < DASD_STATE_ONLINE) {
+		dasd_put_device(device);
+		return;
+	}
+	/* queue call to do_validate_server to the kernel event daemon. */
+	if (!schedule_work(&device->kick_validate))
+		dasd_put_device(device);
+}
+
+/*
+ * Check device characteristics.
+ * If the device is accessible using ECKD discipline, the device is enabled.
+ */
+static int
+dasd_eckd_check_characteristics(struct dasd_device *device)
+{
+	struct dasd_eckd_private *private = device->private;
+	struct dasd_block *block;
+	struct dasd_uid temp_uid;
+	int rc, i;
+	int readonly;
+	unsigned long value;
+
+	/* setup work queue for validate server */
+	INIT_WORK(&device->kick_validate, dasd_eckd_do_validate_server);
+	/* setup work queue for summary unit check */
+	INIT_WORK(&device->suc_work, dasd_alias_handle_summary_unit_check);
+
+	if (!ccw_device_is_pathgroup(device->cdev)) {
+		dev_warn(&device->cdev->dev,
+			 "A channel path group could not be established\n");
+		return -EIO;
+	}
+	if (!ccw_device_is_multipath(device->cdev)) {
+		dev_info(&device->cdev->dev,
+			 "The DASD is not operating in multipath mode\n");
+	}
+	if (!private) {
+		private = kzalloc(sizeof(*private), GFP_KERNEL | GFP_DMA);
+		if (!private) {
+			dev_warn(&device->cdev->dev,
+				 "Allocating memory for private DASD data "
+				 "failed\n");
+			return -ENOMEM;
+		}
+		device->private = private;
+	} else {
+		memset(private, 0, sizeof(*private));
+	}
+	/* Invalidate status of initial analysis. */
+	private->init_cqr_status = -1;
+	/* Set default cache operations. */
+	private->attrib.operation = DASD_NORMAL_CACHE;
+	private->attrib.nr_cyl = 0;
+
+	/* Read Configuration Data */
+	rc = dasd_eckd_read_conf(device);
+	if (rc)
+		goto out_err1;
+
+	/* set some default values */
+	device->default_expires = DASD_EXPIRES;
+	device->default_retries = DASD_RETRIES;
+	device->path_thrhld = DASD_ECKD_PATH_THRHLD;
+	device->path_interval = DASD_ECKD_PATH_INTERVAL;
+
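+	/*
+	 * The gneq timeout is encoded as number * 10^value seconds:
+	 * value = 2 and number = 3, for example, yield a
+	 * default_expires of 300 seconds.
+	 */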
+	if (private->gneq) {
+		value = 1;
+		for (i = 0; i < private->gneq->timeout.value; i++)
+			value = 10 * value;
+		value = value * private->gneq->timeout.number;
+		/* do not accept useless values */
+		if (value != 0 && value <= DASD_EXPIRES_MAX)
+			device->default_expires = value;
+	}
+
+	dasd_eckd_get_uid(device, &temp_uid);
+	if (temp_uid.type == UA_BASE_DEVICE) {
+		block = dasd_alloc_block();
+		if (IS_ERR(block)) {
+			DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
+					"could not allocate dasd "
+					"block structure");
+			rc = PTR_ERR(block);
+			goto out_err1;
+		}
+		device->block = block;
+		block->base = device;
+	}
+
+	/* register lcu with alias handling, enable PAV */
+	rc = dasd_alias_make_device_known_to_lcu(device);
+	if (rc)
+		goto out_err2;
+
+	dasd_eckd_validate_server(device, 0);
+
+	/* device may report different configuration data after LCU setup */
+	rc = dasd_eckd_read_conf(device);
+	if (rc)
+		goto out_err3;
+
+	/* Read Feature Codes */
+	dasd_eckd_read_features(device);
+
+	/* Read Device Characteristics */
+	rc = dasd_generic_read_dev_chars(device, DASD_ECKD_MAGIC,
+					 &private->rdc_data, 64);
+	if (rc) {
+		DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
+				"Read device characteristic failed, rc=%d", rc);
+		goto out_err3;
+	}
+
+	if ((device->features & DASD_FEATURE_USERAW) &&
+	    !(private->rdc_data.facilities.RT_in_LR)) {
+		dev_err(&device->cdev->dev, "The storage server does not "
+			"support raw-track access\n");
+		rc = -EINVAL;
+		goto out_err3;
+	}
+
+	/* find the valid cylinder size */
+	if (private->rdc_data.no_cyl == LV_COMPAT_CYL &&
+	    private->rdc_data.long_no_cyl)
+		private->real_cyl = private->rdc_data.long_no_cyl;
+	else
+		private->real_cyl = private->rdc_data.no_cyl;
+
+	private->fcx_max_data = get_fcx_max_data(device);
+
+	readonly = dasd_device_is_ro(device);
+	if (readonly)
+		set_bit(DASD_FLAG_DEVICE_RO, &device->flags);
+
+	dev_info(&device->cdev->dev, "New DASD %04X/%02X (CU %04X/%02X) "
+		 "with %d cylinders, %d heads, %d sectors%s\n",
+		 private->rdc_data.dev_type,
+		 private->rdc_data.dev_model,
+		 private->rdc_data.cu_type,
+		 private->rdc_data.cu_model.model,
+		 private->real_cyl,
+		 private->rdc_data.trk_per_cyl,
+		 private->rdc_data.sec_per_trk,
+		 readonly ? ", read-only device" : "");
+	return 0;
+
+out_err3:
+	dasd_alias_disconnect_device_from_lcu(device);
+out_err2:
+	dasd_free_block(device->block);
+	device->block = NULL;
+out_err1:
+	kfree(private->conf_data);
+	kfree(device->private);
+	device->private = NULL;
+	return rc;
+}
+
+static void dasd_eckd_uncheck_device(struct dasd_device *device)
+{
+	struct dasd_eckd_private *private = device->private;
+	int i;
+
+	if (!private)
+		return;
+
+	dasd_alias_disconnect_device_from_lcu(device);
+	private->ned = NULL;
+	private->sneq = NULL;
+	private->vdsneq = NULL;
+	private->gneq = NULL;
+	private->conf_len = 0;
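+	/*
+	 * The per-path configuration data may alias private->conf_data;
+	 * when the aliased buffer is freed, clear the private pointer so
+	 * the same memory is not freed again below.
+	 */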
+	for (i = 0; i < 8; i++) {
+		kfree(device->path[i].conf_data);
+		if ((__u8 *)device->path[i].conf_data ==
+		    private->conf_data) {
+			private->conf_data = NULL;
+			private->conf_len = 0;
+		}
+		device->path[i].conf_data = NULL;
+		device->path[i].cssid = 0;
+		device->path[i].ssid = 0;
+		device->path[i].chpid = 0;
+	}
+	kfree(private->conf_data);
+	private->conf_data = NULL;
+}
+
+static struct dasd_ccw_req *
+dasd_eckd_analysis_ccw(struct dasd_device *device)
+{
+	struct dasd_eckd_private *private = device->private;
+	struct eckd_count *count_data;
+	struct LO_eckd_data *LO_data;
+	struct dasd_ccw_req *cqr;
+	struct ccw1 *ccw;
+	int cplength, datasize;
+	int i;
+
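+	/*
+	 * The analysis channel program consists of eight CCWs: a define
+	 * extent, a locate record plus four read count CCWs for the first
+	 * four records on track 0, and a locate record plus one read count
+	 * CCW for the first record on track 2.
+	 */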
+	cplength = 8;
+	datasize = sizeof(struct DE_eckd_data) + 2*sizeof(struct LO_eckd_data);
+	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength, datasize, device,
+				   NULL);
+	if (IS_ERR(cqr))
+		return cqr;
+	ccw = cqr->cpaddr;
+	/* Define extent for the first 3 tracks. */
+	define_extent(ccw++, cqr->data, 0, 2,
+		      DASD_ECKD_CCW_READ_COUNT, device, 0);
+	LO_data = cqr->data + sizeof(struct DE_eckd_data);
+	/* Locate record for the first 4 records on track 0. */
+	ccw[-1].flags |= CCW_FLAG_CC;
+	locate_record(ccw++, LO_data++, 0, 0, 4,
+		      DASD_ECKD_CCW_READ_COUNT, device, 0);
+
+	count_data = private->count_area;
+	for (i = 0; i < 4; i++) {
+		ccw[-1].flags |= CCW_FLAG_CC;
+		ccw->cmd_code = DASD_ECKD_CCW_READ_COUNT;
+		ccw->flags = 0;
+		ccw->count = 8;
+		ccw->cda = (__u32)(addr_t) count_data;
+		ccw++;
+		count_data++;
+	}
+
+	/* Locate record for the first record on track 2. */
+	ccw[-1].flags |= CCW_FLAG_CC;
+	locate_record(ccw++, LO_data++, 2, 0, 1,
+		      DASD_ECKD_CCW_READ_COUNT, device, 0);
+	/* Read count ccw. */
+	ccw[-1].flags |= CCW_FLAG_CC;
+	ccw->cmd_code = DASD_ECKD_CCW_READ_COUNT;
+	ccw->flags = 0;
+	ccw->count = 8;
+	ccw->cda = (__u32)(addr_t) count_data;
+
+	cqr->block = NULL;
+	cqr->startdev = device;
+	cqr->memdev = device;
+	cqr->retries = 255;
+	cqr->buildclk = get_tod_clock();
+	cqr->status = DASD_CQR_FILLED;
+	return cqr;
+}
+
+/* differentiate between 'no record found' and any other error */
+static int dasd_eckd_analysis_evaluation(struct dasd_ccw_req *init_cqr)
+{
+	char *sense;
+	if (init_cqr->status == DASD_CQR_DONE)
+		return INIT_CQR_OK;
+	else if (init_cqr->status == DASD_CQR_NEED_ERP ||
+		 init_cqr->status == DASD_CQR_FAILED) {
+		sense = dasd_get_sense(&init_cqr->irb);
+		if (sense && (sense[1] & SNS1_NO_REC_FOUND))
+			return INIT_CQR_UNFORMATTED;
+		else
+			return INIT_CQR_ERROR;
+	} else
+		return INIT_CQR_ERROR;
+}
+
+/*
+ * This is the callback function for the init_analysis cqr. It saves
+ * the status of the initial analysis ccw before it frees it and kicks
+ * the device to continue the startup sequence. This will call
+ * dasd_eckd_do_analysis again (if the device has not been marked
+ * for deletion in the meantime).
+ */
+static void dasd_eckd_analysis_callback(struct dasd_ccw_req *init_cqr,
+					void *data)
+{
+	struct dasd_device *device = init_cqr->startdev;
+	struct dasd_eckd_private *private = device->private;
+
+	private->init_cqr_status = dasd_eckd_analysis_evaluation(init_cqr);
+	dasd_sfree_request(init_cqr, device);
+	dasd_kick_device(device);
+}
+
+static int dasd_eckd_start_analysis(struct dasd_block *block)
+{
+	struct dasd_ccw_req *init_cqr;
+
+	init_cqr = dasd_eckd_analysis_ccw(block->base);
+	if (IS_ERR(init_cqr))
+		return PTR_ERR(init_cqr);
+	init_cqr->callback = dasd_eckd_analysis_callback;
+	init_cqr->callback_data = NULL;
+	init_cqr->expires = 5*HZ;
+	/* first try without ERP, so we can later handle unformatted
+	 * devices as a special case
+	 */
+	clear_bit(DASD_CQR_FLAGS_USE_ERP, &init_cqr->flags);
+	init_cqr->retries = 0;
+	dasd_add_request_head(init_cqr);
+	return -EAGAIN;
+}
+
+static int dasd_eckd_end_analysis(struct dasd_block *block)
+{
+	struct dasd_device *device = block->base;
+	struct dasd_eckd_private *private = device->private;
+	struct eckd_count *count_area;
+	unsigned int sb, blk_per_trk;
+	int status, i;
+	struct dasd_ccw_req *init_cqr;
+
+	status = private->init_cqr_status;
+	private->init_cqr_status = -1;
+	if (status == INIT_CQR_ERROR) {
+		/* try again, this time with full ERP */
+		init_cqr = dasd_eckd_analysis_ccw(device);
+		dasd_sleep_on(init_cqr);
+		status = dasd_eckd_analysis_evaluation(init_cqr);
+		dasd_sfree_request(init_cqr, device);
+	}
+
+	if (device->features & DASD_FEATURE_USERAW) {
+		block->bp_block = DASD_RAW_BLOCKSIZE;
+		blk_per_trk = DASD_RAW_BLOCK_PER_TRACK;
+		block->s2b_shift = 3;
+		goto raw;
+	}
+
+	if (status == INIT_CQR_UNFORMATTED) {
+		dev_warn(&device->cdev->dev, "The DASD is not formatted\n");
+		return -EMEDIUMTYPE;
+	} else if (status == INIT_CQR_ERROR) {
+		dev_err(&device->cdev->dev,
+			"Detecting the DASD disk layout failed because "
+			"of an I/O error\n");
+		return -EIO;
+	}
+
+	private->uses_cdl = 1;
+	/* Check Track 0 for Compatible Disk Layout */
+	count_area = NULL;
+	for (i = 0; i < 3; i++) {
+		if (private->count_area[i].kl != 4 ||
+		    private->count_area[i].dl != dasd_eckd_cdl_reclen(i) - 4 ||
+		    private->count_area[i].cyl != 0 ||
+		    private->count_area[i].head != count_area_head[i] ||
+		    private->count_area[i].record != count_area_rec[i]) {
+			private->uses_cdl = 0;
+			break;
+		}
+	}
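+	/*
+	 * All three CDL records matched; use the record read from track 2
+	 * (count_area[4]) as the reference for the regular block size.
+	 */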
+	if (i == 3)
+		count_area = &private->count_area[4];
+
+	if (private->uses_cdl == 0) {
+		for (i = 0; i < 5; i++) {
+			if ((private->count_area[i].kl != 0) ||
+			    (private->count_area[i].dl !=
+			     private->count_area[0].dl) ||
+			    private->count_area[i].cyl !=  0 ||
+			    private->count_area[i].head != count_area_head[i] ||
+			    private->count_area[i].record != count_area_rec[i])
+				break;
+		}
+		if (i == 5)
+			count_area = &private->count_area[0];
+	} else {
+		if (private->count_area[3].record == 1)
+			dev_warn(&device->cdev->dev,
+				 "Track 0 has no records following the VTOC\n");
+	}
+
+	if (count_area != NULL && count_area->kl == 0) {
+		/* we found nothing violating our disk layout */
+		if (dasd_check_blocksize(count_area->dl) == 0)
+			block->bp_block = count_area->dl;
+	}
+	if (block->bp_block == 0) {
+		dev_warn(&device->cdev->dev,
+			 "The disk layout of the DASD is not supported\n");
+		return -EMEDIUMTYPE;
+	}
+	block->s2b_shift = 0;	/* bits to shift 512 to get a block */
+	for (sb = 512; sb < block->bp_block; sb = sb << 1)
+		block->s2b_shift++;
+
+	blk_per_trk = recs_per_track(&private->rdc_data, 0, block->bp_block);
+
+raw:
+	block->blocks = (private->real_cyl *
+			  private->rdc_data.trk_per_cyl *
+			  blk_per_trk);
+
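+	/*
+	 * Total size in KB: number of blocks times 512-byte sectors per
+	 * block (bp_block >> 9), halved to convert sectors to KB.
+	 */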
+	dev_info(&device->cdev->dev,
+		 "DASD with %d KB/block, %d KB total size, %d KB/track, "
+		 "%s\n", (block->bp_block >> 10),
+		 ((private->real_cyl *
+		   private->rdc_data.trk_per_cyl *
+		   blk_per_trk * (block->bp_block >> 9)) >> 1),
+		 ((blk_per_trk * block->bp_block) >> 10),
+		 private->uses_cdl ?
+		 "compatible disk layout" : "linux disk layout");
+
+	return 0;
+}
+
+static int dasd_eckd_do_analysis(struct dasd_block *block)
+{
+	struct dasd_eckd_private *private = block->base->private;
+
+	if (private->init_cqr_status < 0)
+		return dasd_eckd_start_analysis(block);
+	else
+		return dasd_eckd_end_analysis(block);
+}
+
+static int dasd_eckd_basic_to_ready(struct dasd_device *device)
+{
+	return dasd_alias_add_device(device);
+}
+
+static int dasd_eckd_online_to_ready(struct dasd_device *device)
+{
+	if (cancel_work_sync(&device->reload_device))
+		dasd_put_device(device);
+	if (cancel_work_sync(&device->kick_validate))
+		dasd_put_device(device);
+
+	return 0;
+}
+
+static int dasd_eckd_basic_to_known(struct dasd_device *device)
+{
+	return dasd_alias_remove_device(device);
+}
+
+static int
+dasd_eckd_fill_geometry(struct dasd_block *block, struct hd_geometry *geo)
+{
+	struct dasd_eckd_private *private = block->base->private;
+
+	if (dasd_check_blocksize(block->bp_block) == 0) {
+		geo->sectors = recs_per_track(&private->rdc_data,
+					      0, block->bp_block);
+	}
+	geo->cylinders = private->rdc_data.no_cyl;
+	geo->heads = private->rdc_data.trk_per_cyl;
+	return 0;
+}
+
+/*
+ * Build the TCW request for the format check
+ */
+static struct dasd_ccw_req *
+dasd_eckd_build_check_tcw(struct dasd_device *base, struct format_data_t *fdata,
+			  int enable_pav, struct eckd_count *fmt_buffer,
+			  int rpt)
+{
+	struct dasd_eckd_private *start_priv;
+	struct dasd_device *startdev = NULL;
+	struct tidaw *last_tidaw = NULL;
+	struct dasd_ccw_req *cqr;
+	struct itcw *itcw;
+	int itcw_size;
+	int count;
+	int rc;
+	int i;
+
+	if (enable_pav)
+		startdev = dasd_alias_get_start_dev(base);
+
+	if (!startdev)
+		startdev = base;
+
+	start_priv = startdev->private;
+
+	count = rpt * (fdata->stop_unit - fdata->start_unit + 1);
+
+	/*
+	 * we're adding 'count' tidaws to the itcw;
+	 * calculate the corresponding itcw_size
+	 */
+	itcw_size = itcw_calc_size(0, count, 0);
+
+	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 0, itcw_size, startdev,
+				   NULL);
+	if (IS_ERR(cqr))
+		return cqr;
+
+	start_priv->count++;
+
+	itcw = itcw_init(cqr->data, itcw_size, ITCW_OP_READ, 0, count, 0);
+	if (IS_ERR(itcw)) {
+		rc = -EINVAL;
+		goto out_err;
+	}
+
+	cqr->cpaddr = itcw_get_tcw(itcw);
+	rc = prepare_itcw(itcw, fdata->start_unit, fdata->stop_unit,
+			  DASD_ECKD_CCW_READ_COUNT_MT, base, startdev, 0, count,
+			  sizeof(struct eckd_count),
+			  count * sizeof(struct eckd_count), 0, rpt);
+	if (rc)
+		goto out_err;
+
+	for (i = 0; i < count; i++) {
+		last_tidaw = itcw_add_tidaw(itcw, 0, fmt_buffer++,
+					    sizeof(struct eckd_count));
+		if (IS_ERR(last_tidaw)) {
+			rc = -EINVAL;
+			goto out_err;
+		}
+	}
+
+	last_tidaw->flags |= TIDAW_FLAGS_LAST;
+	itcw_finalize(itcw);
+
+	cqr->cpmode = 1;
+	cqr->startdev = startdev;
+	cqr->memdev = startdev;
+	cqr->basedev = base;
+	cqr->retries = startdev->default_retries;
+	cqr->expires = startdev->default_expires * HZ;
+	cqr->buildclk = get_tod_clock();
+	cqr->status = DASD_CQR_FILLED;
+	/* Set flags to suppress output for expected errors */
+	set_bit(DASD_CQR_SUPPRESS_FP, &cqr->flags);
+	set_bit(DASD_CQR_SUPPRESS_IL, &cqr->flags);
+
+	return cqr;
+
+out_err:
+	dasd_sfree_request(cqr, startdev);
+
+	return ERR_PTR(rc);
+}
+
+/*
+ * Build the CCW request for the format check
+ */
+static struct dasd_ccw_req *
+dasd_eckd_build_check(struct dasd_device *base, struct format_data_t *fdata,
+		      int enable_pav, struct eckd_count *fmt_buffer, int rpt)
+{
+	struct dasd_eckd_private *start_priv;
+	struct dasd_eckd_private *base_priv;
+	struct dasd_device *startdev = NULL;
+	struct dasd_ccw_req *cqr;
+	struct ccw1 *ccw;
+	void *data;
+	int cplength, datasize;
+	int use_prefix;
+	int count;
+	int i;
+
+	if (enable_pav)
+		startdev = dasd_alias_get_start_dev(base);
+
+	if (!startdev)
+		startdev = base;
+
+	start_priv = startdev->private;
+	base_priv = base->private;
+
+	count = rpt * (fdata->stop_unit - fdata->start_unit + 1);
+
+	use_prefix = base_priv->features.feature[8] & 0x01;
+
+	if (use_prefix) {
+		cplength = 1;
+		datasize = sizeof(struct PFX_eckd_data);
+	} else {
+		cplength = 2;
+		datasize = sizeof(struct DE_eckd_data) +
+			sizeof(struct LO_eckd_data);
+	}
+	cplength += count;
+
+	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength, datasize,
+				   startdev, NULL);
+	if (IS_ERR(cqr))
+		return cqr;
+
+	start_priv->count++;
+	data = cqr->data;
+	ccw = cqr->cpaddr;
+
+	if (use_prefix) {
+		prefix_LRE(ccw++, data, fdata->start_unit, fdata->stop_unit,
+			   DASD_ECKD_CCW_READ_COUNT, base, startdev, 1, 0,
+			   count, 0, 0);
+	} else {
+		define_extent(ccw++, data, fdata->start_unit, fdata->stop_unit,
+			      DASD_ECKD_CCW_READ_COUNT, startdev, 0);
+
+		data += sizeof(struct DE_eckd_data);
+		ccw[-1].flags |= CCW_FLAG_CC;
+
+		locate_record(ccw++, data, fdata->start_unit, 0, count,
+			      DASD_ECKD_CCW_READ_COUNT, base, 0);
+	}
+
+	for (i = 0; i < count; i++) {
+		ccw[-1].flags |= CCW_FLAG_CC;
+		ccw->cmd_code = DASD_ECKD_CCW_READ_COUNT;
+		ccw->flags = CCW_FLAG_SLI;
+		ccw->count = 8;
+		ccw->cda = (__u32)(addr_t) fmt_buffer;
+		ccw++;
+		fmt_buffer++;
+	}
+
+	cqr->startdev = startdev;
+	cqr->memdev = startdev;
+	cqr->basedev = base;
+	cqr->retries = DASD_RETRIES;
+	cqr->expires = startdev->default_expires * HZ;
+	cqr->buildclk = get_tod_clock();
+	cqr->status = DASD_CQR_FILLED;
+	/* Set flags to suppress output for expected errors */
+	set_bit(DASD_CQR_SUPPRESS_NRF, &cqr->flags);
+
+	return cqr;
+}
+
+static struct dasd_ccw_req *
+dasd_eckd_build_format(struct dasd_device *base,
+		       struct format_data_t *fdata,
+		       int enable_pav)
+{
+	struct dasd_eckd_private *base_priv;
+	struct dasd_eckd_private *start_priv;
+	struct dasd_device *startdev = NULL;
+	struct dasd_ccw_req *fcp;
+	struct eckd_count *ect;
+	struct ch_t address;
+	struct ccw1 *ccw;
+	void *data;
+	int rpt;
+	int cplength, datasize;
+	int i, j;
+	int intensity = 0;
+	int r0_perm;
+	int nr_tracks;
+	int use_prefix;
+
+	if (enable_pav)
+		startdev = dasd_alias_get_start_dev(base);
+
+	if (!startdev)
+		startdev = base;
+
+	start_priv = startdev->private;
+	base_priv = base->private;
+
+	rpt = recs_per_track(&base_priv->rdc_data, 0, fdata->blksize);
+
+	nr_tracks = fdata->stop_unit - fdata->start_unit + 1;
+
+	/*
+	 * fdata->intensity is a bit string that tells us what to do:
+	 *   Bit 0: write record zero
+	 *   Bit 1: write home address, currently not supported
+	 *   Bit 2: invalidate tracks
+	 *   Bit 3: use OS/390 compatible disk layout (cdl)
+	 *   Bit 4: do not allow storage subsystem to modify record zero
+	 * Only some bit combinations make sense.
+	 */
+	if (fdata->intensity & 0x10) {
+		r0_perm = 0;
+		intensity = fdata->intensity & ~0x10;
+	} else {
+		r0_perm = 1;
+		intensity = fdata->intensity;
+	}
+
+	use_prefix = base_priv->features.feature[8] & 0x01;
+
+	switch (intensity) {
+	case 0x00:	/* Normal format */
+	case 0x08:	/* Normal format, use cdl. */
+		cplength = 2 + (rpt*nr_tracks);
+		if (use_prefix)
+			datasize = sizeof(struct PFX_eckd_data) +
+				sizeof(struct LO_eckd_data) +
+				rpt * nr_tracks * sizeof(struct eckd_count);
+		else
+			datasize = sizeof(struct DE_eckd_data) +
+				sizeof(struct LO_eckd_data) +
+				rpt * nr_tracks * sizeof(struct eckd_count);
+		break;
+	case 0x01:	/* Write record zero and format track. */
+	case 0x09:	/* Write record zero and format track, use cdl. */
+		cplength = 2 + rpt * nr_tracks;
+		if (use_prefix)
+			datasize = sizeof(struct PFX_eckd_data) +
+				sizeof(struct LO_eckd_data) +
+				sizeof(struct eckd_count) +
+				rpt * nr_tracks * sizeof(struct eckd_count);
+		else
+			datasize = sizeof(struct DE_eckd_data) +
+				sizeof(struct LO_eckd_data) +
+				sizeof(struct eckd_count) +
+				rpt * nr_tracks * sizeof(struct eckd_count);
+		break;
+	case 0x04:	/* Invalidate track. */
+	case 0x0c:	/* Invalidate track, use cdl. */
+		cplength = 3;
+		if (use_prefix)
+			datasize = sizeof(struct PFX_eckd_data) +
+				sizeof(struct LO_eckd_data) +
+				sizeof(struct eckd_count);
+		else
+			datasize = sizeof(struct DE_eckd_data) +
+				sizeof(struct LO_eckd_data) +
+				sizeof(struct eckd_count);
+		break;
+	default:
+		dev_warn(&startdev->cdev->dev,
+			 "An I/O control call used incorrect flags 0x%x\n",
+			 fdata->intensity);
+		return ERR_PTR(-EINVAL);
+	}
+	/* Allocate the format ccw request. */
+	fcp = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength,
+				   datasize, startdev, NULL);
+	if (IS_ERR(fcp))
+		return fcp;
+
+	start_priv->count++;
+	data = fcp->data;
+	ccw = fcp->cpaddr;
+
+	switch (intensity & ~0x08) {
+	case 0x00: /* Normal format. */
+		if (use_prefix) {
+			prefix(ccw++, (struct PFX_eckd_data *) data,
+			       fdata->start_unit, fdata->stop_unit,
+			       DASD_ECKD_CCW_WRITE_CKD, base, startdev);
+			/* grant subsystem permission to format R0 */
+			if (r0_perm)
+				((struct PFX_eckd_data *)data)
+					->define_extent.ga_extended |= 0x04;
+			data += sizeof(struct PFX_eckd_data);
+		} else {
+			define_extent(ccw++, (struct DE_eckd_data *) data,
+				      fdata->start_unit, fdata->stop_unit,
+				      DASD_ECKD_CCW_WRITE_CKD, startdev, 0);
+			/* grant subsystem permission to format R0 */
+			if (r0_perm)
+				((struct DE_eckd_data *) data)
+					->ga_extended |= 0x04;
+			data += sizeof(struct DE_eckd_data);
+		}
+		ccw[-1].flags |= CCW_FLAG_CC;
+		locate_record(ccw++, (struct LO_eckd_data *) data,
+			      fdata->start_unit, 0, rpt*nr_tracks,
+			      DASD_ECKD_CCW_WRITE_CKD, base,
+			      fdata->blksize);
+		data += sizeof(struct LO_eckd_data);
+		break;
+	case 0x01: /* Write record zero + format track. */
+		if (use_prefix) {
+			prefix(ccw++, (struct PFX_eckd_data *) data,
+			       fdata->start_unit, fdata->stop_unit,
+			       DASD_ECKD_CCW_WRITE_RECORD_ZERO,
+			       base, startdev);
+			data += sizeof(struct PFX_eckd_data);
+		} else {
+			define_extent(ccw++, (struct DE_eckd_data *) data,
+			       fdata->start_unit, fdata->stop_unit,
+			       DASD_ECKD_CCW_WRITE_RECORD_ZERO, startdev, 0);
+			data += sizeof(struct DE_eckd_data);
+		}
+		ccw[-1].flags |= CCW_FLAG_CC;
+		locate_record(ccw++, (struct LO_eckd_data *) data,
+			      fdata->start_unit, 0, rpt * nr_tracks + 1,
+			      DASD_ECKD_CCW_WRITE_RECORD_ZERO, base,
+			      base->block->bp_block);
+		data += sizeof(struct LO_eckd_data);
+		break;
+	case 0x04: /* Invalidate track. */
+		if (use_prefix) {
+			prefix(ccw++, (struct PFX_eckd_data *) data,
+			       fdata->start_unit, fdata->stop_unit,
+			       DASD_ECKD_CCW_WRITE_CKD, base, startdev);
+			data += sizeof(struct PFX_eckd_data);
+		} else {
+			define_extent(ccw++, (struct DE_eckd_data *) data,
+			       fdata->start_unit, fdata->stop_unit,
+			       DASD_ECKD_CCW_WRITE_CKD, startdev, 0);
+			data += sizeof(struct DE_eckd_data);
+		}
+		ccw[-1].flags |= CCW_FLAG_CC;
+		locate_record(ccw++, (struct LO_eckd_data *) data,
+			      fdata->start_unit, 0, 1,
+			      DASD_ECKD_CCW_WRITE_CKD, base, 8);
+		data += sizeof(struct LO_eckd_data);
+		break;
+	}
+
+	for (j = 0; j < nr_tracks; j++) {
+		/* calculate cylinder and head for the current track */
+		set_ch_t(&address,
+			 (fdata->start_unit + j) /
+			 base_priv->rdc_data.trk_per_cyl,
+			 (fdata->start_unit + j) %
+			 base_priv->rdc_data.trk_per_cyl);
+		if (intensity & 0x01) {	/* write record zero */
+			ect = (struct eckd_count *) data;
+			data += sizeof(struct eckd_count);
+			ect->cyl = address.cyl;
+			ect->head = address.head;
+			ect->record = 0;
+			ect->kl = 0;
+			ect->dl = 8;
+			ccw[-1].flags |= CCW_FLAG_CC;
+			ccw->cmd_code = DASD_ECKD_CCW_WRITE_RECORD_ZERO;
+			ccw->flags = CCW_FLAG_SLI;
+			ccw->count = 8;
+			ccw->cda = (__u32)(addr_t) ect;
+			ccw++;
+		}
+		if ((intensity & ~0x08) & 0x04) {	/* erase track */
+			ect = (struct eckd_count *) data;
+			data += sizeof(struct eckd_count);
+			ect->cyl = address.cyl;
+			ect->head = address.head;
+			ect->record = 1;
+			ect->kl = 0;
+			ect->dl = 0;
+			ccw[-1].flags |= CCW_FLAG_CC;
+			ccw->cmd_code = DASD_ECKD_CCW_WRITE_CKD;
+			ccw->flags = CCW_FLAG_SLI;
+			ccw->count = 8;
+			ccw->cda = (__u32)(addr_t) ect;
+		} else {		/* write remaining records */
+			for (i = 0; i < rpt; i++) {
+				ect = (struct eckd_count *) data;
+				data += sizeof(struct eckd_count);
+				ect->cyl = address.cyl;
+				ect->head = address.head;
+				ect->record = i + 1;
+				ect->kl = 0;
+				ect->dl = fdata->blksize;
+				/*
+				 * Check for special tracks 0-1
+				 * when formatting CDL
+				 */
+				if ((intensity & 0x08) &&
+				    address.cyl == 0 && address.head == 0) {
+					if (i < 3) {
+						ect->kl = 4;
+						ect->dl = sizes_trk0[i] - 4;
+					}
+				}
+				if ((intensity & 0x08) &&
+				    address.cyl == 0 && address.head == 1) {
+					ect->kl = 44;
+					ect->dl = LABEL_SIZE - 44;
+				}
+				ccw[-1].flags |= CCW_FLAG_CC;
+				if (i != 0 || j == 0)
+					ccw->cmd_code =
+						DASD_ECKD_CCW_WRITE_CKD;
+				else
+					ccw->cmd_code =
+						DASD_ECKD_CCW_WRITE_CKD_MT;
+				ccw->flags = CCW_FLAG_SLI;
+				ccw->count = 8;
+				ccw->cda = (__u32)(addr_t) ect;
+				ccw++;
+			}
+		}
+	}
+
+	fcp->startdev = startdev;
+	fcp->memdev = startdev;
+	fcp->basedev = base;
+	fcp->retries = 256;
+	fcp->expires = startdev->default_expires * HZ;
+	fcp->buildclk = get_tod_clock();
+	fcp->status = DASD_CQR_FILLED;
+
+	return fcp;
+}
+
+/*
+ * Wrapper function to build a CCW request depending on input data
+ */
+static struct dasd_ccw_req *
+dasd_eckd_format_build_ccw_req(struct dasd_device *base,
+			       struct format_data_t *fdata, int enable_pav,
+			       int tpm, struct eckd_count *fmt_buffer, int rpt)
+{
+	struct dasd_ccw_req *ccw_req;
+
+	if (!fmt_buffer) {
+		ccw_req = dasd_eckd_build_format(base, fdata, enable_pav);
+	} else {
+		if (tpm)
+			ccw_req = dasd_eckd_build_check_tcw(base, fdata,
+							    enable_pav,
+							    fmt_buffer, rpt);
+		else
+			ccw_req = dasd_eckd_build_check(base, fdata, enable_pav,
+							fmt_buffer, rpt);
+	}
+
+	return ccw_req;
+}
+
+/*
+ * Sanity checks on format_data
+ */
+static int dasd_eckd_format_sanity_checks(struct dasd_device *base,
+					  struct format_data_t *fdata)
+{
+	struct dasd_eckd_private *private = base->private;
+
+	if (fdata->start_unit >=
+	    (private->real_cyl * private->rdc_data.trk_per_cyl)) {
+		dev_warn(&base->cdev->dev,
+			 "Start track number %u used in formatting is too big\n",
+			 fdata->start_unit);
+		return -EINVAL;
+	}
+	if (fdata->stop_unit >=
+	    (private->real_cyl * private->rdc_data.trk_per_cyl)) {
+		dev_warn(&base->cdev->dev,
+			 "Stop track number %u used in formatting is too big\n",
+			 fdata->stop_unit);
+		return -EINVAL;
+	}
+	if (fdata->start_unit > fdata->stop_unit) {
+		dev_warn(&base->cdev->dev,
+			 "Start track %u used in formatting exceeds end track\n",
+			 fdata->start_unit);
+		return -EINVAL;
+	}
+	if (dasd_check_blocksize(fdata->blksize) != 0) {
+		dev_warn(&base->cdev->dev,
+			 "The DASD cannot be formatted with block size %u\n",
+			 fdata->blksize);
+		return -EINVAL;
+	}
+	return 0;
+}
+
+/*
+ * This function will process format_data originally coming from an IOCTL
+ */
+static int dasd_eckd_format_process_data(struct dasd_device *base,
+					 struct format_data_t *fdata,
+					 int enable_pav, int tpm,
+					 struct eckd_count *fmt_buffer, int rpt,
+					 struct irb *irb)
+{
+	struct dasd_eckd_private *private = base->private;
+	struct dasd_ccw_req *cqr, *n;
+	struct list_head format_queue;
+	struct dasd_device *device;
+	char *sense = NULL;
+	int old_start, old_stop, format_step;
+	int step, retry;
+	int rc;
+
+	rc = dasd_eckd_format_sanity_checks(base, fdata);
+	if (rc)
+		return rc;
+
+	INIT_LIST_HEAD(&format_queue);
+
+	old_start = fdata->start_unit;
+	old_stop = fdata->stop_unit;
+
+	if (!tpm && fmt_buffer != NULL) {
+		/* Command Mode / Format Check */
+		format_step = 1;
+	} else if (tpm && fmt_buffer != NULL) {
+		/* Transport Mode / Format Check */
+		format_step = DASD_CQR_MAX_CCW / rpt;
+	} else {
+		/* Normal Formatting */
+		format_step = DASD_CQR_MAX_CCW /
+			recs_per_track(&private->rdc_data, 0, fdata->blksize);
+	}
+
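+	/*
+	 * Split the track range into chunks of at most format_step tracks,
+	 * queue one request per chunk and wait for the whole queue. If an
+	 * allocation fails along the way, process what has been queued so
+	 * far and retry the remainder afterwards.
+	 */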
+	do {
+		retry = 0;
+		while (fdata->start_unit <= old_stop) {
+			step = fdata->stop_unit - fdata->start_unit + 1;
+			if (step > format_step) {
+				fdata->stop_unit =
+					fdata->start_unit + format_step - 1;
+			}
+
+			cqr = dasd_eckd_format_build_ccw_req(base, fdata,
+							     enable_pav, tpm,
+							     fmt_buffer, rpt);
+			if (IS_ERR(cqr)) {
+				rc = PTR_ERR(cqr);
+				if (rc == -ENOMEM) {
+					if (list_empty(&format_queue))
+						goto out;
+					/*
+					 * not enough memory available; start
+					 * the queued requests and retry the
+					 * remainder once they have finished
+					 */
+					retry = 1;
+					break;
+				}
+				goto out_err;
+			}
+			list_add_tail(&cqr->blocklist, &format_queue);
+
+			if (fmt_buffer) {
+				step = fdata->stop_unit - fdata->start_unit + 1;
+				fmt_buffer += rpt * step;
+			}
+			fdata->start_unit = fdata->stop_unit + 1;
+			fdata->stop_unit = old_stop;
+		}
+
+		rc = dasd_sleep_on_queue(&format_queue);
+
+out_err:
+		list_for_each_entry_safe(cqr, n, &format_queue, blocklist) {
+			device = cqr->startdev;
+			private = device->private;
+
+			if (cqr->status == DASD_CQR_FAILED) {
+				/*
+				 * Only get sense data if called by format
+				 * check
+				 */
+				if (fmt_buffer && irb) {
+					sense = dasd_get_sense(&cqr->irb);
+					memcpy(irb, &cqr->irb, sizeof(*irb));
+				}
+				rc = -EIO;
+			}
+			list_del_init(&cqr->blocklist);
+			dasd_sfree_request(cqr, device);
+			private->count--;
+		}
+
+		if (rc && rc != -EIO)
+			goto out;
+		if (rc == -EIO) {
+			/*
+			 * In case fewer than the expected records are on the
+			 * track, we will most likely get a 'No Record Found'
+			 * error (in command mode) or a 'File Protected' error
+			 * (in transport mode). Those particular cases shouldn't
+			 * pass the -EIO to the IOCTL, therefore reset the rc
+			 * and continue.
+			 */
+			if (sense &&
+			    (sense[1] & SNS1_NO_REC_FOUND ||
+			     sense[1] & SNS1_FILE_PROTECTED))
+				retry = 1;
+			else
+				goto out;
+		}
+
+	} while (retry);
+
+out:
+	fdata->start_unit = old_start;
+	fdata->stop_unit = old_stop;
+
+	return rc;
+}
+
+static int dasd_eckd_format_device(struct dasd_device *base,
+				   struct format_data_t *fdata, int enable_pav)
+{
+	return dasd_eckd_format_process_data(base, fdata, enable_pav, 0, NULL,
+					     0, NULL);
+}
+
+/*
+ * Helper function to count consecutive records of a single track.
+ */
+static int dasd_eckd_count_records(struct eckd_count *fmt_buffer, int start,
+				   int max)
+{
+	int head;
+	int i;
+
+	head = fmt_buffer[start].head;
+
+	/*
+	 * There are 3 conditions where we stop counting:
+	 * - if data reoccurs (same head and record may reoccur), which may
+	 *   happen due to the way DASD_ECKD_CCW_READ_COUNT works
+	 * - when the head changes, because we're iterating over several tracks
+	 *   then (DASD_ECKD_CCW_READ_COUNT_MT)
+	 * - when we've reached the end of sensible data in the buffer (the
+	 *   record will be 0 then)
+	 */
+	for (i = start; i < max; i++) {
+		if (i > start) {
+			if ((fmt_buffer[i].head == head &&
+			    fmt_buffer[i].record == 1) ||
+			    fmt_buffer[i].head != head ||
+			    fmt_buffer[i].record == 0)
+				break;
+		}
+	}
+
+	return i - start;
+}
+
+/*
+ * Evaluate a given range of tracks. Data like number of records, blocksize,
+ * record ids, and key length are compared with expected data.
+ *
+ * If a mismatch occurs, the corresponding error bit is set, as well as
+ * additional information, depending on the error.
+ */
+static void dasd_eckd_format_evaluate_tracks(struct eckd_count *fmt_buffer,
+					     struct format_check_t *cdata,
+					     int rpt_max, int rpt_exp,
+					     int trk_per_cyl, int tpm)
+{
+	struct ch_t geo;
+	int max_entries;
+	int count = 0;
+	int trkcount;
+	int blksize;
+	int pos = 0;
+	int i, j;
+	int kl;
+
+	trkcount = cdata->expect.stop_unit - cdata->expect.start_unit + 1;
+	max_entries = trkcount * rpt_max;
+
+	for (i = cdata->expect.start_unit; i <= cdata->expect.stop_unit; i++) {
+		/* Calculate the correct next starting position in the buffer */
+		if (tpm) {
+			while (fmt_buffer[pos].record == 0 &&
+			       fmt_buffer[pos].dl == 0) {
+				if (pos++ > max_entries)
+					break;
+			}
+		} else {
+			if (i != cdata->expect.start_unit)
+				pos += rpt_max - count;
+		}
+
+		/* Calculate the expected geo values for the current track */
+		set_ch_t(&geo, i / trk_per_cyl, i % trk_per_cyl);
+
+		/* Count and check number of records */
+		count = dasd_eckd_count_records(fmt_buffer, pos, pos + rpt_max);
+
+		if (count < rpt_exp) {
+			cdata->result = DASD_FMT_ERR_TOO_FEW_RECORDS;
+			break;
+		}
+		if (count > rpt_exp) {
+			cdata->result = DASD_FMT_ERR_TOO_MANY_RECORDS;
+			break;
+		}
+
+		for (j = 0; j < count; j++, pos++) {
+			blksize = cdata->expect.blksize;
+			kl = 0;
+
+			/*
+			 * Set special values when checking CDL formatted
+			 * devices.
+			 */
+			if ((cdata->expect.intensity & 0x08) &&
+			    geo.cyl == 0 && geo.head == 0) {
+				if (j < 3) {
+					blksize = sizes_trk0[j] - 4;
+					kl = 4;
+				}
+			}
+			if ((cdata->expect.intensity & 0x08) &&
+			    geo.cyl == 0 && geo.head == 1) {
+				blksize = LABEL_SIZE - 44;
+				kl = 44;
+			}
+
+			/* Check blocksize */
+			if (fmt_buffer[pos].dl != blksize) {
+				cdata->result = DASD_FMT_ERR_BLKSIZE;
+				goto out;
+			}
+			/* Check if the key length matches the expected value */
+			if (fmt_buffer[pos].kl != kl) {
+				cdata->result = DASD_FMT_ERR_KEY_LENGTH;
+				goto out;
+			}
+			/* Check if record_id is correct */
+			if (fmt_buffer[pos].cyl != geo.cyl ||
+			    fmt_buffer[pos].head != geo.head ||
+			    fmt_buffer[pos].record != (j + 1)) {
+				cdata->result = DASD_FMT_ERR_RECORD_ID;
+				goto out;
+			}
+		}
+	}
+
+out:
+	/*
+	 * In case of no errors, we need to decrease by one
+	 * to get the correct positions.
+	 */
+	if (!cdata->result) {
+		i--;
+		pos--;
+	}
+
+	cdata->unit = i;
+	cdata->num_records = count;
+	cdata->rec = fmt_buffer[pos].record;
+	cdata->blksize = fmt_buffer[pos].dl;
+	cdata->key_length = fmt_buffer[pos].kl;
+}
+
+/*
+ * Check the format of a range of tracks of a DASD.
+ */
+static int dasd_eckd_check_device_format(struct dasd_device *base,
+					 struct format_check_t *cdata,
+					 int enable_pav)
+{
+	struct dasd_eckd_private *private = base->private;
+	struct eckd_count *fmt_buffer;
+	struct irb irb;
+	int rpt_max, rpt_exp;
+	int fmt_buffer_size;
+	int trk_per_cyl;
+	int trkcount;
+	int tpm = 0;
+	int rc;
+
+	trk_per_cyl = private->rdc_data.trk_per_cyl;
+
+	/* Get maximum and expected amount of records per track */
+	rpt_max = recs_per_track(&private->rdc_data, 0, 512) + 1;
+	rpt_exp = recs_per_track(&private->rdc_data, 0, cdata->expect.blksize);
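+	/*
+	 * rpt_max is based on the smallest supported block size (512),
+	 * plus one extra entry per track so that a track containing more
+	 * records than expected can still be detected.
+	 */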
+
+	trkcount = cdata->expect.stop_unit - cdata->expect.start_unit + 1;
+	fmt_buffer_size = trkcount * rpt_max * sizeof(struct eckd_count);
+
+	fmt_buffer = kzalloc(fmt_buffer_size, GFP_KERNEL | GFP_DMA);
+	if (!fmt_buffer)
+		return -ENOMEM;
+
+	/*
+	 * A certain FICON feature subset is needed to operate in transport
+	 * mode. Additionally, the support for transport mode is implicitly
+	 * checked by comparing the buffer size with fcx_max_data. As long as
+	 * the buffer size is smaller we can operate in transport mode and
+	 * process multiple tracks. If not, only one track at a time is
+	 * processed using command mode.
+	 */
+	if ((private->features.feature[40] & 0x04) &&
+	    fmt_buffer_size <= private->fcx_max_data)
+		tpm = 1;
+
+	rc = dasd_eckd_format_process_data(base, &cdata->expect, enable_pav,
+					   tpm, fmt_buffer, rpt_max, &irb);
+	if (rc && rc != -EIO)
+		goto out;
+	if (rc == -EIO) {
+		/*
+		 * If our first attempt with transport mode enabled comes back
+		 * with an incorrect length error, we're going to retry the
+		 * check with command mode.
+		 */
+		if (tpm && scsw_cstat(&irb.scsw) == 0x40) {
+			tpm = 0;
+			rc = dasd_eckd_format_process_data(base, &cdata->expect,
+							   enable_pav, tpm,
+							   fmt_buffer, rpt_max,
+							   &irb);
+			if (rc)
+				goto out;
+		} else {
+			goto out;
+		}
+	}
+
+	dasd_eckd_format_evaluate_tracks(fmt_buffer, cdata, rpt_max, rpt_exp,
+					 trk_per_cyl, tpm);
+
+out:
+	kfree(fmt_buffer);
+
+	return rc;
+}
+
+static void dasd_eckd_handle_terminated_request(struct dasd_ccw_req *cqr)
+{
+	if (cqr->retries < 0) {
+		cqr->status = DASD_CQR_FAILED;
+		return;
+	}
+	cqr->status = DASD_CQR_FILLED;
+	if (cqr->block && (cqr->startdev != cqr->block->base)) {
+		dasd_eckd_reset_ccw_to_base_io(cqr);
+		cqr->startdev = cqr->block->base;
+		cqr->lpm = dasd_path_get_opm(cqr->block->base);
+	}
+}
+
+static dasd_erp_fn_t
+dasd_eckd_erp_action(struct dasd_ccw_req *cqr)
+{
+	struct dasd_device *device = (struct dasd_device *) cqr->startdev;
+	struct ccw_device *cdev = device->cdev;
+
+	switch (cdev->id.cu_type) {
+	case 0x3990:
+	case 0x2105:
+	case 0x2107:
+	case 0x1750:
+		return dasd_3990_erp_action;
+	case 0x9343:
+	case 0x3880:
+	default:
+		return dasd_default_erp_action;
+	}
+}
+
+static dasd_erp_fn_t
+dasd_eckd_erp_postaction(struct dasd_ccw_req *cqr)
+{
+	return dasd_default_erp_postaction;
+}
+
+static void dasd_eckd_check_for_device_change(struct dasd_device *device,
+					      struct dasd_ccw_req *cqr,
+					      struct irb *irb)
+{
+	char mask;
+	char *sense = NULL;
+	struct dasd_eckd_private *private = device->private;
+
+	/* first of all check for state change pending interrupt */
+	mask = DEV_STAT_ATTENTION | DEV_STAT_DEV_END | DEV_STAT_UNIT_EXCEP;
+	if ((scsw_dstat(&irb->scsw) & mask) == mask) {
+		/*
+		 * for alias only, not in offline processing
+		 * and only if not suspended
+		 */
+		if (!device->block && private->lcu &&
+		    device->state == DASD_STATE_ONLINE &&
+		    !test_bit(DASD_FLAG_OFFLINE, &device->flags) &&
+		    !test_bit(DASD_FLAG_SUSPENDED, &device->flags)) {
+			/* schedule worker to reload device */
+			dasd_reload_device(device);
+		}
+		dasd_generic_handle_state_change(device);
+		return;
+	}
+
+	sense = dasd_get_sense(irb);
+	if (!sense)
+		return;
+
+	/* summary unit check */
+	if ((sense[27] & DASD_SENSE_BIT_0) && (sense[7] == 0x0D) &&
+	    (scsw_dstat(&irb->scsw) & DEV_STAT_UNIT_CHECK)) {
+		if (test_and_set_bit(DASD_FLAG_SUC, &device->flags)) {
+			DBF_DEV_EVENT(DBF_WARNING, device, "%s",
+				      "eckd suc: device already notified");
+			return;
+		}
+		sense = dasd_get_sense(irb);
+		if (!sense) {
+			DBF_DEV_EVENT(DBF_WARNING, device, "%s",
+				      "eckd suc: no reason code available");
+			clear_bit(DASD_FLAG_SUC, &device->flags);
+			return;
+		}
+		private->suc_reason = sense[8];
+		DBF_DEV_EVENT(DBF_NOTICE, device, "%s %x",
+			      "eckd handle summary unit check: reason",
+			      private->suc_reason);
+		dasd_get_device(device);
+		if (!schedule_work(&device->suc_work))
+			dasd_put_device(device);
+
+		return;
+	}
+
+	/* service information message SIM */
+	if (!cqr && !(sense[27] & DASD_SENSE_BIT_0) &&
+	    ((sense[6] & DASD_SIM_SENSE) == DASD_SIM_SENSE)) {
+		dasd_3990_erp_handle_sim(device, sense);
+		return;
+	}
+
+	/* loss of device reservation is handled via base devices only
+	 * as alias devices may be used with several bases
+	 */
+	if (device->block && (sense[27] & DASD_SENSE_BIT_0) &&
+	    (sense[7] == 0x3F) &&
+	    (scsw_dstat(&irb->scsw) & DEV_STAT_UNIT_CHECK) &&
+	    test_bit(DASD_FLAG_IS_RESERVED, &device->flags)) {
+		if (device->features & DASD_FEATURE_FAILONSLCK)
+			set_bit(DASD_FLAG_LOCK_STOLEN, &device->flags);
+		clear_bit(DASD_FLAG_IS_RESERVED, &device->flags);
+		dev_err(&device->cdev->dev,
+			"The device reservation was lost\n");
+	}
+}
+
+static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_single(
+					       struct dasd_device *startdev,
+					       struct dasd_block *block,
+					       struct request *req,
+					       sector_t first_rec,
+					       sector_t last_rec,
+					       sector_t first_trk,
+					       sector_t last_trk,
+					       unsigned int first_offs,
+					       unsigned int last_offs,
+					       unsigned int blk_per_trk,
+					       unsigned int blksize)
+{
+	struct dasd_eckd_private *private;
+	unsigned long *idaws;
+	struct LO_eckd_data *LO_data;
+	struct dasd_ccw_req *cqr;
+	struct ccw1 *ccw;
+	struct req_iterator iter;
+	struct bio_vec bv;
+	char *dst;
+	unsigned int off;
+	int count, cidaw, cplength, datasize;
+	sector_t recid;
+	unsigned char cmd, rcmd;
+	int use_prefix;
+	struct dasd_device *basedev;
+
+	basedev = block->base;
+	private = basedev->private;
+	if (rq_data_dir(req) == READ)
+		cmd = DASD_ECKD_CCW_READ_MT;
+	else if (rq_data_dir(req) == WRITE)
+		cmd = DASD_ECKD_CCW_WRITE_MT;
+	else
+		return ERR_PTR(-EINVAL);
+
+	/* Check struct bio and count the number of blocks for the request. */
+	count = 0;
+	cidaw = 0;
+	rq_for_each_segment(bv, req, iter) {
+		if (bv.bv_len & (blksize - 1))
+			/* Eckd can only do full blocks. */
+			return ERR_PTR(-EINVAL);
+		count += bv.bv_len >> (block->s2b_shift + 9);
+		if (idal_is_needed(page_address(bv.bv_page), bv.bv_len))
+			cidaw += bv.bv_len >> (block->s2b_shift + 9);
+	}
+	/* Paranoia. */
+	if (count != last_rec - first_rec + 1)
+		return ERR_PTR(-EINVAL);
+
+	/* use the prefix command if available */
+	use_prefix = private->features.feature[8] & 0x01;
+	if (use_prefix) {
+		/* 1x prefix + number of blocks */
+		cplength = 2 + count;
+		/* 1x prefix + cidaws*sizeof(long) */
+		datasize = sizeof(struct PFX_eckd_data) +
+			sizeof(struct LO_eckd_data) +
+			cidaw * sizeof(unsigned long);
+	} else {
+		/* 1x define extent + 1x locate record + number of blocks */
+		cplength = 2 + count;
+		/* 1x define extent + 1x locate record + cidaws*sizeof(long) */
+		datasize = sizeof(struct DE_eckd_data) +
+			sizeof(struct LO_eckd_data) +
+			cidaw * sizeof(unsigned long);
+	}
+	/* Find out the number of additional locate record ccws for cdl. */
+	if (private->uses_cdl && first_rec < 2*blk_per_trk) {
+		if (last_rec >= 2*blk_per_trk)
+			count = 2*blk_per_trk - first_rec;
+		cplength += count;
+		datasize += count*sizeof(struct LO_eckd_data);
+	}
+	/* Allocate the ccw request. */
+	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength, datasize,
+				   startdev, blk_mq_rq_to_pdu(req));
+	if (IS_ERR(cqr))
+		return cqr;
+	ccw = cqr->cpaddr;
+	/* First ccw is define extent or prefix. */
+	if (use_prefix) {
+		if (prefix(ccw++, cqr->data, first_trk,
+			   last_trk, cmd, basedev, startdev) == -EAGAIN) {
+			/* Clock not in sync and XRC is enabled.
+			 * Try again later.
+			 */
+			dasd_sfree_request(cqr, startdev);
+			return ERR_PTR(-EAGAIN);
+		}
+		idaws = (unsigned long *) (cqr->data +
+					   sizeof(struct PFX_eckd_data));
+	} else {
+		if (define_extent(ccw++, cqr->data, first_trk,
+				  last_trk, cmd, basedev, 0) == -EAGAIN) {
+			/* Clock not in sync and XRC is enabled.
+			 * Try again later.
+			 */
+			dasd_sfree_request(cqr, startdev);
+			return ERR_PTR(-EAGAIN);
+		}
+		idaws = (unsigned long *) (cqr->data +
+					   sizeof(struct DE_eckd_data));
+	}
+	/* Build locate_record + read/write CCWs. */
+	LO_data = (struct LO_eckd_data *) (idaws + cidaw);
+	recid = first_rec;
+	if (private->uses_cdl == 0 || recid > 2*blk_per_trk) {
+		/* Only standard blocks so there is just one locate record. */
+		ccw[-1].flags |= CCW_FLAG_CC;
+		locate_record(ccw++, LO_data++, first_trk, first_offs + 1,
+			      last_rec - recid + 1, cmd, basedev, blksize);
+	}
+	rq_for_each_segment(bv, req, iter) {
+		dst = page_address(bv.bv_page) + bv.bv_offset;
+		if (dasd_page_cache) {
+			char *copy = kmem_cache_alloc(dasd_page_cache,
+						      GFP_DMA | __GFP_NOWARN);
+			if (copy && rq_data_dir(req) == WRITE)
+				memcpy(copy + bv.bv_offset, dst, bv.bv_len);
+			if (copy)
+				dst = copy + bv.bv_offset;
+		}
+		for (off = 0; off < bv.bv_len; off += blksize) {
+			sector_t trkid = recid;
+			unsigned int recoffs = sector_div(trkid, blk_per_trk);
+			rcmd = cmd;
+			count = blksize;
+			/* Locate record for cdl special block ? */
+			if (private->uses_cdl && recid < 2*blk_per_trk) {
+				if (dasd_eckd_cdl_special(blk_per_trk, recid)) {
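+					/*
+					 * ORing in 0x8 selects the key-and-
+					 * data (KD) variant of the read/write
+					 * opcode, needed for the CDL records
+					 * that carry a non-zero key length.
+					 */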
+					rcmd |= 0x8;
+					count = dasd_eckd_cdl_reclen(recid);
+					if (count < blksize &&
+					    rq_data_dir(req) == READ)
+						memset(dst + count, 0xe5,
+						       blksize - count);
+				}
+				ccw[-1].flags |= CCW_FLAG_CC;
+				locate_record(ccw++, LO_data++,
+					      trkid, recoffs + 1,
+					      1, rcmd, basedev, count);
+			}
+			/* Locate record for standard blocks ? */
+			if (private->uses_cdl && recid == 2*blk_per_trk) {
+				ccw[-1].flags |= CCW_FLAG_CC;
+				locate_record(ccw++, LO_data++,
+					      trkid, recoffs + 1,
+					      last_rec - recid + 1,
+					      cmd, basedev, count);
+			}
+			/* Read/write ccw. */
+			ccw[-1].flags |= CCW_FLAG_CC;
+			ccw->cmd_code = rcmd;
+			ccw->count = count;
+			if (idal_is_needed(dst, blksize)) {
+				ccw->cda = (__u32)(addr_t) idaws;
+				ccw->flags = CCW_FLAG_IDA;
+				idaws = idal_create_words(idaws, dst, blksize);
+			} else {
+				ccw->cda = (__u32)(addr_t) dst;
+				ccw->flags = 0;
+			}
+			ccw++;
+			dst += blksize;
+			recid++;
+		}
+	}
+	if (blk_noretry_request(req) ||
+	    block->base->features & DASD_FEATURE_FAILFAST)
+		set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
+	cqr->startdev = startdev;
+	cqr->memdev = startdev;
+	cqr->block = block;
+	cqr->expires = startdev->default_expires * HZ;	/* default 5 minutes */
+	cqr->lpm = dasd_path_get_ppm(startdev);
+	cqr->retries = startdev->default_retries;
+	cqr->buildclk = get_tod_clock();
+	cqr->status = DASD_CQR_FILLED;
+	return cqr;
+}
+
+static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_track(
+					       struct dasd_device *startdev,
+					       struct dasd_block *block,
+					       struct request *req,
+					       sector_t first_rec,
+					       sector_t last_rec,
+					       sector_t first_trk,
+					       sector_t last_trk,
+					       unsigned int first_offs,
+					       unsigned int last_offs,
+					       unsigned int blk_per_trk,
+					       unsigned int blksize)
+{
+	unsigned long *idaws;
+	struct dasd_ccw_req *cqr;
+	struct ccw1 *ccw;
+	struct req_iterator iter;
+	struct bio_vec bv;
+	char *dst, *idaw_dst;
+	unsigned int cidaw, cplength, datasize;
+	unsigned int tlf;
+	sector_t recid;
+	unsigned char cmd;
+	struct dasd_device *basedev;
+	unsigned int trkcount, count, count_to_trk_end;
+	unsigned int idaw_len, seg_len, part_len, len_to_track_end;
+	unsigned char new_track, end_idaw;
+	sector_t trkid;
+	unsigned int recoffs;
+
+	basedev = block->base;
+	if (rq_data_dir(req) == READ)
+		cmd = DASD_ECKD_CCW_READ_TRACK_DATA;
+	else if (rq_data_dir(req) == WRITE)
+		cmd = DASD_ECKD_CCW_WRITE_TRACK_DATA;
+	else
+		return ERR_PTR(-EINVAL);
+
+	/* Track based I/O needs IDAWs for each page, and not just for
+	 * 64 bit addresses. We need additional idaws for pages
+	 * that get filled from two tracks, so we use the number
+	 * of records as an upper limit.
+	 */
+	cidaw = last_rec - first_rec + 1;
+	trkcount = last_trk - first_trk + 1;
+
+	/* 1x prefix + one read/write ccw per track */
+	cplength = 1 + trkcount;
+
+	datasize = sizeof(struct PFX_eckd_data) + cidaw * sizeof(unsigned long);
+
+	/* Allocate the ccw request. */
+	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength, datasize,
+				   startdev, blk_mq_rq_to_pdu(req));
+	if (IS_ERR(cqr))
+		return cqr;
+	ccw = cqr->cpaddr;
+	/* transfer length factor: how many bytes to read from the last track */
+	if (first_trk == last_trk)
+		tlf = last_offs - first_offs + 1;
+	else
+		tlf = last_offs + 1;
+	tlf *= blksize;
+
+	if (prefix_LRE(ccw++, cqr->data, first_trk,
+		       last_trk, cmd, basedev, startdev,
+		       1 /* format */, first_offs + 1,
+		       trkcount, blksize,
+		       tlf) == -EAGAIN) {
+		/* Clock not in sync and XRC is enabled.
+		 * Try again later.
+		 */
+		dasd_sfree_request(cqr, startdev);
+		return ERR_PTR(-EAGAIN);
+	}
+
+	/*
+	 * The translation of request into ccw programs must meet the
+	 * following conditions:
+	 * - all idaws but the first and the last must address full pages
+	 *   (or 2K blocks on 31-bit)
+	 * - the scope of a ccw and its idal ends with the track boundaries
+	 */
+	idaws = (unsigned long *) (cqr->data + sizeof(struct PFX_eckd_data));
+	recid = first_rec;
+	new_track = 1;
+	end_idaw = 0;
+	len_to_track_end = 0;
+	idaw_dst = NULL;
+	idaw_len = 0;
+	rq_for_each_segment(bv, req, iter) {
+		dst = page_address(bv.bv_page) + bv.bv_offset;
+		seg_len = bv.bv_len;
+		while (seg_len) {
+			if (new_track) {
+				trkid = recid;
+				recoffs = sector_div(trkid, blk_per_trk);
+				count_to_trk_end = blk_per_trk - recoffs;
+				count = min((last_rec - recid + 1),
+					    (sector_t)count_to_trk_end);
+				len_to_track_end = count * blksize;
+				ccw[-1].flags |= CCW_FLAG_CC;
+				ccw->cmd_code = cmd;
+				ccw->count = len_to_track_end;
+				ccw->cda = (__u32)(addr_t)idaws;
+				ccw->flags = CCW_FLAG_IDA;
+				ccw++;
+				recid += count;
+				new_track = 0;
+				/* first idaw for a ccw may start anywhere */
+				if (!idaw_dst)
+					idaw_dst = dst;
+			}
+			/* If we start a new idaw, we must make sure that it
+			 * starts on an IDA_BLOCK_SIZE boundary.
+			 * If we continue an idaw, we must make sure that the
+			 * current segment begins where the so far accumulated
+			 * idaw ends
+			 */
+			if (!idaw_dst) {
+				if (__pa(dst) & (IDA_BLOCK_SIZE-1)) {
+					dasd_sfree_request(cqr, startdev);
+					return ERR_PTR(-ERANGE);
+				} else
+					idaw_dst = dst;
+			}
+			if ((idaw_dst + idaw_len) != dst) {
+				dasd_sfree_request(cqr, startdev);
+				return ERR_PTR(-ERANGE);
+			}
+			part_len = min(seg_len, len_to_track_end);
+			seg_len -= part_len;
+			dst += part_len;
+			idaw_len += part_len;
+			len_to_track_end -= part_len;
+			/* collected memory area ends on an IDA_BLOCK border,
+			 * -> create an idaw
+			 * idal_create_words will handle cases where idaw_len
+			 * is larger than IDA_BLOCK_SIZE
+			 */
+			if (!(__pa(idaw_dst + idaw_len) & (IDA_BLOCK_SIZE-1)))
+				end_idaw = 1;
+			/* We also need to end the idaw at track end */
+			if (!len_to_track_end) {
+				new_track = 1;
+				end_idaw = 1;
+			}
+			if (end_idaw) {
+				idaws = idal_create_words(idaws, idaw_dst,
+							  idaw_len);
+				idaw_dst = NULL;
+				idaw_len = 0;
+				end_idaw = 0;
+			}
+		}
+	}
+
+	if (blk_noretry_request(req) ||
+	    block->base->features & DASD_FEATURE_FAILFAST)
+		set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
+	cqr->startdev = startdev;
+	cqr->memdev = startdev;
+	cqr->block = block;
+	cqr->expires = startdev->default_expires * HZ;	/* default 5 minutes */
+	cqr->lpm = dasd_path_get_ppm(startdev);
+	cqr->retries = startdev->default_retries;
+	cqr->buildclk = get_tod_clock();
+	cqr->status = DASD_CQR_FILLED;
+	return cqr;
+}
+
+static int prepare_itcw(struct itcw *itcw,
+			unsigned int trk, unsigned int totrk, int cmd,
+			struct dasd_device *basedev,
+			struct dasd_device *startdev,
+			unsigned int rec_on_trk, int count,
+			unsigned int blksize,
+			unsigned int total_data_size,
+			unsigned int tlf,
+			unsigned int blk_per_trk)
+{
+	struct PFX_eckd_data pfxdata;
+	struct dasd_eckd_private *basepriv, *startpriv;
+	struct DE_eckd_data *dedata;
+	struct LRE_eckd_data *lredata;
+	struct dcw *dcw;
+
+	u32 begcyl, endcyl;
+	u16 heads, beghead, endhead;
+	u8 pfx_cmd;
+
+	int rc = 0;
+	int sector = 0;
+	int dn, d;
+
+	/* setup prefix data */
+	basepriv = basedev->private;
+	startpriv = startdev->private;
+	dedata = &pfxdata.define_extent;
+	lredata = &pfxdata.locate_record;
+
+	memset(&pfxdata, 0, sizeof(pfxdata));
+	pfxdata.format = 1; /* PFX with LRE */
+	pfxdata.base_address = basepriv->ned->unit_addr;
+	pfxdata.base_lss = basepriv->ned->ID;
+	pfxdata.validity.define_extent = 1;
+
+	/* private uid is kept up to date, conf_data may be outdated */
+	if (startpriv->uid.type == UA_BASE_PAV_ALIAS)
+		pfxdata.validity.verify_base = 1;
+
+	if (startpriv->uid.type == UA_HYPER_PAV_ALIAS) {
+		pfxdata.validity.verify_base = 1;
+		pfxdata.validity.hyper_pav = 1;
+	}
+
+	switch (cmd) {
+	case DASD_ECKD_CCW_READ_TRACK_DATA:
+		dedata->mask.perm = 0x1;
+		dedata->attributes.operation = basepriv->attrib.operation;
+		dedata->blk_size = blksize;
+		dedata->ga_extended |= 0x42;
+		lredata->operation.orientation = 0x0;
+		lredata->operation.operation = 0x0C;
+		lredata->auxiliary.check_bytes = 0x01;
+		pfx_cmd = DASD_ECKD_CCW_PFX_READ;
+		break;
+	case DASD_ECKD_CCW_WRITE_TRACK_DATA:
+		dedata->mask.perm = 0x02;
+		dedata->attributes.operation = basepriv->attrib.operation;
+		dedata->blk_size = blksize;
+		rc = set_timestamp(NULL, dedata, basedev);
+		dedata->ga_extended |= 0x42;
+		lredata->operation.orientation = 0x0;
+		lredata->operation.operation = 0x3F;
+		lredata->extended_operation = 0x23;
+		lredata->auxiliary.check_bytes = 0x2;
+		/*
+		 * If XRC is supported the System Time Stamp is set. The
+		 * validity of the time stamp must be reflected in the prefix
+		 * data as well.
+		 */
+		if (dedata->ga_extended & 0x08 && dedata->ga_extended & 0x02)
+			pfxdata.validity.time_stamp = 1; /* 'Time Stamp Valid' */
+		pfx_cmd = DASD_ECKD_CCW_PFX;
+		break;
+	case DASD_ECKD_CCW_READ_COUNT_MT:
+		dedata->mask.perm = 0x1;
+		dedata->attributes.operation = DASD_BYPASS_CACHE;
+		dedata->ga_extended |= 0x42;
+		dedata->blk_size = blksize;
+		lredata->operation.orientation = 0x2;
+		lredata->operation.operation = 0x16;
+		lredata->auxiliary.check_bytes = 0x01;
+		pfx_cmd = DASD_ECKD_CCW_PFX_READ;
+		break;
+	default:
+		DBF_DEV_EVENT(DBF_ERR, basedev,
+			      "prepare itcw, unknown opcode 0x%x", cmd);
+		BUG();
+		break;
+	}
+	if (rc)
+		return rc;
+
+	dedata->attributes.mode = 0x3;	/* ECKD */
+
+	heads = basepriv->rdc_data.trk_per_cyl;
+	begcyl = trk / heads;
+	beghead = trk % heads;
+	endcyl = totrk / heads;
+	endhead = totrk % heads;
+
+	/* check for sequential prestage - enhance cylinder range */
+	if (dedata->attributes.operation == DASD_SEQ_PRESTAGE ||
+	    dedata->attributes.operation == DASD_SEQ_ACCESS) {
+
+		if (endcyl + basepriv->attrib.nr_cyl < basepriv->real_cyl)
+			endcyl += basepriv->attrib.nr_cyl;
+		else
+			endcyl = (basepriv->real_cyl - 1);
+	}
+
+	set_ch_t(&dedata->beg_ext, begcyl, beghead);
+	set_ch_t(&dedata->end_ext, endcyl, endhead);
+
+	dedata->ep_format = 0x20; /* records per track is valid */
+	dedata->ep_rec_per_track = blk_per_trk;
+
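+	/*
+	 * Translate rec_on_trk into the sector value used in the locate
+	 * record extended data, based on 3390/3380 specific geometry
+	 * constants.
+	 */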
+	if (rec_on_trk) {
+		switch (basepriv->rdc_data.dev_type) {
+		case 0x3390:
+			dn = ceil_quot(blksize + 6, 232);
+			d = 9 + ceil_quot(blksize + 6 * (dn + 1), 34);
+			sector = (49 + (rec_on_trk - 1) * (10 + d)) / 8;
+			break;
+		case 0x3380:
+			d = 7 + ceil_quot(blksize + 12, 32);
+			sector = (39 + (rec_on_trk - 1) * (8 + d)) / 7;
+			break;
+		}
+	}
+
+	if (cmd == DASD_ECKD_CCW_READ_COUNT_MT) {
+		lredata->auxiliary.length_valid = 0;
+		lredata->auxiliary.length_scope = 0;
+		lredata->sector = 0xff;
+	} else {
+		lredata->auxiliary.length_valid = 1;
+		lredata->auxiliary.length_scope = 1;
+		lredata->sector = sector;
+	}
+	lredata->auxiliary.imbedded_ccw_valid = 1;
+	lredata->length = tlf;
+	lredata->imbedded_ccw = cmd;
+	lredata->count = count;
+	set_ch_t(&lredata->seek_addr, begcyl, beghead);
+	lredata->search_arg.cyl = lredata->seek_addr.cyl;
+	lredata->search_arg.head = lredata->seek_addr.head;
+	lredata->search_arg.record = rec_on_trk;
+
+	dcw = itcw_add_dcw(itcw, pfx_cmd, 0,
+		     &pfxdata, sizeof(pfxdata), total_data_size);
+	return PTR_ERR_OR_ZERO(dcw);
+}
+
+static struct dasd_ccw_req *dasd_eckd_build_cp_tpm_track(
+					       struct dasd_device *startdev,
+					       struct dasd_block *block,
+					       struct request *req,
+					       sector_t first_rec,
+					       sector_t last_rec,
+					       sector_t first_trk,
+					       sector_t last_trk,
+					       unsigned int first_offs,
+					       unsigned int last_offs,
+					       unsigned int blk_per_trk,
+					       unsigned int blksize)
+{
+	struct dasd_ccw_req *cqr;
+	struct req_iterator iter;
+	struct bio_vec bv;
+	char *dst;
+	unsigned int trkcount, ctidaw;
+	unsigned char cmd;
+	struct dasd_device *basedev;
+	unsigned int tlf;
+	struct itcw *itcw;
+	struct tidaw *last_tidaw = NULL;
+	int itcw_op;
+	size_t itcw_size;
+	u8 tidaw_flags;
+	unsigned int seg_len, part_len, len_to_track_end;
+	unsigned char new_track;
+	sector_t recid, trkid;
+	unsigned int offs;
+	unsigned int count, count_to_trk_end;
+	int ret;
+
+	basedev = block->base;
+	if (rq_data_dir(req) == READ) {
+		cmd = DASD_ECKD_CCW_READ_TRACK_DATA;
+		itcw_op = ITCW_OP_READ;
+	} else if (rq_data_dir(req) == WRITE) {
+		cmd = DASD_ECKD_CCW_WRITE_TRACK_DATA;
+		itcw_op = ITCW_OP_WRITE;
+	} else
+		return ERR_PTR(-EINVAL);
+
+	/* Track based I/O needs to address all memory via TIDAWs,
+	 * not just 64 bit addresses. This allows us to map
+	 * each segment directly to one tidaw.
+	 * In the case of write requests, additional tidaws may
+	 * be needed when a segment crosses a track boundary.
+	 */
+	trkcount = last_trk - first_trk + 1;
+	ctidaw = 0;
+	rq_for_each_segment(bv, req, iter) {
+		++ctidaw;
+	}
+	if (rq_data_dir(req) == WRITE)
+		ctidaw += (last_trk - first_trk);
+
+	/* Allocate the ccw request. */
+	itcw_size = itcw_calc_size(0, ctidaw, 0);
+	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 0, itcw_size, startdev,
+				   blk_mq_rq_to_pdu(req));
+	if (IS_ERR(cqr))
+		return cqr;
+
+	/* transfer length factor: how many bytes to read from the last track */
+	if (first_trk == last_trk)
+		tlf = last_offs - first_offs + 1;
+	else
+		tlf = last_offs + 1;
+	tlf *= blksize;
+
+	itcw = itcw_init(cqr->data, itcw_size, itcw_op, 0, ctidaw, 0);
+	if (IS_ERR(itcw)) {
+		ret = -EINVAL;
+		goto out_error;
+	}
+	cqr->cpaddr = itcw_get_tcw(itcw);
+	if (prepare_itcw(itcw, first_trk, last_trk,
+			 cmd, basedev, startdev,
+			 first_offs + 1,
+			 trkcount, blksize,
+			 (last_rec - first_rec + 1) * blksize,
+			 tlf, blk_per_trk) == -EAGAIN) {
+		/* Clock not in sync and XRC is enabled.
+		 * Try again later.
+		 */
+		ret = -EAGAIN;
+		goto out_error;
+	}
+	len_to_track_end = 0;
+	/*
+	 * A tidaw can address 4k of memory, but must not cross page boundaries.
+	 * We can let the block layer handle this by setting
+	 * blk_queue_segment_boundary to page boundaries and
+	 * blk_max_segment_size to page size when setting up the request queue.
+	 * For write requests, a TIDAW must not cross track boundaries, because
+	 * we have to set the CBC flag on the last tidaw for each track.
+	 */
+	if (rq_data_dir(req) == WRITE) {
+		new_track = 1;
+		recid = first_rec;
+		rq_for_each_segment(bv, req, iter) {
+			dst = page_address(bv.bv_page) + bv.bv_offset;
+			seg_len = bv.bv_len;
+			while (seg_len) {
+				if (new_track) {
+					trkid = recid;
+					offs = sector_div(trkid, blk_per_trk);
+					count_to_trk_end = blk_per_trk - offs;
+					count = min((last_rec - recid + 1),
+						    (sector_t)count_to_trk_end);
+					len_to_track_end = count * blksize;
+					recid += count;
+					new_track = 0;
+				}
+				part_len = min(seg_len, len_to_track_end);
+				seg_len -= part_len;
+				len_to_track_end -= part_len;
+				/* We need to end the tidaw at track end */
+				if (!len_to_track_end) {
+					new_track = 1;
+					tidaw_flags = TIDAW_FLAGS_INSERT_CBC;
+				} else
+					tidaw_flags = 0;
+				last_tidaw = itcw_add_tidaw(itcw, tidaw_flags,
+							    dst, part_len);
+				if (IS_ERR(last_tidaw)) {
+					ret = -EINVAL;
+					goto out_error;
+				}
+				dst += part_len;
+			}
+		}
+	} else {
+		rq_for_each_segment(bv, req, iter) {
+			dst = page_address(bv.bv_page) + bv.bv_offset;
+			last_tidaw = itcw_add_tidaw(itcw, 0x00,
+						    dst, bv.bv_len);
+			if (IS_ERR(last_tidaw)) {
+				ret = -EINVAL;
+				goto out_error;
+			}
+		}
+	}
+	last_tidaw->flags |= TIDAW_FLAGS_LAST;
+	last_tidaw->flags &= ~TIDAW_FLAGS_INSERT_CBC;
+	itcw_finalize(itcw);
+
+	if (blk_noretry_request(req) ||
+	    block->base->features & DASD_FEATURE_FAILFAST)
+		set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
+	cqr->cpmode = 1;
+	cqr->startdev = startdev;
+	cqr->memdev = startdev;
+	cqr->block = block;
+	cqr->expires = startdev->default_expires * HZ;	/* default 5 minutes */
+	cqr->lpm = dasd_path_get_ppm(startdev);
+	cqr->retries = startdev->default_retries;
+	cqr->buildclk = get_tod_clock();
+	cqr->status = DASD_CQR_FILLED;
+	return cqr;
+out_error:
+	dasd_sfree_request(cqr, startdev);
+	return ERR_PTR(ret);
+}
+
+static struct dasd_ccw_req *dasd_eckd_build_cp(struct dasd_device *startdev,
+					       struct dasd_block *block,
+					       struct request *req)
+{
+	int cmdrtd, cmdwtd;
+	int use_prefix;
+	int fcx_multitrack;
+	struct dasd_eckd_private *private;
+	struct dasd_device *basedev;
+	sector_t first_rec, last_rec;
+	sector_t first_trk, last_trk;
+	unsigned int first_offs, last_offs;
+	unsigned int blk_per_trk, blksize;
+	int cdlspecial;
+	unsigned int data_size;
+	struct dasd_ccw_req *cqr;
+
+	basedev = block->base;
+	private = basedev->private;
+
+	/* Calculate number of blocks/records per track. */
+	blksize = block->bp_block;
+	blk_per_trk = recs_per_track(&private->rdc_data, 0, blksize);
+	if (blk_per_trk == 0)
+		return ERR_PTR(-EINVAL);
+	/* Calculate record id of first and last block. */
+	first_rec = first_trk = blk_rq_pos(req) >> block->s2b_shift;
+	first_offs = sector_div(first_trk, blk_per_trk);
+	last_rec = last_trk =
+		(blk_rq_pos(req) + blk_rq_sectors(req) - 1) >> block->s2b_shift;
+	last_offs = sector_div(last_trk, blk_per_trk);
+	cdlspecial = (private->uses_cdl && first_rec < 2*blk_per_trk);
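+	/*
+	 * For illustration (geometry assumed): with 4k blocks s2b_shift
+	 * is 3, so a request at 512-byte sector 800 spanning 96 sectors
+	 * yields first_rec = 100 and last_rec = 111; with 12 blocks per
+	 * track that is first_trk = 8, first_offs = 4, last_trk = 9 and
+	 * last_offs = 3, i.e. the request touches two tracks.
+	 */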
+
+	fcx_multitrack = private->features.feature[40] & 0x20;
+	data_size = blk_rq_bytes(req);
+	if (data_size % blksize)
+		return ERR_PTR(-EINVAL);
+	/* tpm write requests add CBC data on each track boundary */
+	if (rq_data_dir(req) == WRITE)
+		data_size += (last_trk - first_trk) * 4;
+
+	/* is read track data and write track data in command mode supported? */
+	cmdrtd = private->features.feature[9] & 0x20;
+	cmdwtd = private->features.feature[12] & 0x40;
+	use_prefix = private->features.feature[8] & 0x01;
+
+	cqr = NULL;
+	if (cdlspecial || dasd_page_cache) {
+		/* do nothing, just fall through to the cmd mode single case */
+	} else if ((data_size <= private->fcx_max_data)
+		   && (fcx_multitrack || (first_trk == last_trk))) {
+		cqr = dasd_eckd_build_cp_tpm_track(startdev, block, req,
+						    first_rec, last_rec,
+						    first_trk, last_trk,
+						    first_offs, last_offs,
+						    blk_per_trk, blksize);
+		if (IS_ERR(cqr) && (PTR_ERR(cqr) != -EAGAIN) &&
+		    (PTR_ERR(cqr) != -ENOMEM))
+			cqr = NULL;
+	} else if (use_prefix &&
+		   (((rq_data_dir(req) == READ) && cmdrtd) ||
+		    ((rq_data_dir(req) == WRITE) && cmdwtd))) {
+		cqr = dasd_eckd_build_cp_cmd_track(startdev, block, req,
+						   first_rec, last_rec,
+						   first_trk, last_trk,
+						   first_offs, last_offs,
+						   blk_per_trk, blksize);
+		if (IS_ERR(cqr) && (PTR_ERR(cqr) != -EAGAIN) &&
+		    (PTR_ERR(cqr) != -ENOMEM))
+			cqr = NULL;
+	}
+	if (!cqr)
+		cqr = dasd_eckd_build_cp_cmd_single(startdev, block, req,
+						    first_rec, last_rec,
+						    first_trk, last_trk,
+						    first_offs, last_offs,
+						    blk_per_trk, blksize);
+	return cqr;
+}
+
+static struct dasd_ccw_req *dasd_eckd_build_cp_raw(struct dasd_device *startdev,
+						   struct dasd_block *block,
+						   struct request *req)
+{
+	sector_t start_padding_sectors, end_sector_offset, end_padding_sectors;
+	unsigned int seg_len, len_to_track_end;
+	unsigned int cidaw, cplength, datasize;
+	sector_t first_trk, last_trk, sectors;
+	struct dasd_eckd_private *base_priv;
+	struct dasd_device *basedev;
+	struct req_iterator iter;
+	struct dasd_ccw_req *cqr;
+	unsigned int first_offs;
+	unsigned int trkcount;
+	unsigned long *idaws;
+	unsigned int size;
+	unsigned char cmd;
+	struct bio_vec bv;
+	struct ccw1 *ccw;
+	int use_prefix;
+	void *data;
+	char *dst;
+
+	/*
+	 * Raw track access needs to be a multiple of 64k and on a 64k
+	 * boundary. For read requests we can fix an incorrect alignment
+	 * by padding the request with dummy pages.
+	 */
+	start_padding_sectors = blk_rq_pos(req) % DASD_RAW_SECTORS_PER_TRACK;
+	end_sector_offset = (blk_rq_pos(req) + blk_rq_sectors(req)) %
+		DASD_RAW_SECTORS_PER_TRACK;
+	end_padding_sectors = (DASD_RAW_SECTORS_PER_TRACK - end_sector_offset) %
+		DASD_RAW_SECTORS_PER_TRACK;
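+	/*
+	 * Example with assumed numbers: one track maps to 64k, i.e. 128
+	 * sectors of 512 bytes. A read at sector 200 for 120 sectors gives
+	 * start_padding_sectors = 200 % 128 = 72 and end_sector_offset =
+	 * 320 % 128 = 64, hence end_padding_sectors = 64; both the start
+	 * of the first and the end of the last track need dummy pages.
+	 */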
+	basedev = block->base;
+	if ((start_padding_sectors || end_padding_sectors) &&
+	    (rq_data_dir(req) == WRITE)) {
+		DBF_DEV_EVENT(DBF_ERR, basedev,
+			      "raw write not track aligned (%lu,%lu) req %p",
+			      start_padding_sectors, end_padding_sectors, req);
+		return ERR_PTR(-EINVAL);
+	}
+
+	first_trk = blk_rq_pos(req) / DASD_RAW_SECTORS_PER_TRACK;
+	last_trk = (blk_rq_pos(req) + blk_rq_sectors(req) - 1) /
+		DASD_RAW_SECTORS_PER_TRACK;
+	trkcount = last_trk - first_trk + 1;
+	first_offs = 0;
+
+	if (rq_data_dir(req) == READ)
+		cmd = DASD_ECKD_CCW_READ_TRACK;
+	else if (rq_data_dir(req) == WRITE)
+		cmd = DASD_ECKD_CCW_WRITE_FULL_TRACK;
+	else
+		return ERR_PTR(-EINVAL);
+
+	/*
+	 * Raw track based I/O needs IDAWs for each page,
+	 * and not just for 64 bit addresses.
+	 */
+	cidaw = trkcount * DASD_RAW_BLOCK_PER_TRACK;
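+	/*
+	 * E.g. the two-track request from the example above, with 16 raw
+	 * blocks (4k pages) per track, reserves 32 idaws.
+	 */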
+
+	/*
+	 * struct PFX_eckd_data and struct LRE_eckd_data can have up to 2 bytes
+	 * of extended parameter. This is needed for write full track.
+	 */
+	base_priv = basedev->private;
+	use_prefix = base_priv->features.feature[8] & 0x01;
+	if (use_prefix) {
+		cplength = 1 + trkcount;
+		size = sizeof(struct PFX_eckd_data) + 2;
+	} else {
+		cplength = 2 + trkcount;
+		size = sizeof(struct DE_eckd_data) +
+			sizeof(struct LRE_eckd_data) + 2;
+	}
+	size = ALIGN(size, 8);
+
+	datasize = size + cidaw * sizeof(unsigned long);
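+	/*
+	 * Continuing the two-track example with use_prefix set: one PFX
+	 * CCW plus one read/write CCW per track gives cplength = 3, and
+	 * the data area holds the aligned prefix data followed by 32
+	 * eight-byte idaws.
+	 */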
+
+	/* Allocate the ccw request. */
+	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength,
+				   datasize, startdev, blk_mq_rq_to_pdu(req));
+	if (IS_ERR(cqr))
+		return cqr;
+
+	ccw = cqr->cpaddr;
+	data = cqr->data;
+
+	if (use_prefix) {
+		prefix_LRE(ccw++, data, first_trk, last_trk, cmd, basedev,
+			   startdev, 1, first_offs + 1, trkcount, 0, 0);
+	} else {
+		define_extent(ccw++, data, first_trk, last_trk, cmd, basedev, 0);
+		ccw[-1].flags |= CCW_FLAG_CC;
+
+		data += sizeof(struct DE_eckd_data);
+		locate_record_ext(ccw++, data, first_trk, first_offs + 1,
+				  trkcount, cmd, basedev, 0, 0);
+	}
+
+	idaws = (unsigned long *)(cqr->data + size);
+	len_to_track_end = 0;
+	if (start_padding_sectors) {
+		ccw[-1].flags |= CCW_FLAG_CC;
+		ccw->cmd_code = cmd;
+		/* maximum 3390 track size */
+		ccw->count = 57326;
+		/* 64k maps to one track */
+		len_to_track_end = 65536 - start_padding_sectors * 512;
+		ccw->cda = (__u32)(addr_t)idaws;
+		ccw->flags |= CCW_FLAG_IDA;
+		ccw->flags |= CCW_FLAG_SLI;
+		ccw++;
+		for (sectors = 0; sectors < start_padding_sectors; sectors += 8)
+			idaws = idal_create_words(idaws, rawpadpage, PAGE_SIZE);
+	}
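+	/*
+	 * Continuing the example: start_padding_sectors = 72 leaves
+	 * 65536 - 72 * 512 = 28672 bytes of real data on the first track,
+	 * preceded by nine dummy idaws (72 sectors in 4k pages).
+	 */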
+	rq_for_each_segment(bv, req, iter) {
+		dst = page_address(bv.bv_page) + bv.bv_offset;
+		seg_len = bv.bv_len;
+		if (cmd == DASD_ECKD_CCW_READ_TRACK)
+			memset(dst, 0, seg_len);
+		if (!len_to_track_end) {
+			ccw[-1].flags |= CCW_FLAG_CC;
+			ccw->cmd_code = cmd;
+			/* maximum 3390 track size */
+			ccw->count = 57326;
+			/* 64k maps to one track */
+			len_to_track_end = 65536;
+			ccw->cda = (__u32)(addr_t)idaws;
+			ccw->flags |= CCW_FLAG_IDA;
+			ccw->flags |= CCW_FLAG_SLI;
+			ccw++;
+		}
+		len_to_track_end -= seg_len;
+		idaws = idal_create_words(idaws, dst, seg_len);
+	}
+	for (sectors = 0; sectors < end_padding_sectors; sectors += 8)
+		idaws = idal_create_words(idaws, rawpadpage, PAGE_SIZE);
+	if (blk_noretry_request(req) ||
+	    block->base->features & DASD_FEATURE_FAILFAST)
+		set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
+	cqr->startdev = startdev;
+	cqr->memdev = startdev;
+	cqr->block = block;
+	cqr->expires = startdev->default_expires * HZ;
+	cqr->lpm = dasd_path_get_ppm(startdev);
+	cqr->retries = startdev->default_retries;
+	cqr->buildclk = get_tod_clock();
+	cqr->status = DASD_CQR_FILLED;
+
+	return cqr;
+}
+
+
+static int
+dasd_eckd_free_cp(struct dasd_ccw_req *cqr, struct request *req)
+{
+	struct dasd_eckd_private *private;
+	struct ccw1 *ccw;
+	struct req_iterator iter;
+	struct bio_vec bv;
+	char *dst, *cda;
+	unsigned int blksize, blk_per_trk, off;
+	sector_t recid;
+	int status;
+
+	if (!dasd_page_cache)
+		goto out;
+	private = cqr->block->base->private;
+	blksize = cqr->block->bp_block;
+	blk_per_trk = recs_per_track(&private->rdc_data, 0, blksize);
+	recid = blk_rq_pos(req) >> cqr->block->s2b_shift;
+	ccw = cqr->cpaddr;
+	/* Skip over define extent & locate record. */
+	ccw++;
+	if (private->uses_cdl == 0 || recid > 2*blk_per_trk)
+		ccw++;
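+	/*
+	 * If the request was built from dasd_page_cache pages the CCWs
+	 * point at cache pages instead of the bio pages, so for reads the
+	 * data has to be copied back before the cache pages are freed.
+	 */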
+	rq_for_each_segment(bv, req, iter) {
+		dst = page_address(bv.bv_page) + bv.bv_offset;
+		for (off = 0; off < bv.bv_len; off += blksize) {
+			/* Skip locate record. */
+			if (private->uses_cdl && recid <= 2*blk_per_trk)
+				ccw++;
+			if (dst) {
+				if (ccw->flags & CCW_FLAG_IDA)
+					cda = *((char **)((addr_t) ccw->cda));
+				else
+					cda = (char *)((addr_t) ccw->cda);
+				if (dst != cda) {
+					if (rq_data_dir(req) == READ)
+						memcpy(dst, cda, bv.bv_len);
+					kmem_cache_free(dasd_page_cache,
+					    (void *)((addr_t)cda & PAGE_MASK));
+				}
+				dst = NULL;
+			}
+			ccw++;
+			recid++;
+		}
+	}
+out:
+	status = cqr->status == DASD_CQR_DONE;
+	dasd_sfree_request(cqr, cqr->memdev);
+	return status;
+}
+
+/*
+ * Modify ccw/tcw in cqr so it can be started on a base device.
+ *
+ * Note that this is not enough to restart the cqr!
+ * Either reset cqr->startdev as well (summary unit check handling)
+ * or restart via separate cqr (as in ERP handling).
+ */
+void dasd_eckd_reset_ccw_to_base_io(struct dasd_ccw_req *cqr)
+{
+	struct ccw1 *ccw;
+	struct PFX_eckd_data *pfxdata;
+	struct tcw *tcw;
+	struct tccb *tccb;
+	struct dcw *dcw;
+
+	if (cqr->cpmode == 1) {
+		tcw = cqr->cpaddr;
+		tccb = tcw_get_tccb(tcw);
+		dcw = (struct dcw *)&tccb->tca[0];
+		pfxdata = (struct PFX_eckd_data *)&dcw->cd[0];
+		pfxdata->validity.verify_base = 0;
+		pfxdata->validity.hyper_pav = 0;
+	} else {
+		ccw = cqr->cpaddr;
+		pfxdata = cqr->data;
+		if (ccw->cmd_code == DASD_ECKD_CCW_PFX) {
+			pfxdata->validity.verify_base = 0;
+			pfxdata->validity.hyper_pav = 0;
+		}
+	}
+}
+
+#define DASD_ECKD_CHANQ_MAX_SIZE 4
+
+static struct dasd_ccw_req *dasd_eckd_build_alias_cp(struct dasd_device *base,
+						     struct dasd_block *block,
+						     struct request *req)
+{
+	struct dasd_eckd_private *private;
+	struct dasd_device *startdev;
+	unsigned long flags;
+	struct dasd_ccw_req *cqr;
+
+	startdev = dasd_alias_get_start_dev(base);
+	if (!startdev)
+		startdev = base;
+	private = startdev->private;
+	if (private->count >= DASD_ECKD_CHANQ_MAX_SIZE)
+		return ERR_PTR(-EBUSY);
+
+	spin_lock_irqsave(get_ccwdev_lock(startdev->cdev), flags);
+	private->count++;
+	if ((base->features & DASD_FEATURE_USERAW))
+		cqr = dasd_eckd_build_cp_raw(startdev, block, req);
+	else
+		cqr = dasd_eckd_build_cp(startdev, block, req);
+	if (IS_ERR(cqr))
+		private->count--;
+	spin_unlock_irqrestore(get_ccwdev_lock(startdev->cdev), flags);
+	return cqr;
+}
+
+static int dasd_eckd_free_alias_cp(struct dasd_ccw_req *cqr,
+				   struct request *req)
+{
+	struct dasd_eckd_private *private;
+	unsigned long flags;
+
+	spin_lock_irqsave(get_ccwdev_lock(cqr->memdev->cdev), flags);
+	private = cqr->memdev->private;
+	private->count--;
+	spin_unlock_irqrestore(get_ccwdev_lock(cqr->memdev->cdev), flags);
+	return dasd_eckd_free_cp(cqr, req);
+}
+
+static int
+dasd_eckd_fill_info(struct dasd_device * device,
+		    struct dasd_information2_t * info)
+{
+	struct dasd_eckd_private *private = device->private;
+
+	info->label_block = 2;
+	info->FBA_layout = private->uses_cdl ? 0 : 1;
+	info->format = private->uses_cdl ? DASD_FORMAT_CDL : DASD_FORMAT_LDL;
+	info->characteristics_size = sizeof(private->rdc_data);
+	memcpy(info->characteristics, &private->rdc_data,
+	       sizeof(private->rdc_data));
+	info->confdata_size = min((unsigned long)private->conf_len,
+				  sizeof(info->configuration_data));
+	memcpy(info->configuration_data, private->conf_data,
+	       info->confdata_size);
+	return 0;
+}
+
+/*
+ * SECTION: ioctl functions for eckd devices.
+ */
+
+/*
+ * Release device ioctl.
+ * Builds a channel program to release a previously reserved device
+ * (see dasd_eckd_reserve).
+ */
+static int
+dasd_eckd_release(struct dasd_device *device)
+{
+	struct dasd_ccw_req *cqr;
+	int rc;
+	struct ccw1 *ccw;
+	int useglobal;
+
+	if (!capable(CAP_SYS_ADMIN))
+		return -EACCES;
+
+	useglobal = 0;
+	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1, 32, device, NULL);
+	if (IS_ERR(cqr)) {
+		mutex_lock(&dasd_reserve_mutex);
+		useglobal = 1;
+		cqr = &dasd_reserve_req->cqr;
+		memset(cqr, 0, sizeof(*cqr));
+		memset(&dasd_reserve_req->ccw, 0,
+		       sizeof(dasd_reserve_req->ccw));
+		cqr->cpaddr = &dasd_reserve_req->ccw;
+		cqr->data = &dasd_reserve_req->data;
+		cqr->magic = DASD_ECKD_MAGIC;
+	}
+	ccw = cqr->cpaddr;
+	ccw->cmd_code = DASD_ECKD_CCW_RELEASE;
+	ccw->flags |= CCW_FLAG_SLI;
+	ccw->count = 32;
+	ccw->cda = (__u32)(addr_t) cqr->data;
+	cqr->startdev = device;
+	cqr->memdev = device;
+	clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
+	set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
+	cqr->retries = 2;	/* set retry counter to enable basic ERP */
+	cqr->expires = 2 * HZ;
+	cqr->buildclk = get_tod_clock();
+	cqr->status = DASD_CQR_FILLED;
+
+	rc = dasd_sleep_on_immediatly(cqr);
+	if (!rc)
+		clear_bit(DASD_FLAG_IS_RESERVED, &device->flags);
+
+	if (useglobal)
+		mutex_unlock(&dasd_reserve_mutex);
+	else
+		dasd_sfree_request(cqr, cqr->memdev);
+	return rc;
+}
+
+/*
+ * Reserve device ioctl.
+ * Options are set to 'synchronous wait for interrupt' and
+ * 'timeout the request'. This leads to the I/O being terminated
+ * if the interrupt is outstanding for a certain time.
+ */
+static int
+dasd_eckd_reserve(struct dasd_device *device)
+{
+	struct dasd_ccw_req *cqr;
+	int rc;
+	struct ccw1 *ccw;
+	int useglobal;
+
+	if (!capable(CAP_SYS_ADMIN))
+		return -EACCES;
+
+	useglobal = 0;
+	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1, 32, device, NULL);
+	if (IS_ERR(cqr)) {
+		mutex_lock(&dasd_reserve_mutex);
+		useglobal = 1;
+		cqr = &dasd_reserve_req->cqr;
+		memset(cqr, 0, sizeof(*cqr));
+		memset(&dasd_reserve_req->ccw, 0,
+		       sizeof(dasd_reserve_req->ccw));
+		cqr->cpaddr = &dasd_reserve_req->ccw;
+		cqr->data = &dasd_reserve_req->data;
+		cqr->magic = DASD_ECKD_MAGIC;
+	}
+	ccw = cqr->cpaddr;
+	ccw->cmd_code = DASD_ECKD_CCW_RESERVE;
+	ccw->flags |= CCW_FLAG_SLI;
+	ccw->count = 32;
+	ccw->cda = (__u32)(addr_t) cqr->data;
+	cqr->startdev = device;
+	cqr->memdev = device;
+	clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
+	set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
+	cqr->retries = 2;	/* set retry counter to enable basic ERP */
+	cqr->expires = 2 * HZ;
+	cqr->buildclk = get_tod_clock();
+	cqr->status = DASD_CQR_FILLED;
+
+	rc = dasd_sleep_on_immediatly(cqr);
+	if (!rc)
+		set_bit(DASD_FLAG_IS_RESERVED, &device->flags);
+
+	if (useglobal)
+		mutex_unlock(&dasd_reserve_mutex);
+	else
+		dasd_sfree_request(cqr, cqr->memdev);
+	return rc;
+}
+
+/*
+ * Steal lock ioctl - unconditional reserve device.
+ * Builds a channel program to break a device's reservation
+ * (unconditional reserve).
+ */
+static int
+dasd_eckd_steal_lock(struct dasd_device *device)
+{
+	struct dasd_ccw_req *cqr;
+	int rc;
+	struct ccw1 *ccw;
+	int useglobal;
+
+	if (!capable(CAP_SYS_ADMIN))
+		return -EACCES;
+
+	useglobal = 0;
+	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1, 32, device, NULL);
+	if (IS_ERR(cqr)) {
+		mutex_lock(&dasd_reserve_mutex);
+		useglobal = 1;
+		cqr = &dasd_reserve_req->cqr;
+		memset(cqr, 0, sizeof(*cqr));
+		memset(&dasd_reserve_req->ccw, 0,
+		       sizeof(dasd_reserve_req->ccw));
+		cqr->cpaddr = &dasd_reserve_req->ccw;
+		cqr->data = &dasd_reserve_req->data;
+		cqr->magic = DASD_ECKD_MAGIC;
+	}
+	ccw = cqr->cpaddr;
+	ccw->cmd_code = DASD_ECKD_CCW_SLCK;
+	ccw->flags |= CCW_FLAG_SLI;
+	ccw->count = 32;
+	ccw->cda = (__u32)(addr_t) cqr->data;
+	cqr->startdev = device;
+	cqr->memdev = device;
+	clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
+	set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
+	cqr->retries = 2;	/* set retry counter to enable basic ERP */
+	cqr->expires = 2 * HZ;
+	cqr->buildclk = get_tod_clock();
+	cqr->status = DASD_CQR_FILLED;
+
+	rc = dasd_sleep_on_immediatly(cqr);
+	if (!rc)
+		set_bit(DASD_FLAG_IS_RESERVED, &device->flags);
+
+	if (useglobal)
+		mutex_unlock(&dasd_reserve_mutex);
+	else
+		dasd_sfree_request(cqr, cqr->memdev);
+	return rc;
+}
+
+/*
+ * SNID - Sense Path Group ID
+ * This ioctl may be used in situations where I/O is stalled due to
+ * a reserve, so if the normal dasd_smalloc_request fails, we use the
+ * preallocated dasd_reserve_req.
+ */
+static int dasd_eckd_snid(struct dasd_device *device,
+			  void __user *argp)
+{
+	struct dasd_ccw_req *cqr;
+	int rc;
+	struct ccw1 *ccw;
+	int useglobal;
+	struct dasd_snid_ioctl_data usrparm;
+
+	if (!capable(CAP_SYS_ADMIN))
+		return -EACCES;
+
+	if (copy_from_user(&usrparm, argp, sizeof(usrparm)))
+		return -EFAULT;
+
+	useglobal = 0;
+	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1,
+				   sizeof(struct dasd_snid_data), device,
+				   NULL);
+	if (IS_ERR(cqr)) {
+		mutex_lock(&dasd_reserve_mutex);
+		useglobal = 1;
+		cqr = &dasd_reserve_req->cqr;
+		memset(cqr, 0, sizeof(*cqr));
+		memset(&dasd_reserve_req->ccw, 0,
+		       sizeof(dasd_reserve_req->ccw));
+		cqr->cpaddr = &dasd_reserve_req->ccw;
+		cqr->data = &dasd_reserve_req->data;
+		cqr->magic = DASD_ECKD_MAGIC;
+	}
+	ccw = cqr->cpaddr;
+	ccw->cmd_code = DASD_ECKD_CCW_SNID;
+	ccw->flags |= CCW_FLAG_SLI;
+	ccw->count = 12;
+	ccw->cda = (__u32)(addr_t) cqr->data;
+	cqr->startdev = device;
+	cqr->memdev = device;
+	clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
+	set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
+	set_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags);
+	cqr->retries = 5;
+	cqr->expires = 10 * HZ;
+	cqr->buildclk = get_tod_clock();
+	cqr->status = DASD_CQR_FILLED;
+	cqr->lpm = usrparm.path_mask;
+
+	rc = dasd_sleep_on_immediatly(cqr);
+	/* verify that I/O processing didn't modify the path mask */
+	if (!rc && usrparm.path_mask && (cqr->lpm != usrparm.path_mask))
+		rc = -EIO;
+	if (!rc) {
+		usrparm.data = *((struct dasd_snid_data *)cqr->data);
+		if (copy_to_user(argp, &usrparm, sizeof(usrparm)))
+			rc = -EFAULT;
+	}
+
+	if (useglobal)
+		mutex_unlock(&dasd_reserve_mutex);
+	else
+		dasd_sfree_request(cqr, cqr->memdev);
+	return rc;
+}
+
+/*
+ * Read performance statistics
+ */
+static int
+dasd_eckd_performance(struct dasd_device *device, void __user *argp)
+{
+	struct dasd_psf_prssd_data *prssdp;
+	struct dasd_rssd_perf_stats_t *stats;
+	struct dasd_ccw_req *cqr;
+	struct ccw1 *ccw;
+	int rc;
+
+	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */  + 1 /* RSSD */,
+				   (sizeof(struct dasd_psf_prssd_data) +
+				    sizeof(struct dasd_rssd_perf_stats_t)),
+				   device, NULL);
+	if (IS_ERR(cqr)) {
+		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
+			    "Could not allocate initialization request");
+		return PTR_ERR(cqr);
+	}
+	cqr->startdev = device;
+	cqr->memdev = device;
+	cqr->retries = 0;
+	clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
+	cqr->expires = 10 * HZ;
+
+	/* Prepare for Read Subsystem Data */
+	prssdp = (struct dasd_psf_prssd_data *) cqr->data;
+	memset(prssdp, 0, sizeof(struct dasd_psf_prssd_data));
+	prssdp->order = PSF_ORDER_PRSSD;
+	prssdp->suborder = 0x01;	/* Performance Statistics */
+	prssdp->varies[1] = 0x01;	/* Perf Statistics for the Subsystem */
+
+	ccw = cqr->cpaddr;
+	ccw->cmd_code = DASD_ECKD_CCW_PSF;
+	ccw->count = sizeof(struct dasd_psf_prssd_data);
+	ccw->flags |= CCW_FLAG_CC;
+	ccw->cda = (__u32)(addr_t) prssdp;
+
+	/* Read Subsystem Data - Performance Statistics */
+	stats = (struct dasd_rssd_perf_stats_t *) (prssdp + 1);
+	memset(stats, 0, sizeof(struct dasd_rssd_perf_stats_t));
+
+	ccw++;
+	ccw->cmd_code = DASD_ECKD_CCW_RSSD;
+	ccw->count = sizeof(struct dasd_rssd_perf_stats_t);
+	ccw->cda = (__u32)(addr_t) stats;
+
+	cqr->buildclk = get_tod_clock();
+	cqr->status = DASD_CQR_FILLED;
+	rc = dasd_sleep_on(cqr);
+	if (rc == 0) {
+		prssdp = (struct dasd_psf_prssd_data *) cqr->data;
+		stats = (struct dasd_rssd_perf_stats_t *) (prssdp + 1);
+		if (copy_to_user(argp, stats,
+				 sizeof(struct dasd_rssd_perf_stats_t)))
+			rc = -EFAULT;
+	}
+	dasd_sfree_request(cqr, cqr->memdev);
+	return rc;
+}
+
+/*
+ * Get attributes (cache operations)
+ * Returns the cache attributes used in Define Extent (DE).
+ */
+static int
+dasd_eckd_get_attrib(struct dasd_device *device, void __user *argp)
+{
+	struct dasd_eckd_private *private = device->private;
+	struct attrib_data_t attrib = private->attrib;
+	int rc;
+
+	if (!capable(CAP_SYS_ADMIN))
+		return -EACCES;
+	if (!argp)
+		return -EINVAL;
+
+	rc = 0;
+	if (copy_to_user(argp, (long *) &attrib,
+			 sizeof(struct attrib_data_t)))
+		rc = -EFAULT;
+
+	return rc;
+}
+
+/*
+ * Set attributes (cache operations)
+ * Stores the attributes for cache operation to be used in Define Extent (DE).
+ */
+static int
+dasd_eckd_set_attrib(struct dasd_device *device, void __user *argp)
+{
+	struct dasd_eckd_private *private = device->private;
+	struct attrib_data_t attrib;
+
+	if (!capable(CAP_SYS_ADMIN))
+		return -EACCES;
+	if (!argp)
+		return -EINVAL;
+
+	if (copy_from_user(&attrib, argp, sizeof(struct attrib_data_t)))
+		return -EFAULT;
+	private->attrib = attrib;
+
+	dev_info(&device->cdev->dev,
+		 "The DASD cache mode was set to %x (%i cylinder prestage)\n",
+		 private->attrib.operation, private->attrib.nr_cyl);
+	return 0;
+}
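+/*
+ * A minimal userspace sketch of the two attribute ioctls above (struct
+ * and constant names assumed from the uapi <asm/dasd.h>):
+ *
+ *	attrib_data_t attrib;
+ *	int fd = open("/dev/dasda", O_RDONLY);
+ *
+ *	if (fd >= 0 && ioctl(fd, BIODASDGATTR, &attrib) == 0) {
+ *		attrib.operation = DASD_SEQ_PRESTAGE;
+ *		attrib.nr_cyl = 2;
+ *		ioctl(fd, BIODASDSATTR, &attrib);
+ *	}
+ */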
+
+/*
+ * Issue syscall I/O to EMC Symmetrix array.
+ * CCWs are PSF and RSSD
+ */
+static int dasd_symm_io(struct dasd_device *device, void __user *argp)
+{
+	struct dasd_symmio_parms usrparm;
+	char *psf_data, *rssd_result;
+	struct dasd_ccw_req *cqr;
+	struct ccw1 *ccw;
+	char psf0, psf1;
+	int rc;
+
+	if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RAWIO))
+		return -EACCES;
+	psf0 = psf1 = 0;
+
+	/* Copy parms from caller */
+	rc = -EFAULT;
+	if (copy_from_user(&usrparm, argp, sizeof(usrparm)))
+		goto out;
+	if (is_compat_task()) {
+		/* Make sure pointers are sane even on 31 bit. */
+		rc = -EINVAL;
+		if ((usrparm.psf_data >> 32) != 0)
+			goto out;
+		if ((usrparm.rssd_result >> 32) != 0)
+			goto out;
+		usrparm.psf_data &= 0x7fffffffULL;
+		usrparm.rssd_result &= 0x7fffffffULL;
+	}
+	/* alloc I/O data area */
+	psf_data = kzalloc(usrparm.psf_data_len, GFP_KERNEL | GFP_DMA);
+	rssd_result = kzalloc(usrparm.rssd_result_len, GFP_KERNEL | GFP_DMA);
+	if (!psf_data || !rssd_result) {
+		rc = -ENOMEM;
+		goto out_free;
+	}
+
+	/* get syscall header from user space */
+	rc = -EFAULT;
+	if (copy_from_user(psf_data,
+			   (void __user *)(unsigned long) usrparm.psf_data,
+			   usrparm.psf_data_len))
+		goto out_free;
+	psf0 = psf_data[0];
+	psf1 = psf_data[1];
+
+	/* setup CCWs for PSF + RSSD */
+	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 2, 0, device, NULL);
+	if (IS_ERR(cqr)) {
+		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
+			"Could not allocate initialization request");
+		rc = PTR_ERR(cqr);
+		goto out_free;
+	}
+
+	cqr->startdev = device;
+	cqr->memdev = device;
+	cqr->retries = 3;
+	cqr->expires = 10 * HZ;
+	cqr->buildclk = get_tod_clock();
+	cqr->status = DASD_CQR_FILLED;
+
+	/* Build the ccws */
+	ccw = cqr->cpaddr;
+
+	/* PSF ccw */
+	ccw->cmd_code = DASD_ECKD_CCW_PSF;
+	ccw->count = usrparm.psf_data_len;
+	ccw->flags |= CCW_FLAG_CC;
+	ccw->cda = (__u32)(addr_t) psf_data;
+
+	ccw++;
+
+	/* RSSD ccw  */
+	ccw->cmd_code = DASD_ECKD_CCW_RSSD;
+	ccw->count = usrparm.rssd_result_len;
+	ccw->flags = CCW_FLAG_SLI ;
+	ccw->cda = (__u32)(addr_t) rssd_result;
+
+	rc = dasd_sleep_on(cqr);
+	if (rc)
+		goto out_sfree;
+
+	rc = -EFAULT;
+	if (copy_to_user((void __user *)(unsigned long) usrparm.rssd_result,
+			   rssd_result, usrparm.rssd_result_len))
+		goto out_sfree;
+	rc = 0;
+
+out_sfree:
+	dasd_sfree_request(cqr, cqr->memdev);
+out_free:
+	kfree(rssd_result);
+	kfree(psf_data);
+out:
+	DBF_DEV_EVENT(DBF_WARNING, device,
+		      "Symmetrix ioctl (0x%02x 0x%02x): rc=%d",
+		      (int) psf0, (int) psf1, rc);
+	return rc;
+}
+
+static int
+dasd_eckd_ioctl(struct dasd_block *block, unsigned int cmd, void __user *argp)
+{
+	struct dasd_device *device = block->base;
+
+	switch (cmd) {
+	case BIODASDGATTR:
+		return dasd_eckd_get_attrib(device, argp);
+	case BIODASDSATTR:
+		return dasd_eckd_set_attrib(device, argp);
+	case BIODASDPSRD:
+		return dasd_eckd_performance(device, argp);
+	case BIODASDRLSE:
+		return dasd_eckd_release(device);
+	case BIODASDRSRV:
+		return dasd_eckd_reserve(device);
+	case BIODASDSLCK:
+		return dasd_eckd_steal_lock(device);
+	case BIODASDSNID:
+		return dasd_eckd_snid(device, argp);
+	case BIODASDSYMMIO:
+		return dasd_symm_io(device, argp);
+	default:
+		return -ENOTTY;
+	}
+}
+
+/*
+ * Dump the range of CCWs into 'page' buffer
+ * and return number of printed chars.
+ */
+static int
+dasd_eckd_dump_ccw_range(struct ccw1 *from, struct ccw1 *to, char *page)
+{
+	int len, count;
+	char *datap;
+
+	len = 0;
+	while (from <= to) {
+		len += sprintf(page + len, PRINTK_HEADER
+			       " CCW %p: %08X %08X DAT:",
+			       from, ((int *) from)[0], ((int *) from)[1]);
+
+		/* get pointer to data (consider IDALs) */
+		if (from->flags & CCW_FLAG_IDA)
+			datap = (char *) *((addr_t *) (addr_t) from->cda);
+		else
+			datap = (char *) ((addr_t) from->cda);
+
+		/* dump data (max 32 bytes) */
+		for (count = 0; count < from->count && count < 32; count++) {
+			if (count % 8 == 0) len += sprintf(page + len, " ");
+			if (count % 4 == 0) len += sprintf(page + len, " ");
+			len += sprintf(page + len, "%02x", datap[count]);
+		}
+		len += sprintf(page + len, "\n");
+		from++;
+	}
+	return len;
+}
+
+static void
+dasd_eckd_dump_sense_dbf(struct dasd_device *device, struct irb *irb,
+			 char *reason)
+{
+	u64 *sense;
+	u64 *stat;
+
+	sense = (u64 *) dasd_get_sense(irb);
+	stat = (u64 *) &irb->scsw;
+	if (sense) {
+		DBF_DEV_EVENT(DBF_EMERG, device, "%s: %016llx %08x : "
+			      "%016llx %016llx %016llx %016llx",
+			      reason, *stat, *((u32 *) (stat + 1)),
+			      sense[0], sense[1], sense[2], sense[3]);
+	} else {
+		DBF_DEV_EVENT(DBF_EMERG, device, "%s: %016llx %08x : %s",
+			      reason, *stat, *((u32 *) (stat + 1)),
+			      "NO VALID SENSE");
+	}
+}
+
+/*
+ * Print sense data and related channel program.
+ * Parts are printed because the printk buffer is only 1024 bytes.
+ */
+static void dasd_eckd_dump_sense_ccw(struct dasd_device *device,
+				 struct dasd_ccw_req *req, struct irb *irb)
+{
+	char *page;
+	struct ccw1 *first, *last, *fail, *from, *to;
+	int len, sl, sct;
+
+	page = (char *) get_zeroed_page(GFP_ATOMIC);
+	if (page == NULL) {
+		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
+			      "No memory to dump sense data\n");
+		return;
+	}
+	/* dump the sense data */
+	len = sprintf(page, PRINTK_HEADER
+		      " I/O status report for device %s:\n",
+		      dev_name(&device->cdev->dev));
+	len += sprintf(page + len, PRINTK_HEADER
+		       " in req: %p CC:%02X FC:%02X AC:%02X SC:%02X DS:%02X "
+		       "CS:%02X RC:%d\n",
+		       req, scsw_cc(&irb->scsw), scsw_fctl(&irb->scsw),
+		       scsw_actl(&irb->scsw), scsw_stctl(&irb->scsw),
+		       scsw_dstat(&irb->scsw), scsw_cstat(&irb->scsw),
+		       req ? req->intrc : 0);
+	len += sprintf(page + len, PRINTK_HEADER
+		       " device %s: Failing CCW: %p\n",
+		       dev_name(&device->cdev->dev),
+		       (void *) (addr_t) irb->scsw.cmd.cpa);
+	if (irb->esw.esw0.erw.cons) {
+		for (sl = 0; sl < 4; sl++) {
+			len += sprintf(page + len, PRINTK_HEADER
+				       " Sense(hex) %2d-%2d:",
+				       (8 * sl), ((8 * sl) + 7));
+
+			for (sct = 0; sct < 8; sct++) {
+				len += sprintf(page + len, " %02x",
+					       irb->ecw[8 * sl + sct]);
+			}
+			len += sprintf(page + len, "\n");
+		}
+
+		if (irb->ecw[27] & DASD_SENSE_BIT_0) {
+			/* 24 Byte Sense Data */
+			sprintf(page + len, PRINTK_HEADER
+				" 24 Byte: %x MSG %x, "
+				"%s MSGb to SYSOP\n",
+				irb->ecw[7] >> 4, irb->ecw[7] & 0x0f,
+				irb->ecw[1] & 0x10 ? "" : "no");
+		} else {
+			/* 32 Byte Sense Data */
+			sprintf(page + len, PRINTK_HEADER
+				" 32 Byte: Format: %x "
+				"Exception class %x\n",
+				irb->ecw[6] & 0x0f, irb->ecw[22] >> 4);
+		}
+	} else {
+		sprintf(page + len, PRINTK_HEADER
+			" SORRY - NO VALID SENSE AVAILABLE\n");
+	}
+	printk(KERN_ERR "%s", page);
+
+	if (req) {
+		/* req == NULL for unsolicited interrupts */
+		/* dump the Channel Program (max 140 Bytes per line) */
+		/* Count CCW and print first CCWs (maximum 1024 % 140 = 7) */
+		first = req->cpaddr;
+		for (last = first; last->flags & (CCW_FLAG_CC | CCW_FLAG_DC); last++);
+		to = min(first + 6, last);
+		len = sprintf(page, PRINTK_HEADER
+			      " Related CP in req: %p\n", req);
+		dasd_eckd_dump_ccw_range(first, to, page + len);
+		printk(KERN_ERR "%s", page);
+
+		/* print failing CCW area (maximum 4) */
+		/* scsw->cda is either valid or zero  */
+		len = 0;
+		from = ++to;
+		fail = (struct ccw1 *)(addr_t)
+				irb->scsw.cmd.cpa; /* failing CCW */
+		if (from <  fail - 2) {
+			from = fail - 2;     /* there is a gap - print header */
+			len += sprintf(page, PRINTK_HEADER "......\n");
+		}
+		to = min(fail + 1, last);
+		len += dasd_eckd_dump_ccw_range(from, to, page + len);
+
+		/* print last CCWs (maximum 2) */
+		from = max(from, ++to);
+		if (from < last - 1) {
+			from = last - 1;     /* there is a gap - print header */
+			len += sprintf(page + len, PRINTK_HEADER "......\n");
+		}
+		len += dasd_eckd_dump_ccw_range(from, last, page + len);
+		if (len > 0)
+			printk(KERN_ERR "%s", page);
+	}
+	free_page((unsigned long) page);
+}
+
+
+/*
+ * Print sense data from a tcw.
+ */
+static void dasd_eckd_dump_sense_tcw(struct dasd_device *device,
+				 struct dasd_ccw_req *req, struct irb *irb)
+{
+	char *page;
+	int len, sl, sct, residual;
+	struct tsb *tsb;
+	u8 *sense, *rcq;
+
+	page = (char *) get_zeroed_page(GFP_ATOMIC);
+	if (page == NULL) {
+		DBF_DEV_EVENT(DBF_WARNING, device, " %s",
+			    "No memory to dump sense data");
+		return;
+	}
+	/* dump the sense data */
+	len = sprintf(page, PRINTK_HEADER
+		      " I/O status report for device %s:\n",
+		      dev_name(&device->cdev->dev));
+	len += sprintf(page + len, PRINTK_HEADER
+		       " in req: %p CC:%02X FC:%02X AC:%02X SC:%02X DS:%02X "
+		       "CS:%02X fcxs:%02X schxs:%02X RC:%d\n",
+		       req, scsw_cc(&irb->scsw), scsw_fctl(&irb->scsw),
+		       scsw_actl(&irb->scsw), scsw_stctl(&irb->scsw),
+		       scsw_dstat(&irb->scsw), scsw_cstat(&irb->scsw),
+		       irb->scsw.tm.fcxs,
+		       (irb->scsw.tm.ifob << 7) | irb->scsw.tm.sesq,
+		       req ? req->intrc : 0);
+	len += sprintf(page + len, PRINTK_HEADER
+		       " device %s: Failing TCW: %p\n",
+		       dev_name(&device->cdev->dev),
+		       (void *) (addr_t) irb->scsw.tm.tcw);
+
+	tsb = NULL;
+	sense = NULL;
+	if (irb->scsw.tm.tcw && (irb->scsw.tm.fcxs & 0x01))
+		tsb = tcw_get_tsb(
+			(struct tcw *)(unsigned long)irb->scsw.tm.tcw);
+
+	if (tsb) {
+		len += sprintf(page + len, PRINTK_HEADER
+			       " tsb->length %d\n", tsb->length);
+		len += sprintf(page + len, PRINTK_HEADER
+			       " tsb->flags %x\n", tsb->flags);
+		len += sprintf(page + len, PRINTK_HEADER
+			       " tsb->dcw_offset %d\n", tsb->dcw_offset);
+		len += sprintf(page + len, PRINTK_HEADER
+			       " tsb->count %d\n", tsb->count);
+		residual = tsb->count - 28;
+		len += sprintf(page + len, PRINTK_HEADER
+			       " residual %d\n", residual);
+
+		switch (tsb->flags & 0x07) {
+		case 1:	/* tsa_iostat */
+			len += sprintf(page + len, PRINTK_HEADER
+			       " tsb->tsa.iostat.dev_time %d\n",
+				       tsb->tsa.iostat.dev_time);
+			len += sprintf(page + len, PRINTK_HEADER
+			       " tsb->tsa.iostat.def_time %d\n",
+				       tsb->tsa.iostat.def_time);
+			len += sprintf(page + len, PRINTK_HEADER
+			       " tsb->tsa.iostat.queue_time %d\n",
+				       tsb->tsa.iostat.queue_time);
+			len += sprintf(page + len, PRINTK_HEADER
+			       " tsb->tsa.iostat.dev_busy_time %d\n",
+				       tsb->tsa.iostat.dev_busy_time);
+			len += sprintf(page + len, PRINTK_HEADER
+			       " tsb->tsa.iostat.dev_act_time %d\n",
+				       tsb->tsa.iostat.dev_act_time);
+			sense = tsb->tsa.iostat.sense;
+			break;
+		case 2: /* ts_ddpc */
+			len += sprintf(page + len, PRINTK_HEADER
+			       " tsb->tsa.ddpc.rc %d\n", tsb->tsa.ddpc.rc);
+			for (sl = 0; sl < 2; sl++) {
+				len += sprintf(page + len, PRINTK_HEADER
+					       " tsb->tsa.ddpc.rcq %2d-%2d: ",
+					       (8 * sl), ((8 * sl) + 7));
+				rcq = tsb->tsa.ddpc.rcq;
+				for (sct = 0; sct < 8; sct++) {
+					len += sprintf(page + len, " %02x",
+						       rcq[8 * sl + sct]);
+				}
+				len += sprintf(page + len, "\n");
+			}
+			sense = tsb->tsa.ddpc.sense;
+			break;
+		case 3: /* tsa_intrg */
+			len += sprintf(page + len, PRINTK_HEADER
+				      " tsb->tsa.intrg.: not supported yet\n");
+			break;
+		}
+
+		if (sense) {
+			for (sl = 0; sl < 4; sl++) {
+				len += sprintf(page + len, PRINTK_HEADER
+					       " Sense(hex) %2d-%2d:",
+					       (8 * sl), ((8 * sl) + 7));
+				for (sct = 0; sct < 8; sct++) {
+					len += sprintf(page + len, " %02x",
+						       sense[8 * sl + sct]);
+				}
+				len += sprintf(page + len, "\n");
+			}
+
+			if (sense[27] & DASD_SENSE_BIT_0) {
+				/* 24 Byte Sense Data */
+				sprintf(page + len, PRINTK_HEADER
+					" 24 Byte: %x MSG %x, "
+					"%s MSGb to SYSOP\n",
+					sense[7] >> 4, sense[7] & 0x0f,
+					sense[1] & 0x10 ? "" : "no");
+			} else {
+				/* 32 Byte Sense Data */
+				sprintf(page + len, PRINTK_HEADER
+					" 32 Byte: Format: %x "
+					"Exception class %x\n",
+					sense[6] & 0x0f, sense[22] >> 4);
+			}
+		} else {
+			sprintf(page + len, PRINTK_HEADER
+				" SORRY - NO VALID SENSE AVAILABLE\n");
+		}
+	} else {
+		sprintf(page + len, PRINTK_HEADER
+			" SORRY - NO TSB DATA AVAILABLE\n");
+	}
+	printk(KERN_ERR "%s", page);
+	free_page((unsigned long) page);
+}
+
+static void dasd_eckd_dump_sense(struct dasd_device *device,
+				 struct dasd_ccw_req *req, struct irb *irb)
+{
+	u8 *sense = dasd_get_sense(irb);
+
+	if (scsw_is_tm(&irb->scsw)) {
+		/*
+		 * In some cases the 'File Protected' or 'Incorrect Length'
+		 * error might be expected and log messages shouldn't be written
+		 * then. Check if the according suppress bit is set.
+		 */
+		if (sense && (sense[1] & SNS1_FILE_PROTECTED) &&
+		    test_bit(DASD_CQR_SUPPRESS_FP, &req->flags))
+			return;
+		if (scsw_cstat(&irb->scsw) == 0x40 &&
+		    test_bit(DASD_CQR_SUPPRESS_IL, &req->flags))
+			return;
+
+		dasd_eckd_dump_sense_tcw(device, req, irb);
+	} else {
+		/*
+		 * In some cases the 'Command Reject' or 'No Record Found'
+		 * error might be expected and log messages shouldn't be
+		 * written then. Check if the according suppress bit is set.
+		 */
+		if (sense && sense[0] & SNS0_CMD_REJECT &&
+		    test_bit(DASD_CQR_SUPPRESS_CR, &req->flags))
+			return;
+
+		if (sense && sense[1] & SNS1_NO_REC_FOUND &&
+		    test_bit(DASD_CQR_SUPPRESS_NRF, &req->flags))
+			return;
+
+		dasd_eckd_dump_sense_ccw(device, req, irb);
+	}
+}
+
+static int dasd_eckd_pm_freeze(struct dasd_device *device)
+{
+	/*
+	 * The device should be disconnected from our LCU structure;
+	 * on restore we will reconnect it and reread LCU-specific
+	 * information like PAV support that might have changed.
+	 */
+	dasd_alias_remove_device(device);
+	dasd_alias_disconnect_device_from_lcu(device);
+
+	return 0;
+}
+
+static int dasd_eckd_restore_device(struct dasd_device *device)
+{
+	struct dasd_eckd_private *private = device->private;
+	struct dasd_eckd_characteristics temp_rdc_data;
+	int rc;
+	struct dasd_uid temp_uid;
+	unsigned long flags;
+	unsigned long cqr_flags = 0;
+
+	/* Read Configuration Data */
+	rc = dasd_eckd_read_conf(device);
+	if (rc) {
+		DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
+				"Read configuration data failed, rc=%d", rc);
+		goto out_err;
+	}
+
+	dasd_eckd_get_uid(device, &temp_uid);
+	/* Generate device unique id */
+	rc = dasd_eckd_generate_uid(device);
+	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
+	if (memcmp(&private->uid, &temp_uid, sizeof(struct dasd_uid)) != 0)
+		dev_err(&device->cdev->dev, "The UID of the DASD has "
+			"changed\n");
+	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
+	if (rc)
+		goto out_err;
+
+	/* register lcu with alias handling, enable PAV if this is a new lcu */
+	rc = dasd_alias_make_device_known_to_lcu(device);
+	if (rc)
+		goto out_err;
+
+	set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr_flags);
+	dasd_eckd_validate_server(device, cqr_flags);
+
+	/* RE-Read Configuration Data */
+	rc = dasd_eckd_read_conf(device);
+	if (rc) {
+		DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
+			"Read configuration data failed, rc=%d", rc);
+		goto out_err2;
+	}
+
+	/* Read Feature Codes */
+	dasd_eckd_read_features(device);
+
+	/* Read Device Characteristics */
+	rc = dasd_generic_read_dev_chars(device, DASD_ECKD_MAGIC,
+					 &temp_rdc_data, 64);
+	if (rc) {
+		DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
+				"Read device characteristic failed, rc=%d", rc);
+		goto out_err2;
+	}
+	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
+	memcpy(&private->rdc_data, &temp_rdc_data, sizeof(temp_rdc_data));
+	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
+
+	/* add device to alias management */
+	dasd_alias_add_device(device);
+
+	return 0;
+
+out_err2:
+	dasd_alias_disconnect_device_from_lcu(device);
+out_err:
+	return -1;
+}
+
+static int dasd_eckd_reload_device(struct dasd_device *device)
+{
+	struct dasd_eckd_private *private = device->private;
+	int rc, old_base;
+	char print_uid[60];
+	struct dasd_uid uid;
+	unsigned long flags;
+
+	/*
+	 * remove device from alias handling to prevent new requests
+	 * from being scheduled on the wrong alias device
+	 */
+	dasd_alias_remove_device(device);
+
+	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
+	old_base = private->uid.base_unit_addr;
+	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
+
+	/* Read Configuration Data */
+	rc = dasd_eckd_read_conf(device);
+	if (rc)
+		goto out_err;
+
+	rc = dasd_eckd_generate_uid(device);
+	if (rc)
+		goto out_err;
+	/*
+	 * update unit address configuration and
+	 * add device to alias management
+	 */
+	dasd_alias_update_add_device(device);
+
+	dasd_eckd_get_uid(device, &uid);
+
+	if (old_base != uid.base_unit_addr) {
+		if (strlen(uid.vduit) > 0)
+			snprintf(print_uid, sizeof(print_uid),
+				 "%s.%s.%04x.%02x.%s", uid.vendor, uid.serial,
+				 uid.ssid, uid.base_unit_addr, uid.vduit);
+		else
+			snprintf(print_uid, sizeof(print_uid),
+				 "%s.%s.%04x.%02x", uid.vendor, uid.serial,
+				 uid.ssid, uid.base_unit_addr);
+
+		dev_info(&device->cdev->dev,
+			 "An Alias device was reassigned to a new base device "
+			 "with UID: %s\n", print_uid);
+	}
+	return 0;
+
+out_err:
+	return -1;
+}
+
+static int dasd_eckd_read_message_buffer(struct dasd_device *device,
+					 struct dasd_rssd_messages *messages,
+					 __u8 lpum)
+{
+	struct dasd_rssd_messages *message_buf;
+	struct dasd_psf_prssd_data *prssdp;
+	struct dasd_ccw_req *cqr;
+	struct ccw1 *ccw;
+	int rc;
+
+	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */	+ 1 /* RSSD */,
+				   (sizeof(struct dasd_psf_prssd_data) +
+				    sizeof(struct dasd_rssd_messages)),
+				   device, NULL);
+	if (IS_ERR(cqr)) {
+		DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
+				"Could not allocate read message buffer request");
+		return PTR_ERR(cqr);
+	}
+
+	cqr->lpm = lpum;
+retry:
+	cqr->startdev = device;
+	cqr->memdev = device;
+	cqr->block = NULL;
+	cqr->expires = 10 * HZ;
+	set_bit(DASD_CQR_VERIFY_PATH, &cqr->flags);
+	/* dasd_sleep_on_immediatly does not do complex error
+	 * recovery, so clear the erp flag and set the retry
+	 * counter to do basic erp */
+	clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
+	cqr->retries = 256;
+
+	/* Prepare for Read Subsystem Data */
+	prssdp = (struct dasd_psf_prssd_data *) cqr->data;
+	memset(prssdp, 0, sizeof(struct dasd_psf_prssd_data));
+	prssdp->order = PSF_ORDER_PRSSD;
+	prssdp->suborder = 0x03;	/* Message Buffer */
+	/* all other bytes of prssdp must be zero */
+
+	ccw = cqr->cpaddr;
+	ccw->cmd_code = DASD_ECKD_CCW_PSF;
+	ccw->count = sizeof(struct dasd_psf_prssd_data);
+	ccw->flags |= CCW_FLAG_CC;
+	ccw->flags |= CCW_FLAG_SLI;
+	ccw->cda = (__u32)(addr_t) prssdp;
+
+	/* Read Subsystem Data - message buffer */
+	message_buf = (struct dasd_rssd_messages *) (prssdp + 1);
+	memset(message_buf, 0, sizeof(struct dasd_rssd_messages));
+
+	ccw++;
+	ccw->cmd_code = DASD_ECKD_CCW_RSSD;
+	ccw->count = sizeof(struct dasd_rssd_messages);
+	ccw->flags |= CCW_FLAG_SLI;
+	ccw->cda = (__u32)(addr_t) message_buf;
+
+	cqr->buildclk = get_tod_clock();
+	cqr->status = DASD_CQR_FILLED;
+	rc = dasd_sleep_on_immediatly(cqr);
+	if (rc == 0) {
+		prssdp = (struct dasd_psf_prssd_data *) cqr->data;
+		message_buf = (struct dasd_rssd_messages *)
+			(prssdp + 1);
+		memcpy(messages, message_buf,
+		       sizeof(struct dasd_rssd_messages));
+	} else if (cqr->lpm) {
+		/*
+		 * on z/VM we might not be able to do I/O on the requested path
+		 * but instead we get the required information on any path
+		 * so retry with open path mask
+		 */
+		cqr->lpm = 0;
+		goto retry;
+	} else
+		DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
+				"Reading messages failed with rc=%d\n"
+				, rc);
+	dasd_sfree_request(cqr, cqr->memdev);
+	return rc;
+}
+
+static int dasd_eckd_query_host_access(struct dasd_device *device,
+				       struct dasd_psf_query_host_access *data)
+{
+	struct dasd_eckd_private *private = device->private;
+	struct dasd_psf_query_host_access *host_access;
+	struct dasd_psf_prssd_data *prssdp;
+	struct dasd_ccw_req *cqr;
+	struct ccw1 *ccw;
+	int rc;
+
+	/* not available for HYPER PAV alias devices */
+	if (!device->block && private->lcu->pav == HYPER_PAV)
+		return -EOPNOTSUPP;
+
+	/* may not be supported by the storage server */
+	if (!(private->features.feature[14] & 0x80))
+		return -EOPNOTSUPP;
+
+	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */	+ 1 /* RSSD */,
+				   sizeof(struct dasd_psf_prssd_data) + 1,
+				   device, NULL);
+	if (IS_ERR(cqr)) {
+		DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
+				"Could not allocate read message buffer request");
+		return PTR_ERR(cqr);
+	}
+	host_access = kzalloc(sizeof(*host_access), GFP_KERNEL | GFP_DMA);
+	if (!host_access) {
+		dasd_sfree_request(cqr, device);
+		DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
+				"Could not allocate host_access buffer");
+		return -ENOMEM;
+	}
+	cqr->startdev = device;
+	cqr->memdev = device;
+	cqr->block = NULL;
+	cqr->retries = 256;
+	cqr->expires = 10 * HZ;
+
+	/* Prepare for Read Subsystem Data */
+	prssdp = (struct dasd_psf_prssd_data *) cqr->data;
+	memset(prssdp, 0, sizeof(struct dasd_psf_prssd_data));
+	prssdp->order = PSF_ORDER_PRSSD;
+	prssdp->suborder = PSF_SUBORDER_QHA;	/* query host access */
+	/* LSS and Volume that will be queried */
+	prssdp->lss = private->ned->ID;
+	prssdp->volume = private->ned->unit_addr;
+	/* all other bytes of prssdp must be zero */
+
+	ccw = cqr->cpaddr;
+	ccw->cmd_code = DASD_ECKD_CCW_PSF;
+	ccw->count = sizeof(struct dasd_psf_prssd_data);
+	ccw->flags |= CCW_FLAG_CC;
+	ccw->flags |= CCW_FLAG_SLI;
+	ccw->cda = (__u32)(addr_t) prssdp;
+
+	/* Read Subsystem Data - query host access */
+	ccw++;
+	ccw->cmd_code = DASD_ECKD_CCW_RSSD;
+	ccw->count = sizeof(struct dasd_psf_query_host_access);
+	ccw->flags |= CCW_FLAG_SLI;
+	ccw->cda = (__u32)(addr_t) host_access;
+
+	cqr->buildclk = get_tod_clock();
+	cqr->status = DASD_CQR_FILLED;
+	/* the command might not be supported, suppress error message */
+	__set_bit(DASD_CQR_SUPPRESS_CR, &cqr->flags);
+	rc = dasd_sleep_on_interruptible(cqr);
+	if (rc == 0) {
+		*data = *host_access;
+	} else {
+		DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
+				"Reading host access data failed with rc=%d\n",
+				rc);
+		rc = -EOPNOTSUPP;
+	}
+
+	dasd_sfree_request(cqr, cqr->memdev);
+	kfree(host_access);
+	return rc;
+}
+/*
+ * return number of grouped devices
+ */
+static int dasd_eckd_host_access_count(struct dasd_device *device)
+{
+	struct dasd_psf_query_host_access *access;
+	struct dasd_ckd_path_group_entry *entry;
+	struct dasd_ckd_host_information *info;
+	int count = 0;
+	int rc, i;
+
+	access = kzalloc(sizeof(*access), GFP_NOIO);
+	if (!access) {
+		DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
+				"Could not allocate access buffer");
+		return -ENOMEM;
+	}
+	rc = dasd_eckd_query_host_access(device, access);
+	if (rc) {
+		kfree(access);
+		return rc;
+	}
+
+	info = (struct dasd_ckd_host_information *)
+		access->host_access_information;
+	for (i = 0; i < info->entry_count; i++) {
+		entry = (struct dasd_ckd_path_group_entry *)
+			(info->entry + i * info->entry_size);
+		if (entry->status_flags & DASD_ECKD_PG_GROUPED)
+			count++;
+	}
+
+	kfree(access);
+	return count;
+}
+
+/*
+ * write host access information to a sequential file
+ */
+static int dasd_hosts_print(struct dasd_device *device, struct seq_file *m)
+{
+	struct dasd_psf_query_host_access *access;
+	struct dasd_ckd_path_group_entry *entry;
+	struct dasd_ckd_host_information *info;
+	char sysplex[9] = "";
+	int rc, i;
+
+	access = kzalloc(sizeof(*access), GFP_NOIO);
+	if (!access) {
+		DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
+				"Could not allocate access buffer");
+		return -ENOMEM;
+	}
+	rc = dasd_eckd_query_host_access(device, access);
+	if (rc) {
+		kfree(access);
+		return rc;
+	}
+
+	info = (struct dasd_ckd_host_information *)
+		access->host_access_information;
+	for (i = 0; i < info->entry_count; i++) {
+		entry = (struct dasd_ckd_path_group_entry *)
+			(info->entry + i * info->entry_size);
+		/* PGID */
+		seq_printf(m, "pgid %*phN\n", 11, entry->pgid);
+		/* FLAGS */
+		seq_printf(m, "status_flags %02x\n", entry->status_flags);
+		/* SYSPLEX NAME */
+		memcpy(&sysplex, &entry->sysplex_name, sizeof(sysplex) - 1);
+		EBCASC(sysplex, sizeof(sysplex));
+		seq_printf(m, "sysplex_name %8s\n", sysplex);
+		/* SUPPORTED CYLINDER */
+		seq_printf(m, "supported_cylinder %d\n", entry->cylinder);
+		/* TIMESTAMP */
+		seq_printf(m, "timestamp %lu\n", (unsigned long)
+			   entry->timestamp);
+	}
+	kfree(access);
+
+	return 0;
+}
+
+/*
+ * Perform Subsystem Function - CUIR response
+ */
+static int
+dasd_eckd_psf_cuir_response(struct dasd_device *device, int response,
+			    __u32 message_id, __u8 lpum)
+{
+	struct dasd_psf_cuir_response *psf_cuir;
+	int pos = pathmask_to_pos(lpum);
+	struct dasd_ccw_req *cqr;
+	struct ccw1 *ccw;
+	int rc;
+
+	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ ,
+				   sizeof(struct dasd_psf_cuir_response),
+				   device, NULL);
+
+	if (IS_ERR(cqr)) {
+		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
+			   "Could not allocate PSF-CUIR request");
+		return PTR_ERR(cqr);
+	}
+
+	psf_cuir = (struct dasd_psf_cuir_response *)cqr->data;
+	psf_cuir->order = PSF_ORDER_CUIR_RESPONSE;
+	psf_cuir->cc = response;
+	psf_cuir->chpid = device->path[pos].chpid;
+	psf_cuir->message_id = message_id;
+	psf_cuir->cssid = device->path[pos].cssid;
+	psf_cuir->ssid = device->path[pos].ssid;
+	ccw = cqr->cpaddr;
+	ccw->cmd_code = DASD_ECKD_CCW_PSF;
+	ccw->cda = (__u32)(addr_t)psf_cuir;
+	ccw->flags = CCW_FLAG_SLI;
+	ccw->count = sizeof(struct dasd_psf_cuir_response);
+
+	cqr->startdev = device;
+	cqr->memdev = device;
+	cqr->block = NULL;
+	cqr->retries = 256;
+	cqr->expires = 10*HZ;
+	cqr->buildclk = get_tod_clock();
+	cqr->status = DASD_CQR_FILLED;
+	set_bit(DASD_CQR_VERIFY_PATH, &cqr->flags);
+
+	rc = dasd_sleep_on(cqr);
+
+	dasd_sfree_request(cqr, cqr->memdev);
+	return rc;
+}
+
+/*
+ * Return the configuration data that is referenced by the record selector
+ * if a record selector is specified; otherwise return the conf_data
+ * pointer for the path specified by lpum.
+ */
+static struct dasd_conf_data *dasd_eckd_get_ref_conf(struct dasd_device *device,
+						     __u8 lpum,
+						     struct dasd_cuir_message *cuir)
+{
+	struct dasd_conf_data *conf_data;
+	int path, pos;
+
+	if (cuir->record_selector == 0)
+		goto out;
+	for (path = 0x80, pos = 0; path; path >>= 1, pos++) {
+		conf_data = device->path[pos].conf_data;
+		if (conf_data->gneq.record_selector ==
+		    cuir->record_selector)
+			return conf_data;
+	}
+out:
+	return device->path[pathmask_to_pos(lpum)].conf_data;
+}
+
+/*
+ * This function determines the scope of a reconfiguration request by
+ * analysing the path and device selection data provided in the CUIR request.
+ * Returns a path mask containing the CUIR-affected paths for the given
+ * device.
+ *
+ * If the CUIR request does not contain the required information, return the
+ * path mask of the path the attention message for the CUIR request was
+ * received on.
+ */
+static int dasd_eckd_cuir_scope(struct dasd_device *device, __u8 lpum,
+				struct dasd_cuir_message *cuir)
+{
+	struct dasd_conf_data *ref_conf_data;
+	unsigned long bitmask = 0, mask = 0;
+	struct dasd_conf_data *conf_data;
+	unsigned int pos, path;
+	char *ref_gneq, *gneq;
+	char *ref_ned, *ned;
+	int tbcpm = 0;
+
+	/* if CUIR request does not specify the scope use the path
+	   the attention message was presented on */
+	if (!cuir->ned_map ||
+	    !(cuir->neq_map[0] | cuir->neq_map[1] | cuir->neq_map[2]))
+		return lpum;
+
+	/* get reference conf data */
+	ref_conf_data = dasd_eckd_get_ref_conf(device, lpum, cuir);
+	/* reference ned is determined by ned_map field */
+	pos = 8 - ffs(cuir->ned_map);
+	ref_ned = (char *)&ref_conf_data->neds[pos];
+	ref_gneq = (char *)&ref_conf_data->gneq;
+	/* transfer 24 bit neq_map to mask */
+	mask = cuir->neq_map[2];
+	mask |= cuir->neq_map[1] << 8;
+	mask |= cuir->neq_map[0] << 16;
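+	/*
+	 * E.g. neq_map = { 0x01, 0x02, 0x04 } yields mask = 0x010204:
+	 * byte 0 supplies bits 23-16, byte 1 bits 15-8, byte 2 bits 7-0.
+	 */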
+
+	for (path = 0; path < 8; path++) {
+		/* initialise data per path */
+		bitmask = mask;
+		conf_data = device->path[path].conf_data;
+		pos = 8 - ffs(cuir->ned_map);
+		ned = (char *) &conf_data->neds[pos];
+		/* compare reference ned and per path ned */
+		if (memcmp(ref_ned, ned, sizeof(*ned)) != 0)
+			continue;
+		gneq = (char *)&conf_data->gneq;
+		/* compare reference gneq and per_path gneq under
+		   24 bit mask where mask bit 0 equals byte 7 of
+		   the gneq and mask bit 24 equals byte 31 */
+		while (bitmask) {
+			pos = ffs(bitmask) - 1;
+			if (memcmp(&ref_gneq[31 - pos], &gneq[31 - pos], 1)
+			    != 0)
+				break;
+			clear_bit(pos, &bitmask);
+		}
+		if (bitmask)
+			continue;
+		/* device and path match the reference values
+		   add path to CUIR scope */
+		tbcpm |= 0x80 >> path;
+	}
+	return tbcpm;
+}
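+/*
+ * Example: if paths 0 and 3 match the reference data the returned scope
+ * is 0x80 | 0x10 = 0x90, following the usual MSB-first channel-path mask
+ * convention.
+ */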
+
+static void dasd_eckd_cuir_notify_user(struct dasd_device *device,
+				       unsigned long paths, int action)
+{
+	int pos;
+
+	while (paths) {
+		/* get position of bit in mask */
+		pos = 8 - ffs(paths);
+		/* get channel path descriptor from this position */
+		if (action == CUIR_QUIESCE)
+			pr_warn("Service on the storage server caused path %x.%02x to go offline",
+				device->path[pos].cssid,
+				device->path[pos].chpid);
+		else if (action == CUIR_RESUME)
+			pr_info("Path %x.%02x is back online after service on the storage server",
+				device->path[pos].cssid,
+				device->path[pos].chpid);
+		clear_bit(7 - pos, &paths);
+	}
+}
+
+static int dasd_eckd_cuir_remove_path(struct dasd_device *device, __u8 lpum,
+				      struct dasd_cuir_message *cuir)
+{
+	unsigned long tbcpm;
+
+	tbcpm = dasd_eckd_cuir_scope(device, lpum, cuir);
+	/* nothing to do if path is not in use */
+	if (!(dasd_path_get_opm(device) & tbcpm))
+		return 0;
+	if (!(dasd_path_get_opm(device) & ~tbcpm)) {
+		/* no path would be left if the CUIR action is taken
+		   return error */
+		return -EINVAL;
+	}
+	/* remove device from operational path mask */
+	dasd_path_remove_opm(device, tbcpm);
+	dasd_path_add_cuirpm(device, tbcpm);
+	return tbcpm;
+}
+
+/*
+ * Walk through all devices and build a path mask to quiesce them;
+ * return an error if the last path to a device would be removed.
+ *
+ * If only part of the devices are quiesced when an error occurs,
+ * no onlining is necessary; the storage server will notify the
+ * devices that were already set offline again.
+ */
+static int dasd_eckd_cuir_quiesce(struct dasd_device *device, __u8 lpum,
+				  struct dasd_cuir_message *cuir)
+{
+	struct dasd_eckd_private *private = device->private;
+	struct alias_pav_group *pavgroup, *tempgroup;
+	struct dasd_device *dev, *n;
+	unsigned long paths = 0;
+	unsigned long flags;
+	int tbcpm;
+
+	/* active devices */
+	list_for_each_entry_safe(dev, n, &private->lcu->active_devices,
+				 alias_list) {
+		spin_lock_irqsave(get_ccwdev_lock(dev->cdev), flags);
+		tbcpm = dasd_eckd_cuir_remove_path(dev, lpum, cuir);
+		spin_unlock_irqrestore(get_ccwdev_lock(dev->cdev), flags);
+		if (tbcpm < 0)
+			goto out_err;
+		paths |= tbcpm;
+	}
+	/* inactive devices */
+	list_for_each_entry_safe(dev, n, &private->lcu->inactive_devices,
+				 alias_list) {
+		spin_lock_irqsave(get_ccwdev_lock(dev->cdev), flags);
+		tbcpm = dasd_eckd_cuir_remove_path(dev, lpum, cuir);
+		spin_unlock_irqrestore(get_ccwdev_lock(dev->cdev), flags);
+		if (tbcpm < 0)
+			goto out_err;
+		paths |= tbcpm;
+	}
+	/* devices in PAV groups */
+	list_for_each_entry_safe(pavgroup, tempgroup,
+				 &private->lcu->grouplist, group) {
+		list_for_each_entry_safe(dev, n, &pavgroup->baselist,
+					 alias_list) {
+			spin_lock_irqsave(get_ccwdev_lock(dev->cdev), flags);
+			tbcpm = dasd_eckd_cuir_remove_path(dev, lpum, cuir);
+			spin_unlock_irqrestore(
+				get_ccwdev_lock(dev->cdev), flags);
+			if (tbcpm < 0)
+				goto out_err;
+			paths |= tbcpm;
+		}
+		list_for_each_entry_safe(dev, n, &pavgroup->aliaslist,
+					 alias_list) {
+			spin_lock_irqsave(get_ccwdev_lock(dev->cdev), flags);
+			tbcpm = dasd_eckd_cuir_remove_path(dev, lpum, cuir);
+			spin_unlock_irqrestore(
+				get_ccwdev_lock(dev->cdev), flags);
+			if (tbcpm < 0)
+				goto out_err;
+			paths |= tbcpm;
+		}
+	}
+	/* notify user about all paths affected by CUIR action */
+	dasd_eckd_cuir_notify_user(device, paths, CUIR_QUIESCE);
+	return 0;
+out_err:
+	return tbcpm;
+}
+
+static int dasd_eckd_cuir_resume(struct dasd_device *device, __u8 lpum,
+				 struct dasd_cuir_message *cuir)
+{
+	struct dasd_eckd_private *private = device->private;
+	struct alias_pav_group *pavgroup, *tempgroup;
+	struct dasd_device *dev, *n;
+	unsigned long paths = 0;
+	int tbcpm;
+
+	/*
+	 * The path may have been added through a generic path event before;
+	 * only trigger path verification if the path is not already in use.
+	 */
+	list_for_each_entry_safe(dev, n,
+				 &private->lcu->active_devices,
+				 alias_list) {
+		tbcpm = dasd_eckd_cuir_scope(dev, lpum, cuir);
+		paths |= tbcpm;
+		if (!(dasd_path_get_opm(dev) & tbcpm)) {
+			dasd_path_add_tbvpm(dev, tbcpm);
+			dasd_schedule_device_bh(dev);
+		}
+	}
+	list_for_each_entry_safe(dev, n,
+				 &private->lcu->inactive_devices,
+				 alias_list) {
+		tbcpm = dasd_eckd_cuir_scope(dev, lpum, cuir);
+		paths |= tbcpm;
+		if (!(dasd_path_get_opm(dev) & tbcpm)) {
+			dasd_path_add_tbvpm(dev, tbcpm);
+			dasd_schedule_device_bh(dev);
+		}
+	}
+	/* devices in PAV groups */
+	list_for_each_entry_safe(pavgroup, tempgroup,
+				 &private->lcu->grouplist,
+				 group) {
+		list_for_each_entry_safe(dev, n,
+					 &pavgroup->baselist,
+					 alias_list) {
+			tbcpm = dasd_eckd_cuir_scope(dev, lpum, cuir);
+			paths |= tbcpm;
+			if (!(dasd_path_get_opm(dev) & tbcpm)) {
+				dasd_path_add_tbvpm(dev, tbcpm);
+				dasd_schedule_device_bh(dev);
+			}
+		}
+		list_for_each_entry_safe(dev, n,
+					 &pavgroup->aliaslist,
+					 alias_list) {
+			tbcpm = dasd_eckd_cuir_scope(dev, lpum, cuir);
+			paths |= tbcpm;
+			if (!(dasd_path_get_opm(dev) & tbcpm)) {
+				dasd_path_add_tbvpm(dev, tbcpm);
+				dasd_schedule_device_bh(dev);
+			}
+		}
+	}
+	/* notify user about all paths affected by CUIR action */
+	dasd_eckd_cuir_notify_user(device, paths, CUIR_RESUME);
+	return 0;
+}
+
+static void dasd_eckd_handle_cuir(struct dasd_device *device, void *messages,
+				 __u8 lpum)
+{
+	struct dasd_cuir_message *cuir = messages;
+	int response;
+
+	DBF_DEV_EVENT(DBF_WARNING, device,
+		      "CUIR request: %016llx %016llx %016llx %08x",
+		      ((u64 *)cuir)[0], ((u64 *)cuir)[1], ((u64 *)cuir)[2],
+		      ((u32 *)cuir)[3]);
+
+	if (cuir->code == CUIR_QUIESCE) {
+		/* quiesce */
+		if (dasd_eckd_cuir_quiesce(device, lpum, cuir))
+			response = PSF_CUIR_LAST_PATH;
+		else
+			response = PSF_CUIR_COMPLETED;
+	} else if (cuir->code == CUIR_RESUME) {
+		/* resume */
+		dasd_eckd_cuir_resume(device, lpum, cuir);
+		response = PSF_CUIR_COMPLETED;
+	} else
+		response = PSF_CUIR_NOT_SUPPORTED;
+
+	dasd_eckd_psf_cuir_response(device, response,
+				    cuir->message_id, lpum);
+	DBF_DEV_EVENT(DBF_WARNING, device,
+		      "CUIR response: %d on message ID %08x", response,
+		      cuir->message_id);
+	/* to make sure there is no attention left, schedule the work again */
+	device->discipline->check_attention(device, lpum);
+}
+
+static void dasd_eckd_check_attention_work(struct work_struct *work)
+{
+	struct check_attention_work_data *data;
+	struct dasd_rssd_messages *messages;
+	struct dasd_device *device;
+	int rc;
+
+	data = container_of(work, struct check_attention_work_data, worker);
+	device = data->device;
+	messages = kzalloc(sizeof(*messages), GFP_KERNEL);
+	if (!messages) {
+		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
+			      "Could not allocate attention message buffer");
+		goto out;
+	}
+	rc = dasd_eckd_read_message_buffer(device, messages, data->lpum);
+	if (rc)
+		goto out;
+	if (messages->length == ATTENTION_LENGTH_CUIR &&
+	    messages->format == ATTENTION_FORMAT_CUIR)
+		dasd_eckd_handle_cuir(device, messages, data->lpum);
+out:
+	dasd_put_device(device);
+	kfree(messages);
+	kfree(data);
+}
+
+static int dasd_eckd_check_attention(struct dasd_device *device, __u8 lpum)
+{
+	struct check_attention_work_data *data;
+
+	data = kzalloc(sizeof(*data), GFP_ATOMIC);
+	if (!data)
+		return -ENOMEM;
+	INIT_WORK(&data->worker, dasd_eckd_check_attention_work);
+	dasd_get_device(device);
+	data->device = device;
+	data->lpum = lpum;
+	schedule_work(&data->worker);
+	return 0;
+}
+
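+/*
+ * Remove a channel path that lost HPF functionality from the operational
+ * path mask, but only while at least one other operational path remains.
+ * Returns 1 if the path was disabled, 0 if it was the last usable path.
+ */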
+static int dasd_eckd_disable_hpf_path(struct dasd_device *device, __u8 lpum)
+{
+	if (~lpum & dasd_path_get_opm(device)) {
+		dasd_path_add_nohpfpm(device, lpum);
+		dasd_path_remove_opm(device, lpum);
+		dev_err(&device->cdev->dev,
+			"Channel path %02X lost HPF functionality and is disabled\n",
+			lpum);
+		return 1;
+	}
+	return 0;
+}
+
+static void dasd_eckd_disable_hpf_device(struct dasd_device *device)
+{
+	struct dasd_eckd_private *private = device->private;
+
+	dev_err(&device->cdev->dev,
+		"High Performance FICON disabled\n");
+	private->fcx_max_data = 0;
+}
+
+static int dasd_eckd_hpf_enabled(struct dasd_device *device)
+{
+	struct dasd_eckd_private *private = device->private;
+
+	return private->fcx_max_data ? 1 : 0;
+}
+
+static void dasd_eckd_handle_hpf_error(struct dasd_device *device,
+				       struct irb *irb)
+{
+	struct dasd_eckd_private *private = device->private;
+
+	if (!private->fcx_max_data) {
+		/* sanity check for no HPF, the error makes no sense */
+		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
+			      "Trying to disable HPF for a non HPF device");
+		return;
+	}
+	if (irb->scsw.tm.sesq == SCSW_SESQ_DEV_NOFCX) {
+		dasd_eckd_disable_hpf_device(device);
+	} else if (irb->scsw.tm.sesq == SCSW_SESQ_PATH_NOFCX) {
+		if (dasd_eckd_disable_hpf_path(device, irb->esw.esw1.lpum))
+			return;
+		dasd_eckd_disable_hpf_device(device);
+		dasd_path_set_tbvpm(device,
+				  dasd_path_get_hpfpm(device));
+	}
+	/*
+	 * prevent any new I/O from being started on the device and schedule a
+	 * requeue of existing requests
+	 */
+	dasd_device_set_stop_bits(device, DASD_STOPPED_NOT_ACC);
+	dasd_schedule_requeue(device);
+}
+
+static struct ccw_driver dasd_eckd_driver = {
+	.driver = {
+		.name	= "dasd-eckd",
+		.owner	= THIS_MODULE,
+	},
+	.ids	     = dasd_eckd_ids,
+	.probe	     = dasd_eckd_probe,
+	.remove      = dasd_generic_remove,
+	.set_offline = dasd_generic_set_offline,
+	.set_online  = dasd_eckd_set_online,
+	.notify      = dasd_generic_notify,
+	.path_event  = dasd_generic_path_event,
+	.shutdown    = dasd_generic_shutdown,
+	.freeze      = dasd_generic_pm_freeze,
+	.thaw	     = dasd_generic_restore_device,
+	.restore     = dasd_generic_restore_device,
+	.uc_handler  = dasd_generic_uc_handler,
+	.int_class   = IRQIO_DAS,
+};
+
+/*
+ * max_blocks is dependent on the amount of storage that is available
+ * in the static io buffer for each device. Currently each device has
+ * 8192 bytes (=2 pages). For 64 bit one struct dasd_mchunk has
+ * 24 bytes, the struct dasd_ccw_req has 136 bytes and each block can use
+ * up to 16 bytes (8 for the ccw and 8 for the idal pointer). In
+ * addition we have one define extent ccw + 16 bytes of data and one
+ * locate record ccw + 16 bytes of data. That makes:
+ * (8192 - 24 - 136 - 8 - 16 - 8 - 16) / 16 = 499 blocks at maximum.
+ * We want to fit two into the available memory so that we can immediately
+ * start the next request when one finishes. That makes 249.5 blocks
+ * for one request. Allow a little safety margin and the result is 240.
+ */
+static struct dasd_discipline dasd_eckd_discipline = {
+	.owner = THIS_MODULE,
+	.name = "ECKD",
+	.ebcname = "ECKD",
+	.max_blocks = 190,
+	.check_device = dasd_eckd_check_characteristics,
+	.uncheck_device = dasd_eckd_uncheck_device,
+	.do_analysis = dasd_eckd_do_analysis,
+	.verify_path = dasd_eckd_verify_path,
+	.basic_to_ready = dasd_eckd_basic_to_ready,
+	.online_to_ready = dasd_eckd_online_to_ready,
+	.basic_to_known = dasd_eckd_basic_to_known,
+	.fill_geometry = dasd_eckd_fill_geometry,
+	.start_IO = dasd_start_IO,
+	.term_IO = dasd_term_IO,
+	.handle_terminated_request = dasd_eckd_handle_terminated_request,
+	.format_device = dasd_eckd_format_device,
+	.check_device_format = dasd_eckd_check_device_format,
+	.erp_action = dasd_eckd_erp_action,
+	.erp_postaction = dasd_eckd_erp_postaction,
+	.check_for_device_change = dasd_eckd_check_for_device_change,
+	.build_cp = dasd_eckd_build_alias_cp,
+	.free_cp = dasd_eckd_free_alias_cp,
+	.dump_sense = dasd_eckd_dump_sense,
+	.dump_sense_dbf = dasd_eckd_dump_sense_dbf,
+	.fill_info = dasd_eckd_fill_info,
+	.ioctl = dasd_eckd_ioctl,
+	.freeze = dasd_eckd_pm_freeze,
+	.restore = dasd_eckd_restore_device,
+	.reload = dasd_eckd_reload_device,
+	.get_uid = dasd_eckd_get_uid,
+	.kick_validate = dasd_eckd_kick_validate_server,
+	.check_attention = dasd_eckd_check_attention,
+	.host_access_count = dasd_eckd_host_access_count,
+	.hosts_print = dasd_hosts_print,
+	.handle_hpf_error = dasd_eckd_handle_hpf_error,
+	.disable_hpf = dasd_eckd_disable_hpf_device,
+	.hpf_enabled = dasd_eckd_hpf_enabled,
+	.reset_path = dasd_eckd_reset_path,
+};
+
+static int __init
+dasd_eckd_init(void)
+{
+	int ret;
+
+	ASCEBC(dasd_eckd_discipline.ebcname, 4);
+	dasd_reserve_req = kmalloc(sizeof(*dasd_reserve_req),
+				   GFP_KERNEL | GFP_DMA);
+	if (!dasd_reserve_req)
+		return -ENOMEM;
+	path_verification_worker = kmalloc(sizeof(*path_verification_worker),
+				   GFP_KERNEL | GFP_DMA);
+	if (!path_verification_worker) {
+		kfree(dasd_reserve_req);
+		return -ENOMEM;
+	}
+	rawpadpage = (void *)__get_free_page(GFP_KERNEL);
+	if (!rawpadpage) {
+		kfree(path_verification_worker);
+		kfree(dasd_reserve_req);
+		return -ENOMEM;
+	}
+	ret = ccw_driver_register(&dasd_eckd_driver);
+	if (!ret)
+		wait_for_device_probe();
+	else {
+		kfree(path_verification_worker);
+		kfree(dasd_reserve_req);
+		free_page((unsigned long)rawpadpage);
+	}
+	return ret;
+}
+
+static void __exit
+dasd_eckd_cleanup(void)
+{
+	ccw_driver_unregister(&dasd_eckd_driver);
+	kfree(path_verification_worker);
+	kfree(dasd_reserve_req);
+	free_page((unsigned long)rawpadpage);
+}
+
+module_init(dasd_eckd_init);
+module_exit(dasd_eckd_cleanup);
diff --git a/drivers/s390/block/dasd_eckd.h b/drivers/s390/block/dasd_eckd.h
new file mode 100644
index 0000000..5869d2f
--- /dev/null
+++ b/drivers/s390/block/dasd_eckd.h
@@ -0,0 +1,578 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
+ *		    Horst Hummel <Horst.Hummel@de.ibm.com>
+ * Bugreports.to..: <Linux390@de.ibm.com>
+ * Copyright IBM Corp. 1999, 2000
+ *
+ */
+
+#ifndef DASD_ECKD_H
+#define DASD_ECKD_H
+
+/*****************************************************************************
+ * SECTION: CCW Definitions
+ ****************************************************************************/
+#define DASD_ECKD_CCW_WRITE		 0x05
+#define DASD_ECKD_CCW_READ		 0x06
+#define DASD_ECKD_CCW_WRITE_HOME_ADDRESS 0x09
+#define DASD_ECKD_CCW_READ_HOME_ADDRESS	 0x0a
+#define DASD_ECKD_CCW_WRITE_KD		 0x0d
+#define DASD_ECKD_CCW_READ_KD		 0x0e
+#define DASD_ECKD_CCW_ERASE		 0x11
+#define DASD_ECKD_CCW_READ_COUNT	 0x12
+#define DASD_ECKD_CCW_SLCK		 0x14
+#define DASD_ECKD_CCW_WRITE_RECORD_ZERO	 0x15
+#define DASD_ECKD_CCW_READ_RECORD_ZERO	 0x16
+#define DASD_ECKD_CCW_WRITE_CKD		 0x1d
+#define DASD_ECKD_CCW_READ_CKD		 0x1e
+#define DASD_ECKD_CCW_PSF		 0x27
+#define DASD_ECKD_CCW_SNID		 0x34
+#define DASD_ECKD_CCW_RSSD		 0x3e
+#define DASD_ECKD_CCW_LOCATE_RECORD	 0x47
+#define DASD_ECKD_CCW_LOCATE_RECORD_EXT	 0x4b
+#define DASD_ECKD_CCW_SNSS		 0x54
+#define DASD_ECKD_CCW_DEFINE_EXTENT	 0x63
+#define DASD_ECKD_CCW_WRITE_MT		 0x85
+#define DASD_ECKD_CCW_READ_MT		 0x86
+#define DASD_ECKD_CCW_WRITE_KD_MT	 0x8d
+#define DASD_ECKD_CCW_READ_KD_MT	 0x8e
+#define DASD_ECKD_CCW_READ_COUNT_MT	 0x92
+#define DASD_ECKD_CCW_RELEASE		 0x94
+#define DASD_ECKD_CCW_WRITE_FULL_TRACK	 0x95
+#define DASD_ECKD_CCW_READ_CKD_MT	 0x9e
+#define DASD_ECKD_CCW_WRITE_CKD_MT	 0x9d
+#define DASD_ECKD_CCW_WRITE_TRACK_DATA	 0xA5
+#define DASD_ECKD_CCW_READ_TRACK_DATA	 0xA6
+#define DASD_ECKD_CCW_RESERVE		 0xB4
+#define DASD_ECKD_CCW_READ_TRACK	 0xDE
+#define DASD_ECKD_CCW_PFX		 0xE7
+#define DASD_ECKD_CCW_PFX_READ		 0xEA
+#define DASD_ECKD_CCW_RSCK		 0xF9
+#define DASD_ECKD_CCW_RCD		 0xFA
+
+/*
+ * Perform Subsystem Function / Sub-Orders
+ */
+#define PSF_ORDER_PRSSD			 0x18
+#define PSF_ORDER_CUIR_RESPONSE		 0x1A
+#define PSF_SUBORDER_QHA		 0x1C
+#define PSF_ORDER_SSC			 0x1D
+
+/*
+ * CUIR response condition codes
+ */
+#define PSF_CUIR_INVALID		 0x00
+#define PSF_CUIR_COMPLETED		 0x01
+#define PSF_CUIR_NOT_SUPPORTED		 0x02
+#define PSF_CUIR_ERROR_IN_REQ		 0x03
+#define PSF_CUIR_DENIED			 0x04
+#define PSF_CUIR_LAST_PATH		 0x05
+#define PSF_CUIR_DEVICE_ONLINE		 0x06
+#define PSF_CUIR_VARY_FAILURE		 0x07
+#define PSF_CUIR_SOFTWARE_FAILURE	 0x08
+#define PSF_CUIR_NOT_RECOGNIZED		 0x09
+
+/*
+ * CUIR codes
+ */
+#define CUIR_QUIESCE			 0x01
+#define CUIR_RESUME			 0x02
+
+/*
+ * attention message definitions
+ */
+#define ATTENTION_LENGTH_CUIR		 0x0e
+#define ATTENTION_FORMAT_CUIR		 0x01
+
+#define DASD_ECKD_PG_GROUPED		 0x10
+
+/*
+ * Size that is reported for large volumes in the old 16-bit no_cyl field
+ */
+#define LV_COMPAT_CYL 0xFFFE
+
+
+#define FCX_MAX_DATA_FACTOR 65536
+#define DASD_ECKD_RCD_DATA_SIZE 256
+
+#define DASD_ECKD_PATH_THRHLD		 256
+#define DASD_ECKD_PATH_INTERVAL		 300
+
+/*****************************************************************************
+ * SECTION: Type Definitions
+ ****************************************************************************/
+
+struct eckd_count {
+	__u16 cyl;
+	__u16 head;
+	__u8 record;
+	__u8 kl;
+	__u16 dl;
+} __attribute__ ((packed));
+
+struct ch_t {
+	__u16 cyl;
+	__u16 head;
+} __attribute__ ((packed));
+
+struct chs_t {
+	__u16 cyl;
+	__u16 head;
+	__u32 sector;
+} __attribute__ ((packed));
+
+struct chr_t {
+	__u16 cyl;
+	__u16 head;
+	__u8 record;
+} __attribute__ ((packed));
+
+struct geom_t {
+	__u16 cyl;
+	__u16 head;
+	__u32 sector;
+} __attribute__ ((packed));
+
+struct eckd_home {
+	__u8 skip_control[14];
+	__u16 cell_number;
+	__u8 physical_addr[3];
+	__u8 flag;
+	struct ch_t track_addr;
+	__u8 reserved;
+	__u8 key_length;
+	__u8 reserved2[2];
+} __attribute__ ((packed));
+
+struct DE_eckd_data {
+	struct {
+		unsigned char perm:2;	/* Permissions on this extent */
+		unsigned char reserved:1;
+		unsigned char seek:2;	/* Seek control */
+		unsigned char auth:2;	/* Access authorization */
+		unsigned char pci:1;	/* PCI Fetch mode */
+	} __attribute__ ((packed)) mask;
+	struct {
+		unsigned char mode:2;	/* Architecture mode */
+		unsigned char ckd:1;	/* CKD Conversion */
+		unsigned char operation:3;	/* Operation mode */
+		unsigned char cfw:1;	/* Cache fast write */
+		unsigned char dfw:1;	/* DASD fast write */
+	} __attribute__ ((packed)) attributes;
+	__u16 blk_size;		/* Blocksize */
+	__u16 fast_write_id;
+	__u8 ga_additional;	/* Global Attributes Additional */
+	__u8 ga_extended;	/* Global Attributes Extended	*/
+	struct ch_t beg_ext;
+	struct ch_t end_ext;
+	unsigned long ep_sys_time; /* Ext Parameter - System Time Stamp */
+	__u8 ep_format;        /* Extended Parameter format byte       */
+	__u8 ep_prio;          /* Extended Parameter priority I/O byte */
+	__u8 ep_reserved1;     /* Extended Parameter Reserved	       */
+	__u8 ep_rec_per_track; /* Number of records on a track	       */
+	__u8 ep_reserved[4];   /* Extended Parameter Reserved	       */
+} __attribute__ ((packed));
+
+struct LO_eckd_data {
+	struct {
+		unsigned char orientation:2;
+		unsigned char operation:6;
+	} __attribute__ ((packed)) operation;
+	struct {
+		unsigned char last_bytes_used:1;
+		unsigned char reserved:6;
+		unsigned char read_count_suffix:1;
+	} __attribute__ ((packed)) auxiliary;
+	__u8 unused;
+	__u8 count;
+	struct ch_t seek_addr;
+	struct chr_t search_arg;
+	__u8 sector;
+	__u16 length;
+} __attribute__ ((packed));
+
+struct LRE_eckd_data {
+	struct {
+		unsigned char orientation:2;
+		unsigned char operation:6;
+	} __attribute__ ((packed)) operation;
+	struct {
+		unsigned char length_valid:1;
+		unsigned char length_scope:1;
+		unsigned char imbedded_ccw_valid:1;
+		unsigned char check_bytes:2;
+		unsigned char imbedded_count_valid:1;
+		unsigned char reserved:1;
+		unsigned char read_count_suffix:1;
+	} __attribute__ ((packed)) auxiliary;
+	__u8 imbedded_ccw;
+	__u8 count;
+	struct ch_t seek_addr;
+	struct chr_t search_arg;
+	__u8 sector;
+	__u16 length;
+	__u8 imbedded_count;
+	__u8 extended_operation;
+	__u16 extended_parameter_length;
+	__u8 extended_parameter[0];
+} __attribute__ ((packed));
+
+/* Prefix data for format 0x00 and 0x01 */
+struct PFX_eckd_data {
+	unsigned char format;
+	struct {
+		unsigned char define_extent:1;
+		unsigned char time_stamp:1;
+		unsigned char verify_base:1;
+		unsigned char hyper_pav:1;
+		unsigned char reserved:4;
+	} __attribute__ ((packed)) validity;
+	__u8 base_address;
+	__u8 aux;
+	__u8 base_lss;
+	__u8 reserved[7];
+	struct DE_eckd_data define_extent;
+	struct LRE_eckd_data locate_record;
+} __attribute__ ((packed));
+
+struct dasd_eckd_characteristics {
+	__u16 cu_type;
+	struct {
+		unsigned char support:2;
+		unsigned char async:1;
+		unsigned char reserved:1;
+		unsigned char cache_info:1;
+		unsigned char model:3;
+	} __attribute__ ((packed)) cu_model;
+	__u16 dev_type;
+	__u8 dev_model;
+	struct {
+		unsigned char mult_burst:1;
+		unsigned char RT_in_LR:1;
+		unsigned char reserved1:1;
+		unsigned char RD_IN_LR:1;
+		unsigned char reserved2:4;
+		unsigned char reserved3:8;
+		unsigned char defect_wr:1;
+		unsigned char XRC_supported:1;
+		unsigned char reserved4:1;
+		unsigned char striping:1;
+		unsigned char reserved5:4;
+		unsigned char cfw:1;
+		unsigned char reserved6:2;
+		unsigned char cache:1;
+		unsigned char dual_copy:1;
+		unsigned char dfw:1;
+		unsigned char reset_alleg:1;
+		unsigned char sense_down:1;
+	} __attribute__ ((packed)) facilities;
+	__u8 dev_class;
+	__u8 unit_type;
+	__u16 no_cyl;
+	__u16 trk_per_cyl;
+	__u8 sec_per_trk;
+	__u8 byte_per_track[3];
+	__u16 home_bytes;
+	__u8 formula;
+	union {
+		struct {
+			__u8 f1;
+			__u16 f2;
+			__u16 f3;
+		} __attribute__ ((packed)) f_0x01;
+		struct {
+			__u8 f1;
+			__u8 f2;
+			__u8 f3;
+			__u8 f4;
+			__u8 f5;
+		} __attribute__ ((packed)) f_0x02;
+	} __attribute__ ((packed)) factors;
+	__u16 first_alt_trk;
+	__u16 no_alt_trk;
+	__u16 first_dia_trk;
+	__u16 no_dia_trk;
+	__u16 first_sup_trk;
+	__u16 no_sup_trk;
+	__u8 MDR_ID;
+	__u8 OBR_ID;
+	__u8 director;
+	__u8 rd_trk_set;
+	__u16 max_rec_zero;
+	__u8 reserved1;
+	__u8 RWANY_in_LR;
+	__u8 factor6;
+	__u8 factor7;
+	__u8 factor8;
+	__u8 reserved2[3];
+	__u8 reserved3[6];
+	__u32 long_no_cyl;
+} __attribute__ ((packed));
+
+/* elements of the configuration data */
+struct dasd_ned {
+	struct {
+		__u8 identifier:2;
+		__u8 token_id:1;
+		__u8 sno_valid:1;
+		__u8 subst_sno:1;
+		__u8 recNED:1;
+		__u8 emuNED:1;
+		__u8 reserved:1;
+	} __attribute__ ((packed)) flags;
+	__u8 descriptor;
+	__u8 dev_class;
+	__u8 reserved;
+	__u8 dev_type[6];
+	__u8 dev_model[3];
+	__u8 HDA_manufacturer[3];
+	__u8 HDA_location[2];
+	__u8 HDA_seqno[12];
+	__u8 ID;
+	__u8 unit_addr;
+} __attribute__ ((packed));
+
+struct dasd_sneq {
+	struct {
+		__u8 identifier:2;
+		__u8 reserved:6;
+	} __attribute__ ((packed)) flags;
+	__u8 res1;
+	__u16 format;
+	__u8 res2[4];		/* byte  4- 7 */
+	__u8 sua_flags;		/* byte  8    */
+	__u8 base_unit_addr;	/* byte  9    */
+	__u8 res3[22];		/* byte 10-31 */
+} __attribute__ ((packed));
+
+struct vd_sneq {
+	struct {
+		__u8 identifier:2;
+		__u8 reserved:6;
+	} __attribute__ ((packed)) flags;
+	__u8 res1;
+	__u16 format;
+	__u8 res2[4];	/* byte  4- 7 */
+	__u8 uit[16];	/* byte  8-23 */
+	__u8 res3[8];	/* byte 24-31 */
+} __attribute__ ((packed));
+
+struct dasd_gneq {
+	struct {
+		__u8 identifier:2;
+		__u8 reserved:6;
+	} __attribute__ ((packed)) flags;
+	__u8 record_selector;
+	__u8 reserved[4];
+	struct {
+		__u8 value:2;
+		__u8 number:6;
+	} __attribute__ ((packed)) timeout;
+	__u8 reserved3;
+	__u16 subsystemID;
+	__u8 reserved2[22];
+} __attribute__ ((packed));
+
+struct dasd_rssd_features {
+	char feature[256];
+} __attribute__((packed));
+
+struct dasd_rssd_messages {
+	__u16 length;
+	__u8 format;
+	__u8 code;
+	__u32 message_id;
+	__u8 flags;
+	char messages[4087];
+} __packed;
+
+struct dasd_cuir_message {
+	__u16 length;
+	__u8 format;
+	__u8 code;
+	__u32 message_id;
+	__u8 flags;
+	__u8 neq_map[3];
+	__u8 ned_map;
+	__u8 record_selector;
+} __packed;
+
+struct dasd_psf_cuir_response {
+	__u8 order;
+	__u8 flags;
+	__u8 cc;
+	__u8 chpid;
+	__u16 device_nr;
+	__u16 reserved;
+	__u32 message_id;
+	__u64 system_id;
+	__u8 cssid;
+	__u8 ssid;
+} __packed;
+
+struct dasd_ckd_path_group_entry {
+	__u8 status_flags;
+	__u8 pgid[11];
+	__u8 sysplex_name[8];
+	__u32 timestamp;
+	__u32 cylinder;
+	__u8 reserved[4];
+} __packed;
+
+struct dasd_ckd_host_information {
+	__u8 access_flags;
+	__u8 entry_size;
+	__u16 entry_count;
+	__u8 entry[16390];
+} __packed;
+
+struct dasd_psf_query_host_access {
+	__u8 access_flag;
+	__u8 version;
+	__u16 CKD_length;
+	__u16 SCSI_length;
+	__u8 unused[10];
+	__u8 host_access_information[16394];
+} __packed;
+
+/*
+ * Perform Subsystem Function - Prepare for Read Subsystem Data
+ */
+struct dasd_psf_prssd_data {
+	unsigned char order;
+	unsigned char flags;
+	unsigned char reserved1;
+	unsigned char reserved2;
+	unsigned char lss;
+	unsigned char volume;
+	unsigned char suborder;
+	unsigned char varies[5];
+} __attribute__ ((packed));
+
+/*
+ * Perform Subsystem Function - Set Subsystem Characteristics
+ */
+struct dasd_psf_ssc_data {
+	unsigned char order;
+	unsigned char flags;
+	unsigned char cu_type[4];
+	unsigned char suborder;
+	unsigned char reserved[59];
+} __attribute__((packed));
+
+
+/*
+ * some structures and definitions for alias handling
+ */
+struct dasd_unit_address_configuration {
+	struct {
+		char ua_type;
+		char base_ua;
+	} unit[256];
+} __attribute__((packed));
+
+
+#define MAX_DEVICES_PER_LCU 256
+
+/* flags on the LCU  */
+#define NEED_UAC_UPDATE  0x01
+#define UPDATE_PENDING	0x02
+
+enum pavtype {NO_PAV, BASE_PAV, HYPER_PAV};
+
+
+struct alias_root {
+	struct list_head serverlist;
+	spinlock_t lock;
+};
+
+struct alias_server {
+	struct list_head server;
+	struct dasd_uid uid;
+	struct list_head lculist;
+};
+
+struct summary_unit_check_work_data {
+	char reason;
+	struct dasd_device *device;
+	struct work_struct worker;
+};
+
+struct read_uac_work_data {
+	struct dasd_device *device;
+	struct delayed_work dwork;
+};
+
+struct alias_lcu {
+	struct list_head lcu;
+	struct dasd_uid uid;
+	enum pavtype pav;
+	char flags;
+	spinlock_t lock;
+	struct list_head grouplist;
+	struct list_head active_devices;
+	struct list_head inactive_devices;
+	struct dasd_unit_address_configuration *uac;
+	struct summary_unit_check_work_data suc_data;
+	struct read_uac_work_data ruac_data;
+	struct dasd_ccw_req *rsu_cqr;
+	struct completion lcu_setup;
+};
+
+struct alias_pav_group {
+	struct list_head group;
+	struct dasd_uid uid;
+	struct alias_lcu *lcu;
+	struct list_head baselist;
+	struct list_head aliaslist;
+	struct dasd_device *next;
+};
+
+struct dasd_conf_data {
+	struct dasd_ned neds[5];
+	u8 reserved[64];
+	struct dasd_gneq gneq;
+} __packed;
+
+struct dasd_eckd_private {
+	struct dasd_eckd_characteristics rdc_data;
+	u8 *conf_data;
+	int conf_len;
+
+	/* pointers to specific parts in the conf_data */
+	struct dasd_ned *ned;
+	struct dasd_sneq *sneq;
+	struct vd_sneq *vdsneq;
+	struct dasd_gneq *gneq;
+
+	struct eckd_count count_area[5];
+	int init_cqr_status;
+	int uses_cdl;
+	struct attrib_data_t attrib;	/* e.g. cache operations */
+	struct dasd_rssd_features features;
+	u32 real_cyl;
+
+	/* alias management */
+	struct dasd_uid uid;
+	struct alias_pav_group *pavgroup;
+	struct alias_lcu *lcu;
+	int count;
+
+	u32 fcx_max_data;
+	char suc_reason;
+};
+
+
+
+int dasd_alias_make_device_known_to_lcu(struct dasd_device *);
+void dasd_alias_disconnect_device_from_lcu(struct dasd_device *);
+int dasd_alias_add_device(struct dasd_device *);
+int dasd_alias_remove_device(struct dasd_device *);
+struct dasd_device *dasd_alias_get_start_dev(struct dasd_device *);
+void dasd_alias_handle_summary_unit_check(struct work_struct *);
+void dasd_eckd_reset_ccw_to_base_io(struct dasd_ccw_req *);
+void dasd_alias_lcu_setup_complete(struct dasd_device *);
+void dasd_alias_wait_for_lcu_setup(struct dasd_device *);
+int dasd_alias_update_add_device(struct dasd_device *);
+#endif				/* DASD_ECKD_H */
diff --git a/drivers/s390/block/dasd_eer.c b/drivers/s390/block/dasd_eer.c
new file mode 100644
index 0000000..93bb09d
--- /dev/null
+++ b/drivers/s390/block/dasd_eer.c
@@ -0,0 +1,723 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ *  Character device driver for extended error reporting.
+ *
+ *  Copyright IBM Corp. 2005
+ *  extended error reporting for DASD ECKD devices
+ *  Author(s): Stefan Weinhuber <wein@de.ibm.com>
+ */
+
+#define KMSG_COMPONENT "dasd-eckd"
+
+#include <linux/init.h>
+#include <linux/fs.h>
+#include <linux/kernel.h>
+#include <linux/miscdevice.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/device.h>
+#include <linux/poll.h>
+#include <linux/mutex.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+
+#include <linux/uaccess.h>
+#include <linux/atomic.h>
+#include <asm/ebcdic.h>
+
+#include "dasd_int.h"
+#include "dasd_eckd.h"
+
+#ifdef PRINTK_HEADER
+#undef PRINTK_HEADER
+#endif				/* PRINTK_HEADER */
+#define PRINTK_HEADER "dasd(eer):"
+
+/*
+ * SECTION: the internal buffer
+ */
+
+/*
+ * The internal buffer is meant to store opaque blobs of data, so it does
+ * not know of higher level concepts like triggers.
+ * It consists of a number of pages that are used as a ringbuffer. Each data
+ * blob is stored in a simple record that consists of an integer, which
+ * contains the size of the following data, and the data bytes themselves.
+ *
+ * To allow for multiple independent readers we create one internal buffer
+ * each time the device is opened and destroy the buffer when the file is
+ * closed again. The number of pages used for this buffer is determined by
+ * the module parameter eer_pages.
+ *
+ * One record can be written to a buffer by using the functions
+ * - dasd_eer_start_record (one time per record to write the size to the
+ *                          buffer and reserve the space for the data)
+ * - dasd_eer_write_buffer (one or more times per record to write the data)
+ * The data can be written in several steps but you will have to compute
+ * the total size up front for the invocation of dasd_eer_start_record.
+ * If the ringbuffer is full, dasd_eer_start_record will remove the required
+ * number of old records.
+ *
+ * A record is typically read in two steps, first read the integer that
+ * specifies the size of the following data, then read the data.
+ * Both can be done by
+ * - dasd_eer_read_buffer
+ *
+ * For all mentioned functions you need to get the bufferlock first and keep
+ * it until a complete record is written or read.
+ *
+ * All information necessary to keep track of an internal buffer is kept in
+ * a struct eerbuffer. The buffer specific to a file pointer is stored in
+ * the private_data field of that file. To be able to write data to all
+ * existing buffers, each buffer is also added to the bufferlist.
+ * If the user does not want to read a complete record in one go, we have to
+ * keep track of the rest of the record. residual stores the number of bytes
+ * that still have to be delivered. If the rest of the record is invalidated
+ * between two reads, residual will be set to -1 so that the next read fails.
+ * All entries in the eerbuffer structure are protected with the bufferlock.
+ * To avoid races between writing to a buffer on the one side and creating
+ * and destroying buffers on the other side, the bufferlock must also be used
+ * to protect the bufferlist.
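+ *
+ * A minimal usage sketch (illustrative only, bufferlock held): storing a
+ * 12 byte blob with
+ *
+ *	if (!dasd_eer_start_record(eerb, 12))
+ *		dasd_eer_write_buffer(eerb, data, 12);
+ *
+ * consumes 16 bytes of the ring: a 4 byte length integer followed by the
+ * 12 data bytes, possibly wrapping across the buffer pages.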
+ */
+
+static int eer_pages = 5;
+module_param(eer_pages, int, S_IRUGO|S_IWUSR);
+
+struct eerbuffer {
+	struct list_head list;
+	char **buffer;
+	int buffersize;
+	int buffer_page_count;
+	int head;
+	int tail;
+	int residual;
+};
+
+static LIST_HEAD(bufferlist);
+static DEFINE_SPINLOCK(bufferlock);
+static DECLARE_WAIT_QUEUE_HEAD(dasd_eer_read_wait_queue);
+
+/*
+ * How many free bytes are available on the buffer.
+ * Needs to be called with bufferlock held.
+ */
+static int dasd_eer_get_free_bytes(struct eerbuffer *eerb)
+{
+	if (eerb->head < eerb->tail)
+		return eerb->tail - eerb->head - 1;
+	return eerb->buffersize - eerb->head + eerb->tail - 1;
+}
+
+/*
+ * How many bytes of buffer space are used.
+ * Needs to be called with bufferlock held.
+ */
+static int dasd_eer_get_filled_bytes(struct eerbuffer *eerb)
+{
+
+	if (eerb->head >= eerb->tail)
+		return eerb->head - eerb->tail;
+	return eerb->buffersize - eerb->tail + eerb->head;
+}
+
+/*
+ * The dasd_eer_write_buffer function just copies count bytes of data
+ * to the buffer. Make sure to call dasd_eer_start_record first, to
+ * make sure that enough free space is available.
+ * Needs to be called with bufferlock held.
+ */
+static void dasd_eer_write_buffer(struct eerbuffer *eerb,
+				  char *data, int count)
+{
+
+	unsigned long headindex, localhead;
+	unsigned long rest, len;
+	char *nextdata;
+
+	nextdata = data;
+	rest = count;
+	while (rest > 0) {
+		headindex = eerb->head / PAGE_SIZE;
+		localhead = eerb->head % PAGE_SIZE;
+		len = min(rest, PAGE_SIZE - localhead);
+		memcpy(eerb->buffer[headindex]+localhead, nextdata, len);
+		nextdata += len;
+		rest -= len;
+		eerb->head += len;
+		if (eerb->head == eerb->buffersize)
+			eerb->head = 0; /* wrap around */
+		BUG_ON(eerb->head > eerb->buffersize);
+	}
+}
+
+/*
+ * Needs to be called with bufferlock held.
+ */
+static int dasd_eer_read_buffer(struct eerbuffer *eerb, char *data, int count)
+{
+
+	unsigned long tailindex, localtail;
+	unsigned long rest, len, finalcount;
+	char *nextdata;
+
+	finalcount = min(count, dasd_eer_get_filled_bytes(eerb));
+	nextdata = data;
+	rest = finalcount;
+	while (rest > 0) {
+		tailindex = eerb->tail / PAGE_SIZE;
+		localtail = eerb->tail % PAGE_SIZE;
+		len = min(rest, PAGE_SIZE - localtail);
+		memcpy(nextdata, eerb->buffer[tailindex] + localtail, len);
+		nextdata += len;
+		rest -= len;
+		eerb->tail += len;
+		if (eerb->tail == eerb->buffersize)
+			eerb->tail = 0; /* wrap around */
+		BUG_ON(eerb->tail > eerb->buffersize);
+	}
+	return finalcount;
+}
+
+/*
+ * Whenever you want to write a blob of data to the internal buffer you
+ * have to start by using this function first. It will write the number
+ * of bytes that will be written to the buffer. If necessary it will remove
+ * old records to make room for the new one.
+ * Needs to be called with bufferlock held.
+ */
+static int dasd_eer_start_record(struct eerbuffer *eerb, int count)
+{
+	int tailcount;
+
+	if (count + sizeof(count) > eerb->buffersize)
+		return -ENOMEM;
+	while (dasd_eer_get_free_bytes(eerb) < count + sizeof(count)) {
+		if (eerb->residual > 0) {
+			eerb->tail += eerb->residual;
+			if (eerb->tail >= eerb->buffersize)
+				eerb->tail -= eerb->buffersize;
+			eerb->residual = -1;
+		}
+		dasd_eer_read_buffer(eerb, (char *) &tailcount,
+				     sizeof(tailcount));
+		eerb->tail += tailcount;
+		if (eerb->tail >= eerb->buffersize)
+			eerb->tail -= eerb->buffersize;
+	}
+	dasd_eer_write_buffer(eerb, (char *) &count, sizeof(count));
+
+	return 0;
+}
+
+/*
+ * Release pages that are not used anymore.
+ */
+static void dasd_eer_free_buffer_pages(char **buf, int no_pages)
+{
+	int i;
+
+	for (i = 0; i < no_pages; i++)
+		free_page((unsigned long) buf[i]);
+}
+
+/*
+ * Allocate a new set of memory pages.
+ */
+static int dasd_eer_allocate_buffer_pages(char **buf, int no_pages)
+{
+	int i;
+
+	for (i = 0; i < no_pages; i++) {
+		buf[i] = (char *) get_zeroed_page(GFP_KERNEL);
+		if (!buf[i]) {
+			dasd_eer_free_buffer_pages(buf, i);
+			return -ENOMEM;
+		}
+	}
+	return 0;
+}
+
+/*
+ * SECTION: The extended error reporting functionality
+ */
+
+/*
+ * When a DASD device driver wants to report an error, it calls the
+ * function dasd_eer_write and gives the respective trigger ID as
+ * parameter. Currently there are four kinds of triggers:
+ *
+ * DASD_EER_FATALERROR:  all kinds of unrecoverable I/O problems
+ * DASD_EER_PPRCSUSPEND: PPRC was suspended
+ * DASD_EER_NOPATH:      There is no path to the device left.
+ * DASD_EER_STATECHANGE: The state of the device has changed.
+ *
+ * For the first three triggers all required information can be supplied by
+ * the caller. For these triggers a record is written by the function
+ * dasd_eer_write_standard_trigger.
+ *
+ * The DASD_EER_STATECHANGE trigger is special since a sense subsystem
+ * status ccw needs to be executed to gather the necessary sense data first.
+ * The dasd_eer_snss function will queue the SNSS request and the request
+ * callback will then call dasd_eer_write with the DASD_EER_STATECHANGE
+ * trigger.
+ *
+ * To avoid memory allocations at runtime, the necessary memory is allocated
+ * when the extended error reporting is enabled for a device (by
+ * dasd_eer_probe). There is one sense subsystem status request for each
+ * eer enabled DASD device. The presence of the cqr in device->eer_cqr
+ * indicates that eer is enabled for the device. The use of the snss request
+ * is protected by the DASD_FLAG_EER_IN_USE bit. When this flag indicates
+ * that the cqr is currently in use, dasd_eer_snss cannot start a second
+ * request but sets the DASD_FLAG_EER_SNSS flag instead. The callback of
+ * the SNSS request will check the bit and call dasd_eer_snss again.
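+ *
+ * A sketch of a trigger invocation (illustrative only): a discipline that
+ * hits an unrecoverable I/O problem on a failed request would report it
+ * with
+ *
+ *	dasd_eer_write(device, cqr, DASD_EER_FATALERROR);
+ *
+ * which is a no-op unless eer has been enabled for the device, i.e.
+ * device->eer_cqr is set.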
+ */
+
+#define SNSS_DATA_SIZE 44
+
+#define DASD_EER_BUSID_SIZE 10
+struct dasd_eer_header {
+	__u32 total_size;
+	__u32 trigger;
+	__u64 tv_sec;
+	__u64 tv_usec;
+	char busid[DASD_EER_BUSID_SIZE];
+} __attribute__ ((packed));
+
+/*
+ * The following function can be used for those triggers that have
+ * all necessary data available when the function is called.
+ * If the parameter cqr is not NULL, the chain of requests will be searched
+ * for valid sense data, and all valid sense data sets will be added to
+ * the trigger's data.
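+ *
+ * The resulting record layout is, illustrated here for a chain with two
+ * valid sense data sets:
+ *
+ *	| struct dasd_eer_header | 32 bytes sense | 32 bytes sense | "EOR" |
+ *
+ * with total_size = sizeof(header) + 2 * 32 + 4.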
+ */
+static void dasd_eer_write_standard_trigger(struct dasd_device *device,
+					    struct dasd_ccw_req *cqr,
+					    int trigger)
+{
+	struct dasd_ccw_req *temp_cqr;
+	int data_size;
+	struct timespec64 ts;
+	struct dasd_eer_header header;
+	unsigned long flags;
+	struct eerbuffer *eerb;
+	char *sense;
+
+	/* go through cqr chain and count the valid sense data sets */
+	data_size = 0;
+	for (temp_cqr = cqr; temp_cqr; temp_cqr = temp_cqr->refers)
+		if (dasd_get_sense(&temp_cqr->irb))
+			data_size += 32;
+
+	header.total_size = sizeof(header) + data_size + 4; /* "EOR" */
+	header.trigger = trigger;
+	ktime_get_real_ts64(&ts);
+	header.tv_sec = ts.tv_sec;
+	header.tv_usec = ts.tv_nsec / NSEC_PER_USEC;
+	strlcpy(header.busid, dev_name(&device->cdev->dev),
+		DASD_EER_BUSID_SIZE);
+
+	spin_lock_irqsave(&bufferlock, flags);
+	list_for_each_entry(eerb, &bufferlist, list) {
+		dasd_eer_start_record(eerb, header.total_size);
+		dasd_eer_write_buffer(eerb, (char *) &header, sizeof(header));
+		for (temp_cqr = cqr; temp_cqr; temp_cqr = temp_cqr->refers) {
+			sense = dasd_get_sense(&temp_cqr->irb);
+			if (sense)
+				dasd_eer_write_buffer(eerb, sense, 32);
+		}
+		dasd_eer_write_buffer(eerb, "EOR", 4);
+	}
+	spin_unlock_irqrestore(&bufferlock, flags);
+	wake_up_interruptible(&dasd_eer_read_wait_queue);
+}
+
+/*
+ * This function writes a DASD_EER_STATECHANGE trigger.
+ */
+static void dasd_eer_write_snss_trigger(struct dasd_device *device,
+					struct dasd_ccw_req *cqr,
+					int trigger)
+{
+	int data_size;
+	int snss_rc;
+	struct timespec64 ts;
+	struct dasd_eer_header header;
+	unsigned long flags;
+	struct eerbuffer *eerb;
+
+	snss_rc = (cqr->status == DASD_CQR_DONE) ? 0 : -EIO;
+	if (snss_rc)
+		data_size = 0;
+	else
+		data_size = SNSS_DATA_SIZE;
+
+	header.total_size = sizeof(header) + data_size + 4; /* "EOR" */
+	header.trigger = DASD_EER_STATECHANGE;
+	ktime_get_real_ts64(&ts);
+	header.tv_sec = ts.tv_sec;
+	header.tv_usec = ts.tv_nsec / NSEC_PER_USEC;
+	strlcpy(header.busid, dev_name(&device->cdev->dev),
+		DASD_EER_BUSID_SIZE);
+
+	spin_lock_irqsave(&bufferlock, flags);
+	list_for_each_entry(eerb, &bufferlist, list) {
+		dasd_eer_start_record(eerb, header.total_size);
+		dasd_eer_write_buffer(eerb, (char *) &header, sizeof(header));
+		if (!snss_rc)
+			dasd_eer_write_buffer(eerb, cqr->data, SNSS_DATA_SIZE);
+		dasd_eer_write_buffer(eerb, "EOR", 4);
+	}
+	spin_unlock_irqrestore(&bufferlock, flags);
+	wake_up_interruptible(&dasd_eer_read_wait_queue);
+}
+
+/*
+ * This function is called for all triggers. It calls the appropriate
+ * function that writes the actual trigger records.
+ */
+void dasd_eer_write(struct dasd_device *device, struct dasd_ccw_req *cqr,
+		    unsigned int id)
+{
+	if (!device->eer_cqr)
+		return;
+	switch (id) {
+	case DASD_EER_FATALERROR:
+	case DASD_EER_PPRCSUSPEND:
+		dasd_eer_write_standard_trigger(device, cqr, id);
+		break;
+	case DASD_EER_NOPATH:
+		dasd_eer_write_standard_trigger(device, NULL, id);
+		break;
+	case DASD_EER_STATECHANGE:
+		dasd_eer_write_snss_trigger(device, cqr, id);
+		break;
+	default: /* unknown trigger, so we write it without any sense data */
+		dasd_eer_write_standard_trigger(device, NULL, id);
+		break;
+	}
+}
+EXPORT_SYMBOL(dasd_eer_write);
+
+/*
+ * Start a sense subsystem status request.
+ * Needs to be called with the device held.
+ */
+void dasd_eer_snss(struct dasd_device *device)
+{
+	struct dasd_ccw_req *cqr;
+
+	cqr = device->eer_cqr;
+	if (!cqr)	/* Device not eer enabled. */
+		return;
+	if (test_and_set_bit(DASD_FLAG_EER_IN_USE, &device->flags)) {
+		/* Sense subsystem status request in use. */
+		set_bit(DASD_FLAG_EER_SNSS, &device->flags);
+		return;
+	}
+	/* cdev is already locked, can't use dasd_add_request_head */
+	clear_bit(DASD_FLAG_EER_SNSS, &device->flags);
+	cqr->status = DASD_CQR_QUEUED;
+	list_add(&cqr->devlist, &device->ccw_queue);
+	dasd_schedule_device_bh(device);
+}
+
+/*
+ * Callback function for use with sense subsystem status request.
+ */
+static void dasd_eer_snss_cb(struct dasd_ccw_req *cqr, void *data)
+{
+	struct dasd_device *device = cqr->startdev;
+	unsigned long flags;
+
+	dasd_eer_write(device, cqr, DASD_EER_STATECHANGE);
+	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
+	if (device->eer_cqr == cqr) {
+		clear_bit(DASD_FLAG_EER_IN_USE, &device->flags);
+		if (test_bit(DASD_FLAG_EER_SNSS, &device->flags))
+			/* Another SNSS has been requested in the meantime. */
+			dasd_eer_snss(device);
+		cqr = NULL;
+	}
+	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
+	if (cqr)
+		/*
+		 * Extended error reporting has been switched off while
+		 * the SNSS request was running. It could even have
+		 * been switched off and on again in which case there
+		 * is a new ccw in device->eer_cqr. Free the "old"
+		 * snss request now.
+		 */
+		dasd_sfree_request(cqr, device);
+}
+
+/*
+ * Enable error reporting on a given device.
+ */
+int dasd_eer_enable(struct dasd_device *device)
+{
+	struct dasd_ccw_req *cqr = NULL;
+	unsigned long flags;
+	struct ccw1 *ccw;
+	int rc = 0;
+
+	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
+	if (device->eer_cqr)
+		goto out;
+	else if (!device->discipline ||
+		 strcmp(device->discipline->name, "ECKD"))
+		rc = -EMEDIUMTYPE;
+	else if (test_bit(DASD_FLAG_OFFLINE, &device->flags))
+		rc = -EBUSY;
+
+	if (rc)
+		goto out;
+
+	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* SNSS */,
+				   SNSS_DATA_SIZE, device, NULL);
+	if (IS_ERR(cqr)) {
+		rc = -ENOMEM;
+		cqr = NULL;
+		goto out;
+	}
+
+	cqr->startdev = device;
+	cqr->retries = 255;
+	cqr->expires = 10 * HZ;
+	clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
+	set_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags);
+
+	ccw = cqr->cpaddr;
+	ccw->cmd_code = DASD_ECKD_CCW_SNSS;
+	ccw->count = SNSS_DATA_SIZE;
+	ccw->flags = 0;
+	ccw->cda = (__u32)(addr_t) cqr->data;
+
+	cqr->buildclk = get_tod_clock();
+	cqr->status = DASD_CQR_FILLED;
+	cqr->callback = dasd_eer_snss_cb;
+
+	if (!device->eer_cqr) {
+		device->eer_cqr = cqr;
+		cqr = NULL;
+	}
+
+out:
+	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
+
+	if (cqr)
+		dasd_sfree_request(cqr, device);
+
+	return rc;
+}
+
+/*
+ * Disable error reporting on a given device.
+ */
+void dasd_eer_disable(struct dasd_device *device)
+{
+	struct dasd_ccw_req *cqr;
+	unsigned long flags;
+	int in_use;
+
+	if (!device->eer_cqr)
+		return;
+	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
+	cqr = device->eer_cqr;
+	device->eer_cqr = NULL;
+	clear_bit(DASD_FLAG_EER_SNSS, &device->flags);
+	in_use = test_and_clear_bit(DASD_FLAG_EER_IN_USE, &device->flags);
+	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
+	if (cqr && !in_use)
+		dasd_sfree_request(cqr, device);
+}
+
+/*
+ * SECTION: the device operations
+ */
+
+/*
+ * On the one side we need a lock to access our internal buffer, on the
+ * other side a copy_to_user can sleep. So we need to copy the data we have
+ * to transfer in a readbuffer, which is protected by the readbuffer_mutex.
+ */
+static char readbuffer[PAGE_SIZE];
+static DEFINE_MUTEX(readbuffer_mutex);
+
+static int dasd_eer_open(struct inode *inp, struct file *filp)
+{
+	struct eerbuffer *eerb;
+	unsigned long flags;
+
+	eerb = kzalloc(sizeof(struct eerbuffer), GFP_KERNEL);
+	if (!eerb)
+		return -ENOMEM;
+	eerb->buffer_page_count = eer_pages;
+	if (eerb->buffer_page_count < 1 ||
+	    eerb->buffer_page_count > INT_MAX / PAGE_SIZE) {
+		kfree(eerb);
+		DBF_EVENT(DBF_WARNING, "can't open device since module "
+			"parameter eer_pages is smaller than 1 or"
+			" bigger than %d", (int)(INT_MAX / PAGE_SIZE));
+		return -EINVAL;
+	}
+	eerb->buffersize = eerb->buffer_page_count * PAGE_SIZE;
+	eerb->buffer = kmalloc_array(eerb->buffer_page_count, sizeof(char *),
+				     GFP_KERNEL);
+	if (!eerb->buffer) {
+		kfree(eerb);
+		return -ENOMEM;
+	}
+	if (dasd_eer_allocate_buffer_pages(eerb->buffer,
+					   eerb->buffer_page_count)) {
+		kfree(eerb->buffer);
+		kfree(eerb);
+		return -ENOMEM;
+	}
+	filp->private_data = eerb;
+	spin_lock_irqsave(&bufferlock, flags);
+	list_add(&eerb->list, &bufferlist);
+	spin_unlock_irqrestore(&bufferlock, flags);
+
+	return nonseekable_open(inp, filp);
+}
+
+static int dasd_eer_close(struct inode *inp, struct file *filp)
+{
+	struct eerbuffer *eerb;
+	unsigned long flags;
+
+	eerb = (struct eerbuffer *) filp->private_data;
+	spin_lock_irqsave(&bufferlock, flags);
+	list_del(&eerb->list);
+	spin_unlock_irqrestore(&bufferlock, flags);
+	dasd_eer_free_buffer_pages(eerb->buffer, eerb->buffer_page_count);
+	kfree(eerb->buffer);
+	kfree(eerb);
+
+	return 0;
+}
+
+static ssize_t dasd_eer_read(struct file *filp, char __user *buf,
+			     size_t count, loff_t *ppos)
+{
+	int tc, rc;
+	int tailcount, effective_count;
+	unsigned long flags;
+	struct eerbuffer *eerb;
+
+	eerb = (struct eerbuffer *) filp->private_data;
+	if (mutex_lock_interruptible(&readbuffer_mutex))
+		return -ERESTARTSYS;
+
+	spin_lock_irqsave(&bufferlock, flags);
+
+	if (eerb->residual < 0) {
+		/* the remainder of this record has been deleted */
+		eerb->residual = 0;
+		spin_unlock_irqrestore(&bufferlock, flags);
+		mutex_unlock(&readbuffer_mutex);
+		return -EIO;
+	} else if (eerb->residual > 0) {
+		/* OK we still have a second half of a record to deliver */
+		effective_count = min(eerb->residual, (int) count);
+		eerb->residual -= effective_count;
+	} else {
+		tc = 0;
+		while (!tc) {
+			tc = dasd_eer_read_buffer(eerb, (char *) &tailcount,
+						  sizeof(tailcount));
+			if (!tc) {
+				/* no data available */
+				spin_unlock_irqrestore(&bufferlock, flags);
+				mutex_unlock(&readbuffer_mutex);
+				if (filp->f_flags & O_NONBLOCK)
+					return -EAGAIN;
+				rc = wait_event_interruptible(
+					dasd_eer_read_wait_queue,
+					eerb->head != eerb->tail);
+				if (rc)
+					return rc;
+				if (mutex_lock_interruptible(&readbuffer_mutex))
+					return -ERESTARTSYS;
+				spin_lock_irqsave(&bufferlock, flags);
+			}
+		}
+		WARN_ON(tc != sizeof(tailcount));
+		effective_count = min(tailcount, (int) count);
+		eerb->residual = tailcount - effective_count;
+	}
+
+	tc = dasd_eer_read_buffer(eerb, readbuffer, effective_count);
+	WARN_ON(tc != effective_count);
+
+	spin_unlock_irqrestore(&bufferlock, flags);
+
+	if (copy_to_user(buf, readbuffer, effective_count)) {
+		mutex_unlock(&readbuffer_mutex);
+		return -EFAULT;
+	}
+
+	mutex_unlock(&readbuffer_mutex);
+	return effective_count;
+}
+
+static __poll_t dasd_eer_poll(struct file *filp, poll_table *ptable)
+{
+	__poll_t mask;
+	unsigned long flags;
+	struct eerbuffer *eerb;
+
+	eerb = (struct eerbuffer *) filp->private_data;
+	poll_wait(filp, &dasd_eer_read_wait_queue, ptable);
+	spin_lock_irqsave(&bufferlock, flags);
+	if (eerb->head != eerb->tail)
+		mask = EPOLLIN | EPOLLRDNORM;
+	else
+		mask = 0;
+	spin_unlock_irqrestore(&bufferlock, flags);
+	return mask;
+}
+
+static const struct file_operations dasd_eer_fops = {
+	.open		= &dasd_eer_open,
+	.release	= &dasd_eer_close,
+	.read		= &dasd_eer_read,
+	.poll		= &dasd_eer_poll,
+	.owner		= THIS_MODULE,
+	.llseek		= noop_llseek,
+};
+
+static struct miscdevice *dasd_eer_dev = NULL;
+
+int __init dasd_eer_init(void)
+{
+	int rc;
+
+	dasd_eer_dev = kzalloc(sizeof(*dasd_eer_dev), GFP_KERNEL);
+	if (!dasd_eer_dev)
+		return -ENOMEM;
+
+	dasd_eer_dev->minor = MISC_DYNAMIC_MINOR;
+	dasd_eer_dev->name  = "dasd_eer";
+	dasd_eer_dev->fops  = &dasd_eer_fops;
+
+	rc = misc_register(dasd_eer_dev);
+	if (rc) {
+		kfree(dasd_eer_dev);
+		dasd_eer_dev = NULL;
+		DBF_EVENT(DBF_ERR, "%s", "dasd_eer_init could not "
+		       "register misc device");
+		return rc;
+	}
+
+	return 0;
+}
+
+void dasd_eer_exit(void)
+{
+	if (dasd_eer_dev) {
+		misc_deregister(dasd_eer_dev);
+		kfree(dasd_eer_dev);
+		dasd_eer_dev = NULL;
+	}
+}
diff --git a/drivers/s390/block/dasd_erp.c b/drivers/s390/block/dasd_erp.c
new file mode 100644
index 0000000..ba4fa37
--- /dev/null
+++ b/drivers/s390/block/dasd_erp.c
@@ -0,0 +1,203 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
+ *		    Horst Hummel <Horst.Hummel@de.ibm.com>
+ *		    Carsten Otte <Cotte@de.ibm.com>
+ *		    Martin Schwidefsky <schwidefsky@de.ibm.com>
+ * Bugreports.to..: <Linux390@de.ibm.com>
+ * Copyright IBM Corp. 1999, 2001
+ *
+ */
+
+#define KMSG_COMPONENT "dasd"
+
+#include <linux/ctype.h>
+#include <linux/init.h>
+
+#include <asm/debug.h>
+#include <asm/ebcdic.h>
+#include <linux/uaccess.h>
+
+/* This is ugly... */
+#define PRINTK_HEADER "dasd_erp:"
+
+#include "dasd_int.h"
+
+struct dasd_ccw_req *
+dasd_alloc_erp_request(char *magic, int cplength, int datasize,
+		       struct dasd_device * device)
+{
+	unsigned long flags;
+	struct dasd_ccw_req *cqr;
+	char *data;
+	int size;
+
+	/* Sanity checks */
+	BUG_ON(magic == NULL || datasize > PAGE_SIZE ||
+	       (cplength * sizeof(struct ccw1)) > PAGE_SIZE);
+
+	size = (sizeof(struct dasd_ccw_req) + 7L) & -8L;
+	if (cplength > 0)
+		size += cplength * sizeof(struct ccw1);
+	if (datasize > 0)
+		size += datasize;
+	spin_lock_irqsave(&device->mem_lock, flags);
+	cqr = (struct dasd_ccw_req *)
+		dasd_alloc_chunk(&device->erp_chunks, size);
+	spin_unlock_irqrestore(&device->mem_lock, flags);
+	if (cqr == NULL)
+		return ERR_PTR(-ENOMEM);
+	memset(cqr, 0, sizeof(struct dasd_ccw_req));
+	INIT_LIST_HEAD(&cqr->devlist);
+	INIT_LIST_HEAD(&cqr->blocklist);
+	data = (char *) cqr + ((sizeof(struct dasd_ccw_req) + 7L) & -8L);
+	cqr->cpaddr = NULL;
+	if (cplength > 0) {
+		cqr->cpaddr = (struct ccw1 *) data;
+		data += cplength*sizeof(struct ccw1);
+		memset(cqr->cpaddr, 0, cplength*sizeof(struct ccw1));
+	}
+	cqr->data = NULL;
+	if (datasize > 0) {
+		cqr->data = data;
+		memset(cqr->data, 0, datasize);
+	}
+	strncpy((char *) &cqr->magic, magic, 4);
+	ASCEBC((char *) &cqr->magic, 4);
+	set_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
+	dasd_get_device(device);
+	return cqr;
+}
+
+void
+dasd_free_erp_request(struct dasd_ccw_req *cqr, struct dasd_device * device)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&device->mem_lock, flags);
+	dasd_free_chunk(&device->erp_chunks, cqr);
+	spin_unlock_irqrestore(&device->mem_lock, flags);
+	atomic_dec(&device->ref_count);
+}
+
+
+/*
+ * dasd_default_erp_action just retries the current cqr
+ */
+struct dasd_ccw_req *
+dasd_default_erp_action(struct dasd_ccw_req *cqr)
+{
+	struct dasd_device *device;
+
+	device = cqr->startdev;
+
+	/* just retry - there is nothing to save and no sense data available */
+	if (cqr->retries > 0) {
+		DBF_DEV_EVENT(DBF_DEBUG, device,
+			      "default ERP called (%i retries left)",
+			      cqr->retries);
+		if (!test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags))
+			cqr->lpm = dasd_path_get_opm(device);
+		cqr->status = DASD_CQR_FILLED;
+	} else {
+		pr_err("%s: default ERP has run out of retries and failed\n",
+		       dev_name(&device->cdev->dev));
+		cqr->status = DASD_CQR_FAILED;
+		cqr->stopclk = get_tod_clock();
+	}
+	return cqr;
+}				/* end dasd_default_erp_action */
+
+/*
+ * DESCRIPTION
+ *   Frees all ERPs of the current ERP chain and sets the status
+ *   of the original CQR either to DASD_CQR_DONE if ERP was successful
+ *   or to DASD_CQR_FAILED if ERP was NOT successful.
+ *   NOTE: This function is only called if no discipline postaction
+ *	   is available
+ *
+ * PARAMETER
+ *   erp		current erp_head
+ *
+ * RETURN VALUES
+ *   cqr		pointer to the original CQR
+ */
+struct dasd_ccw_req *dasd_default_erp_postaction(struct dasd_ccw_req *cqr)
+{
+	int success;
+	unsigned long startclk, stopclk;
+	struct dasd_device *startdev;
+
+	BUG_ON(cqr->refers == NULL || cqr->function == NULL);
+
+	success = cqr->status == DASD_CQR_DONE;
+	startclk = cqr->startclk;
+	stopclk = cqr->stopclk;
+	startdev = cqr->startdev;
+
+	/* free all ERPs - but NOT the original cqr */
+	while (cqr->refers != NULL) {
+		struct dasd_ccw_req *refers;
+
+		refers = cqr->refers;
+		/* remove the request from the block queue */
+		list_del(&cqr->blocklist);
+		/* free the finished erp request */
+		dasd_free_erp_request(cqr, cqr->memdev);
+		cqr = refers;
+	}
+
+	/* set corresponding status to original cqr */
+	cqr->startclk = startclk;
+	cqr->stopclk = stopclk;
+	cqr->startdev = startdev;
+	if (success)
+		cqr->status = DASD_CQR_DONE;
+	else {
+		cqr->status = DASD_CQR_FAILED;
+		cqr->stopclk = get_tod_clock();
+	}
+
+	return cqr;
+
+}				/* end default_erp_postaction */
+
+void
+dasd_log_sense(struct dasd_ccw_req *cqr, struct irb *irb)
+{
+	struct dasd_device *device;
+
+	device = cqr->startdev;
+	if (cqr->intrc == -ETIMEDOUT) {
+		dev_err(&device->cdev->dev,
+			"A timeout error occurred for cqr %p\n", cqr);
+		return;
+	}
+	if (cqr->intrc == -ENOLINK) {
+		dev_err(&device->cdev->dev,
+			"A transport error occurred for cqr %p\n", cqr);
+		return;
+	}
+	/* dump sense data */
+	if (device->discipline && device->discipline->dump_sense)
+		device->discipline->dump_sense(device, cqr, irb);
+}
+
+void
+dasd_log_sense_dbf(struct dasd_ccw_req *cqr, struct irb *irb)
+{
+	struct dasd_device *device;
+
+	device = cqr->startdev;
+	/* dump sense data to the s390 debug feature */
+	if (device->discipline && device->discipline->dump_sense_dbf)
+		device->discipline->dump_sense_dbf(device, irb, "log");
+}
+EXPORT_SYMBOL(dasd_log_sense_dbf);
+
+EXPORT_SYMBOL(dasd_default_erp_action);
+EXPORT_SYMBOL(dasd_default_erp_postaction);
+EXPORT_SYMBOL(dasd_alloc_erp_request);
+EXPORT_SYMBOL(dasd_free_erp_request);
+EXPORT_SYMBOL(dasd_log_sense);
+
diff --git a/drivers/s390/block/dasd_fba.c b/drivers/s390/block/dasd_fba.c
new file mode 100644
index 0000000..56007a3
--- /dev/null
+++ b/drivers/s390/block/dasd_fba.c
@@ -0,0 +1,828 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
+ * Bugreports.to..: <Linux390@de.ibm.com>
+ * Copyright IBM Corp. 1999, 2009
+ */
+
+#define KMSG_COMPONENT "dasd-fba"
+
+#include <linux/stddef.h>
+#include <linux/kernel.h>
+#include <asm/debug.h>
+
+#include <linux/slab.h>
+#include <linux/hdreg.h>	/* HDIO_GETGEO			    */
+#include <linux/bio.h>
+#include <linux/module.h>
+#include <linux/init.h>
+
+#include <asm/idals.h>
+#include <asm/ebcdic.h>
+#include <asm/io.h>
+#include <asm/ccwdev.h>
+
+#include "dasd_int.h"
+#include "dasd_fba.h"
+
+#ifdef PRINTK_HEADER
+#undef PRINTK_HEADER
+#endif				/* PRINTK_HEADER */
+#define PRINTK_HEADER "dasd(fba):"
+
+#define FBA_DEFAULT_RETRIES 32
+
+#define DASD_FBA_CCW_WRITE 0x41
+#define DASD_FBA_CCW_READ 0x42
+#define DASD_FBA_CCW_LOCATE 0x43
+#define DASD_FBA_CCW_DEFINE_EXTENT 0x63
+
+MODULE_LICENSE("GPL");
+
+static struct dasd_discipline dasd_fba_discipline;
+
+struct dasd_fba_private {
+	struct dasd_fba_characteristics rdc_data;
+};
+
+static struct ccw_device_id dasd_fba_ids[] = {
+	{ CCW_DEVICE_DEVTYPE (0x6310, 0, 0x9336, 0), .driver_info = 0x1},
+	{ CCW_DEVICE_DEVTYPE (0x3880, 0, 0x3370, 0), .driver_info = 0x2},
+	{ /* end of list */ },
+};
+
+MODULE_DEVICE_TABLE(ccw, dasd_fba_ids);
+
+static struct ccw_driver dasd_fba_driver; /* see below */
+static int
+dasd_fba_probe(struct ccw_device *cdev)
+{
+	return dasd_generic_probe(cdev, &dasd_fba_discipline);
+}
+
+static int
+dasd_fba_set_online(struct ccw_device *cdev)
+{
+	return dasd_generic_set_online(cdev, &dasd_fba_discipline);
+}
+
+static struct ccw_driver dasd_fba_driver = {
+	.driver = {
+		.name	= "dasd-fba",
+		.owner	= THIS_MODULE,
+	},
+	.ids         = dasd_fba_ids,
+	.probe       = dasd_fba_probe,
+	.remove      = dasd_generic_remove,
+	.set_offline = dasd_generic_set_offline,
+	.set_online  = dasd_fba_set_online,
+	.notify      = dasd_generic_notify,
+	.path_event  = dasd_generic_path_event,
+	.freeze      = dasd_generic_pm_freeze,
+	.thaw	     = dasd_generic_restore_device,
+	.restore     = dasd_generic_restore_device,
+	.int_class   = IRQIO_DAS,
+};
+
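+/*
+ * Build a define extent CCW for an FBA device. The extent spans the blocks
+ * beg through beg + nr - 1; e.g. beg = 0 and nr = 100 (hypothetical values)
+ * yield ext_loc = 0 and ext_end = 99.
+ */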
+static void
+define_extent(struct ccw1 * ccw, struct DE_fba_data *data, int rw,
+	      int blksize, int beg, int nr)
+{
+	ccw->cmd_code = DASD_FBA_CCW_DEFINE_EXTENT;
+	ccw->flags = 0;
+	ccw->count = 16;
+	ccw->cda = (__u32) __pa(data);
+	memset(data, 0, sizeof (struct DE_fba_data));
+	if (rw == WRITE)
+		(data->mask).perm = 0x0;
+	else if (rw == READ)
+		(data->mask).perm = 0x1;
+	else
+		data->mask.perm = 0x2;
+	data->blk_size = blksize;
+	data->ext_loc = beg;
+	data->ext_end = nr - 1;
+}
+
+static void
+locate_record(struct ccw1 * ccw, struct LO_fba_data *data, int rw,
+	      int block_nr, int block_ct)
+{
+	ccw->cmd_code = DASD_FBA_CCW_LOCATE;
+	ccw->flags = 0;
+	ccw->count = 8;
+	ccw->cda = (__u32) __pa(data);
+	memset(data, 0, sizeof (struct LO_fba_data));
+	if (rw == WRITE)
+		data->operation.cmd = 0x5;
+	else if (rw == READ)
+		data->operation.cmd = 0x6;
+	else
+		data->operation.cmd = 0x8;
+	data->blk_nr = block_nr;
+	data->blk_ct = block_ct;
+}
+
+static int
+dasd_fba_check_characteristics(struct dasd_device *device)
+{
+	struct dasd_fba_private *private = device->private;
+	struct ccw_device *cdev = device->cdev;
+	struct dasd_block *block;
+	int readonly, rc;
+
+	if (!private) {
+		private = kzalloc(sizeof(*private), GFP_KERNEL | GFP_DMA);
+		if (!private) {
+			dev_warn(&device->cdev->dev,
+				 "Allocating memory for private DASD "
+				 "data failed\n");
+			return -ENOMEM;
+		}
+		device->private = private;
+	} else {
+		memset(private, 0, sizeof(*private));
+	}
+	block = dasd_alloc_block();
+	if (IS_ERR(block)) {
+		DBF_EVENT_DEVID(DBF_WARNING, cdev, "%s", "could not allocate "
+				"dasd block structure");
+		device->private = NULL;
+		kfree(private);
+		return PTR_ERR(block);
+	}
+	device->block = block;
+	block->base = device;
+
+	/* Read Device Characteristics */
+	rc = dasd_generic_read_dev_chars(device, DASD_FBA_MAGIC,
+					 &private->rdc_data, 32);
+	if (rc) {
+		DBF_EVENT_DEVID(DBF_WARNING, cdev, "Read device "
+				"characteristics returned error %d", rc);
+		device->block = NULL;
+		dasd_free_block(block);
+		device->private = NULL;
+		kfree(private);
+		return rc;
+	}
+
+	device->default_expires = DASD_EXPIRES;
+	device->default_retries = FBA_DEFAULT_RETRIES;
+	dasd_path_set_opm(device, LPM_ANYPATH);
+
+	readonly = dasd_device_is_ro(device);
+	if (readonly)
+		set_bit(DASD_FLAG_DEVICE_RO, &device->flags);
+
+	/* FBA supports discard, set the according feature bit */
+	dasd_set_feature(cdev, DASD_FEATURE_DISCARD, 1);
+
+	dev_info(&device->cdev->dev,
+		 "New FBA DASD %04X/%02X (CU %04X/%02X) with %d MB "
+		 "and %d B/blk%s\n",
+		 cdev->id.dev_type,
+		 cdev->id.dev_model,
+		 cdev->id.cu_type,
+		 cdev->id.cu_model,
+		 ((private->rdc_data.blk_bdsa *
+		   (private->rdc_data.blk_size >> 9)) >> 11),
+		 private->rdc_data.blk_size,
+		 readonly ? ", read-only device" : "");
+	return 0;
+}
+
+static int dasd_fba_do_analysis(struct dasd_block *block)
+{
+	struct dasd_fba_private *private = block->base->private;
+	int sb, rc;
+
+	rc = dasd_check_blocksize(private->rdc_data.blk_size);
+	if (rc) {
+		DBF_DEV_EVENT(DBF_WARNING, block->base, "unknown blocksize %d",
+			    private->rdc_data.blk_size);
+		return rc;
+	}
+	block->blocks = private->rdc_data.blk_bdsa;
+	block->bp_block = private->rdc_data.blk_size;
+	block->s2b_shift = 0;	/* bits to shift 512 to get a block */
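+	/* e.g. a 4096 byte block size yields s2b_shift = 3 (512 << 3 = 4096) */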
+	for (sb = 512; sb < private->rdc_data.blk_size; sb = sb << 1)
+		block->s2b_shift++;
+	return 0;
+}
+
+static int dasd_fba_fill_geometry(struct dasd_block *block,
+				  struct hd_geometry *geo)
+{
+	if (dasd_check_blocksize(block->bp_block) != 0)
+		return -EINVAL;
+	geo->cylinders = (block->blocks << block->s2b_shift) >> 10;
+	geo->heads = 16;
+	geo->sectors = 128 >> block->s2b_shift;
+	return 0;
+}
+
+static dasd_erp_fn_t
+dasd_fba_erp_action(struct dasd_ccw_req * cqr)
+{
+	return dasd_default_erp_action;
+}
+
+static dasd_erp_fn_t
+dasd_fba_erp_postaction(struct dasd_ccw_req * cqr)
+{
+	if (cqr->function == dasd_default_erp_action)
+		return dasd_default_erp_postaction;
+
+	DBF_DEV_EVENT(DBF_WARNING, cqr->startdev, "unknown ERP action %p",
+		    cqr->function);
+	return NULL;
+}
+
+static void dasd_fba_check_for_device_change(struct dasd_device *device,
+					     struct dasd_ccw_req *cqr,
+					     struct irb *irb)
+{
+	char mask;
+
+	/* first of all check for state change pending interrupt */
+	mask = DEV_STAT_ATTENTION | DEV_STAT_DEV_END | DEV_STAT_UNIT_EXCEP;
+	if ((irb->scsw.cmd.dstat & mask) == mask)
+		dasd_generic_handle_state_change(device);
+}
+
+
+/*
+ * Builds a CCW with no data payload
+ */
+static void ccw_write_no_data(struct ccw1 *ccw)
+{
+	ccw->cmd_code = DASD_FBA_CCW_WRITE;
+	ccw->flags |= CCW_FLAG_SLI;
+	ccw->count = 0;
+}
+
+/*
+ * Builds a CCW that writes only zeroes.
+ */
+static void ccw_write_zero(struct ccw1 *ccw, int count)
+{
+	ccw->cmd_code = DASD_FBA_CCW_WRITE;
+	ccw->flags |= CCW_FLAG_SLI;
+	ccw->count = count;
+	ccw->cda = (__u32) (addr_t) page_to_phys(ZERO_PAGE(0));
+}
+
+/*
+ * Helper function to count the amount of necessary CCWs within a given range
+ * with 4k alignment and command chaining in mind.
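+ *
+ * Worked example (hypothetical numbers): with blocks_per_page = 8,
+ * first_rec = 3 and last_rec = 20, the range splits into an unaligned
+ * head (records 3-7), a page aligned middle (8-15) and an unaligned
+ * tail (16-20), so count_ccws() returns 3.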
+ */
+static int count_ccws(sector_t first_rec, sector_t last_rec,
+		      unsigned int blocks_per_page)
+{
+	sector_t wz_stop = 0, d_stop = 0;
+	int cur_pos = 0;
+	int count = 0;
+
+	if (first_rec % blocks_per_page != 0) {
+		wz_stop = first_rec + blocks_per_page -
+			(first_rec % blocks_per_page) - 1;
+		if (wz_stop > last_rec)
+			wz_stop = last_rec;
+		cur_pos = wz_stop - first_rec + 1;
+		count++;
+	}
+
+	if (last_rec - (first_rec + cur_pos) + 1 >= blocks_per_page) {
+		if ((last_rec - blocks_per_page + 1) % blocks_per_page != 0)
+			d_stop = last_rec - ((last_rec - blocks_per_page + 1) %
+					     blocks_per_page);
+		else
+			d_stop = last_rec;
+
+		cur_pos += d_stop - (first_rec + cur_pos) + 1;
+		count++;
+	}
+
+	if (cur_pos == 0 || first_rec + cur_pos - 1 < last_rec)
+		count++;
+
+	return count;
+}
+
+/*
+ * This function builds a CCW request for block layer discard requests.
+ * Each page in the z/VM hypervisor that represents certain records of an FBA
+ * device will be padded with zeros. This is a special behaviour of the WRITE
+ * command which is triggered when no data payload is added to the CCW.
+ *
+ * Note: Due to issues in some z/VM versions, we can't fully utilise this
+ * special behaviour. We have to keep a 4k (or 8 block) alignment in mind to
+ * work around those issues and write actual zeroes to the unaligned parts in
+ * the request. This workaround might be removed in the future.
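+ *
+ * The resulting channel program is one define extent CCW followed by
+ * nr_ccws locate record/write pairs, i.e. cplength = 1 + 2 * nr_ccws;
+ * the worked example above (nr_ccws = 3) thus yields a chain of 7 CCWs.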
+ */
+static struct dasd_ccw_req *dasd_fba_build_cp_discard(
+						struct dasd_device *memdev,
+						struct dasd_block *block,
+						struct request *req)
+{
+	struct LO_fba_data *LO_data;
+	struct dasd_ccw_req *cqr;
+	struct ccw1 *ccw;
+
+	sector_t wz_stop = 0, d_stop = 0;
+	sector_t first_rec, last_rec;
+
+	unsigned int blksize = block->bp_block;
+	unsigned int blocks_per_page;
+	int wz_count = 0;
+	int d_count = 0;
+	int cur_pos = 0; /* Current position within the extent */
+	int count = 0;
+	int cplength;
+	int datasize;
+	int nr_ccws;
+
+	first_rec = blk_rq_pos(req) >> block->s2b_shift;
+	last_rec =
+		(blk_rq_pos(req) + blk_rq_sectors(req) - 1) >> block->s2b_shift;
+	count = last_rec - first_rec + 1;
+
+	blocks_per_page = BLOCKS_PER_PAGE(blksize);
+	nr_ccws = count_ccws(first_rec, last_rec, blocks_per_page);
+
+	/* define extent + nr_ccws * locate record + nr_ccws * single CCW */
+	cplength = 1 + 2 * nr_ccws;
+	datasize = sizeof(struct DE_fba_data) +
+		nr_ccws * (sizeof(struct LO_fba_data) + sizeof(struct ccw1));
+
+	cqr = dasd_smalloc_request(DASD_FBA_MAGIC, cplength, datasize, memdev,
+				   blk_mq_rq_to_pdu(req));
+	if (IS_ERR(cqr))
+		return cqr;
+
+	ccw = cqr->cpaddr;
+
+	define_extent(ccw++, cqr->data, WRITE, blksize, first_rec, count);
+	LO_data = cqr->data + sizeof(struct DE_fba_data);
+
+	/* First part is not aligned. Calculate range to write zeroes. */
+	if (first_rec % blocks_per_page != 0) {
+		wz_stop = first_rec + blocks_per_page -
+			(first_rec % blocks_per_page) - 1;
+		if (wz_stop > last_rec)
+			wz_stop = last_rec;
+		wz_count = wz_stop - first_rec + 1;
+
+		ccw[-1].flags |= CCW_FLAG_CC;
+		locate_record(ccw++, LO_data++, WRITE, cur_pos, wz_count);
+
+		ccw[-1].flags |= CCW_FLAG_CC;
+		ccw_write_zero(ccw++, wz_count * blksize);
+
+		cur_pos = wz_count;
+	}
+
+	/*
+	 * We can do a proper discard when we have at least blocks_per_page
+	 * blocks to work with.
+	 */
+	if (last_rec - (first_rec + cur_pos) + 1 >= blocks_per_page) {
+		/* is last record at page boundary? */
+		if ((last_rec - blocks_per_page + 1) % blocks_per_page != 0)
+			d_stop = last_rec - ((last_rec - blocks_per_page + 1) %
+					     blocks_per_page);
+		else
+			d_stop = last_rec;
+
+		d_count = d_stop - (first_rec + cur_pos) + 1;
+
+		ccw[-1].flags |= CCW_FLAG_CC;
+		locate_record(ccw++, LO_data++, WRITE, cur_pos, d_count);
+
+		ccw[-1].flags |= CCW_FLAG_CC;
+		ccw_write_no_data(ccw++);
+
+		cur_pos += d_count;
+	}
+
+	/* We might still have some bits left which need to be zeroed. */
+	if (cur_pos == 0 || first_rec + cur_pos - 1 < last_rec) {
+		if (d_stop != 0)
+			wz_count = last_rec - d_stop;
+		else if (wz_stop != 0)
+			wz_count = last_rec - wz_stop;
+		else
+			wz_count = count;
+
+		ccw[-1].flags |= CCW_FLAG_CC;
+		locate_record(ccw++, LO_data++, WRITE, cur_pos, wz_count);
+
+		ccw[-1].flags |= CCW_FLAG_CC;
+		ccw_write_zero(ccw++, wz_count * blksize);
+	}
+
+	if (blk_noretry_request(req) ||
+	    block->base->features & DASD_FEATURE_FAILFAST)
+		set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
+
+	cqr->startdev = memdev;
+	cqr->memdev = memdev;
+	cqr->block = block;
+	cqr->expires = memdev->default_expires * HZ;	/* default 5 minutes */
+	cqr->retries = memdev->default_retries;
+	cqr->buildclk = get_tod_clock();
+	cqr->status = DASD_CQR_FILLED;
+
+	return cqr;
+}
+
+static struct dasd_ccw_req *dasd_fba_build_cp_regular(
+						struct dasd_device *memdev,
+						struct dasd_block *block,
+						struct request *req)
+{
+	struct dasd_fba_private *private = block->base->private;
+	unsigned long *idaws;
+	struct LO_fba_data *LO_data;
+	struct dasd_ccw_req *cqr;
+	struct ccw1 *ccw;
+	struct req_iterator iter;
+	struct bio_vec bv;
+	char *dst;
+	int count, cidaw, cplength, datasize;
+	sector_t recid, first_rec, last_rec;
+	unsigned int blksize, off;
+	unsigned char cmd;
+
+	if (rq_data_dir(req) == READ) {
+		cmd = DASD_FBA_CCW_READ;
+	} else if (rq_data_dir(req) == WRITE) {
+		cmd = DASD_FBA_CCW_WRITE;
+	} else
+		return ERR_PTR(-EINVAL);
+	blksize = block->bp_block;
+	/* Calculate record id of first and last block. */
+	first_rec = blk_rq_pos(req) >> block->s2b_shift;
+	last_rec =
+		(blk_rq_pos(req) + blk_rq_sectors(req) - 1) >> block->s2b_shift;
+	/* Check struct bio and count the number of blocks for the request. */
+	count = 0;
+	cidaw = 0;
+	rq_for_each_segment(bv, req, iter) {
+		if (bv.bv_len & (blksize - 1))
+			/* FBA can only do full blocks. */
+			return ERR_PTR(-EINVAL);
+		count += bv.bv_len >> (block->s2b_shift + 9);
+		if (idal_is_needed(page_address(bv.bv_page), bv.bv_len))
+			cidaw += bv.bv_len / blksize;
+	}
+	/* Paranoia. */
+	if (count != last_rec - first_rec + 1)
+		return ERR_PTR(-EINVAL);
+	/* 1x define extent + 1x locate record + number of blocks */
+	cplength = 2 + count;
+	/* 1x define extent + 1x locate record */
+	datasize = sizeof(struct DE_fba_data) + sizeof(struct LO_fba_data) +
+		cidaw * sizeof(unsigned long);
+	/*
+	 * Find out number of additional locate record ccws if the device
+	 * can't do data chaining.
+	 */
+	if (private->rdc_data.mode.bits.data_chain == 0) {
+		cplength += count - 1;
+		datasize += (count - 1) * sizeof(struct LO_fba_data);
+	}
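+	/*
+	 * Example (illustration): a 3-block request on a device without
+	 * data chaining needs 1 define extent + 3 locate records + 3 data
+	 * CCWs, i.e. cplength = 2 + 3 + (3 - 1) = 7.
+	 */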
+	/* Allocate the ccw request. */
+	cqr = dasd_smalloc_request(DASD_FBA_MAGIC, cplength, datasize, memdev,
+				   blk_mq_rq_to_pdu(req));
+	if (IS_ERR(cqr))
+		return cqr;
+	ccw = cqr->cpaddr;
+	/* First ccw is define extent. */
+	define_extent(ccw++, cqr->data, rq_data_dir(req),
+		      block->bp_block, blk_rq_pos(req), blk_rq_sectors(req));
+	/* Build locate_record + read/write ccws. */
+	idaws = (unsigned long *) (cqr->data + sizeof(struct DE_fba_data));
+	LO_data = (struct LO_fba_data *) (idaws + cidaw);
+	/* Locate record for all blocks for smart devices. */
+	if (private->rdc_data.mode.bits.data_chain != 0) {
+		ccw[-1].flags |= CCW_FLAG_CC;
+		locate_record(ccw++, LO_data++, rq_data_dir(req), 0, count);
+	}
+	recid = first_rec;
+	rq_for_each_segment(bv, req, iter) {
+		dst = page_address(bv.bv_page) + bv.bv_offset;
+		if (dasd_page_cache) {
+			char *copy = kmem_cache_alloc(dasd_page_cache,
+						      GFP_DMA | __GFP_NOWARN);
+			if (copy && rq_data_dir(req) == WRITE)
+				memcpy(copy + bv.bv_offset, dst, bv.bv_len);
+			if (copy)
+				dst = copy + bv.bv_offset;
+		}
+		for (off = 0; off < bv.bv_len; off += blksize) {
+			/* Locate record for stupid devices. */
+			if (private->rdc_data.mode.bits.data_chain == 0) {
+				ccw[-1].flags |= CCW_FLAG_CC;
+				locate_record(ccw, LO_data++,
+					      rq_data_dir(req),
+					      recid - first_rec, 1);
+				ccw->flags = CCW_FLAG_CC;
+				ccw++;
+			} else {
+				if (recid > first_rec)
+					ccw[-1].flags |= CCW_FLAG_DC;
+				else
+					ccw[-1].flags |= CCW_FLAG_CC;
+			}
+			ccw->cmd_code = cmd;
+			ccw->count = block->bp_block;
+			if (idal_is_needed(dst, blksize)) {
+				ccw->cda = (__u32)(addr_t) idaws;
+				ccw->flags = CCW_FLAG_IDA;
+				idaws = idal_create_words(idaws, dst, blksize);
+			} else {
+				ccw->cda = (__u32)(addr_t) dst;
+				ccw->flags = 0;
+			}
+			ccw++;
+			dst += blksize;
+			recid++;
+		}
+	}
+	if (blk_noretry_request(req) ||
+	    block->base->features & DASD_FEATURE_FAILFAST)
+		set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
+	cqr->startdev = memdev;
+	cqr->memdev = memdev;
+	cqr->block = block;
+	cqr->expires = memdev->default_expires * HZ;	/* default 5 minutes */
+	cqr->retries = memdev->default_retries;
+	cqr->buildclk = get_tod_clock();
+	cqr->status = DASD_CQR_FILLED;
+	return cqr;
+}
+
+static struct dasd_ccw_req *dasd_fba_build_cp(struct dasd_device *memdev,
+					      struct dasd_block *block,
+					      struct request *req)
+{
+	if (req_op(req) == REQ_OP_DISCARD || req_op(req) == REQ_OP_WRITE_ZEROES)
+		return dasd_fba_build_cp_discard(memdev, block, req);
+	else
+		return dasd_fba_build_cp_regular(memdev, block, req);
+}
+
+static int
+dasd_fba_free_cp(struct dasd_ccw_req *cqr, struct request *req)
+{
+	struct dasd_fba_private *private = cqr->block->base->private;
+	struct ccw1 *ccw;
+	struct req_iterator iter;
+	struct bio_vec bv;
+	char *dst, *cda;
+	unsigned int blksize, off;
+	int status;
+
+	if (!dasd_page_cache)
+		goto out;
+	blksize = cqr->block->bp_block;
+	ccw = cqr->cpaddr;
+	/* Skip over define extent & locate record. */
+	ccw++;
+	if (private->rdc_data.mode.bits.data_chain != 0)
+		ccw++;
+	rq_for_each_segment(bv, req, iter) {
+		dst = page_address(bv.bv_page) + bv.bv_offset;
+		for (off = 0; off < bv.bv_len; off += blksize) {
+			/* Skip locate record. */
+			if (private->rdc_data.mode.bits.data_chain == 0)
+				ccw++;
+			if (dst) {
+				if (ccw->flags & CCW_FLAG_IDA)
+					cda = *((char **)((addr_t) ccw->cda));
+				else
+					cda = (char *)((addr_t) ccw->cda);
+				if (dst != cda) {
+					if (rq_data_dir(req) == READ)
+						memcpy(dst, cda, bv.bv_len);
+					kmem_cache_free(dasd_page_cache,
+					    (void *)((addr_t)cda & PAGE_MASK));
+				}
+				dst = NULL;
+			}
+			ccw++;
+		}
+	}
+out:
+	status = cqr->status == DASD_CQR_DONE;
+	dasd_sfree_request(cqr, cqr->memdev);
+	return status;
+}
+
+static void dasd_fba_handle_terminated_request(struct dasd_ccw_req *cqr)
+{
+	if (cqr->retries < 0)
+		cqr->status = DASD_CQR_FAILED;
+	else
+		cqr->status = DASD_CQR_FILLED;
+}
+
+static int
+dasd_fba_fill_info(struct dasd_device * device,
+		   struct dasd_information2_t * info)
+{
+	struct dasd_fba_private *private = device->private;
+
+	info->label_block = 1;
+	info->FBA_layout = 1;
+	info->format = DASD_FORMAT_LDL;
+	info->characteristics_size = sizeof(private->rdc_data);
+	memcpy(info->characteristics, &private->rdc_data,
+	       sizeof(private->rdc_data));
+	info->confdata_size = 0;
+	return 0;
+}
+
+static void
+dasd_fba_dump_sense_dbf(struct dasd_device *device, struct irb *irb,
+			char *reason)
+{
+	u64 *sense;
+
+	sense = (u64 *) dasd_get_sense(irb);
+	if (sense) {
+		DBF_DEV_EVENT(DBF_EMERG, device,
+			      "%s: %s %02x%02x%02x %016llx %016llx %016llx "
+			      "%016llx", reason,
+			      scsw_is_tm(&irb->scsw) ? "t" : "c",
+			      scsw_cc(&irb->scsw), scsw_cstat(&irb->scsw),
+			      scsw_dstat(&irb->scsw), sense[0], sense[1],
+			      sense[2], sense[3]);
+	} else {
+		DBF_DEV_EVENT(DBF_EMERG, device, "%s",
+			      "SORRY - NO VALID SENSE AVAILABLE\n");
+	}
+}
+
+
+static void
+dasd_fba_dump_sense(struct dasd_device *device, struct dasd_ccw_req * req,
+		    struct irb *irb)
+{
+	char *page;
+	struct ccw1 *act, *end, *last;
+	int len, sl, sct, count;
+
+	page = (char *) get_zeroed_page(GFP_ATOMIC);
+	if (page == NULL) {
+		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
+			    "No memory to dump sense data");
+		return;
+	}
+	len = sprintf(page, PRINTK_HEADER
+		      " I/O status report for device %s:\n",
+		      dev_name(&device->cdev->dev));
+	len += sprintf(page + len, PRINTK_HEADER
+		       " in req: %p CS: 0x%02X DS: 0x%02X\n", req,
+		       irb->scsw.cmd.cstat, irb->scsw.cmd.dstat);
+	len += sprintf(page + len, PRINTK_HEADER
+		       " device %s: Failing CCW: %p\n",
+		       dev_name(&device->cdev->dev),
+		       (void *) (addr_t) irb->scsw.cmd.cpa);
+	if (irb->esw.esw0.erw.cons) {
+		for (sl = 0; sl < 4; sl++) {
+			len += sprintf(page + len, PRINTK_HEADER
+				       " Sense(hex) %2d-%2d:",
+				       (8 * sl), ((8 * sl) + 7));
+
+			for (sct = 0; sct < 8; sct++) {
+				len += sprintf(page + len, " %02x",
+					       irb->ecw[8 * sl + sct]);
+			}
+			len += sprintf(page + len, "\n");
+		}
+	} else {
+		len += sprintf(page + len, PRINTK_HEADER
+			       " SORRY - NO VALID SENSE AVAILABLE\n");
+	}
+	printk(KERN_ERR "%s", page);
+
+	/* dump the Channel Program */
+	/* print first CCWs (maximum 8) */
+	act = req->cpaddr;
+	for (last = act; last->flags & (CCW_FLAG_CC | CCW_FLAG_DC); last++);
+	end = min(act + 8, last);
+	len = sprintf(page, PRINTK_HEADER " Related CP in req: %p\n", req);
+	while (act <= end) {
+		len += sprintf(page + len, PRINTK_HEADER
+			       " CCW %p: %08X %08X DAT:",
+			       act, ((int *) act)[0], ((int *) act)[1]);
+		for (count = 0; count < 32 && count < act->count;
+		     count += sizeof(int))
+			len += sprintf(page + len, " %08X",
+				       ((int *) (addr_t) act->cda)
+				       [(count>>2)]);
+		len += sprintf(page + len, "\n");
+		act++;
+	}
+	printk(KERN_ERR "%s", page);
+
+
+	/* print failing CCW area */
+	len = 0;
+	if (act <  ((struct ccw1 *)(addr_t) irb->scsw.cmd.cpa) - 2) {
+		act = ((struct ccw1 *)(addr_t) irb->scsw.cmd.cpa) - 2;
+		len += sprintf(page + len, PRINTK_HEADER "......\n");
+	}
+	end = min((struct ccw1 *)(addr_t) irb->scsw.cmd.cpa + 2, last);
+	while (act <= end) {
+		len += sprintf(page + len, PRINTK_HEADER
+			       " CCW %p: %08X %08X DAT:",
+			       act, ((int *) act)[0], ((int *) act)[1]);
+		for (count = 0; count < 32 && count < act->count;
+		     count += sizeof(int))
+			len += sprintf(page + len, " %08X",
+				       ((int *) (addr_t) act->cda)
+				       [(count>>2)]);
+		len += sprintf(page + len, "\n");
+		act++;
+	}
+
+	/* print last CCWs */
+	if (act <  last - 2) {
+		act = last - 2;
+		len += sprintf(page + len, PRINTK_HEADER "......\n");
+	}
+	while (act <= last) {
+		len += sprintf(page + len, PRINTK_HEADER
+			       " CCW %p: %08X %08X DAT:",
+			       act, ((int *) act)[0], ((int *) act)[1]);
+		for (count = 0; count < 32 && count < act->count;
+		     count += sizeof(int))
+			len += sprintf(page + len, " %08X",
+				       ((int *) (addr_t) act->cda)
+				       [(count>>2)]);
+		len += sprintf(page + len, "\n");
+		act++;
+	}
+	if (len > 0)
+		printk(KERN_ERR "%s", page);
+	free_page((unsigned long) page);
+}
+
+/*
+ * max_blocks depends on the amount of storage that is available in
+ * the static io buffer for each device. Currently each device has
+ * 8192 bytes (=2 pages). For 64 bit one struct dasd_mchunk has
+ * 24 bytes, the struct dasd_ccw_req has 136 bytes and each block can use
+ * up to 16 bytes (8 for the ccw and 8 for the idal pointer). In
+ * addition we have one define extent ccw + 16 bytes of data and a
+ * locate record ccw for each block (stupid devices!) + 16 bytes of data.
+ * That makes:
+ * (8192 - 24 - 136 - 8 - 16) / 40 = 200.2 blocks at maximum.
+ * We want to fit two requests into the available memory so that we can
+ * immediately start the next one when the current one finishes. That
+ * makes 100.1 blocks per request. Give a little safety and the result is 96.
+ */
+static struct dasd_discipline dasd_fba_discipline = {
+	.owner = THIS_MODULE,
+	.name = "FBA ",
+	.ebcname = "FBA ",
+	.max_blocks = 96,
+	.check_device = dasd_fba_check_characteristics,
+	.do_analysis = dasd_fba_do_analysis,
+	.verify_path = dasd_generic_verify_path,
+	.fill_geometry = dasd_fba_fill_geometry,
+	.start_IO = dasd_start_IO,
+	.term_IO = dasd_term_IO,
+	.handle_terminated_request = dasd_fba_handle_terminated_request,
+	.erp_action = dasd_fba_erp_action,
+	.erp_postaction = dasd_fba_erp_postaction,
+	.check_for_device_change = dasd_fba_check_for_device_change,
+	.build_cp = dasd_fba_build_cp,
+	.free_cp = dasd_fba_free_cp,
+	.dump_sense = dasd_fba_dump_sense,
+	.dump_sense_dbf = dasd_fba_dump_sense_dbf,
+	.fill_info = dasd_fba_fill_info,
+};
+
+static int __init
+dasd_fba_init(void)
+{
+	int ret;
+
+	ASCEBC(dasd_fba_discipline.ebcname, 4);
+	ret = ccw_driver_register(&dasd_fba_driver);
+	if (!ret)
+		wait_for_device_probe();
+
+	return ret;
+}
+
+static void __exit
+dasd_fba_cleanup(void)
+{
+	ccw_driver_unregister(&dasd_fba_driver);
+}
+
+module_init(dasd_fba_init);
+module_exit(dasd_fba_cleanup);
diff --git a/drivers/s390/block/dasd_fba.h b/drivers/s390/block/dasd_fba.h
new file mode 100644
index 0000000..b14bf1b
--- /dev/null
+++ b/drivers/s390/block/dasd_fba.h
@@ -0,0 +1,72 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
+ * Bugreports.to..: <Linux390@de.ibm.com>
+ * Copyright IBM Corp. 1999, 2000
+ *
+ */
+
+#ifndef DASD_FBA_H
+#define DASD_FBA_H
+
+struct DE_fba_data {
+	struct {
+		unsigned char perm:2;	/* Permissions on this extent */
+		unsigned char zero:2;	/* Must be zero */
+		unsigned char da:1;	/* usually zero */
+		unsigned char diag:1;	/* allow diagnose */
+		unsigned char zero2:2;	/* zero */
+	} __attribute__ ((packed)) mask;
+	__u8 zero;		/* Must be zero */
+	__u16 blk_size;		/* Blocksize */
+	__u32 ext_loc;		/* Extent locator */
+	__u32 ext_beg;		/* logical number of block 0 in extent */
+	__u32 ext_end;		/* logical number of last block in extent */
+} __attribute__ ((packed));
+
+struct LO_fba_data {
+	struct {
+		unsigned char zero:4;
+		unsigned char cmd:4;
+	} __attribute__ ((packed)) operation;
+	__u8 auxiliary;
+	__u16 blk_ct;
+	__u32 blk_nr;
+} __attribute__ ((packed));
+
+struct dasd_fba_characteristics {
+	union {
+		__u8 c;
+		struct {
+			unsigned char reserved:1;
+			unsigned char overrunnable:1;
+			unsigned char burst_byte:1;
+			unsigned char data_chain:1;
+			unsigned char zeros:4;
+		} __attribute__ ((packed)) bits;
+	} __attribute__ ((packed)) mode;
+	union {
+		__u8 c;
+		struct {
+			unsigned char zero0:1;
+			unsigned char removable:1;
+			unsigned char shared:1;
+			unsigned char zero1:1;
+			unsigned char mam:1;
+			unsigned char zeros:3;
+		} __attribute__ ((packed)) bits;
+	} __attribute__ ((packed)) features;
+	__u8 dev_class;
+	__u8 unit_type;
+	__u16 blk_size;
+	__u32 blk_per_cycl;
+	__u32 blk_per_bound;
+	__u32 blk_bdsa;
+	__u32 reserved0;
+	__u16 reserved1;
+	__u16 blk_ce;
+	__u32 reserved2;
+	__u16 reserved3;
+} __attribute__ ((packed));
+
+#endif				/* DASD_FBA_H */
diff --git a/drivers/s390/block/dasd_genhd.c b/drivers/s390/block/dasd_genhd.c
new file mode 100644
index 0000000..7036a6c
--- /dev/null
+++ b/drivers/s390/block/dasd_genhd.c
@@ -0,0 +1,191 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
+ *		    Horst Hummel <Horst.Hummel@de.ibm.com>
+ *		    Carsten Otte <Cotte@de.ibm.com>
+ *		    Martin Schwidefsky <schwidefsky@de.ibm.com>
+ * Bugreports.to..: <Linux390@de.ibm.com>
+ * Copyright IBM Corp. 1999, 2001
+ *
+ * gendisk related functions for the dasd driver.
+ *
+ */
+
+#define KMSG_COMPONENT "dasd"
+
+#include <linux/interrupt.h>
+#include <linux/fs.h>
+#include <linux/blkpg.h>
+
+#include <linux/uaccess.h>
+
+/* This is ugly... */
+#define PRINTK_HEADER "dasd_gendisk:"
+
+#include "dasd_int.h"
+
+/*
+ * Allocate and register gendisk structure for device.
+ */
+int dasd_gendisk_alloc(struct dasd_block *block)
+{
+	struct gendisk *gdp;
+	struct dasd_device *base;
+	int len;
+
+	/* Make sure the minor for this device exists. */
+	base = block->base;
+	if (base->devindex >= DASD_PER_MAJOR)
+		return -EBUSY;
+
+	gdp = alloc_disk(1 << DASD_PARTN_BITS);
+	if (!gdp)
+		return -ENOMEM;
+
+	/* Initialize gendisk structure. */
+	gdp->major = DASD_MAJOR;
+	gdp->first_minor = base->devindex << DASD_PARTN_BITS;
+	gdp->fops = &dasd_device_operations;
+
+	/*
+	 * Set device name.
+	 *   dasda - dasdz : 26 devices
+	 *   dasdaa - dasdzz : 676 devices, added up = 702
+	 *   dasdaaa - dasdzzz : 17576 devices, added up = 18278
+	 *   dasdaaaa - dasdzzzz : 456976 devices, added up = 475252
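+	 *   e.g. devindex 0 -> dasda, 26 -> dasdaa, 702 -> dasdaaa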
+	 */
+	len = sprintf(gdp->disk_name, "dasd");
+	if (base->devindex > 25) {
+		if (base->devindex > 701) {
+			if (base->devindex > 18277)
+			        len += sprintf(gdp->disk_name + len, "%c",
+					       'a'+(((base->devindex-18278)
+						     /17576)%26));
+			len += sprintf(gdp->disk_name + len, "%c",
+				       'a'+(((base->devindex-702)/676)%26));
+		}
+		len += sprintf(gdp->disk_name + len, "%c",
+			       'a'+(((base->devindex-26)/26)%26));
+	}
+	len += sprintf(gdp->disk_name + len, "%c", 'a'+(base->devindex%26));
+
+	if (base->features & DASD_FEATURE_READONLY ||
+	    test_bit(DASD_FLAG_DEVICE_RO, &base->flags))
+		set_disk_ro(gdp, 1);
+	dasd_add_link_to_gendisk(gdp, base);
+	gdp->queue = block->request_queue;
+	block->gdp = gdp;
+	set_capacity(block->gdp, 0);
+	device_add_disk(&base->cdev->dev, block->gdp);
+	return 0;
+}
+
+/*
+ * Unregister and free gendisk structure for device.
+ */
+void dasd_gendisk_free(struct dasd_block *block)
+{
+	if (block->gdp) {
+		del_gendisk(block->gdp);
+		block->gdp->private_data = NULL;
+		put_disk(block->gdp);
+		block->gdp = NULL;
+	}
+}
+
+/*
+ * Trigger a partition detection.
+ */
+int dasd_scan_partitions(struct dasd_block *block)
+{
+	struct block_device *bdev;
+	int rc;
+
+	bdev = bdget_disk(block->gdp, 0);
+	if (!bdev) {
+		DBF_DEV_EVENT(DBF_ERR, block->base, "%s",
+			      "scan partitions error, bdget returned NULL");
+		return -ENODEV;
+	}
+
+	rc = blkdev_get(bdev, FMODE_READ, NULL);
+	if (rc < 0) {
+		DBF_DEV_EVENT(DBF_ERR, block->base,
+			      "scan partitions error, blkdev_get returned %d",
+			      rc);
+		return -ENODEV;
+	}
+
+	rc = blkdev_reread_part(bdev);
+	if (rc)
+		DBF_DEV_EVENT(DBF_ERR, block->base,
+				"scan partitions error, rc %d", rc);
+
+	/*
+	 * Since the matching blkdev_put call to the blkdev_get in
+	 * this function is not called before dasd_destroy_partitions
+	 * the offline open_count limit needs to be increased from
+	 * 0 to 1. This is done by setting device->bdev (see
+	 * dasd_generic_set_offline). As long as the partition
+	 * detection is running no offline should be allowed. That
+	 * is why the assignment to device->bdev is done AFTER
+	 * the BLKRRPART ioctl.
+	 */
+	block->bdev = bdev;
+	return 0;
+}
+
+/*
+ * Remove all inodes in the system for a device, delete the
+ * partitions and make device unusable by setting its size to zero.
+ */
+void dasd_destroy_partitions(struct dasd_block *block)
+{
+	/* The two structs have 168/176 bytes on 31/64 bit. */
+	struct blkpg_partition bpart;
+	struct blkpg_ioctl_arg barg;
+	struct block_device *bdev;
+
+	/*
+	 * Get the bdev pointer from the device structure and clear
+	 * device->bdev to lower the offline open_count limit again.
+	 */
+	bdev = block->bdev;
+	block->bdev = NULL;
+
+	/*
+	 * See fs/partition/check.c:delete_partition
+	 * Can't call delete_partitions directly. Use ioctl.
+	 * The ioctl also does locking and invalidation.
+	 */
+	memset(&bpart, 0, sizeof(struct blkpg_partition));
+	memset(&barg, 0, sizeof(struct blkpg_ioctl_arg));
+	barg.data = (void __force __user *) &bpart;
+	barg.op = BLKPG_DEL_PARTITION;
+	for (bpart.pno = block->gdp->minors - 1; bpart.pno > 0; bpart.pno--)
+		ioctl_by_bdev(bdev, BLKPG, (unsigned long) &barg);
+
+	invalidate_partition(block->gdp, 0);
+	/* Matching blkdev_put to the blkdev_get in dasd_scan_partitions. */
+	blkdev_put(bdev, FMODE_READ);
+	set_capacity(block->gdp, 0);
+}
+
+int dasd_gendisk_init(void)
+{
+	int rc;
+
+	/* Register to static dasd major 94 */
+	rc = register_blkdev(DASD_MAJOR, "dasd");
+	if (rc != 0) {
+		pr_warn("Registering the device driver with major number %d failed\n",
+			DASD_MAJOR);
+		return rc;
+	}
+	return 0;
+}
+
+void dasd_gendisk_exit(void)
+{
+	unregister_blkdev(DASD_MAJOR, "dasd");
+}
diff --git a/drivers/s390/block/dasd_int.h b/drivers/s390/block/dasd_int.h
new file mode 100644
index 0000000..de6b960
--- /dev/null
+++ b/drivers/s390/block/dasd_int.h
@@ -0,0 +1,1254 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
+ *		    Horst Hummel <Horst.Hummel@de.ibm.com>
+ *		    Martin Schwidefsky <schwidefsky@de.ibm.com>
+ * Bugreports.to..: <Linux390@de.ibm.com>
+ * Copyright IBM Corp. 1999, 2009
+ */
+
+#ifndef DASD_INT_H
+#define DASD_INT_H
+
+/* we keep the old device allocation scheme; IOW, minors are still in 0..255 */
+#define DASD_PER_MAJOR (1U << (MINORBITS - DASD_PARTN_BITS))
+#define DASD_PARTN_MASK ((1 << DASD_PARTN_BITS) - 1)
+
+/*
+ * States a dasd device can have:
+ *   new: the dasd_device structure is allocated.
+ *   known: the discipline for the device is identified.
+ *   basic: the device can do basic i/o.
+ *   unfmt: the device could not be analyzed (format is unknown).
+ *   ready: partition detection is done and the device can do block io.
+ *   online: the device accepts requests from the block device queue.
+ *
+ * Things to do for startup state transitions:
+ *   new -> known: find discipline for the device and create devfs entries.
+ *   known -> basic: request irq line for the device.
+ *   basic -> ready: do the initial analysis, e.g. format detection,
+ *                   do block device setup and detect partitions.
+ *   ready -> online: schedule the device tasklet.
+ * Things to do for shutdown state transitions:
+ *   online -> ready: just set the new device state.
+ *   ready -> basic: flush requests from the block device layer, clear
+ *                   partition information and reset format information.
+ *   basic -> known: terminate all requests and free irq.
+ *   known -> new: remove devfs entries and forget discipline.
+ */
+
+#define DASD_STATE_NEW	  0
+#define DASD_STATE_KNOWN  1
+#define DASD_STATE_BASIC  2
+#define DASD_STATE_UNFMT  3
+#define DASD_STATE_READY  4
+#define DASD_STATE_ONLINE 5
+
+#include <linux/module.h>
+#include <linux/wait.h>
+#include <linux/blkdev.h>
+#include <linux/genhd.h>
+#include <linux/hdreg.h>
+#include <linux/interrupt.h>
+#include <linux/log2.h>
+#include <asm/ccwdev.h>
+#include <linux/workqueue.h>
+#include <asm/debug.h>
+#include <asm/dasd.h>
+#include <asm/idals.h>
+#include <linux/bitops.h>
+#include <linux/blk-mq.h>
+
+/* DASD discipline magic */
+#define DASD_ECKD_MAGIC 0xC5C3D2C4
+#define DASD_DIAG_MAGIC 0xC4C9C1C7
+#define DASD_FBA_MAGIC 0xC6C2C140
+
+/*
+ * SECTION: Type definitions
+ */
+struct dasd_device;
+struct dasd_block;
+
+/* BIT DEFINITIONS FOR SENSE DATA */
+#define DASD_SENSE_BIT_0 0x80
+#define DASD_SENSE_BIT_1 0x40
+#define DASD_SENSE_BIT_2 0x20
+#define DASD_SENSE_BIT_3 0x10
+
+/* BIT DEFINITIONS FOR SIM SENSE */
+#define DASD_SIM_SENSE 0x0F
+#define DASD_SIM_MSG_TO_OP 0x03
+#define DASD_SIM_LOG 0x0C
+
+/* lock class for nested cdev lock */
+#define CDEV_NESTED_FIRST 1
+#define CDEV_NESTED_SECOND 2
+
+/*
+ * SECTION: MACROs for klogd and s390 debug feature (dbf)
+ */
+#define DBF_DEV_EVENT(d_level, d_device, d_str, d_data...) \
+do { \
+	debug_sprintf_event(d_device->debug_area, \
+			    d_level, \
+			    d_str "\n", \
+			    d_data); \
+} while(0)
+
+#define DBF_EVENT(d_level, d_str, d_data...)\
+do { \
+	debug_sprintf_event(dasd_debug_area, \
+			    d_level,\
+			    d_str "\n", \
+			    d_data); \
+} while(0)
+
+#define DBF_EVENT_DEVID(d_level, d_cdev, d_str, d_data...)	\
+do { \
+	struct ccw_dev_id __dev_id;			\
+	ccw_device_get_id(d_cdev, &__dev_id);		\
+	debug_sprintf_event(dasd_debug_area,		\
+			    d_level,					\
+			    "0.%x.%04x " d_str "\n",			\
+			    __dev_id.ssid, __dev_id.devno, d_data);	\
+} while (0)
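+
+/*
+ * Usage sketch (illustration): the _DEV_ variant logs into the per device
+ * debug area, the plain variant into the global dasd_debug_area, e.g.
+ *
+ *	DBF_DEV_EVENT(DBF_WARNING, device, "unknown ERP action %p",
+ *		      cqr->function);
+ *	DBF_EVENT(DBF_ERR, "%s", "out of memory");
+ */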
+
+/* limit size for an errorstring */
+#define ERRORLENGTH 30
+
+/* definition of dbf debug levels */
+#define	DBF_EMERG	0	/* system is unusable			*/
+#define	DBF_ALERT	1	/* action must be taken immediately	*/
+#define	DBF_CRIT	2	/* critical conditions			*/
+#define	DBF_ERR		3	/* error conditions			*/
+#define	DBF_WARNING	4	/* warning conditions			*/
+#define	DBF_NOTICE	5	/* normal but significant condition	*/
+#define	DBF_INFO	6	/* informational			*/
+#define	DBF_DEBUG	6	/* debug-level messages			*/
+
+/* messages to be written via klogd and dbf */
+#define DEV_MESSAGE(d_loglevel,d_device,d_string,d_args...)\
+do { \
+	printk(d_loglevel PRINTK_HEADER " %s: " d_string "\n", \
+	       dev_name(&d_device->cdev->dev), d_args); \
+	DBF_DEV_EVENT(DBF_ALERT, d_device, d_string, d_args); \
+} while(0)
+
+#define MESSAGE(d_loglevel,d_string,d_args...)\
+do { \
+	printk(d_loglevel PRINTK_HEADER " " d_string "\n", d_args); \
+	DBF_EVENT(DBF_ALERT, d_string, d_args); \
+} while(0)
+
+/* messages to be written via klogd only */
+#define DEV_MESSAGE_LOG(d_loglevel,d_device,d_string,d_args...)\
+do { \
+	printk(d_loglevel PRINTK_HEADER " %s: " d_string "\n", \
+	       dev_name(&d_device->cdev->dev), d_args); \
+} while(0)
+
+#define MESSAGE_LOG(d_loglevel,d_string,d_args...)\
+do { \
+	printk(d_loglevel PRINTK_HEADER " " d_string "\n", d_args); \
+} while(0)
+
+/* Macro to calculate number of blocks per page */
+#define BLOCKS_PER_PAGE(blksize) (PAGE_SIZE / (blksize))
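+/*
+ * e.g. 512-byte blocks give BLOCKS_PER_PAGE(512) = 8, the 8 block
+ * alignment unit used by the FBA discard workaround (illustration).
+ */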
+
+struct dasd_ccw_req {
+	unsigned int magic;		/* Eye catcher */
+	int intrc;			/* internal error, e.g. from start_IO */
+	struct list_head devlist;	/* for dasd_device request queue */
+	struct list_head blocklist;	/* for dasd_block request queue */
+	struct dasd_block *block;	/* the originating block device */
+	struct dasd_device *memdev;	/* the device used to allocate this */
+	struct dasd_device *startdev;	/* device the request is started on */
+	struct dasd_device *basedev;	/* base device if no block->base */
+	void *cpaddr;			/* address of ccw or tcw */
+	short retries;			/* A retry counter */
+	unsigned char cpmode;		/* 0 = cmd mode, 1 = itcw */
+	char status;			/* status of this request */
+	char lpm;			/* logical path mask */
+	unsigned long flags;        	/* flags of this request */
+	struct dasd_queue *dq;
+	unsigned long starttime;	/* jiffies time of request start */
+	unsigned long expires;		/* expiration period in jiffies */
+	void *data;			/* pointer to data area */
+	struct irb irb;			/* device status in case of an error */
+	struct dasd_ccw_req *refers;	/* ERP-chain queueing. */
+	void *function; 		/* originating ERP action */
+	void *mem_chunk;
+
+	unsigned long buildclk;		/* TOD-clock of request generation */
+	unsigned long startclk;		/* TOD-clock of request start */
+	unsigned long stopclk;		/* TOD-clock of request interrupt */
+	unsigned long endclk;		/* TOD-clock of request termination */
+
+	void (*callback)(struct dasd_ccw_req *, void *data);
+	void *callback_data;
+};
+
+/*
+ * dasd_ccw_req -> status can be:
+ */
+#define DASD_CQR_FILLED 	0x00	/* request is ready to be processed */
+#define DASD_CQR_DONE		0x01	/* request is completed successfully */
+#define DASD_CQR_NEED_ERP	0x02	/* request needs recovery action */
+#define DASD_CQR_IN_ERP 	0x03	/* request is in recovery */
+#define DASD_CQR_FAILED 	0x04	/* request is finally failed */
+#define DASD_CQR_TERMINATED	0x05	/* request was stopped by driver */
+
+#define DASD_CQR_QUEUED 	0x80	/* request is queued to be processed */
+#define DASD_CQR_IN_IO		0x81	/* request is currently in IO */
+#define DASD_CQR_ERROR		0x82	/* request is completed with error */
+#define DASD_CQR_CLEAR_PENDING	0x83	/* request is clear pending */
+#define DASD_CQR_CLEARED	0x84	/* request was cleared */
+#define DASD_CQR_SUCCESS	0x85	/* request was successful */
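+
+/*
+ * Typical life cycle (simplified sketch): a request is built as FILLED,
+ * queued as QUEUED, started as IN_IO and completes as DONE or ERROR; an
+ * ERROR may be retried via NEED_ERP/IN_ERP or finally become FAILED.
+ */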
+
+/* default expiration time */
+#define DASD_EXPIRES	  300
+#define DASD_EXPIRES_MAX  40000000
+#define DASD_RETRIES	  256
+#define DASD_RETRIES_MAX  32768
+
+/* per dasd_ccw_req flags */
+#define DASD_CQR_FLAGS_USE_ERP   0	/* use ERP for this request */
+#define DASD_CQR_FLAGS_FAILFAST  1	/* FAILFAST */
+#define DASD_CQR_VERIFY_PATH	 2	/* path verification request */
+#define DASD_CQR_ALLOW_SLOCK	 3	/* Try this request even when lock was
+					 * stolen. Should not be combined with
+					 * DASD_CQR_FLAGS_USE_ERP
+					 */
+/*
+ * The following flags are used to suppress output of certain errors.
+ */
+#define DASD_CQR_SUPPRESS_NRF	4	/* Suppress 'No Record Found' error */
+#define DASD_CQR_SUPPRESS_FP	5	/* Suppress 'File Protected' error*/
+#define DASD_CQR_SUPPRESS_IL	6	/* Suppress 'Incorrect Length' error */
+#define DASD_CQR_SUPPRESS_CR	7	/* Suppress 'Command Reject' error */
+
+#define DASD_REQ_PER_DEV 4
+
+/* Signature for error recovery functions. */
+typedef struct dasd_ccw_req *(*dasd_erp_fn_t) (struct dasd_ccw_req *);
+
+/*
+ * A single CQR can contain a maximum of 255 CCWs. It is limited by the
+ * count field of the locate record and locate record extended, which
+ * can only hold one byte.
+ */
+#define DASD_CQR_MAX_CCW 255
+
+/*
+ * Unique identifier for dasd device.
+ */
+#define UA_NOT_CONFIGURED  0x00
+#define UA_BASE_DEVICE	   0x01
+#define UA_BASE_PAV_ALIAS  0x02
+#define UA_HYPER_PAV_ALIAS 0x03
+
+struct dasd_uid {
+	__u8 type;
+	char vendor[4];
+	char serial[15];
+	__u16 ssid;
+	__u8 real_unit_addr;
+	__u8 base_unit_addr;
+	char vduit[33];
+};
+
+/*
+ * The struct dasd_discipline is something like a table of virtual
+ * functions, if you think of dasd_eckd inheriting dasd...
+ * no, currently we are not planning to reimplement the driver in C++.
+ */
+struct dasd_discipline {
+	struct module *owner;
+	char ebcname[8];	/* a name used for tagging and printks */
+	char name[8];		/* a name used for tagging and printks */
+	int max_blocks;		/* maximum number of blocks to be chained */
+
+	struct list_head list;	/* used for list of disciplines */
+
+	/*
+	 * Device recognition functions. check_device is used to verify
+	 * the sense data and the information returned by read device
+	 * characteristics. It returns 0 if the discipline can be used
+	 * for the device in question. uncheck_device is called during
+	 * device shutdown to deregister a device from its discipline.
+	 */
+	int (*check_device) (struct dasd_device *);
+	void (*uncheck_device) (struct dasd_device *);
+
+	/*
+	 * do_analysis is used in the step from device state "basic" to
+	 * state "ready". It returns 0 if the device can be made ready,
+	 * it returns -EMEDIUMTYPE if the device can't be made ready or
+	 * -EAGAIN if do_analysis started a ccw that needs to complete
+	 * before the analysis may be repeated.
+	 */
+	int (*do_analysis) (struct dasd_block *);
+
+	/*
+	 * This function is called, when new paths become available.
+	 * Disciplines may use this callback to do necessary setup work,
+	 * e.g. verify that a new path is compatible with the current
+	 * configuration.
+	 */
+	int (*verify_path)(struct dasd_device *, __u8);
+
+	/*
+	 * Last things to do when a device is set online, and first things
+	 * when it is set offline.
+	 */
+	int (*basic_to_ready) (struct dasd_device *);
+	int (*online_to_ready) (struct dasd_device *);
+	int (*basic_to_known)(struct dasd_device *);
+
+	/*
+	 * Device operation functions. build_cp creates a ccw chain for
+	 * a block device request, start_io starts the request and
+	 * term_IO cancels it (e.g. in case of a timeout). format_device
+	 * formats the device and check_device_format compares the format of
+	 * a device with the expected format_data.
+	 * handle_terminated_request allows a cqr to be examined and
+	 * prepared for retry.
+	 */
+	struct dasd_ccw_req *(*build_cp) (struct dasd_device *,
+					  struct dasd_block *,
+					  struct request *);
+	int (*start_IO) (struct dasd_ccw_req *);
+	int (*term_IO) (struct dasd_ccw_req *);
+	void (*handle_terminated_request) (struct dasd_ccw_req *);
+	int (*format_device) (struct dasd_device *,
+			      struct format_data_t *, int);
+	int (*check_device_format)(struct dasd_device *,
+				   struct format_check_t *, int);
+	int (*free_cp) (struct dasd_ccw_req *, struct request *);
+
+	/*
+	 * Error recovery functions. examine_error() returns a value that
+	 * indicates what to do for an error condition. If examine_error()
+	 * returns 'dasd_era_recover' erp_action() is called to create a
+	 * special error recovery ccw. erp_postaction() is called after
+	 * an error recovery ccw has finished its execution. dump_sense
+	 * is called for every error condition to print the sense data
+	 * to the console.
+	 */
+	dasd_erp_fn_t(*erp_action) (struct dasd_ccw_req *);
+	dasd_erp_fn_t(*erp_postaction) (struct dasd_ccw_req *);
+	void (*dump_sense) (struct dasd_device *, struct dasd_ccw_req *,
+			    struct irb *);
+	void (*dump_sense_dbf) (struct dasd_device *, struct irb *, char *);
+	void (*check_for_device_change) (struct dasd_device *,
+					 struct dasd_ccw_req *,
+					 struct irb *);
+
+	/* i/o control functions. */
+	int (*fill_geometry) (struct dasd_block *, struct hd_geometry *);
+	int (*fill_info) (struct dasd_device *, struct dasd_information2_t *);
+	int (*ioctl) (struct dasd_block *, unsigned int, void __user *);
+
+	/* suspend/resume functions */
+	int (*freeze) (struct dasd_device *);
+	int (*restore) (struct dasd_device *);
+
+	/* reload device after state change */
+	int (*reload) (struct dasd_device *);
+
+	int (*get_uid) (struct dasd_device *, struct dasd_uid *);
+	void (*kick_validate) (struct dasd_device *);
+	int (*check_attention)(struct dasd_device *, __u8);
+	int (*host_access_count)(struct dasd_device *);
+	int (*hosts_print)(struct dasd_device *, struct seq_file *);
+	void (*handle_hpf_error)(struct dasd_device *, struct irb *);
+	void (*disable_hpf)(struct dasd_device *);
+	int (*hpf_enabled)(struct dasd_device *);
+	void (*reset_path)(struct dasd_device *, __u8);
+};
+
+extern struct dasd_discipline *dasd_diag_discipline_pointer;
+
+/*
+ * Notification numbers for extended error reporting notifications:
+ * The DASD_EER_DISABLE notification is sent before a dasd_device (and its
+ * eer pointer) is freed. The error reporting module needs to do all necessary
+ * cleanup steps.
+ * The DASD_EER_TRIGGER notification sends the actual error reports (triggers).
+ */
+#define DASD_EER_DISABLE 0
+#define DASD_EER_TRIGGER 1
+
+/* Trigger IDs for extended error reporting DASD_EER_TRIGGER notification */
+#define DASD_EER_FATALERROR  1
+#define DASD_EER_NOPATH      2
+#define DASD_EER_STATECHANGE 3
+#define DASD_EER_PPRCSUSPEND 4
+
+/* DASD path handling */
+
+#define DASD_PATH_OPERATIONAL  1
+#define DASD_PATH_TBV	       2
+#define DASD_PATH_PP	       3
+#define DASD_PATH_NPP	       4
+#define DASD_PATH_MISCABLED    5
+#define DASD_PATH_NOHPF        6
+#define DASD_PATH_CUIR	       7
+#define DASD_PATH_IFCC	       8
+
+#define DASD_THRHLD_MAX		4294967295U
+#define DASD_INTERVAL_MAX	4294967295U
+
+struct dasd_path {
+	unsigned long flags;
+	u8 cssid;
+	u8 ssid;
+	u8 chpid;
+	struct dasd_conf_data *conf_data;
+	atomic_t error_count;
+	unsigned long errorclk;
+};
+
+
+struct dasd_profile_info {
+	/* legacy part of profile data, as in dasd_profile_info_t */
+	unsigned int dasd_io_reqs;	 /* number of requests processed */
+	unsigned int dasd_io_sects;	 /* number of sectors processed */
+	unsigned int dasd_io_secs[32];	 /* histogram of request sizes */
+	unsigned int dasd_io_times[32];	 /* histogram of request times */
+	unsigned int dasd_io_timps[32];	 /* hist. of request times per sector */
+	unsigned int dasd_io_time1[32];	 /* hist. of time from build to start */
+	unsigned int dasd_io_time2[32];	 /* hist. of time from start to irq */
+	unsigned int dasd_io_time2ps[32]; /* hist. of time from start to irq */
+	unsigned int dasd_io_time3[32];	 /* hist. of time from irq to end */
+	unsigned int dasd_io_nr_req[32]; /* hist. of # of requests in chanq */
+
+	/* new data */
+	struct timespec64 starttod;	   /* time of start or last reset */
+	unsigned int dasd_io_alias;	   /* requests using an alias */
+	unsigned int dasd_io_tpm;	   /* requests using transport mode */
+	unsigned int dasd_read_reqs;	   /* total number of read requests */
+	unsigned int dasd_read_sects;	   /* total number of read sectors */
+	unsigned int dasd_read_alias;	   /* read requests using an alias */
+	unsigned int dasd_read_tpm;	   /* read requests in transport mode */
+	unsigned int dasd_read_secs[32];   /* histogram of request sizes */
+	unsigned int dasd_read_times[32];  /* histogram of request times */
+	unsigned int dasd_read_time1[32];  /* hist. time from build to start */
+	unsigned int dasd_read_time2[32];  /* hist. of time from start to irq */
+	unsigned int dasd_read_time3[32];  /* hist. of time from irq to end */
+	unsigned int dasd_read_nr_req[32]; /* hist. of # of requests in chanq */
+	unsigned long dasd_sum_times;	   /* sum of request times */
+	unsigned long dasd_sum_time_str;   /* sum of time from build to start */
+	unsigned long dasd_sum_time_irq;   /* sum of time from start to irq */
+	unsigned long dasd_sum_time_end;   /* sum of time from irq to end */
+};
+
+struct dasd_profile {
+	struct dentry *dentry;
+	struct dasd_profile_info *data;
+	spinlock_t lock;
+};
+
+struct dasd_device {
+	/* Block device stuff. */
+	struct dasd_block *block;
+
+	unsigned int devindex;
+	unsigned long flags;	   /* per device flags */
+	unsigned short features;   /* copy of devmap-features (read-only!) */
+
+	/* extended error reporting stuff (eer) */
+	struct dasd_ccw_req *eer_cqr;
+
+	/* Device discipline stuff. */
+	struct dasd_discipline *discipline;
+	struct dasd_discipline *base_discipline;
+	void *private;
+	struct dasd_path path[8];
+	__u8 opm;
+
+	/* Device state and target state. */
+	int state, target;
+	struct mutex state_mutex;
+	int stopped;		/* device (ccw_device_start) was stopped */
+
+	/* reference count. */
+	atomic_t ref_count;
+
+	/* ccw queue and memory for static ccw/erp buffers. */
+	struct list_head ccw_queue;
+	spinlock_t mem_lock;
+	void *ccw_mem;
+	void *erp_mem;
+	struct list_head ccw_chunks;
+	struct list_head erp_chunks;
+
+	atomic_t tasklet_scheduled;
+	struct tasklet_struct tasklet;
+	struct work_struct kick_work;
+	struct work_struct restore_device;
+	struct work_struct reload_device;
+	struct work_struct kick_validate;
+	struct work_struct suc_work;
+	struct work_struct requeue_requests;
+	struct timer_list timer;
+
+	debug_info_t *debug_area;
+
+	struct ccw_device *cdev;
+
+	/* hook for alias management */
+	struct list_head alias_list;
+
+	/* default expiration time in s */
+	unsigned long default_expires;
+	unsigned long default_retries;
+
+	unsigned long blk_timeout;
+
+	unsigned long path_thrhld;
+	unsigned long path_interval;
+
+	struct dentry *debugfs_dentry;
+	struct dentry *hosts_dentry;
+	struct dasd_profile profile;
+};
+
+struct dasd_block {
+	/* Block device stuff. */
+	struct gendisk *gdp;
+	struct request_queue *request_queue;
+	spinlock_t request_queue_lock;
+	struct blk_mq_tag_set tag_set;
+	struct block_device *bdev;
+	atomic_t open_count;
+
+	unsigned long blocks;	   /* size of volume in blocks */
+	unsigned int bp_block;	   /* bytes per block */
+	unsigned int s2b_shift;	   /* log2 (bp_block/512) */
+
+	struct dasd_device *base;
+	struct list_head ccw_queue;
+	spinlock_t queue_lock;
+
+	atomic_t tasklet_scheduled;
+	struct tasklet_struct tasklet;
+	struct timer_list timer;
+
+	struct dentry *debugfs_dentry;
+	struct dasd_profile profile;
+};
+
+struct dasd_attention_data {
+	struct dasd_device *device;
+	__u8 lpum;
+};
+
+struct dasd_queue {
+	spinlock_t lock;
+};
+
+/* reasons why device (ccw_device_start) was stopped */
+#define DASD_STOPPED_NOT_ACC 1         /* not accessible */
+#define DASD_STOPPED_QUIESCE 2         /* Quiesced */
+#define DASD_STOPPED_PENDING 4         /* long busy */
+#define DASD_STOPPED_DC_WAIT 8         /* disconnected, wait */
+#define DASD_STOPPED_SU      16        /* summary unit check handling */
+#define DASD_STOPPED_PM      32        /* pm state transition */
+#define DASD_UNRESUMED_PM    64        /* pm resume failed state */
+
+/* per device flags */
+#define DASD_FLAG_OFFLINE	3	/* device is in offline processing */
+#define DASD_FLAG_EER_SNSS	4	/* A SNSS is required */
+#define DASD_FLAG_EER_IN_USE	5	/* A SNSS request is running */
+#define DASD_FLAG_DEVICE_RO	6	/* The device itself is read-only. Don't
+					 * confuse this with the user specified
+					 * read-only feature.
+					 */
+#define DASD_FLAG_IS_RESERVED	7	/* The device is reserved */
+#define DASD_FLAG_LOCK_STOLEN	8	/* The device lock was stolen */
+#define DASD_FLAG_SUSPENDED	9	/* The device was suspended */
+#define DASD_FLAG_SAFE_OFFLINE	10	/* safe offline processing requested*/
+#define DASD_FLAG_SAFE_OFFLINE_RUNNING	11	/* safe offline running */
+#define DASD_FLAG_ABORTALL	12	/* Abort all noretry requests */
+#define DASD_FLAG_PATH_VERIFY	13	/* Path verification worker running */
+#define DASD_FLAG_SUC		14	/* unhandled summary unit check */
+
+#define DASD_SLEEPON_START_TAG	((void *) 1)
+#define DASD_SLEEPON_END_TAG	((void *) 2)
+
+void dasd_put_device_wake(struct dasd_device *);
+
+/*
+ * Reference count inliners
+ */
+static inline void
+dasd_get_device(struct dasd_device *device)
+{
+	atomic_inc(&device->ref_count);
+}
+
+static inline void
+dasd_put_device(struct dasd_device *device)
+{
+	if (atomic_dec_return(&device->ref_count) == 0)
+		dasd_put_device_wake(device);
+}
+
+/*
+ * The static memory in ccw_mem and erp_mem is managed by a sorted
+ * list of free memory chunks.
+ */
+struct dasd_mchunk
+{
+	struct list_head list;
+	unsigned long size;
+} __attribute__ ((aligned(8)));
+
+static inline void
+dasd_init_chunklist(struct list_head *chunk_list, void *mem,
+		    unsigned long size)
+{
+	struct dasd_mchunk *chunk;
+
+	INIT_LIST_HEAD(chunk_list);
+	chunk = (struct dasd_mchunk *) mem;
+	chunk->size = size - sizeof(struct dasd_mchunk);
+	list_add(&chunk->list, chunk_list);
+}
+
+static inline void *
+dasd_alloc_chunk(struct list_head *chunk_list, unsigned long size)
+{
+	struct dasd_mchunk *chunk, *tmp;
+
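+	/* round the requested size up to the allocator's 8-byte granularity */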
+	size = (size + 7L) & -8L;
+	list_for_each_entry(chunk, chunk_list, list) {
+		if (chunk->size < size)
+			continue;
+		if (chunk->size > size + sizeof(struct dasd_mchunk)) {
+			char *endaddr = (char *) (chunk + 1) + chunk->size;
+			tmp = (struct dasd_mchunk *) (endaddr - size) - 1;
+			tmp->size = size;
+			chunk->size -= size + sizeof(struct dasd_mchunk);
+			chunk = tmp;
+		} else
+			list_del(&chunk->list);
+		return (void *) (chunk + 1);
+	}
+	return NULL;
+}
+
+static inline void
+dasd_free_chunk(struct list_head *chunk_list, void *mem)
+{
+	struct dasd_mchunk *chunk, *tmp;
+	struct list_head *p, *left;
+
+	chunk = (struct dasd_mchunk *)
+		((char *) mem - sizeof(struct dasd_mchunk));
+	/* Find out the left neighbour in chunk_list. */
+	left = chunk_list;
+	list_for_each(p, chunk_list) {
+		if (list_entry(p, struct dasd_mchunk, list) > chunk)
+			break;
+		left = p;
+	}
+	/* Try to merge with right neighbour = next element from left. */
+	if (left->next != chunk_list) {
+		tmp = list_entry(left->next, struct dasd_mchunk, list);
+		if ((char *) (chunk + 1) + chunk->size == (char *) tmp) {
+			list_del(&tmp->list);
+			chunk->size += tmp->size + sizeof(struct dasd_mchunk);
+		}
+	}
+	/* Try to merge with left neighbour. */
+	if (left != chunk_list) {
+		tmp = list_entry(left, struct dasd_mchunk, list);
+		if ((char *) (tmp + 1) + tmp->size == (char *) chunk) {
+			tmp->size += chunk->size + sizeof(struct dasd_mchunk);
+			return;
+		}
+	}
+	__list_add(&chunk->list, left, left->next);
+}
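+
+/*
+ * Usage sketch (illustration only, not part of the driver): sizes are
+ * rounded up to 8 bytes and freed chunks are merged with their
+ * neighbours, e.g.
+ *
+ *	void *a, *b;
+ *
+ *	dasd_init_chunklist(&device->ccw_chunks, device->ccw_mem, PAGE_SIZE);
+ *	a = dasd_alloc_chunk(&device->ccw_chunks, 64);
+ *	b = dasd_alloc_chunk(&device->ccw_chunks, 100);
+ *	dasd_free_chunk(&device->ccw_chunks, a);
+ *	dasd_free_chunk(&device->ccw_chunks, b);
+ */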
+
+/*
+ * Check if bsize is in { 512, 1024, 2048, 4096 }
+ */
+static inline int
+dasd_check_blocksize(int bsize)
+{
+	if (bsize < 512 || bsize > 4096 || !is_power_of_2(bsize))
+		return -EMEDIUMTYPE;
+	return 0;
+}
+
+/* externals in dasd.c */
+#define DASD_PROFILE_OFF	 0
+#define DASD_PROFILE_ON 	 1
+#define DASD_PROFILE_GLOBAL_ONLY 2
+
+extern debug_info_t *dasd_debug_area;
+extern struct dasd_profile dasd_global_profile;
+extern unsigned int dasd_global_profile_level;
+extern const struct block_device_operations dasd_device_operations;
+
+extern struct kmem_cache *dasd_page_cache;
+
+struct dasd_ccw_req *
+dasd_smalloc_request(int, int, int, struct dasd_device *, struct dasd_ccw_req *);
+void dasd_sfree_request(struct dasd_ccw_req *, struct dasd_device *);
+void dasd_wakeup_cb(struct dasd_ccw_req *, void *);
+
+struct dasd_device *dasd_alloc_device(void);
+void dasd_free_device(struct dasd_device *);
+
+struct dasd_block *dasd_alloc_block(void);
+void dasd_free_block(struct dasd_block *);
+
+enum blk_eh_timer_return dasd_times_out(struct request *req, bool reserved);
+
+void dasd_enable_device(struct dasd_device *);
+void dasd_set_target_state(struct dasd_device *, int);
+void dasd_kick_device(struct dasd_device *);
+void dasd_restore_device(struct dasd_device *);
+void dasd_reload_device(struct dasd_device *);
+void dasd_schedule_requeue(struct dasd_device *);
+
+void dasd_add_request_head(struct dasd_ccw_req *);
+void dasd_add_request_tail(struct dasd_ccw_req *);
+int  dasd_start_IO(struct dasd_ccw_req *);
+int  dasd_term_IO(struct dasd_ccw_req *);
+void dasd_schedule_device_bh(struct dasd_device *);
+void dasd_schedule_block_bh(struct dasd_block *);
+int  dasd_sleep_on(struct dasd_ccw_req *);
+int  dasd_sleep_on_queue(struct list_head *);
+int  dasd_sleep_on_immediatly(struct dasd_ccw_req *);
+int  dasd_sleep_on_interruptible(struct dasd_ccw_req *);
+void dasd_device_set_timer(struct dasd_device *, int);
+void dasd_device_clear_timer(struct dasd_device *);
+void dasd_block_set_timer(struct dasd_block *, int);
+void dasd_block_clear_timer(struct dasd_block *);
+int  dasd_cancel_req(struct dasd_ccw_req *);
+int dasd_flush_device_queue(struct dasd_device *);
+int dasd_generic_probe (struct ccw_device *, struct dasd_discipline *);
+void dasd_generic_free_discipline(struct dasd_device *);
+void dasd_generic_remove (struct ccw_device *cdev);
+int dasd_generic_set_online(struct ccw_device *, struct dasd_discipline *);
+int dasd_generic_set_offline (struct ccw_device *cdev);
+int dasd_generic_notify(struct ccw_device *, int);
+int dasd_generic_last_path_gone(struct dasd_device *);
+int dasd_generic_path_operational(struct dasd_device *);
+void dasd_generic_shutdown(struct ccw_device *);
+
+void dasd_generic_handle_state_change(struct dasd_device *);
+int dasd_generic_pm_freeze(struct ccw_device *);
+int dasd_generic_restore_device(struct ccw_device *);
+enum uc_todo dasd_generic_uc_handler(struct ccw_device *, struct irb *);
+void dasd_generic_path_event(struct ccw_device *, int *);
+int dasd_generic_verify_path(struct dasd_device *, __u8);
+
+int dasd_generic_read_dev_chars(struct dasd_device *, int, void *, int);
+char *dasd_get_sense(struct irb *);
+
+void dasd_device_set_stop_bits(struct dasd_device *, int);
+void dasd_device_remove_stop_bits(struct dasd_device *, int);
+
+int dasd_device_is_ro(struct dasd_device *);
+
+void dasd_profile_reset(struct dasd_profile *);
+int dasd_profile_on(struct dasd_profile *);
+void dasd_profile_off(struct dasd_profile *);
+char *dasd_get_user_string(const char __user *, size_t);
+
+/* externals in dasd_devmap.c */
+extern int dasd_max_devindex;
+extern int dasd_probeonly;
+extern int dasd_autodetect;
+extern int dasd_nopav;
+extern int dasd_nofcx;
+
+int dasd_devmap_init(void);
+void dasd_devmap_exit(void);
+
+struct dasd_device *dasd_create_device(struct ccw_device *);
+void dasd_delete_device(struct dasd_device *);
+
+int dasd_get_feature(struct ccw_device *, int);
+int dasd_set_feature(struct ccw_device *, int, int);
+
+int dasd_add_sysfs_files(struct ccw_device *);
+void dasd_remove_sysfs_files(struct ccw_device *);
+
+struct dasd_device *dasd_device_from_cdev(struct ccw_device *);
+struct dasd_device *dasd_device_from_cdev_locked(struct ccw_device *);
+struct dasd_device *dasd_device_from_devindex(int);
+
+void dasd_add_link_to_gendisk(struct gendisk *, struct dasd_device *);
+struct dasd_device *dasd_device_from_gendisk(struct gendisk *);
+
+int dasd_parse(void) __init;
+int dasd_busid_known(const char *);
+
+/* externals in dasd_gendisk.c */
+int  dasd_gendisk_init(void);
+void dasd_gendisk_exit(void);
+int dasd_gendisk_alloc(struct dasd_block *);
+void dasd_gendisk_free(struct dasd_block *);
+int dasd_scan_partitions(struct dasd_block *);
+void dasd_destroy_partitions(struct dasd_block *);
+
+/* externals in dasd_ioctl.c */
+int  dasd_ioctl(struct block_device *, fmode_t, unsigned int, unsigned long);
+
+/* externals in dasd_proc.c */
+int dasd_proc_init(void);
+void dasd_proc_exit(void);
+
+/* externals in dasd_erp.c */
+struct dasd_ccw_req *dasd_default_erp_action(struct dasd_ccw_req *);
+struct dasd_ccw_req *dasd_default_erp_postaction(struct dasd_ccw_req *);
+struct dasd_ccw_req *dasd_alloc_erp_request(char *, int, int,
+					    struct dasd_device *);
+void dasd_free_erp_request(struct dasd_ccw_req *, struct dasd_device *);
+void dasd_log_sense(struct dasd_ccw_req *, struct irb *);
+void dasd_log_sense_dbf(struct dasd_ccw_req *cqr, struct irb *irb);
+
+/* externals in dasd_3990_erp.c */
+struct dasd_ccw_req *dasd_3990_erp_action(struct dasd_ccw_req *);
+void dasd_3990_erp_handle_sim(struct dasd_device *, char *);
+
+/* externals in dasd_eer.c */
+#ifdef CONFIG_DASD_EER
+int dasd_eer_init(void);
+void dasd_eer_exit(void);
+int dasd_eer_enable(struct dasd_device *);
+void dasd_eer_disable(struct dasd_device *);
+void dasd_eer_write(struct dasd_device *, struct dasd_ccw_req *cqr,
+		    unsigned int id);
+void dasd_eer_snss(struct dasd_device *);
+
+static inline int dasd_eer_enabled(struct dasd_device *device)
+{
+	return device->eer_cqr != NULL;
+}
+#else
+#define dasd_eer_init()		(0)
+#define dasd_eer_exit()		do { } while (0)
+#define dasd_eer_enable(d)	(0)
+#define dasd_eer_disable(d)	do { } while (0)
+#define dasd_eer_write(d,c,i)	do { } while (0)
+#define dasd_eer_snss(d)	do { } while (0)
+#define dasd_eer_enabled(d)	(0)
+#endif	/* CONFIG_DASD_EER */
+
+
+/* DASD path handling functions */
+
+/*
+ * helper functions to modify bit masks for a given channel path for a device
+ */
+static inline int dasd_path_is_operational(struct dasd_device *device, int chp)
+{
+	return test_bit(DASD_PATH_OPERATIONAL, &device->path[chp].flags);
+}
+
+static inline int dasd_path_need_verify(struct dasd_device *device, int chp)
+{
+	return test_bit(DASD_PATH_TBV, &device->path[chp].flags);
+}
+
+static inline void dasd_path_verify(struct dasd_device *device, int chp)
+{
+	__set_bit(DASD_PATH_TBV, &device->path[chp].flags);
+}
+
+static inline void dasd_path_clear_verify(struct dasd_device *device, int chp)
+{
+	__clear_bit(DASD_PATH_TBV, &device->path[chp].flags);
+}
+
+static inline void dasd_path_clear_all_verify(struct dasd_device *device)
+{
+	int chp;
+
+	for (chp = 0; chp < 8; chp++)
+		dasd_path_clear_verify(device, chp);
+}
+
+static inline void dasd_path_operational(struct dasd_device *device, int chp)
+{
+	__set_bit(DASD_PATH_OPERATIONAL, &device->path[chp].flags);
+	device->opm |= (0x80 >> chp);
+}
+
+static inline void dasd_path_nonpreferred(struct dasd_device *device, int chp)
+{
+	__set_bit(DASD_PATH_NPP, &device->path[chp].flags);
+}
+
+static inline int dasd_path_is_nonpreferred(struct dasd_device *device, int chp)
+{
+	return test_bit(DASD_PATH_NPP, &device->path[chp].flags);
+}
+
+static inline void dasd_path_clear_nonpreferred(struct dasd_device *device,
+						int chp)
+{
+	__clear_bit(DASD_PATH_NPP, &device->path[chp].flags);
+}
+
+static inline void dasd_path_preferred(struct dasd_device *device, int chp)
+{
+	__set_bit(DASD_PATH_PP, &device->path[chp].flags);
+}
+
+static inline int dasd_path_is_preferred(struct dasd_device *device, int chp)
+{
+	return test_bit(DASD_PATH_PP, &device->path[chp].flags);
+}
+
+static inline void dasd_path_clear_preferred(struct dasd_device *device,
+					     int chp)
+{
+	__clear_bit(DASD_PATH_PP, &device->path[chp].flags);
+}
+
+static inline void dasd_path_clear_oper(struct dasd_device *device, int chp)
+{
+	__clear_bit(DASD_PATH_OPERATIONAL, &device->path[chp].flags);
+	device->opm &= ~(0x80 >> chp);
+}
+
+static inline void dasd_path_clear_cable(struct dasd_device *device, int chp)
+{
+	__clear_bit(DASD_PATH_MISCABLED, &device->path[chp].flags);
+}
+
+static inline void dasd_path_cuir(struct dasd_device *device, int chp)
+{
+	__set_bit(DASD_PATH_CUIR, &device->path[chp].flags);
+}
+
+static inline int dasd_path_is_cuir(struct dasd_device *device, int chp)
+{
+	return test_bit(DASD_PATH_CUIR, &device->path[chp].flags);
+}
+
+static inline void dasd_path_clear_cuir(struct dasd_device *device, int chp)
+{
+	__clear_bit(DASD_PATH_CUIR, &device->path[chp].flags);
+}
+
+static inline void dasd_path_ifcc(struct dasd_device *device, int chp)
+{
+	set_bit(DASD_PATH_IFCC, &device->path[chp].flags);
+}
+
+static inline int dasd_path_is_ifcc(struct dasd_device *device, int chp)
+{
+	return test_bit(DASD_PATH_IFCC, &device->path[chp].flags);
+}
+
+static inline void dasd_path_clear_ifcc(struct dasd_device *device, int chp)
+{
+	clear_bit(DASD_PATH_IFCC, &device->path[chp].flags);
+}
+
+static inline void dasd_path_clear_nohpf(struct dasd_device *device, int chp)
+{
+	__clear_bit(DASD_PATH_NOHPF, &device->path[chp].flags);
+}
+
+static inline void dasd_path_miscabled(struct dasd_device *device, int chp)
+{
+	__set_bit(DASD_PATH_MISCABLED, &device->path[chp].flags);
+}
+
+static inline int dasd_path_is_miscabled(struct dasd_device *device, int chp)
+{
+	return test_bit(DASD_PATH_MISCABLED, &device->path[chp].flags);
+}
+
+static inline void dasd_path_nohpf(struct dasd_device *device, int chp)
+{
+	__set_bit(DASD_PATH_NOHPF, &device->path[chp].flags);
+}
+
+static inline int dasd_path_is_nohpf(struct dasd_device *device, int chp)
+{
+	return test_bit(DASD_PATH_NOHPF, &device->path[chp].flags);
+}
+
+/*
+ * get functions for path masks
+ * will return a path mask for the given device
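+ *
+ * Bit convention (illustration): channel path chp maps to bit 0x80 >> chp
+ * (cf. dasd_path_operational() above), so a device with paths 0 and 2
+ * usable has an opm of 0xa0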
+ */
+
+static inline __u8 dasd_path_get_opm(struct dasd_device *device)
+{
+	return device->opm;
+}
+
+static inline __u8 dasd_path_get_tbvpm(struct dasd_device *device)
+{
+	int chp;
+	__u8 tbvpm = 0x00;
+
+	for (chp = 0; chp < 8; chp++)
+		if (dasd_path_need_verify(device, chp))
+			tbvpm |= 0x80 >> chp;
+	return tbvpm;
+}
+
+static inline __u8 dasd_path_get_nppm(struct dasd_device *device)
+{
+	int chp;
+	__u8 npm = 0x00;
+
+	for (chp = 0; chp < 8; chp++) {
+		if (dasd_path_is_nonpreferred(device, chp))
+			npm |= 0x80 >> chp;
+	}
+	return npm;
+}
+
+static inline __u8 dasd_path_get_ppm(struct dasd_device *device)
+{
+	int chp;
+	__u8 ppm = 0x00;
+
+	for (chp = 0; chp < 8; chp++)
+		if (dasd_path_is_preferred(device, chp))
+			ppm |= 0x80 >> chp;
+	return ppm;
+}
+
+static inline __u8 dasd_path_get_cablepm(struct dasd_device *device)
+{
+	int chp;
+	__u8 cablepm = 0x00;
+
+	for (chp = 0; chp < 8; chp++)
+		if (dasd_path_is_miscabled(device, chp))
+			cablepm |= 0x80 >> chp;
+	return cablepm;
+}
+
+static inline __u8 dasd_path_get_cuirpm(struct dasd_device *device)
+{
+	int chp;
+	__u8 cuirpm = 0x00;
+
+	for (chp = 0; chp < 8; chp++)
+		if (dasd_path_is_cuir(device, chp))
+			cuirpm |= 0x80 >> chp;
+	return cuirpm;
+}
+
+static inline __u8 dasd_path_get_ifccpm(struct dasd_device *device)
+{
+	int chp;
+	__u8 ifccpm = 0x00;
+
+	for (chp = 0; chp < 8; chp++)
+		if (dasd_path_is_ifcc(device, chp))
+			ifccpm |= 0x80 >> chp;
+	return ifccpm;
+}
+
+static inline __u8 dasd_path_get_hpfpm(struct dasd_device *device)
+{
+	int chp;
+	__u8 hpfpm = 0x00;
+
+	for (chp = 0; chp < 8; chp++)
+		if (dasd_path_is_nohpf(device, chp))
+			hpfpm |= 0x80 >> chp;
+	return hpfpm;
+}
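
The helpers above all share one packing convention: the eight possible channel
paths of a device live in a single byte, with channel path 0 in the most
significant bit, so every getter rebuilds its mask by OR-ing 0x80 >> chp. A
standalone illustration of that convention (plain C, not driver code):

	#include <stdio.h>

	/* Print the channel paths contained in an 8-bit path mask; as in
	 * the driver, channel path 0 is the most significant bit. */
	static void print_paths(unsigned char pm)
	{
		int chp;

		for (chp = 0; chp < 8; chp++)
			if (pm & (0x80 >> chp))
				printf("channel path %d\n", chp);
	}

	int main(void)
	{
		print_paths(0xc1);	/* prints paths 0, 1 and 7 */
		return 0;
	}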
+
+static inline __u8 dasd_path_get_notoperpm(struct dasd_device *device)
+{
+	int chp;
+	__u8 nopm = 0x00;
+
+	for (chp = 0; chp < 8; chp++)
+		if (dasd_path_is_nohpf(device, chp) ||
+		    dasd_path_is_ifcc(device, chp) ||
+		    dasd_path_is_cuir(device, chp) ||
+		    dasd_path_is_miscabled(device, chp))
+			nopm |= 0x80 >> chp;
+	return nopm;
+}
+
+/*
+ * add functions for path masks
+ * the existing path mask will be extended by the given path mask
+ */
+static inline void dasd_path_add_tbvpm(struct dasd_device *device, __u8 pm)
+{
+	int chp;
+
+	for (chp = 0; chp < 8; chp++)
+		if (pm & (0x80 >> chp))
+			dasd_path_verify(device, chp);
+}
+
+static inline void dasd_path_add_opm(struct dasd_device *device, __u8 pm)
+{
+	int chp;
+
+	for (chp = 0; chp < 8; chp++)
+		if (pm & (0x80 >> chp)) {
+			dasd_path_operational(device, chp);
+			/*
+			 * if the path is used
+			 * it should not be in one of the negative lists
+			 */
+			dasd_path_clear_nohpf(device, chp);
+			dasd_path_clear_cuir(device, chp);
+			dasd_path_clear_cable(device, chp);
+			dasd_path_clear_ifcc(device, chp);
+		}
+}
+
+static inline void dasd_path_add_cablepm(struct dasd_device *device, __u8 pm)
+{
+	int chp;
+
+	for (chp = 0; chp < 8; chp++)
+		if (pm & (0x80 >> chp))
+			dasd_path_miscabled(device, chp);
+}
+
+static inline void dasd_path_add_cuirpm(struct dasd_device *device, __u8 pm)
+{
+	int chp;
+
+	for (chp = 0; chp < 8; chp++)
+		if (pm & (0x80 >> chp))
+			dasd_path_cuir(device, chp);
+}
+
+static inline void dasd_path_add_ifccpm(struct dasd_device *device, __u8 pm)
+{
+	int chp;
+
+	for (chp = 0; chp < 8; chp++)
+		if (pm & (0x80 >> chp))
+			dasd_path_ifcc(device, chp);
+}
+
+static inline void dasd_path_add_nppm(struct dasd_device *device, __u8 pm)
+{
+	int chp;
+
+	for (chp = 0; chp < 8; chp++)
+		if (pm & (0x80 >> chp))
+			dasd_path_nonpreferred(device, chp);
+}
+
+static inline void dasd_path_add_nohpfpm(struct dasd_device *device, __u8 pm)
+{
+	int chp;
+
+	for (chp = 0; chp < 8; chp++)
+		if (pm & (0x80 >> chp))
+			dasd_path_nohpf(device, chp);
+}
+
+static inline void dasd_path_add_ppm(struct dasd_device *device, __u8 pm)
+{
+	int chp;
+
+	for (chp = 0; chp < 8; chp++)
+		if (pm & (0x80 >> chp))
+			dasd_path_preferred(device, chp);
+}
+
+/*
+ * set functions for path masks
+ * the existing path mask will be replaced by the given path mask
+ */
+static inline void dasd_path_set_tbvpm(struct dasd_device *device, __u8 pm)
+{
+	int chp;
+
+	for (chp = 0; chp < 8; chp++)
+		if (pm & (0x80 >> chp))
+			dasd_path_verify(device, chp);
+		else
+			dasd_path_clear_verify(device, chp);
+}
+
+static inline void dasd_path_set_opm(struct dasd_device *device, __u8 pm)
+{
+	int chp;
+
+	for (chp = 0; chp < 8; chp++) {
+		dasd_path_clear_oper(device, chp);
+		if (pm & (0x80 >> chp)) {
+			dasd_path_operational(device, chp);
+			/*
+			 * if the path is used
+			 * it should not be in one of the negative lists
+			 */
+			dasd_path_clear_nohpf(device, chp);
+			dasd_path_clear_cuir(device, chp);
+			dasd_path_clear_cable(device, chp);
+			dasd_path_clear_ifcc(device, chp);
+		}
+	}
+}
+
+/*
+ * remove functions for path masks
+ * bits set in the given path mask are cleared from the existing one
+ */
+static inline void dasd_path_remove_opm(struct dasd_device *device, __u8 pm)
+{
+	int chp;
+
+	for (chp = 0; chp < 8; chp++) {
+		if (pm & (0x80 >> chp))
+			dasd_path_clear_oper(device, chp);
+	}
+}
+
+/*
+ * add the newly available path to the to-be-verified path mask and remove
+ * it from normal operation until it is verified
+ */
+static inline void dasd_path_available(struct dasd_device *device, int chp)
+{
+	dasd_path_clear_oper(device, chp);
+	dasd_path_verify(device, chp);
+}
+
+static inline void dasd_path_notoper(struct dasd_device *device, int chp)
+{
+	dasd_path_clear_oper(device, chp);
+	dasd_path_clear_preferred(device, chp);
+	dasd_path_clear_nonpreferred(device, chp);
+}
+
+/*
+ * remove all paths from normal operation
+ */
+static inline void dasd_path_no_path(struct dasd_device *device)
+{
+	int chp;
+
+	for (chp = 0; chp < 8; chp++)
+		dasd_path_notoper(device, chp);
+
+	dasd_path_clear_all_verify(device);
+}
+
+/* end - path handling */
+
+#endif				/* DASD_H */
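
The promotion protocol encoded by dasd_path_available() and
dasd_path_operational() - a new path is parked in the to-be-verified mask and
only enters opm once verified - can be modelled in isolation. A minimal sketch
using a local model_device stand-in; none of these names are the driver's API:

	#include <stdio.h>

	struct model_device {
		unsigned char opm;	/* operational path mask */
		unsigned char tbvpm;	/* to-be-verified path mask */
	};

	/* Mirrors dasd_path_available(): drop the path from normal
	 * operation and queue it for verification. */
	static void path_available(struct model_device *dev, int chp)
	{
		dev->opm &= ~(0x80 >> chp);
		dev->tbvpm |= 0x80 >> chp;
	}

	/* Promotion after a successful verification. */
	static void path_verified(struct model_device *dev, int chp)
	{
		dev->tbvpm &= ~(0x80 >> chp);
		dev->opm |= 0x80 >> chp;
	}

	int main(void)
	{
		struct model_device dev = { .opm = 0x80, .tbvpm = 0x00 };

		path_available(&dev, 1);
		path_verified(&dev, 1);
		/* prints opm=0xc0 tbvpm=0x00 */
		printf("opm=0x%02x tbvpm=0x%02x\n", dev.opm, dev.tbvpm);
		return 0;
	}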
diff --git a/drivers/s390/block/dasd_ioctl.c b/drivers/s390/block/dasd_ioctl.c
new file mode 100644
index 0000000..2016e0e
--- /dev/null
+++ b/drivers/s390/block/dasd_ioctl.c
@@ -0,0 +1,618 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
+ *		    Horst Hummel <Horst.Hummel@de.ibm.com>
+ *		    Carsten Otte <Cotte@de.ibm.com>
+ *		    Martin Schwidefsky <schwidefsky@de.ibm.com>
+ * Bugreports.to..: <Linux390@de.ibm.com>
+ * Copyright IBM Corp. 1999, 2001
+ *
+ * i/o controls for the dasd driver.
+ */
+
+#define KMSG_COMPONENT "dasd"
+
+#include <linux/interrupt.h>
+#include <linux/compat.h>
+#include <linux/major.h>
+#include <linux/fs.h>
+#include <linux/blkpg.h>
+#include <linux/slab.h>
+#include <asm/ccwdev.h>
+#include <asm/schid.h>
+#include <asm/cmb.h>
+#include <linux/uaccess.h>
+
+/* This is ugly... */
+#define PRINTK_HEADER "dasd_ioctl:"
+
+#include "dasd_int.h"
+
+
+static int
+dasd_ioctl_api_version(void __user *argp)
+{
+	int ver = DASD_API_VERSION;
+
+	return put_user(ver, (int __user *)argp);
+}
+
+/*
+ * Enable device.
+ * Used by dasdfmt after BIODASDDISABLE to retrigger blocksize detection.
+ */
+static int
+dasd_ioctl_enable(struct block_device *bdev)
+{
+	struct dasd_device *base;
+
+	if (!capable(CAP_SYS_ADMIN))
+		return -EACCES;
+
+	base = dasd_device_from_gendisk(bdev->bd_disk);
+	if (!base)
+		return -ENODEV;
+
+	dasd_enable_device(base);
+	/* Formatting the dasd device can change the capacity. */
+	mutex_lock(&bdev->bd_mutex);
+	i_size_write(bdev->bd_inode,
+		     (loff_t)get_capacity(base->block->gdp) << 9);
+	mutex_unlock(&bdev->bd_mutex);
+	dasd_put_device(base);
+	return 0;
+}
+
+/*
+ * Disable device.
+ * Used by dasdfmt. Disable I/O operations but allow ioctls.
+ */
+static int
+dasd_ioctl_disable(struct block_device *bdev)
+{
+	struct dasd_device *base;
+
+	if (!capable(CAP_SYS_ADMIN))
+		return -EACCES;
+
+	base = dasd_device_from_gendisk(bdev->bd_disk);
+	if (!base)
+		return -ENODEV;
+	/*
+	 * Man this is sick. We don't do a real disable but only downgrade
+	 * the device to DASD_STATE_BASIC. The reason is that dasdfmt uses
+	 * BIODASDDISABLE to disable accesses to the device via the block
+	 * device layer but it still wants to do i/o on the device by
+	 * using the BIODASDFMT ioctl. Therefore the correct state for the
+	 * device is DASD_STATE_BASIC, which still allows basic i/o.
+	 */
+	dasd_set_target_state(base, DASD_STATE_BASIC);
+	/*
+	 * Set i_size to zero, since read, write, etc. check against this
+	 * value.
+	 */
+	mutex_lock(&bdev->bd_mutex);
+	i_size_write(bdev->bd_inode, 0);
+	mutex_unlock(&bdev->bd_mutex);
+	dasd_put_device(base);
+	return 0;
+}
+
+/*
+ * Quiesce device.
+ */
+static int dasd_ioctl_quiesce(struct dasd_block *block)
+{
+	unsigned long flags;
+	struct dasd_device *base;
+
+	base = block->base;
+	if (!capable(CAP_SYS_ADMIN))
+		return -EACCES;
+
+	pr_info("%s: The DASD has been put in the quiesce "
+		"state\n", dev_name(&base->cdev->dev));
+	spin_lock_irqsave(get_ccwdev_lock(base->cdev), flags);
+	dasd_device_set_stop_bits(base, DASD_STOPPED_QUIESCE);
+	spin_unlock_irqrestore(get_ccwdev_lock(base->cdev), flags);
+	return 0;
+}
+
+
+/*
+ * Resume device.
+ */
+static int dasd_ioctl_resume(struct dasd_block *block)
+{
+	unsigned long flags;
+	struct dasd_device *base;
+
+	base = block->base;
+	if (!capable(CAP_SYS_ADMIN))
+		return -EACCES;
+
+	pr_info("%s: I/O operations have been resumed "
+		"on the DASD\n", dev_name(&base->cdev->dev));
+	spin_lock_irqsave(get_ccwdev_lock(base->cdev), flags);
+	dasd_device_remove_stop_bits(base, DASD_STOPPED_QUIESCE);
+	spin_unlock_irqrestore(get_ccwdev_lock(base->cdev), flags);
+
+	dasd_schedule_block_bh(block);
+	return 0;
+}
+
+/*
+ * Abort all failfast I/O on a device.
+ */
+static int dasd_ioctl_abortio(struct dasd_block *block)
+{
+	unsigned long flags;
+	struct dasd_device *base;
+	struct dasd_ccw_req *cqr, *n;
+
+	base = block->base;
+	if (!capable(CAP_SYS_ADMIN))
+		return -EACCES;
+
+	if (test_and_set_bit(DASD_FLAG_ABORTALL, &base->flags))
+		return 0;
+	DBF_DEV_EVENT(DBF_NOTICE, base, "%s", "abortall flag set");
+
+	spin_lock_irqsave(&block->request_queue_lock, flags);
+	spin_lock(&block->queue_lock);
+	list_for_each_entry_safe(cqr, n, &block->ccw_queue, blocklist) {
+		if (test_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags) &&
+		    cqr->callback_data &&
+		    cqr->callback_data != DASD_SLEEPON_START_TAG &&
+		    cqr->callback_data != DASD_SLEEPON_END_TAG) {
+			spin_unlock(&block->queue_lock);
+			blk_abort_request(cqr->callback_data);
+			spin_lock(&block->queue_lock);
+		}
+	}
+	spin_unlock(&block->queue_lock);
+	spin_unlock_irqrestore(&block->request_queue_lock, flags);
+
+	dasd_schedule_block_bh(block);
+	return 0;
+}
+
+/*
+ * Allow I/O on a device
+ */
+static int dasd_ioctl_allowio(struct dasd_block *block)
+{
+	struct dasd_device *base;
+
+	base = block->base;
+	if (!capable(CAP_SYS_ADMIN))
+		return -EACCES;
+
+	if (test_and_clear_bit(DASD_FLAG_ABORTALL, &base->flags))
+		DBF_DEV_EVENT(DBF_NOTICE, base, "%s", "abortall flag unset");
+
+	return 0;
+}
+
+/*
+ * Perform formatting of _device_ according to _fdata_.
+ * Note: The discipline's format_device function is assumed to deliver
+ * formatting commands that format multiple units of the device. For ECKD
+ * devices this means CCWs are generated to format multiple tracks.
+ */
+static int
+dasd_format(struct dasd_block *block, struct format_data_t *fdata)
+{
+	struct dasd_device *base;
+	int rc;
+
+	base = block->base;
+	if (base->discipline->format_device == NULL)
+		return -EPERM;
+
+	if (base->state != DASD_STATE_BASIC) {
+		pr_warn("%s: The DASD cannot be formatted while it is enabled\n",
+			dev_name(&base->cdev->dev));
+		return -EBUSY;
+	}
+
+	DBF_DEV_EVENT(DBF_NOTICE, base,
+		      "formatting units %u to %u (%u B blocks) flags %u",
+		      fdata->start_unit,
+		      fdata->stop_unit, fdata->blksize, fdata->intensity);
+
+	/* Since dasdfmt keeps the device open after it was disabled,
+	 * there still exists an inode for this device.
+	 * We must update i_blkbits, otherwise we might get errors when
+	 * enabling the device later.
+	 */
+	if (fdata->start_unit == 0) {
+		struct block_device *bdev = bdget_disk(block->gdp, 0);
+		bdev->bd_inode->i_blkbits = blksize_bits(fdata->blksize);
+		bdput(bdev);
+	}
+
+	rc = base->discipline->format_device(base, fdata, 1);
+	if (rc == -EAGAIN)
+		rc = base->discipline->format_device(base, fdata, 0);
+
+	return rc;
+}
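
dasd_format() insists on DASD_STATE_BASIC, which is why dasdfmt brackets the
format with BIODASDDISABLE and BIODASDENABLE. A hedged userspace sketch of
that sequence (s390 only; assumes the uapi definitions of these ioctls and
struct format_data_t from <asm/dasd.h>, with /dev/dasdx as a placeholder
node):

	#include <sys/ioctl.h>
	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>
	#include <asm/dasd.h>

	int main(void)
	{
		struct format_data_t fdata = {
			.start_unit = 0,	/* first unit (track) to format */
			.stop_unit  = 9,	/* last unit, inclusive */
			.blksize    = 4096,	/* block size in bytes */
			.intensity  = 0,	/* format flags */
		};
		int fd = open("/dev/dasdx", O_RDWR);

		if (fd < 0)
			return 1;
		ioctl(fd, BIODASDDISABLE);		/* drop to DASD_STATE_BASIC */
		if (ioctl(fd, BIODASDFMT, &fdata))	/* -EBUSY while still enabled */
			perror("BIODASDFMT");
		ioctl(fd, BIODASDENABLE);		/* redetect blocksize/capacity */
		close(fd);
		return 0;
	}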
+
+static int dasd_check_format(struct dasd_block *block,
+			     struct format_check_t *cdata)
+{
+	struct dasd_device *base;
+	int rc;
+
+	base = block->base;
+	if (!base->discipline->check_device_format)
+		return -ENOTTY;
+
+	rc = base->discipline->check_device_format(base, cdata, 1);
+	if (rc == -EAGAIN)
+		rc = base->discipline->check_device_format(base, cdata, 0);
+
+	return rc;
+}
+
+/*
+ * Format device.
+ */
+static int
+dasd_ioctl_format(struct block_device *bdev, void __user *argp)
+{
+	struct dasd_device *base;
+	struct format_data_t fdata;
+	int rc;
+
+	if (!capable(CAP_SYS_ADMIN))
+		return -EACCES;
+	if (!argp)
+		return -EINVAL;
+	base = dasd_device_from_gendisk(bdev->bd_disk);
+	if (!base)
+		return -ENODEV;
+	if (base->features & DASD_FEATURE_READONLY ||
+	    test_bit(DASD_FLAG_DEVICE_RO, &base->flags)) {
+		dasd_put_device(base);
+		return -EROFS;
+	}
+	if (copy_from_user(&fdata, argp, sizeof(struct format_data_t))) {
+		dasd_put_device(base);
+		return -EFAULT;
+	}
+	if (bdev != bdev->bd_contains) {
+		pr_warn("%s: The specified DASD is a partition and cannot be formatted\n",
+			dev_name(&base->cdev->dev));
+		dasd_put_device(base);
+		return -EINVAL;
+	}
+	rc = dasd_format(base->block, &fdata);
+	dasd_put_device(base);
+
+	return rc;
+}
+
+/*
+ * Check device format
+ */
+static int dasd_ioctl_check_format(struct block_device *bdev, void __user *argp)
+{
+	struct format_check_t cdata;
+	struct dasd_device *base;
+	int rc = 0;
+
+	if (!argp)
+		return -EINVAL;
+
+	base = dasd_device_from_gendisk(bdev->bd_disk);
+	if (!base)
+		return -ENODEV;
+	if (bdev != bdev->bd_contains) {
+		pr_warn("%s: The specified DASD is a partition and cannot be checked\n",
+			dev_name(&base->cdev->dev));
+		rc = -EINVAL;
+		goto out_err;
+	}
+
+	if (copy_from_user(&cdata, argp, sizeof(cdata))) {
+		rc = -EFAULT;
+		goto out_err;
+	}
+
+	rc = dasd_check_format(base->block, &cdata);
+	if (rc)
+		goto out_err;
+
+	if (copy_to_user(argp, &cdata, sizeof(cdata)))
+		rc = -EFAULT;
+
+out_err:
+	dasd_put_device(base);
+
+	return rc;
+}
+
+#ifdef CONFIG_DASD_PROFILE
+/*
+ * Reset device profile information
+ */
+static int dasd_ioctl_reset_profile(struct dasd_block *block)
+{
+	dasd_profile_reset(&block->profile);
+	return 0;
+}
+
+/*
+ * Return device profile information
+ */
+static int dasd_ioctl_read_profile(struct dasd_block *block, void __user *argp)
+{
+	struct dasd_profile_info_t *data;
+	int rc = 0;
+
+	data = kmalloc(sizeof(*data), GFP_KERNEL);
+	if (!data)
+		return -ENOMEM;
+
+	spin_lock_bh(&block->profile.lock);
+	if (block->profile.data) {
+		data->dasd_io_reqs = block->profile.data->dasd_io_reqs;
+		data->dasd_io_sects = block->profile.data->dasd_io_sects;
+		memcpy(data->dasd_io_secs, block->profile.data->dasd_io_secs,
+		       sizeof(data->dasd_io_secs));
+		memcpy(data->dasd_io_times, block->profile.data->dasd_io_times,
+		       sizeof(data->dasd_io_times));
+		memcpy(data->dasd_io_timps, block->profile.data->dasd_io_timps,
+		       sizeof(data->dasd_io_timps));
+		memcpy(data->dasd_io_time1, block->profile.data->dasd_io_time1,
+		       sizeof(data->dasd_io_time1));
+		memcpy(data->dasd_io_time2, block->profile.data->dasd_io_time2,
+		       sizeof(data->dasd_io_time2));
+		memcpy(data->dasd_io_time2ps,
+		       block->profile.data->dasd_io_time2ps,
+		       sizeof(data->dasd_io_time2ps));
+		memcpy(data->dasd_io_time3, block->profile.data->dasd_io_time3,
+		       sizeof(data->dasd_io_time3));
+		memcpy(data->dasd_io_nr_req,
+		       block->profile.data->dasd_io_nr_req,
+		       sizeof(data->dasd_io_nr_req));
+		spin_unlock_bh(&block->profile.lock);
+	} else {
+		spin_unlock_bh(&block->profile.lock);
+		rc = -EIO;
+		goto out;
+	}
+	if (copy_to_user(argp, data, sizeof(*data)))
+		rc = -EFAULT;
+out:
+	kfree(data);
+	return rc;
+}
+#else
+static int dasd_ioctl_reset_profile(struct dasd_block *block)
+{
+	return -ENOTTY;
+}
+
+static int dasd_ioctl_read_profile(struct dasd_block *block, void __user *argp)
+{
+	return -ENOTTY;
+}
+#endif
+
+/*
+ * Return dasd information. Used for BIODASDINFO and BIODASDINFO2.
+ */
+static int dasd_ioctl_information(struct dasd_block *block,
+				  unsigned int cmd, void __user *argp)
+{
+	struct dasd_information2_t *dasd_info;
+	struct subchannel_id sch_id;
+	struct ccw_dev_id dev_id;
+	struct dasd_device *base;
+	struct ccw_device *cdev;
+	unsigned long flags;
+	int rc;
+
+	base = block->base;
+	if (!base->discipline || !base->discipline->fill_info)
+		return -EINVAL;
+
+	dasd_info = kzalloc(sizeof(struct dasd_information2_t), GFP_KERNEL);
+	if (dasd_info == NULL)
+		return -ENOMEM;
+
+	rc = base->discipline->fill_info(base, dasd_info);
+	if (rc) {
+		kfree(dasd_info);
+		return rc;
+	}
+
+	cdev = base->cdev;
+	ccw_device_get_id(cdev, &dev_id);
+	ccw_device_get_schid(cdev, &sch_id);
+
+	dasd_info->devno = dev_id.devno;
+	dasd_info->schid = sch_id.sch_no;
+	dasd_info->cu_type = cdev->id.cu_type;
+	dasd_info->cu_model = cdev->id.cu_model;
+	dasd_info->dev_type = cdev->id.dev_type;
+	dasd_info->dev_model = cdev->id.dev_model;
+	dasd_info->status = base->state;
+	/*
+	 * The open_count is increased for every opener; that includes
+	 * the blkdev_get in dasd_scan_partitions.
+	 * This must be hidden from user-space.
+	 */
+	dasd_info->open_count = atomic_read(&block->open_count);
+	if (!block->bdev)
+		dasd_info->open_count++;
+
+	/*
+	 * check if device is really formatted
+	 * LDL / CDL was returned by 'fill_info'
+	 */
+	if ((base->state < DASD_STATE_READY) ||
+	    (dasd_check_blocksize(block->bp_block)))
+		dasd_info->format = DASD_FORMAT_NONE;
+
+	dasd_info->features |=
+		((base->features & DASD_FEATURE_READONLY) != 0);
+
+	memcpy(dasd_info->type, base->discipline->name, 4);
+
+	if (block->request_queue->request_fn) {
+		struct list_head *l;
+#ifdef DASD_EXTENDED_PROFILING
+		spin_lock_irqsave(&block->lock, flags);
+		list_for_each(l, &block->request_queue->queue_head)
+			dasd_info->req_queue_len++;
+		spin_unlock_irqrestore(&block->lock, flags);
+#endif				/* DASD_EXTENDED_PROFILING */
+		spin_lock_irqsave(get_ccwdev_lock(base->cdev), flags);
+		list_for_each(l, &base->ccw_queue)
+			dasd_info->chanq_len++;
+		spin_unlock_irqrestore(get_ccwdev_lock(base->cdev),
+				       flags);
+	}
+
+	rc = 0;
+	if (copy_to_user(argp, dasd_info,
+			 ((cmd == (unsigned int) BIODASDINFO2) ?
+			  sizeof(struct dasd_information2_t) :
+			  sizeof(struct dasd_information_t))))
+		rc = -EFAULT;
+	kfree(dasd_info);
+	return rc;
+}
+
+/*
+ * Set read only
+ */
+static int
+dasd_ioctl_set_ro(struct block_device *bdev, void __user *argp)
+{
+	struct dasd_device *base;
+	int intval, rc;
+
+	if (!capable(CAP_SYS_ADMIN))
+		return -EACCES;
+	if (bdev != bdev->bd_contains)
+		/* ro setting is not allowed for partitions */
+		return -EINVAL;
+	if (get_user(intval, (int __user *)argp))
+		return -EFAULT;
+	base = dasd_device_from_gendisk(bdev->bd_disk);
+	if (!base)
+		return -ENODEV;
+	if (!intval && test_bit(DASD_FLAG_DEVICE_RO, &base->flags)) {
+		dasd_put_device(base);
+		return -EROFS;
+	}
+	set_disk_ro(bdev->bd_disk, intval);
+	rc = dasd_set_feature(base->cdev, DASD_FEATURE_READONLY, intval);
+	dasd_put_device(base);
+	return rc;
+}
+
+static int dasd_ioctl_readall_cmb(struct dasd_block *block, unsigned int cmd,
+				  struct cmbdata __user *argp)
+{
+	size_t size = _IOC_SIZE(cmd);
+	struct cmbdata data;
+	int ret;
+
+	ret = cmf_readall(block->base->cdev, &data);
+	if (!ret && copy_to_user(argp, &data, min(size, sizeof(*argp))))
+		return -EFAULT;
+	return ret;
+}
+
+int dasd_ioctl(struct block_device *bdev, fmode_t mode,
+	       unsigned int cmd, unsigned long arg)
+{
+	struct dasd_block *block;
+	struct dasd_device *base;
+	void __user *argp;
+	int rc;
+
+	if (is_compat_task())
+		argp = compat_ptr(arg);
+	else
+		argp = (void __user *)arg;
+
+	if ((_IOC_DIR(cmd) != _IOC_NONE) && !arg) {
+		PRINT_DEBUG("empty data ptr");
+		return -EINVAL;
+	}
+
+	base = dasd_device_from_gendisk(bdev->bd_disk);
+	if (!base)
+		return -ENODEV;
+	block = base->block;
+	rc = 0;
+	switch (cmd) {
+	case BIODASDDISABLE:
+		rc = dasd_ioctl_disable(bdev);
+		break;
+	case BIODASDENABLE:
+		rc = dasd_ioctl_enable(bdev);
+		break;
+	case BIODASDQUIESCE:
+		rc = dasd_ioctl_quiesce(block);
+		break;
+	case BIODASDRESUME:
+		rc = dasd_ioctl_resume(block);
+		break;
+	case BIODASDABORTIO:
+		rc = dasd_ioctl_abortio(block);
+		break;
+	case BIODASDALLOWIO:
+		rc = dasd_ioctl_allowio(block);
+		break;
+	case BIODASDFMT:
+		rc = dasd_ioctl_format(bdev, argp);
+		break;
+	case BIODASDCHECKFMT:
+		rc = dasd_ioctl_check_format(bdev, argp);
+		break;
+	case BIODASDINFO:
+	case BIODASDINFO2:
+		rc = dasd_ioctl_information(block, cmd, argp);
+		break;
+	case BIODASDPRRD:
+		rc = dasd_ioctl_read_profile(block, argp);
+		break;
+	case BIODASDPRRST:
+		rc = dasd_ioctl_reset_profile(block);
+		break;
+	case BLKROSET:
+		rc = dasd_ioctl_set_ro(bdev, argp);
+		break;
+	case DASDAPIVER:
+		rc = dasd_ioctl_api_version(argp);
+		break;
+	case BIODASDCMFENABLE:
+		rc = enable_cmf(base->cdev);
+		break;
+	case BIODASDCMFDISABLE:
+		rc = disable_cmf(base->cdev);
+		break;
+	case BIODASDREADALLCMB:
+		rc = dasd_ioctl_readall_cmb(block, cmd, argp);
+		break;
+	default:
+		/* if the discipline has an ioctl method try it. */
+		rc = -ENOTTY;
+		if (base->discipline->ioctl)
+			rc = base->discipline->ioctl(block, cmd, argp);
+	}
+	dasd_put_device(base);
+	return rc;
+}
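
From userspace the dispatch table above is reached with a plain ioctl on the
block node. A short query sketch, assuming <asm/dasd.h> provides DASDAPIVER,
BIODASDINFO2 and struct dasd_information2_t (s390 uapi), with /dev/dasda as a
placeholder:

	#include <sys/ioctl.h>
	#include <fcntl.h>
	#include <stdio.h>
	#include <asm/dasd.h>

	int main(void)
	{
		struct dasd_information2_t info;
		int fd, ver;

		fd = open("/dev/dasda", O_RDONLY);
		if (fd < 0)
			return 1;
		if (ioctl(fd, DASDAPIVER, &ver) == 0)
			printf("DASD API version %d\n", ver);
		if (ioctl(fd, BIODASDINFO2, &info) == 0)
			printf("devno 0x%04x, discipline %.4s\n",
			       info.devno, info.type);
		return 0;
	}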
diff --git a/drivers/s390/block/dasd_proc.c b/drivers/s390/block/dasd_proc.c
new file mode 100644
index 0000000..5cb80c6
--- /dev/null
+++ b/drivers/s390/block/dasd_proc.c
@@ -0,0 +1,370 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
+ *		    Horst Hummel <Horst.Hummel@de.ibm.com>
+ *		    Carsten Otte <Cotte@de.ibm.com>
+ *		    Martin Schwidefsky <schwidefsky@de.ibm.com>
+ * Bugreports.to..: <Linux390@de.ibm.com>
+ * Copyright IBM Corp. 1999, 2002
+ *
+ * /proc interface for the dasd driver.
+ *
+ */
+
+#define KMSG_COMPONENT "dasd"
+
+#include <linux/ctype.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/seq_file.h>
+#include <linux/vmalloc.h>
+#include <linux/proc_fs.h>
+
+#include <asm/debug.h>
+#include <linux/uaccess.h>
+
+/* This is ugly... */
+#define PRINTK_HEADER "dasd_proc:"
+
+#include "dasd_int.h"
+
+static struct proc_dir_entry *dasd_proc_root_entry = NULL;
+static struct proc_dir_entry *dasd_devices_entry = NULL;
+static struct proc_dir_entry *dasd_statistics_entry = NULL;
+
+static int
+dasd_devices_show(struct seq_file *m, void *v)
+{
+	struct dasd_device *device;
+	struct dasd_block *block;
+	char *substr;
+
+	device = dasd_device_from_devindex((unsigned long) v - 1);
+	if (IS_ERR(device))
+		return 0;
+	if (device->block) {
+		block = device->block;
+	} else {
+		dasd_put_device(device);
+		return 0;
+	}
+	/* Print device number. */
+	seq_printf(m, "%s", dev_name(&device->cdev->dev));
+	/* Print discipline string. */
+	if (device->discipline != NULL)
+		seq_printf(m, "(%s)", device->discipline->name);
+	else
+		seq_printf(m, "(none)");
+	/* Print kdev. */
+	if (block->gdp)
+		seq_printf(m, " at (%3d:%6d)",
+			   MAJOR(disk_devt(block->gdp)),
+			   MINOR(disk_devt(block->gdp)));
+	else
+		seq_printf(m, "  at (???:??????)");
+	/* Print device name. */
+	if (block->gdp)
+		seq_printf(m, " is %-8s", block->gdp->disk_name);
+	else
+		seq_printf(m, " is ????????");
+	/* Print devices features. */
+	substr = (device->features & DASD_FEATURE_READONLY) ? "(ro)" : " ";
+	seq_printf(m, "%4s: ", substr);
+	/* Print device status information. */
+	switch (device->state) {
+	case DASD_STATE_NEW:
+		seq_printf(m, "new");
+		break;
+	case DASD_STATE_KNOWN:
+		seq_printf(m, "detected");
+		break;
+	case DASD_STATE_BASIC:
+		seq_printf(m, "basic");
+		break;
+	case DASD_STATE_UNFMT:
+		seq_printf(m, "unformatted");
+		break;
+	case DASD_STATE_READY:
+	case DASD_STATE_ONLINE:
+		seq_printf(m, "active ");
+		if (dasd_check_blocksize(block->bp_block))
+			seq_printf(m, "n/f	 ");
+		else
+			seq_printf(m,
+				   "at blocksize: %u, %lu blocks, %lu MB",
+				   block->bp_block, block->blocks,
+				   ((block->bp_block >> 9) *
+				    block->blocks) >> 11);
+		break;
+	default:
+		seq_printf(m, "no stat");
+		break;
+	}
+	dasd_put_device(device);
+	if (dasd_probeonly)
+		seq_printf(m, "(probeonly)");
+	seq_printf(m, "\n");
+	return 0;
+}
+
+static void *dasd_devices_start(struct seq_file *m, loff_t *pos)
+{
+	if (*pos >= dasd_max_devindex)
+		return NULL;
+	return (void *)((unsigned long) *pos + 1);
+}
+
+static void *dasd_devices_next(struct seq_file *m, void *v, loff_t *pos)
+{
+	++*pos;
+	return dasd_devices_start(m, pos);
+}
+
+static void dasd_devices_stop(struct seq_file *m, void *v)
+{
+}
+
+static const struct seq_operations dasd_devices_seq_ops = {
+	.start		= dasd_devices_start,
+	.next		= dasd_devices_next,
+	.stop		= dasd_devices_stop,
+	.show		= dasd_devices_show,
+};
+
+#ifdef CONFIG_DASD_PROFILE
+static int dasd_stats_all_block_on(void)
+{
+	int i, rc;
+	struct dasd_device *device;
+
+	rc = 0;
+	for (i = 0; i < dasd_max_devindex; ++i) {
+		device = dasd_device_from_devindex(i);
+		if (IS_ERR(device))
+			continue;
+		if (device->block)
+			rc = dasd_profile_on(&device->block->profile);
+		dasd_put_device(device);
+		if (rc)
+			return rc;
+	}
+	return 0;
+}
+
+static void dasd_stats_all_block_off(void)
+{
+	int i;
+	struct dasd_device *device;
+
+	for (i = 0; i < dasd_max_devindex; ++i) {
+		device = dasd_device_from_devindex(i);
+		if (IS_ERR(device))
+			continue;
+		if (device->block)
+			dasd_profile_off(&device->block->profile);
+		dasd_put_device(device);
+	}
+}
+
+static void dasd_stats_all_block_reset(void)
+{
+	int i;
+	struct dasd_device *device;
+
+	for (i = 0; i < dasd_max_devindex; ++i) {
+		device = dasd_device_from_devindex(i);
+		if (IS_ERR(device))
+			continue;
+		if (device->block)
+			dasd_profile_reset(&device->block->profile);
+		dasd_put_device(device);
+	}
+}
+
+static void dasd_statistics_array(struct seq_file *m, unsigned int *array,
+				  int factor)
+{
+	int i;
+
+	for (i = 0; i < 32; i++) {
+		seq_printf(m, "%7d ", array[i] / factor);
+		if (i == 15)
+			seq_putc(m, '\n');
+	}
+	seq_putc(m, '\n');
+}
+#endif /* CONFIG_DASD_PROFILE */
+
+static int dasd_stats_proc_show(struct seq_file *m, void *v)
+{
+#ifdef CONFIG_DASD_PROFILE
+	struct dasd_profile_info *prof;
+	int factor;
+
+	spin_lock_bh(&dasd_global_profile.lock);
+	prof = dasd_global_profile.data;
+	if (!prof) {
+		spin_unlock_bh(&dasd_global_profile.lock);
+		seq_printf(m, "Statistics are off - they might be "
+				    "switched on using 'echo set on > "
+				    "/proc/dasd/statistics'\n");
+		return 0;
+	}
+
+	/* prevent counter 'overflow' on output */
+	for (factor = 1; (prof->dasd_io_reqs / factor) > 9999999;
+	     factor *= 10);
+
+	seq_printf(m, "%u dasd I/O requests\n", prof->dasd_io_reqs);
+	seq_printf(m, "with %u sectors (512B each)\n",
+		       prof->dasd_io_sects);
+	seq_printf(m, "Scale Factor is  %d\n", factor);
+	seq_printf(m,
+		       "   __<4	   ___8	   __16	   __32	   __64	   _128	"
+		       "   _256	   _512	   __1k	   __2k	   __4k	   __8k	"
+		       "   _16k	   _32k	   _64k	   128k\n");
+	seq_printf(m,
+		       "   _256	   _512	   __1M	   __2M	   __4M	   __8M	"
+		       "   _16M	   _32M	   _64M	   128M	   256M	   512M	"
+		       "   __1G	   __2G	   __4G " "   _>4G\n");
+
+	seq_printf(m, "Histogram of sizes (512B secs)\n");
+	dasd_statistics_array(m, prof->dasd_io_secs, factor);
+	seq_printf(m, "Histogram of I/O times (microseconds)\n");
+	dasd_statistics_array(m, prof->dasd_io_times, factor);
+	seq_printf(m, "Histogram of I/O times per sector\n");
+	dasd_statistics_array(m, prof->dasd_io_timps, factor);
+	seq_printf(m, "Histogram of I/O time till ssch\n");
+	dasd_statistics_array(m, prof->dasd_io_time1, factor);
+	seq_printf(m, "Histogram of I/O time between ssch and irq\n");
+	dasd_statistics_array(m, prof->dasd_io_time2, factor);
+	seq_printf(m, "Histogram of I/O time between ssch "
+			    "and irq per sector\n");
+	dasd_statistics_array(m, prof->dasd_io_time2ps, factor);
+	seq_printf(m, "Histogram of I/O time between irq and end\n");
+	dasd_statistics_array(m, prof->dasd_io_time3, factor);
+	seq_printf(m, "# of req in chanq at enqueuing (1..32)\n");
+	dasd_statistics_array(m, prof->dasd_io_nr_req, factor);
+	spin_unlock_bh(&dasd_global_profile.lock);
+#else
+	seq_printf(m, "Statistics are not activated in this kernel\n");
+#endif
+	return 0;
+}
+
+static int dasd_stats_proc_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, dasd_stats_proc_show, NULL);
+}
+
+static ssize_t dasd_stats_proc_write(struct file *file,
+		const char __user *user_buf, size_t user_len, loff_t *pos)
+{
+#ifdef CONFIG_DASD_PROFILE
+	char *buffer, *str;
+	int rc;
+
+	if (user_len > 65536)
+		user_len = 65536;
+	buffer = dasd_get_user_string(user_buf, user_len);
+	if (IS_ERR(buffer))
+		return PTR_ERR(buffer);
+
+	/* check for valid verbs */
+	str = skip_spaces(buffer);
+	if (strncmp(str, "set", 3) == 0 && isspace(str[3])) {
+		/* 'set xxx' was given */
+		str = skip_spaces(str + 4);
+		if (strcmp(str, "on") == 0) {
+			/* switch on statistics profiling */
+			rc = dasd_stats_all_block_on();
+			if (rc) {
+				dasd_stats_all_block_off();
+				goto out_error;
+			}
+			rc = dasd_profile_on(&dasd_global_profile);
+			if (rc) {
+				dasd_stats_all_block_off();
+				goto out_error;
+			}
+			dasd_profile_reset(&dasd_global_profile);
+			dasd_global_profile_level = DASD_PROFILE_ON;
+			pr_info("The statistics feature has been switched "
+				"on\n");
+		} else if (strcmp(str, "off") == 0) {
+			/* switch off statistics profiling */
+			dasd_global_profile_level = DASD_PROFILE_OFF;
+			dasd_profile_off(&dasd_global_profile);
+			dasd_stats_all_block_off();
+			pr_info("The statistics feature has been switched "
+				"off\n");
+		} else
+			goto out_parse_error;
+	} else if (strncmp(str, "reset", 5) == 0) {
+		/* reset the statistics */
+		dasd_profile_reset(&dasd_global_profile);
+		dasd_stats_all_block_reset();
+		pr_info("The statistics have been reset\n");
+	} else
+		goto out_parse_error;
+	vfree(buffer);
+	return user_len;
+out_parse_error:
+	rc = -EINVAL;
+	pr_warn("%s is not a supported value for /proc/dasd/statistics\n", str);
+out_error:
+	vfree(buffer);
+	return rc;
+#else
+	pr_warn("/proc/dasd/statistics is not activated in this kernel\n");
+	return user_len;
+#endif				/* CONFIG_DASD_PROFILE */
+}
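
The verbs this parser accepts are exactly "set on", "set off" and "reset", so
'echo set on > /proc/dasd/statistics' is the canonical usage. The same from C,
as a minimal sketch:

	#include <stdio.h>

	/* Write one control verb to /proc/dasd/statistics. */
	static int dasd_statistics_write(const char *verb)
	{
		FILE *f = fopen("/proc/dasd/statistics", "w");

		if (!f)
			return -1;
		fprintf(f, "%s\n", verb);
		return fclose(f);
	}

	int main(void)
	{
		dasd_statistics_write("set on");
		dasd_statistics_write("reset");
		return 0;
	}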
+
+static const struct file_operations dasd_stats_proc_fops = {
+	.owner		= THIS_MODULE,
+	.open		= dasd_stats_proc_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= single_release,
+	.write		= dasd_stats_proc_write,
+};
+
+/*
+ * Create dasd proc-fs entries.
+ * In case creation fails, clean up and return -ENOENT.
+ */
+int
+dasd_proc_init(void)
+{
+	dasd_proc_root_entry = proc_mkdir("dasd", NULL);
+	if (!dasd_proc_root_entry)
+		goto out_nodasd;
+	dasd_devices_entry = proc_create_seq("devices",
+					 S_IFREG | S_IRUGO | S_IWUSR,
+					 dasd_proc_root_entry,
+					 &dasd_devices_seq_ops);
+	if (!dasd_devices_entry)
+		goto out_nodevices;
+	dasd_statistics_entry = proc_create("statistics",
+					    S_IFREG | S_IRUGO | S_IWUSR,
+					    dasd_proc_root_entry,
+					    &dasd_stats_proc_fops);
+	if (!dasd_statistics_entry)
+		goto out_nostatistics;
+	return 0;
+
+ out_nostatistics:
+	remove_proc_entry("devices", dasd_proc_root_entry);
+ out_nodevices:
+	remove_proc_entry("dasd", NULL);
+ out_nodasd:
+	return -ENOENT;
+}
+
+void
+dasd_proc_exit(void)
+{
+	remove_proc_entry("devices", dasd_proc_root_entry);
+	remove_proc_entry("statistics", dasd_proc_root_entry);
+	remove_proc_entry("dasd", NULL);
+}
diff --git a/drivers/s390/block/dcssblk.c b/drivers/s390/block/dcssblk.c
new file mode 100644
index 0000000..23e526c
--- /dev/null
+++ b/drivers/s390/block/dcssblk.c
@@ -0,0 +1,1136 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * dcssblk.c -- the S/390 block driver for dcss memory
+ *
+ * Authors: Carsten Otte, Stefan Weinhuber, Gerald Schaefer
+ */
+
+#define KMSG_COMPONENT "dcssblk"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/ctype.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/blkdev.h>
+#include <linux/completion.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/pfn_t.h>
+#include <linux/uio.h>
+#include <linux/dax.h>
+#include <asm/extmem.h>
+#include <asm/io.h>
+
+#define DCSSBLK_NAME "dcssblk"
+#define DCSSBLK_MINORS_PER_DISK 1
+#define DCSSBLK_PARM_LEN 400
+#define DCSS_BUS_ID_SIZE 20
+
+static int dcssblk_open(struct block_device *bdev, fmode_t mode);
+static void dcssblk_release(struct gendisk *disk, fmode_t mode);
+static blk_qc_t dcssblk_make_request(struct request_queue *q,
+						struct bio *bio);
+static long dcssblk_dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff,
+		long nr_pages, void **kaddr, pfn_t *pfn);
+
+static char dcssblk_segments[DCSSBLK_PARM_LEN] = "\0";
+
+static int dcssblk_major;
+static const struct block_device_operations dcssblk_devops = {
+	.owner		= THIS_MODULE,
+	.open		= dcssblk_open,
+	.release	= dcssblk_release,
+};
+
+static size_t dcssblk_dax_copy_from_iter(struct dax_device *dax_dev,
+		pgoff_t pgoff, void *addr, size_t bytes, struct iov_iter *i)
+{
+	return copy_from_iter(addr, bytes, i);
+}
+
+static size_t dcssblk_dax_copy_to_iter(struct dax_device *dax_dev,
+		pgoff_t pgoff, void *addr, size_t bytes, struct iov_iter *i)
+{
+	return copy_to_iter(addr, bytes, i);
+}
+
+static const struct dax_operations dcssblk_dax_ops = {
+	.direct_access = dcssblk_dax_direct_access,
+	.copy_from_iter = dcssblk_dax_copy_from_iter,
+	.copy_to_iter = dcssblk_dax_copy_to_iter,
+};
+
+struct dcssblk_dev_info {
+	struct list_head lh;
+	struct device dev;
+	char segment_name[DCSS_BUS_ID_SIZE];
+	atomic_t use_count;
+	struct gendisk *gd;
+	unsigned long start;
+	unsigned long end;
+	int segment_type;
+	unsigned char save_pending;
+	unsigned char is_shared;
+	struct request_queue *dcssblk_queue;
+	int num_of_segments;
+	struct list_head seg_list;
+	struct dax_device *dax_dev;
+};
+
+struct segment_info {
+	struct list_head lh;
+	char segment_name[DCSS_BUS_ID_SIZE];
+	unsigned long start;
+	unsigned long end;
+	int segment_type;
+};
+
+static ssize_t dcssblk_add_store(struct device *dev,
+				 struct device_attribute *attr,
+				 const char *buf, size_t count);
+static ssize_t dcssblk_remove_store(struct device *dev,
+				    struct device_attribute *attr,
+				    const char *buf, size_t count);
+
+static DEVICE_ATTR(add, S_IWUSR, NULL, dcssblk_add_store);
+static DEVICE_ATTR(remove, S_IWUSR, NULL, dcssblk_remove_store);
+
+static struct device *dcssblk_root_dev;
+
+static LIST_HEAD(dcssblk_devices);
+static struct rw_semaphore dcssblk_devices_sem;
+
+/*
+ * release function for segment device.
+ */
+static void
+dcssblk_release_segment(struct device *dev)
+{
+	struct dcssblk_dev_info *dev_info;
+	struct segment_info *entry, *temp;
+
+	dev_info = container_of(dev, struct dcssblk_dev_info, dev);
+	list_for_each_entry_safe(entry, temp, &dev_info->seg_list, lh) {
+		list_del(&entry->lh);
+		kfree(entry);
+	}
+	kfree(dev_info);
+	module_put(THIS_MODULE);
+}
+
+/*
+ * Get a minor number. Needs to be called with
+ * down_write(&dcssblk_devices_sem) held; the device must be
+ * enqueued before the semaphore is released.
+ */
+static int
+dcssblk_assign_free_minor(struct dcssblk_dev_info *dev_info)
+{
+	int minor, found;
+	struct dcssblk_dev_info *entry;
+
+	if (dev_info == NULL)
+		return -EINVAL;
+	for (minor = 0; minor < (1<<MINORBITS); minor++) {
+		found = 0;
+		/* test if minor available */
+		list_for_each_entry(entry, &dcssblk_devices, lh)
+			if (minor == entry->gd->first_minor)
+				found++;
+		if (!found)
+			break; /* got unused minor */
+	}
+	if (found)
+		return -EBUSY;
+	dev_info->gd->first_minor = minor;
+	return 0;
+}
+
+/*
+ * get the struct dcssblk_dev_info from dcssblk_devices
+ * for the given name.
+ * down_read(&dcssblk_devices_sem) must be held.
+ */
+static struct dcssblk_dev_info *
+dcssblk_get_device_by_name(char *name)
+{
+	struct dcssblk_dev_info *entry;
+
+	list_for_each_entry(entry, &dcssblk_devices, lh) {
+		if (!strcmp(name, entry->segment_name)) {
+			return entry;
+		}
+	}
+	return NULL;
+}
+
+/*
+ * get the struct segment_info from seg_list
+ * for the given name.
+ * down_read(&dcssblk_devices_sem) must be held.
+ */
+static struct segment_info *
+dcssblk_get_segment_by_name(char *name)
+{
+	struct dcssblk_dev_info *dev_info;
+	struct segment_info *entry;
+
+	list_for_each_entry(dev_info, &dcssblk_devices, lh) {
+		list_for_each_entry(entry, &dev_info->seg_list, lh) {
+			if (!strcmp(name, entry->segment_name))
+				return entry;
+		}
+	}
+	return NULL;
+}
+
+/*
+ * get the highest address of the multi-segment block.
+ */
+static unsigned long
+dcssblk_find_highest_addr(struct dcssblk_dev_info *dev_info)
+{
+	unsigned long highest_addr;
+	struct segment_info *entry;
+
+	highest_addr = 0;
+	list_for_each_entry(entry, &dev_info->seg_list, lh) {
+		if (highest_addr < entry->end)
+			highest_addr = entry->end;
+	}
+	return highest_addr;
+}
+
+/*
+ * get the lowest address of the multi-segment block.
+ */
+static unsigned long
+dcssblk_find_lowest_addr(struct dcssblk_dev_info *dev_info)
+{
+	int set_first;
+	unsigned long lowest_addr;
+	struct segment_info *entry;
+
+	set_first = 0;
+	lowest_addr = 0;
+	list_for_each_entry(entry, &dev_info->seg_list, lh) {
+		if (set_first == 0) {
+			lowest_addr = entry->start;
+			set_first = 1;
+		} else {
+			if (lowest_addr > entry->start)
+				lowest_addr = entry->start;
+		}
+	}
+	return lowest_addr;
+}
+
+/*
+ * Check continuity of segments.
+ */
+static int
+dcssblk_is_continuous(struct dcssblk_dev_info *dev_info)
+{
+	int i, j, rc;
+	struct segment_info *sort_list, *entry, temp;
+
+	if (dev_info->num_of_segments <= 1)
+		return 0;
+
+	sort_list = kcalloc(dev_info->num_of_segments,
+			    sizeof(struct segment_info),
+			    GFP_KERNEL);
+	if (sort_list == NULL)
+		return -ENOMEM;
+	i = 0;
+	list_for_each_entry(entry, &dev_info->seg_list, lh) {
+		memcpy(&sort_list[i], entry, sizeof(struct segment_info));
+		i++;
+	}
+
+	/* sort segments */
+	for (i = 0; i < dev_info->num_of_segments; i++)
+		for (j = 0; j < dev_info->num_of_segments; j++)
+			if (sort_list[j].start > sort_list[i].start) {
+				memcpy(&temp, &sort_list[i],
+					sizeof(struct segment_info));
+				memcpy(&sort_list[i], &sort_list[j],
+					sizeof(struct segment_info));
+				memcpy(&sort_list[j], &temp,
+					sizeof(struct segment_info));
+			}
+
+	/* check continuity */
+	for (i = 0; i < dev_info->num_of_segments - 1; i++) {
+		if ((sort_list[i].end + 1) != sort_list[i+1].start) {
+			pr_err("Adjacent DCSSs %s and %s are not "
+			       "contiguous\n", sort_list[i].segment_name,
+			       sort_list[i+1].segment_name);
+			rc = -EINVAL;
+			goto out;
+		}
+		/* EN and EW are allowed in a block device */
+		if (sort_list[i].segment_type != sort_list[i+1].segment_type) {
+			if (!(sort_list[i].segment_type & SEGMENT_EXCLUSIVE) ||
+				(sort_list[i].segment_type == SEG_TYPE_ER) ||
+				!(sort_list[i+1].segment_type &
+				SEGMENT_EXCLUSIVE) ||
+				(sort_list[i+1].segment_type == SEG_TYPE_ER)) {
+				pr_err("DCSS %s and DCSS %s have "
+				       "incompatible types\n",
+				       sort_list[i].segment_name,
+				       sort_list[i+1].segment_name);
+				rc = -EINVAL;
+				goto out;
+			}
+		}
+	}
+	rc = 0;
+out:
+	kfree(sort_list);
+	return rc;
+}
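
Concretely, the check accepts two segments whose sorted bounds satisfy
end + 1 == next start and rejects any gap or overlap. The rule in isolation,
with hypothetical bounds:

	#include <stdio.h>

	struct range { unsigned long start, end; };

	int main(void)
	{
		struct range a = { 0x20000000UL, 0x2fffffffUL };
		struct range b = { 0x30000000UL, 0x3fffffffUL };

		if (a.end + 1 == b.start)
			printf("contiguous\n");		/* this branch runs */
		else
			printf("gap or overlap\n");
		return 0;
	}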
+
+/*
+ * Load a segment
+ */
+static int
+dcssblk_load_segment(char *name, struct segment_info **seg_info)
+{
+	int rc;
+
+	/* already loaded? */
+	down_read(&dcssblk_devices_sem);
+	*seg_info = dcssblk_get_segment_by_name(name);
+	up_read(&dcssblk_devices_sem);
+	if (*seg_info != NULL)
+		return -EEXIST;
+
+	/* get a struct segment_info */
+	*seg_info = kzalloc(sizeof(struct segment_info), GFP_KERNEL);
+	if (*seg_info == NULL)
+		return -ENOMEM;
+
+	strcpy((*seg_info)->segment_name, name);
+
+	/* load the segment */
+	rc = segment_load(name, SEGMENT_SHARED,
+			&(*seg_info)->start, &(*seg_info)->end);
+	if (rc < 0) {
+		segment_warning(rc, (*seg_info)->segment_name);
+		kfree(*seg_info);
+	} else {
+		INIT_LIST_HEAD(&(*seg_info)->lh);
+		(*seg_info)->segment_type = rc;
+	}
+	return rc;
+}
+
+/*
+ * device attribute for switching shared/nonshared (exclusive)
+ * operation (show + store)
+ */
+static ssize_t
+dcssblk_shared_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	struct dcssblk_dev_info *dev_info;
+
+	dev_info = container_of(dev, struct dcssblk_dev_info, dev);
+	return sprintf(buf, dev_info->is_shared ? "1\n" : "0\n");
+}
+
+static ssize_t
+dcssblk_shared_store(struct device *dev, struct device_attribute *attr, const char *inbuf, size_t count)
+{
+	struct dcssblk_dev_info *dev_info;
+	struct segment_info *entry, *temp;
+	int rc;
+
+	if ((count > 1) && (inbuf[1] != '\n') && (inbuf[1] != '\0'))
+		return -EINVAL;
+	down_write(&dcssblk_devices_sem);
+	dev_info = container_of(dev, struct dcssblk_dev_info, dev);
+	if (atomic_read(&dev_info->use_count)) {
+		rc = -EBUSY;
+		goto out;
+	}
+	if (inbuf[0] == '1') {
+		/* reload segments in shared mode */
+		list_for_each_entry(entry, &dev_info->seg_list, lh) {
+			rc = segment_modify_shared(entry->segment_name,
+						SEGMENT_SHARED);
+			if (rc < 0) {
+				BUG_ON(rc == -EINVAL);
+				if (rc != -EAGAIN)
+					goto removeseg;
+			}
+		}
+		dev_info->is_shared = 1;
+		switch (dev_info->segment_type) {
+		case SEG_TYPE_SR:
+		case SEG_TYPE_ER:
+		case SEG_TYPE_SC:
+			set_disk_ro(dev_info->gd, 1);
+		}
+	} else if (inbuf[0] == '0') {
+		/* reload segments in exclusive mode */
+		if (dev_info->segment_type == SEG_TYPE_SC) {
+			pr_err("DCSS %s is of type SC and cannot be "
+			       "loaded as exclusive-writable\n",
+			       dev_info->segment_name);
+			rc = -EINVAL;
+			goto out;
+		}
+		list_for_each_entry(entry, &dev_info->seg_list, lh) {
+			rc = segment_modify_shared(entry->segment_name,
+						   SEGMENT_EXCLUSIVE);
+			if (rc < 0) {
+				BUG_ON(rc == -EINVAL);
+				if (rc != -EAGAIN)
+					goto removeseg;
+			}
+		}
+		dev_info->is_shared = 0;
+		set_disk_ro(dev_info->gd, 0);
+	} else {
+		rc = -EINVAL;
+		goto out;
+	}
+	rc = count;
+	goto out;
+
+removeseg:
+	pr_err("DCSS device %s is removed after a failed access mode "
+	       "change\n", dev_info->segment_name);
+	temp = entry;
+	list_for_each_entry(entry, &dev_info->seg_list, lh) {
+		if (entry != temp)
+			segment_unload(entry->segment_name);
+	}
+	list_del(&dev_info->lh);
+
+	kill_dax(dev_info->dax_dev);
+	put_dax(dev_info->dax_dev);
+	del_gendisk(dev_info->gd);
+	blk_cleanup_queue(dev_info->dcssblk_queue);
+	dev_info->gd->queue = NULL;
+	put_disk(dev_info->gd);
+	up_write(&dcssblk_devices_sem);
+
+	if (device_remove_file_self(dev, attr)) {
+		device_unregister(dev);
+		put_device(dev);
+	}
+	return rc;
+out:
+	up_write(&dcssblk_devices_sem);
+	return rc;
+}
+static DEVICE_ATTR(shared, S_IWUSR | S_IRUSR, dcssblk_shared_show,
+		   dcssblk_shared_store);
+
+/*
+ * device attribute for save operation on current copy
+ * of the segment. If the segment is busy, saving will
+ * become pending until it gets released, which can be
+ * undone by writing '0' to this entry.
+ * (show + store)
+ */
+static ssize_t
+dcssblk_save_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	struct dcssblk_dev_info *dev_info;
+
+	dev_info = container_of(dev, struct dcssblk_dev_info, dev);
+	return sprintf(buf, dev_info->save_pending ? "1\n" : "0\n");
+}
+
+static ssize_t
+dcssblk_save_store(struct device *dev, struct device_attribute *attr, const char *inbuf, size_t count)
+{
+	struct dcssblk_dev_info *dev_info;
+	struct segment_info *entry;
+
+	if ((count > 1) && (inbuf[1] != '\n') && (inbuf[1] != '\0'))
+		return -EINVAL;
+	dev_info = container_of(dev, struct dcssblk_dev_info, dev);
+
+	down_write(&dcssblk_devices_sem);
+	if (inbuf[0] == '1') {
+		if (atomic_read(&dev_info->use_count) == 0) {
+			/* device is idle => we save immediately */
+			pr_info("All DCSSs that map to device %s are "
+				"saved\n", dev_info->segment_name);
+			list_for_each_entry(entry, &dev_info->seg_list, lh) {
+				if (entry->segment_type == SEG_TYPE_EN ||
+				    entry->segment_type == SEG_TYPE_SN)
+					pr_warn("DCSS %s is of type SN or EN"
+						" and cannot be saved\n",
+						entry->segment_name);
+				else
+					segment_save(entry->segment_name);
+			}
+		} else {
+			/*
+			 * device is busy => we save it when it becomes
+			 * idle in dcssblk_release
+			 */
+			pr_info("Device %s is in use, its DCSSs will be "
+				"saved when it becomes idle\n",
+				dev_info->segment_name);
+			dev_info->save_pending = 1;
+		}
+	} else if (inbuf[0] == '0') {
+		if (dev_info->save_pending) {
+			/*
+			 * device is busy & the user wants to undo the
+			 * pending save request
+			 */
+			dev_info->save_pending = 0;
+			pr_info("A pending save request for device %s "
+				"has been canceled\n",
+				dev_info->segment_name);
+		}
+	} else {
+		up_write(&dcssblk_devices_sem);
+		return -EINVAL;
+	}
+	up_write(&dcssblk_devices_sem);
+	return count;
+}
+static DEVICE_ATTR(save, S_IWUSR | S_IRUSR, dcssblk_save_show,
+		   dcssblk_save_store);
+
+/*
+ * device attribute for showing all segments in a device
+ */
+static ssize_t
+dcssblk_seglist_show(struct device *dev, struct device_attribute *attr,
+		char *buf)
+{
+	int i;
+
+	struct dcssblk_dev_info *dev_info;
+	struct segment_info *entry;
+
+	down_read(&dcssblk_devices_sem);
+	dev_info = container_of(dev, struct dcssblk_dev_info, dev);
+	i = 0;
+	buf[0] = '\0';
+	list_for_each_entry(entry, &dev_info->seg_list, lh) {
+		strcpy(&buf[i], entry->segment_name);
+		i += strlen(entry->segment_name);
+		buf[i] = '\n';
+		i++;
+	}
+	up_read(&dcssblk_devices_sem);
+	return i;
+}
+static DEVICE_ATTR(seglist, S_IRUSR, dcssblk_seglist_show, NULL);
+
+static struct attribute *dcssblk_dev_attrs[] = {
+	&dev_attr_shared.attr,
+	&dev_attr_save.attr,
+	&dev_attr_seglist.attr,
+	NULL,
+};
+static struct attribute_group dcssblk_dev_attr_group = {
+	.attrs = dcssblk_dev_attrs,
+};
+static const struct attribute_group *dcssblk_dev_attr_groups[] = {
+	&dcssblk_dev_attr_group,
+	NULL,
+};
+
+/*
+ * device attribute for adding devices
+ */
+static ssize_t
+dcssblk_add_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
+{
+	int rc, i, j, num_of_segments;
+	struct dcssblk_dev_info *dev_info;
+	struct segment_info *seg_info, *temp;
+	char *local_buf;
+	unsigned long seg_byte_size;
+
+	dev_info = NULL;
+	seg_info = NULL;
+	if (dev != dcssblk_root_dev) {
+		rc = -EINVAL;
+		goto out_nobuf;
+	}
+	if ((count < 1) || (buf[0] == '\0') || (buf[0] == '\n')) {
+		rc = -ENAMETOOLONG;
+		goto out_nobuf;
+	}
+
+	local_buf = kmalloc(count + 1, GFP_KERNEL);
+	if (local_buf == NULL) {
+		rc = -ENOMEM;
+		goto out_nobuf;
+	}
+
+	/*
+	 * parse input
+	 */
+	num_of_segments = 0;
+	for (i = 0; (i < count && (buf[i] != '\0') && (buf[i] != '\n')); i++) {
+		for (j = i; j < count &&
+			(buf[j] != ':') &&
+			(buf[j] != '\0') &&
+			(buf[j] != '\n'); j++) {
+			local_buf[j-i] = toupper(buf[j]);
+		}
+		local_buf[j-i] = '\0';
+		if (((j - i) == 0) || ((j - i) > 8)) {
+			rc = -ENAMETOOLONG;
+			goto seg_list_del;
+		}
+
+		rc = dcssblk_load_segment(local_buf, &seg_info);
+		if (rc < 0)
+			goto seg_list_del;
+		/*
+		 * get a struct dcssblk_dev_info
+		 */
+		if (num_of_segments == 0) {
+			dev_info = kzalloc(sizeof(struct dcssblk_dev_info),
+					GFP_KERNEL);
+			if (dev_info == NULL) {
+				rc = -ENOMEM;
+				goto out;
+			}
+			strcpy(dev_info->segment_name, local_buf);
+			dev_info->segment_type = seg_info->segment_type;
+			INIT_LIST_HEAD(&dev_info->seg_list);
+		}
+		list_add_tail(&seg_info->lh, &dev_info->seg_list);
+		num_of_segments++;
+		i = j;
+
+		if ((buf[j] == '\0') || (buf[j] == '\n'))
+			break;
+	}
+
+	/* no trailing colon at the end of the input */
+	if ((i > 0) && (buf[i-1] == ':')) {
+		rc = -ENAMETOOLONG;
+		goto seg_list_del;
+	}
+	strlcpy(local_buf, buf, i + 1);
+	dev_info->num_of_segments = num_of_segments;
+	rc = dcssblk_is_continuous(dev_info);
+	if (rc < 0)
+		goto seg_list_del;
+
+	dev_info->start = dcssblk_find_lowest_addr(dev_info);
+	dev_info->end = dcssblk_find_highest_addr(dev_info);
+
+	dev_set_name(&dev_info->dev, "%s", dev_info->segment_name);
+	dev_info->dev.release = dcssblk_release_segment;
+	dev_info->dev.groups = dcssblk_dev_attr_groups;
+	INIT_LIST_HEAD(&dev_info->lh);
+	dev_info->gd = alloc_disk(DCSSBLK_MINORS_PER_DISK);
+	if (dev_info->gd == NULL) {
+		rc = -ENOMEM;
+		goto seg_list_del;
+	}
+	dev_info->gd->major = dcssblk_major;
+	dev_info->gd->fops = &dcssblk_devops;
+	dev_info->dcssblk_queue = blk_alloc_queue(GFP_KERNEL);
+	dev_info->gd->queue = dev_info->dcssblk_queue;
+	dev_info->gd->private_data = dev_info;
+	blk_queue_make_request(dev_info->dcssblk_queue, dcssblk_make_request);
+	blk_queue_logical_block_size(dev_info->dcssblk_queue, 4096);
+	blk_queue_flag_set(QUEUE_FLAG_DAX, dev_info->dcssblk_queue);
+
+	seg_byte_size = (dev_info->end - dev_info->start + 1);
+	set_capacity(dev_info->gd, seg_byte_size >> 9); /* size in sectors */
+	pr_info("Loaded %s with total size %lu bytes and capacity %lu "
+		"sectors\n", local_buf, seg_byte_size, seg_byte_size >> 9);
+
+	dev_info->save_pending = 0;
+	dev_info->is_shared = 1;
+	dev_info->dev.parent = dcssblk_root_dev;
+
+	/*
+	 * get minor, add to list
+	 */
+	down_write(&dcssblk_devices_sem);
+	if (dcssblk_get_segment_by_name(local_buf)) {
+		rc = -EEXIST;
+		goto release_gd;
+	}
+	rc = dcssblk_assign_free_minor(dev_info);
+	if (rc)
+		goto release_gd;
+	sprintf(dev_info->gd->disk_name, "dcssblk%d",
+		dev_info->gd->first_minor);
+	list_add_tail(&dev_info->lh, &dcssblk_devices);
+
+	if (!try_module_get(THIS_MODULE)) {
+		rc = -ENODEV;
+		goto dev_list_del;
+	}
+	/*
+	 * register the device
+	 */
+	rc = device_register(&dev_info->dev);
+	if (rc)
+		goto put_dev;
+
+	dev_info->dax_dev = alloc_dax(dev_info, dev_info->gd->disk_name,
+			&dcssblk_dax_ops);
+	if (!dev_info->dax_dev) {
+		rc = -ENOMEM;
+		goto put_dev;
+	}
+
+	get_device(&dev_info->dev);
+	device_add_disk(&dev_info->dev, dev_info->gd);
+
+	switch (dev_info->segment_type) {
+	case SEG_TYPE_SR:
+	case SEG_TYPE_ER:
+	case SEG_TYPE_SC:
+		set_disk_ro(dev_info->gd, 1);
+		break;
+	default:
+		set_disk_ro(dev_info->gd, 0);
+		break;
+	}
+	up_write(&dcssblk_devices_sem);
+	rc = count;
+	goto out;
+
+put_dev:
+	list_del(&dev_info->lh);
+	blk_cleanup_queue(dev_info->dcssblk_queue);
+	dev_info->gd->queue = NULL;
+	put_disk(dev_info->gd);
+	list_for_each_entry(seg_info, &dev_info->seg_list, lh) {
+		segment_unload(seg_info->segment_name);
+	}
+	put_device(&dev_info->dev);
+	up_write(&dcssblk_devices_sem);
+	goto out;
+dev_list_del:
+	list_del(&dev_info->lh);
+release_gd:
+	blk_cleanup_queue(dev_info->dcssblk_queue);
+	dev_info->gd->queue = NULL;
+	put_disk(dev_info->gd);
+	up_write(&dcssblk_devices_sem);
+seg_list_del:
+	if (dev_info == NULL)
+		goto out;
+	list_for_each_entry_safe(seg_info, temp, &dev_info->seg_list, lh) {
+		list_del(&seg_info->lh);
+		segment_unload(seg_info->segment_name);
+		kfree(seg_info);
+	}
+	kfree(dev_info);
+out:
+	kfree(local_buf);
+out_nobuf:
+	return rc;
+}
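
With the root device registered as "dcssblk" (see dcssblk_init below), the
add/remove attributes normally surface as /sys/devices/dcssblk/{add,remove};
the exact path is an assumption here, not something this file defines. A
hedged usage sketch, with MYDCSS1 and MYDCSS2 as placeholder segment names:

	#include <stdio.h>

	/* Write one line to a dcssblk control attribute; the sysfs
	 * location is assumed, not taken from this driver. */
	static int dcssblk_ctl(const char *attr, const char *arg)
	{
		char path[64];
		FILE *f;

		snprintf(path, sizeof(path), "/sys/devices/dcssblk/%s", attr);
		f = fopen(path, "w");
		if (!f)
			return -1;
		fprintf(f, "%s\n", arg);
		return fclose(f);
	}

	int main(void)
	{
		/* names are uppercased by the driver, at most 8 chars each */
		dcssblk_ctl("add", "MYDCSS1:MYDCSS2");
		/* a device is addressed by the name of its first segment */
		dcssblk_ctl("remove", "MYDCSS1");
		return 0;
	}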
+
+/*
+ * device attribute for removing devices
+ */
+static ssize_t
+dcssblk_remove_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct dcssblk_dev_info *dev_info;
+	struct segment_info *entry;
+	int rc, i;
+	char *local_buf;
+
+	if (dev != dcssblk_root_dev) {
+		return -EINVAL;
+	}
+	local_buf = kmalloc(count + 1, GFP_KERNEL);
+	if (local_buf == NULL) {
+		return -ENOMEM;
+	}
+	/*
+	 * parse input
+	 */
+	for (i = 0; i < count && buf[i] != '\0' && buf[i] != '\n'; i++) {
+		local_buf[i] = toupper(buf[i]);
+	}
+	local_buf[i] = '\0';
+	if ((i == 0) || (i > 8)) {
+		rc = -ENAMETOOLONG;
+		goto out_buf;
+	}
+
+	down_write(&dcssblk_devices_sem);
+	dev_info = dcssblk_get_device_by_name(local_buf);
+	if (dev_info == NULL) {
+		up_write(&dcssblk_devices_sem);
+		pr_warn("Device %s cannot be removed because it is not a known device\n",
+			local_buf);
+		rc = -ENODEV;
+		goto out_buf;
+	}
+	if (atomic_read(&dev_info->use_count) != 0) {
+		up_write(&dcssblk_devices_sem);
+		pr_warn("Device %s cannot be removed while it is in use\n",
+			local_buf);
+		rc = -EBUSY;
+		goto out_buf;
+	}
+
+	list_del(&dev_info->lh);
+	kill_dax(dev_info->dax_dev);
+	put_dax(dev_info->dax_dev);
+	del_gendisk(dev_info->gd);
+	blk_cleanup_queue(dev_info->dcssblk_queue);
+	dev_info->gd->queue = NULL;
+	put_disk(dev_info->gd);
+
+	/* unload all related segments */
+	list_for_each_entry(entry, &dev_info->seg_list, lh)
+		segment_unload(entry->segment_name);
+
+	up_write(&dcssblk_devices_sem);
+
+	device_unregister(&dev_info->dev);
+	put_device(&dev_info->dev);
+
+	rc = count;
+out_buf:
+	kfree(local_buf);
+	return rc;
+}
+
+static int
+dcssblk_open(struct block_device *bdev, fmode_t mode)
+{
+	struct dcssblk_dev_info *dev_info;
+	int rc;
+
+	dev_info = bdev->bd_disk->private_data;
+	if (dev_info == NULL) {
+		rc = -ENODEV;
+		goto out;
+	}
+	atomic_inc(&dev_info->use_count);
+	bdev->bd_block_size = 4096;
+	rc = 0;
+out:
+	return rc;
+}
+
+static void
+dcssblk_release(struct gendisk *disk, fmode_t mode)
+{
+	struct dcssblk_dev_info *dev_info = disk->private_data;
+	struct segment_info *entry;
+
+	if (!dev_info) {
+		WARN_ON(1);
+		return;
+	}
+	down_write(&dcssblk_devices_sem);
+	if (atomic_dec_and_test(&dev_info->use_count)
+	    && (dev_info->save_pending)) {
+		pr_info("Device %s has become idle and is being saved "
+			"now\n", dev_info->segment_name);
+		list_for_each_entry(entry, &dev_info->seg_list, lh) {
+			if (entry->segment_type == SEG_TYPE_EN ||
+			    entry->segment_type == SEG_TYPE_SN)
+				pr_warn("DCSS %s is of type SN or EN and cannot"
+					" be saved\n", entry->segment_name);
+			else
+				segment_save(entry->segment_name);
+		}
+		dev_info->save_pending = 0;
+	}
+	up_write(&dcssblk_devices_sem);
+}
+
+static blk_qc_t
+dcssblk_make_request(struct request_queue *q, struct bio *bio)
+{
+	struct dcssblk_dev_info *dev_info;
+	struct bio_vec bvec;
+	struct bvec_iter iter;
+	unsigned long index;
+	unsigned long page_addr;
+	unsigned long source_addr;
+	unsigned long bytes_done;
+
+	blk_queue_split(q, &bio);
+
+	bytes_done = 0;
+	dev_info = bio->bi_disk->private_data;
+	if (dev_info == NULL)
+		goto fail;
+	if ((bio->bi_iter.bi_sector & 7) != 0 ||
+	    (bio->bi_iter.bi_size & 4095) != 0)
+		/* Request is not page-aligned. */
+		goto fail;
+	if (bio_end_sector(bio) > get_capacity(bio->bi_disk)) {
+		/* Request beyond end of DCSS segment. */
+		goto fail;
+	}
+	/* verify data transfer direction */
+	if (dev_info->is_shared) {
+		switch (dev_info->segment_type) {
+		case SEG_TYPE_SR:
+		case SEG_TYPE_ER:
+		case SEG_TYPE_SC:
+			/* cannot write to these segments */
+			if (bio_data_dir(bio) == WRITE) {
+				pr_warn("Writing to %s failed because it is a read-only device\n",
+					dev_name(&dev_info->dev));
+				goto fail;
+			}
+		}
+	}
+
+	index = (bio->bi_iter.bi_sector >> 3);
+	bio_for_each_segment(bvec, bio, iter) {
+		page_addr = (unsigned long)
+			page_address(bvec.bv_page) + bvec.bv_offset;
+		source_addr = dev_info->start + (index<<12) + bytes_done;
+		if (unlikely((page_addr & 4095) != 0 ||
+			     (bvec.bv_len & 4095) != 0))
+			/* more paranoia */
+			goto fail;
+		if (bio_data_dir(bio) == READ) {
+			memcpy((void*)page_addr, (void*)source_addr,
+				bvec.bv_len);
+		} else {
+			memcpy((void*)source_addr, (void*)page_addr,
+				bvec.bv_len);
+		}
+		bytes_done += bvec.bv_len;
+	}
+	bio_endio(bio);
+	return BLK_QC_T_NONE;
+fail:
+	bio_io_error(bio);
+	return BLK_QC_T_NONE;
+}
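
The address arithmetic above follows from 512-byte sectors and 4 KiB pages:
bi_sector >> 3 is the page index and index << 12 the byte offset into the
segment. A standalone worked example with a hypothetical segment start:

	#include <stdio.h>

	int main(void)
	{
		unsigned long start = 0x20000000UL;	/* hypothetical segment start */
		unsigned long sector = 16;		/* 512-byte units, page-aligned */
		unsigned long index = sector >> 3;	/* 4 KiB page index: 2 */

		/* prints "source address: 0x20002000" */
		printf("source address: 0x%lx\n", start + (index << 12));
		return 0;
	}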
+
+static long
+__dcssblk_direct_access(struct dcssblk_dev_info *dev_info, pgoff_t pgoff,
+		long nr_pages, void **kaddr, pfn_t *pfn)
+{
+	resource_size_t offset = pgoff * PAGE_SIZE;
+	unsigned long dev_sz;
+
+	dev_sz = dev_info->end - dev_info->start + 1;
+	if (kaddr)
+		*kaddr = (void *) dev_info->start + offset;
+	if (pfn)
+		*pfn = __pfn_to_pfn_t(PFN_DOWN(dev_info->start + offset),
+				PFN_DEV|PFN_SPECIAL);
+
+	return (dev_sz - offset) / PAGE_SIZE;
+}
+
+static long
+dcssblk_dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff,
+		long nr_pages, void **kaddr, pfn_t *pfn)
+{
+	struct dcssblk_dev_info *dev_info = dax_get_private(dax_dev);
+
+	return __dcssblk_direct_access(dev_info, pgoff, nr_pages, kaddr, pfn);
+}
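
The value returned by __dcssblk_direct_access() is the number of pages still
addressable from the requested offset, (dev_sz - offset) / PAGE_SIZE,
independent of nr_pages. With hypothetical numbers:

	#include <stdio.h>

	int main(void)
	{
		unsigned long dev_sz = 16UL << 20;	/* 16 MiB segment */
		unsigned long pgoff = 3;		/* page offset into it */
		unsigned long offset = pgoff * 4096;

		/* 4096 total pages minus the 3 already skipped: 4093 */
		printf("%lu pages available\n", (dev_sz - offset) / 4096);
		return 0;
	}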
+
+static void
+dcssblk_check_params(void)
+{
+	int rc, i, j, k;
+	char buf[DCSSBLK_PARM_LEN + 1];
+	struct dcssblk_dev_info *dev_info;
+
+	for (i = 0; (i < DCSSBLK_PARM_LEN) && (dcssblk_segments[i] != '\0');
+	     i++) {
+		for (j = i; (j < DCSSBLK_PARM_LEN) &&
+			    (dcssblk_segments[j] != ',')  &&
+			    (dcssblk_segments[j] != '\0') &&
+			    (dcssblk_segments[j] != '('); j++)
+		{
+			buf[j-i] = dcssblk_segments[j];
+		}
+		buf[j-i] = '\0';
+		rc = dcssblk_add_store(dcssblk_root_dev, NULL, buf, j-i);
+		if ((rc >= 0) && (dcssblk_segments[j] == '(')) {
+			for (k = 0; (buf[k] != ':') && (buf[k] != '\0'); k++)
+				buf[k] = toupper(buf[k]);
+			buf[k] = '\0';
+			if (!strncmp(&dcssblk_segments[j], "(local)", 7)) {
+				down_read(&dcssblk_devices_sem);
+				dev_info = dcssblk_get_device_by_name(buf);
+				up_read(&dcssblk_devices_sem);
+				if (dev_info)
+					dcssblk_shared_store(&dev_info->dev,
+							     NULL, "0\n", 2);
+			}
+		}
+		while ((dcssblk_segments[j] != ',') &&
+		       (dcssblk_segments[j] != '\0'))
+		{
+			j++;
+		}
+		if (dcssblk_segments[j] == '\0')
+			break;
+		i = j;
+	}
+}
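
dcssblk_check_params() tokenizes the segments= parameter: a token ends at ',', '(' or NUL, so colon-joined names stay together as one set, and a trailing "(local)" makes the driver write "0" to that device's shared attribute. A hypothetical standalone tokenizer using the same delimiters (not the kernel parser itself):

#include <stdio.h>
#include <string.h>

int main(void)
{
	const char *p = "mydcss1,mydcss2:mydcss3,mydcss4(local)";

	while (*p) {
		size_t len = strcspn(p, ",(");	/* token ends at ',' or '(' */
		printf("set: %.*s", (int)len, p);
		p += len;
		if (!strncmp(p, "(local)", 7)) {
			printf(" [exclusive-writable]");
			p += 7;
		}
		p += strcspn(p, ",");	/* skip anything else up to ',' */
		if (*p == ',')
			p++;
		printf("\n");
	}
	return 0;
}
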
+
+/*
+ * Suspend / Resume
+ */
+static int dcssblk_freeze(struct device *dev)
+{
+	struct dcssblk_dev_info *dev_info;
+	int rc = 0;
+
+	list_for_each_entry(dev_info, &dcssblk_devices, lh) {
+		switch (dev_info->segment_type) {
+			case SEG_TYPE_SR:
+			case SEG_TYPE_ER:
+			case SEG_TYPE_SC:
+				if (!dev_info->is_shared)
+					rc = -EINVAL;
+				break;
+			default:
+				rc = -EINVAL;
+				break;
+		}
+		if (rc)
+			break;
+	}
+	if (rc)
+		pr_err("Suspending the system failed because DCSS device %s "
+		       "is writable\n",
+		       dev_info->segment_name);
+	return rc;
+}
+
+static int dcssblk_restore(struct device *dev)
+{
+	struct dcssblk_dev_info *dev_info;
+	struct segment_info *entry;
+	unsigned long start, end;
+	int rc = 0;
+
+	list_for_each_entry(dev_info, &dcssblk_devices, lh) {
+		list_for_each_entry(entry, &dev_info->seg_list, lh) {
+			segment_unload(entry->segment_name);
+			rc = segment_load(entry->segment_name, SEGMENT_SHARED,
+					  &start, &end);
+			if (rc < 0) {
+				/* TODO: in_use check? */
+				segment_warning(rc, entry->segment_name);
+				goto out_panic;
+			}
+			if (start != entry->start || end != entry->end) {
+				pr_err("The address range of DCSS %s changed "
+				       "while the system was suspended\n",
+				       entry->segment_name);
+				goto out_panic;
+			}
+		}
+	}
+	return 0;
+out_panic:
+	panic("fatal dcssblk resume error\n");
+}
+
+static int dcssblk_thaw(struct device *dev)
+{
+	return 0;
+}
+
+static const struct dev_pm_ops dcssblk_pm_ops = {
+	.freeze		= dcssblk_freeze,
+	.thaw		= dcssblk_thaw,
+	.restore	= dcssblk_restore,
+};
+
+static struct platform_driver dcssblk_pdrv = {
+	.driver = {
+		.name	= "dcssblk",
+		.pm	= &dcssblk_pm_ops,
+	},
+};
+
+static struct platform_device *dcssblk_pdev;
+
+
+/*
+ * The init/exit functions.
+ */
+static void __exit
+dcssblk_exit(void)
+{
+	platform_device_unregister(dcssblk_pdev);
+	platform_driver_unregister(&dcssblk_pdrv);
+	root_device_unregister(dcssblk_root_dev);
+	unregister_blkdev(dcssblk_major, DCSSBLK_NAME);
+}
+
+static int __init
+dcssblk_init(void)
+{
+	int rc;
+
+	rc = platform_driver_register(&dcssblk_pdrv);
+	if (rc)
+		return rc;
+
+	dcssblk_pdev = platform_device_register_simple("dcssblk", -1, NULL,
+							0);
+	if (IS_ERR(dcssblk_pdev)) {
+		rc = PTR_ERR(dcssblk_pdev);
+		goto out_pdrv;
+	}
+
+	dcssblk_root_dev = root_device_register("dcssblk");
+	if (IS_ERR(dcssblk_root_dev)) {
+		rc = PTR_ERR(dcssblk_root_dev);
+		goto out_pdev;
+	}
+	rc = device_create_file(dcssblk_root_dev, &dev_attr_add);
+	if (rc)
+		goto out_root;
+	rc = device_create_file(dcssblk_root_dev, &dev_attr_remove);
+	if (rc)
+		goto out_root;
+	rc = register_blkdev(0, DCSSBLK_NAME);
+	if (rc < 0)
+		goto out_root;
+	dcssblk_major = rc;
+	init_rwsem(&dcssblk_devices_sem);
+
+	dcssblk_check_params();
+	return 0;
+
+out_root:
+	root_device_unregister(dcssblk_root_dev);
+out_pdev:
+	platform_device_unregister(dcssblk_pdev);
+out_pdrv:
+	platform_driver_unregister(&dcssblk_pdrv);
+	return rc;
+}
+
+module_init(dcssblk_init);
+module_exit(dcssblk_exit);
+
+module_param_string(segments, dcssblk_segments, DCSSBLK_PARM_LEN, 0444);
+MODULE_PARM_DESC(segments, "Name of DCSS segment(s) to be loaded, "
+		 "comma-separated list, names in each set separated "
+		 "by commas are separated by colons, each set contains "
+		 "names of contiguous segments and each name max. 8 chars.\n"
+		 "Adding \"(local)\" to the end of each set equals echoing 0 "
+		 "to /sys/devices/dcssblk/<device name>/shared after loading "
+		 "the contiguous segments - \n"
+		 "e.g. segments=\"mydcss1,mydcss2:mydcss3,mydcss4(local)\"");
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/s390/block/scm_blk.c b/drivers/s390/block/scm_blk.c
new file mode 100644
index 0000000..98f66b7
--- /dev/null
+++ b/drivers/s390/block/scm_blk.c
@@ -0,0 +1,591 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Block driver for s390 storage class memory.
+ *
+ * Copyright IBM Corp. 2012
+ * Author(s): Sebastian Ott <sebott@linux.vnet.ibm.com>
+ */
+
+#define KMSG_COMPONENT "scm_block"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+#include <linux/mempool.h>
+#include <linux/module.h>
+#include <linux/blkdev.h>
+#include <linux/blk-mq.h>
+#include <linux/genhd.h>
+#include <linux/slab.h>
+#include <linux/list.h>
+#include <asm/eadm.h>
+#include "scm_blk.h"
+
+debug_info_t *scm_debug;
+static int scm_major;
+static mempool_t *aidaw_pool;
+static DEFINE_SPINLOCK(list_lock);
+static LIST_HEAD(inactive_requests);
+static unsigned int nr_requests = 64;
+static unsigned int nr_requests_per_io = 8;
+static atomic_t nr_devices = ATOMIC_INIT(0);
+module_param(nr_requests, uint, S_IRUGO);
+MODULE_PARM_DESC(nr_requests, "Number of parallel requests.");
+
+module_param(nr_requests_per_io, uint, S_IRUGO);
+MODULE_PARM_DESC(nr_requests_per_io, "Number of requests per IO.");
+
+MODULE_DESCRIPTION("Block driver for s390 storage class memory.");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("scm:scmdev*");
+
+static void __scm_free_rq(struct scm_request *scmrq)
+{
+	struct aob_rq_header *aobrq = to_aobrq(scmrq);
+
+	free_page((unsigned long) scmrq->aob);
+	kfree(scmrq->request);
+	kfree(aobrq);
+}
+
+static void scm_free_rqs(void)
+{
+	struct list_head *iter, *safe;
+	struct scm_request *scmrq;
+
+	spin_lock_irq(&list_lock);
+	list_for_each_safe(iter, safe, &inactive_requests) {
+		scmrq = list_entry(iter, struct scm_request, list);
+		list_del(&scmrq->list);
+		__scm_free_rq(scmrq);
+	}
+	spin_unlock_irq(&list_lock);
+
+	mempool_destroy(aidaw_pool);
+}
+
+static int __scm_alloc_rq(void)
+{
+	struct aob_rq_header *aobrq;
+	struct scm_request *scmrq;
+
+	aobrq = kzalloc(sizeof(*aobrq) + sizeof(*scmrq), GFP_KERNEL);
+	if (!aobrq)
+		return -ENOMEM;
+
+	scmrq = (void *) aobrq->data;
+	scmrq->aob = (void *) get_zeroed_page(GFP_DMA);
+	if (!scmrq->aob)
+		goto free;
+
+	scmrq->request = kcalloc(nr_requests_per_io, sizeof(scmrq->request[0]),
+				 GFP_KERNEL);
+	if (!scmrq->request)
+		goto free;
+
+	INIT_LIST_HEAD(&scmrq->list);
+	spin_lock_irq(&list_lock);
+	list_add(&scmrq->list, &inactive_requests);
+	spin_unlock_irq(&list_lock);
+
+	return 0;
+free:
+	__scm_free_rq(scmrq);
+	return -ENOMEM;
+}
+
+static int scm_alloc_rqs(unsigned int nrqs)
+{
+	int ret = 0;
+
+	aidaw_pool = mempool_create_page_pool(max(nrqs/8, 1U), 0);
+	if (!aidaw_pool)
+		return -ENOMEM;
+
+	while (nrqs-- && !ret)
+		ret = __scm_alloc_rq();
+
+	return ret;
+}
+
+static struct scm_request *scm_request_fetch(void)
+{
+	struct scm_request *scmrq = NULL;
+
+	spin_lock_irq(&list_lock);
+	if (list_empty(&inactive_requests))
+		goto out;
+	scmrq = list_first_entry(&inactive_requests, struct scm_request, list);
+	list_del(&scmrq->list);
+out:
+	spin_unlock_irq(&list_lock);
+	return scmrq;
+}
+
+static void scm_request_done(struct scm_request *scmrq)
+{
+	unsigned long flags;
+	struct msb *msb;
+	u64 aidaw;
+	int i;
+
+	for (i = 0; i < nr_requests_per_io && scmrq->request[i]; i++) {
+		msb = &scmrq->aob->msb[i];
+		aidaw = msb->data_addr;
+
+		if ((msb->flags & MSB_FLAG_IDA) && aidaw &&
+		    IS_ALIGNED(aidaw, PAGE_SIZE))
+			mempool_free(virt_to_page(aidaw), aidaw_pool);
+	}
+
+	spin_lock_irqsave(&list_lock, flags);
+	list_add(&scmrq->list, &inactive_requests);
+	spin_unlock_irqrestore(&list_lock, flags);
+}
+
+static bool scm_permit_request(struct scm_blk_dev *bdev, struct request *req)
+{
+	return rq_data_dir(req) != WRITE || bdev->state != SCM_WR_PROHIBIT;
+}
+
+static inline struct aidaw *scm_aidaw_alloc(void)
+{
+	struct page *page = mempool_alloc(aidaw_pool, GFP_ATOMIC);
+
+	return page ? page_address(page) : NULL;
+}
+
+static inline unsigned long scm_aidaw_bytes(struct aidaw *aidaw)
+{
+	unsigned long _aidaw = (unsigned long) aidaw;
+	unsigned long bytes = ALIGN(_aidaw, PAGE_SIZE) - _aidaw;
+
+	return (bytes / sizeof(*aidaw)) * PAGE_SIZE;
+}
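
scm_aidaw_bytes() answers how many data bytes the aidaw entries remaining in the current page can still describe: each entry covers one 4 KiB block, so the distance to the end of the page divided by the entry size gives the entry count. A worked example, assuming sizeof(struct aidaw) is 16 bytes (an assumption for illustration; see asm/eadm.h for the real layout):

#include <stdio.h>

#define PAGE_SIZE	4096UL
#define AIDAW_SIZE	16UL	/* assumed sizeof(struct aidaw) */
#define ALIGN_UP(x, a)	(((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	unsigned long aidaw = 0x1000F80UL;	/* cursor inside a page */
	unsigned long to_page_end = ALIGN_UP(aidaw, PAGE_SIZE) - aidaw;
	unsigned long entries = to_page_end / AIDAW_SIZE;

	/* each remaining aidaw describes one 4 KiB block of data */
	printf("%lu entries -> %lu data bytes\n",
	       entries, entries * PAGE_SIZE);
	return 0;	/* 8 entries -> 32768 data bytes */
}
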
+
+struct aidaw *scm_aidaw_fetch(struct scm_request *scmrq, unsigned int bytes)
+{
+	struct aidaw *aidaw;
+
+	if (scm_aidaw_bytes(scmrq->next_aidaw) >= bytes)
+		return scmrq->next_aidaw;
+
+	aidaw = scm_aidaw_alloc();
+	if (aidaw)
+		memset(aidaw, 0, PAGE_SIZE);
+	return aidaw;
+}
+
+static int scm_request_prepare(struct scm_request *scmrq)
+{
+	struct scm_blk_dev *bdev = scmrq->bdev;
+	struct scm_device *scmdev = bdev->gendisk->private_data;
+	int pos = scmrq->aob->request.msb_count;
+	struct msb *msb = &scmrq->aob->msb[pos];
+	struct request *req = scmrq->request[pos];
+	struct req_iterator iter;
+	struct aidaw *aidaw;
+	struct bio_vec bv;
+
+	aidaw = scm_aidaw_fetch(scmrq, blk_rq_bytes(req));
+	if (!aidaw)
+		return -ENOMEM;
+
+	msb->bs = MSB_BS_4K;
+	scmrq->aob->request.msb_count++;
+	msb->scm_addr = scmdev->address + ((u64) blk_rq_pos(req) << 9);
+	msb->oc = (rq_data_dir(req) == READ) ? MSB_OC_READ : MSB_OC_WRITE;
+	msb->flags |= MSB_FLAG_IDA;
+	msb->data_addr = (u64) aidaw;
+
+	rq_for_each_segment(bv, req, iter) {
+		WARN_ON(bv.bv_offset);
+		msb->blk_count += bv.bv_len >> 12;
+		aidaw->data_addr = (u64) page_address(bv.bv_page);
+		aidaw++;
+	}
+
+	scmrq->next_aidaw = aidaw;
+	return 0;
+}
+
+static inline void scm_request_set(struct scm_request *scmrq,
+				   struct request *req)
+{
+	scmrq->request[scmrq->aob->request.msb_count] = req;
+}
+
+static inline void scm_request_init(struct scm_blk_dev *bdev,
+				    struct scm_request *scmrq)
+{
+	struct aob_rq_header *aobrq = to_aobrq(scmrq);
+	struct aob *aob = scmrq->aob;
+
+	memset(scmrq->request, 0,
+	       nr_requests_per_io * sizeof(scmrq->request[0]));
+	memset(aob, 0, sizeof(*aob));
+	aobrq->scmdev = bdev->scmdev;
+	aob->request.cmd_code = ARQB_CMD_MOVE;
+	aob->request.data = (u64) aobrq;
+	scmrq->bdev = bdev;
+	scmrq->retries = 4;
+	scmrq->error = BLK_STS_OK;
+	/* We don't use all msbs - place aidaws at the end of the aob page. */
+	scmrq->next_aidaw = (void *) &aob->msb[nr_requests_per_io];
+}
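
The next_aidaw initialization relies on a layout trick: only nr_requests_per_io msbs of the aob page are ever used, so the space behind them doubles as the first aidaw area before scm_aidaw_fetch() has to fall back to the mempool. A rough sketch of the assumed page layout (field sizes illustrative; see asm/eadm.h):

/*
 *   +--------------------------+  offset 0 of the 4 KiB aob page
 *   | struct aob (request hdr) |
 *   +--------------------------+
 *   | msb[0] .. msb[N-1]       |  N == nr_requests_per_io
 *   +--------------------------+  <- scmrq->next_aidaw starts here
 *   | inline aidaw entries     |  used until the page runs out, then
 *   | ...                      |  scm_aidaw_fetch() allocates from
 *   +--------------------------+  the aidaw mempool
 */
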
+
+static void scm_request_requeue(struct scm_request *scmrq)
+{
+	struct scm_blk_dev *bdev = scmrq->bdev;
+	int i;
+
+	for (i = 0; i < nr_requests_per_io && scmrq->request[i]; i++)
+		blk_mq_requeue_request(scmrq->request[i], false);
+
+	atomic_dec(&bdev->queued_reqs);
+	scm_request_done(scmrq);
+	blk_mq_kick_requeue_list(bdev->rq);
+}
+
+static void scm_request_finish(struct scm_request *scmrq)
+{
+	struct scm_blk_dev *bdev = scmrq->bdev;
+	blk_status_t *error;
+	int i;
+
+	for (i = 0; i < nr_requests_per_io && scmrq->request[i]; i++) {
+		error = blk_mq_rq_to_pdu(scmrq->request[i]);
+		*error = scmrq->error;
+		blk_mq_complete_request(scmrq->request[i]);
+	}
+
+	atomic_dec(&bdev->queued_reqs);
+	scm_request_done(scmrq);
+}
+
+static void scm_request_start(struct scm_request *scmrq)
+{
+	struct scm_blk_dev *bdev = scmrq->bdev;
+
+	atomic_inc(&bdev->queued_reqs);
+	if (eadm_start_aob(scmrq->aob)) {
+		SCM_LOG(5, "no subchannel");
+		scm_request_requeue(scmrq);
+	}
+}
+
+struct scm_queue {
+	struct scm_request *scmrq;
+	spinlock_t lock;
+};
+
+static blk_status_t scm_blk_request(struct blk_mq_hw_ctx *hctx,
+			   const struct blk_mq_queue_data *qd)
+{
+	struct scm_device *scmdev = hctx->queue->queuedata;
+	struct scm_blk_dev *bdev = dev_get_drvdata(&scmdev->dev);
+	struct scm_queue *sq = hctx->driver_data;
+	struct request *req = qd->rq;
+	struct scm_request *scmrq;
+
+	spin_lock(&sq->lock);
+	if (!scm_permit_request(bdev, req)) {
+		spin_unlock(&sq->lock);
+		return BLK_STS_RESOURCE;
+	}
+
+	scmrq = sq->scmrq;
+	if (!scmrq) {
+		scmrq = scm_request_fetch();
+		if (!scmrq) {
+			SCM_LOG(5, "no request");
+			spin_unlock(&sq->lock);
+			return BLK_STS_RESOURCE;
+		}
+		scm_request_init(bdev, scmrq);
+		sq->scmrq = scmrq;
+	}
+	scm_request_set(scmrq, req);
+
+	if (scm_request_prepare(scmrq)) {
+		SCM_LOG(5, "aidaw alloc failed");
+		scm_request_set(scmrq, NULL);
+
+		if (scmrq->aob->request.msb_count)
+			scm_request_start(scmrq);
+
+		sq->scmrq = NULL;
+		spin_unlock(&sq->lock);
+		return BLK_STS_RESOURCE;
+	}
+	blk_mq_start_request(req);
+
+	if (qd->last || scmrq->aob->request.msb_count == nr_requests_per_io) {
+		scm_request_start(scmrq);
+		sq->scmrq = NULL;
+	}
+	spin_unlock(&sq->lock);
+	return BLK_STS_OK;
+}
+
+static int scm_blk_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
+			     unsigned int idx)
+{
+	struct scm_queue *qd = kzalloc(sizeof(*qd), GFP_KERNEL);
+
+	if (!qd)
+		return -ENOMEM;
+
+	spin_lock_init(&qd->lock);
+	hctx->driver_data = qd;
+
+	return 0;
+}
+
+static void scm_blk_exit_hctx(struct blk_mq_hw_ctx *hctx, unsigned int idx)
+{
+	struct scm_queue *qd = hctx->driver_data;
+
+	WARN_ON(qd->scmrq);
+	kfree(hctx->driver_data);
+	hctx->driver_data = NULL;
+}
+
+static void __scmrq_log_error(struct scm_request *scmrq)
+{
+	struct aob *aob = scmrq->aob;
+
+	if (scmrq->error == BLK_STS_TIMEOUT)
+		SCM_LOG(1, "Request timeout");
+	else {
+		SCM_LOG(1, "Request error");
+		SCM_LOG_HEX(1, &aob->response, sizeof(aob->response));
+	}
+	if (scmrq->retries)
+		SCM_LOG(1, "Retry request");
+	else
+		pr_err("An I/O operation to SCM failed with rc=%d\n",
+		       scmrq->error);
+}
+
+static void scm_blk_handle_error(struct scm_request *scmrq)
+{
+	struct scm_blk_dev *bdev = scmrq->bdev;
+	unsigned long flags;
+
+	if (scmrq->error != BLK_STS_IOERR)
+		goto restart;
+
+	/* For -EIO the response block is valid. */
+	switch (scmrq->aob->response.eqc) {
+	case EQC_WR_PROHIBIT:
+		spin_lock_irqsave(&bdev->lock, flags);
+		if (bdev->state != SCM_WR_PROHIBIT)
+			pr_info("%lx: Write access to the SCM increment is suspended\n",
+				(unsigned long) bdev->scmdev->address);
+		bdev->state = SCM_WR_PROHIBIT;
+		spin_unlock_irqrestore(&bdev->lock, flags);
+		goto requeue;
+	default:
+		break;
+	}
+
+restart:
+	if (!eadm_start_aob(scmrq->aob))
+		return;
+
+requeue:
+	scm_request_requeue(scmrq);
+}
+
+void scm_blk_irq(struct scm_device *scmdev, void *data, blk_status_t error)
+{
+	struct scm_request *scmrq = data;
+
+	scmrq->error = error;
+	if (error) {
+		__scmrq_log_error(scmrq);
+		if (scmrq->retries-- > 0) {
+			scm_blk_handle_error(scmrq);
+			return;
+		}
+	}
+
+	scm_request_finish(scmrq);
+}
+
+static void scm_blk_request_done(struct request *req)
+{
+	blk_status_t *error = blk_mq_rq_to_pdu(req);
+
+	blk_mq_end_request(req, *error);
+}
+
+static const struct block_device_operations scm_blk_devops = {
+	.owner = THIS_MODULE,
+};
+
+static const struct blk_mq_ops scm_mq_ops = {
+	.queue_rq = scm_blk_request,
+	.complete = scm_blk_request_done,
+	.init_hctx = scm_blk_init_hctx,
+	.exit_hctx = scm_blk_exit_hctx,
+};
+
+int scm_blk_dev_setup(struct scm_blk_dev *bdev, struct scm_device *scmdev)
+{
+	unsigned int devindex, nr_max_blk;
+	struct request_queue *rq;
+	int len, ret;
+
+	devindex = atomic_inc_return(&nr_devices) - 1;
+	/* scma..scmz + scmaa..scmzz */
+	if (devindex > 701) {
+		ret = -ENODEV;
+		goto out;
+	}
+
+	bdev->scmdev = scmdev;
+	bdev->state = SCM_OPER;
+	spin_lock_init(&bdev->lock);
+	atomic_set(&bdev->queued_reqs, 0);
+
+	bdev->tag_set.ops = &scm_mq_ops;
+	bdev->tag_set.cmd_size = sizeof(blk_status_t);
+	bdev->tag_set.nr_hw_queues = nr_requests;
+	bdev->tag_set.queue_depth = nr_requests_per_io * nr_requests;
+	bdev->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
+	bdev->tag_set.numa_node = NUMA_NO_NODE;
+
+	ret = blk_mq_alloc_tag_set(&bdev->tag_set);
+	if (ret)
+		goto out;
+
+	rq = blk_mq_init_queue(&bdev->tag_set);
+	if (IS_ERR(rq)) {
+		ret = PTR_ERR(rq);
+		goto out_tag;
+	}
+	bdev->rq = rq;
+	nr_max_blk = min(scmdev->nr_max_block,
+			 (unsigned int) (PAGE_SIZE / sizeof(struct aidaw)));
+
+	blk_queue_logical_block_size(rq, 1 << 12);
+	blk_queue_max_hw_sectors(rq, nr_max_blk << 3); /* 8 * 512 = blk_size */
+	blk_queue_max_segments(rq, nr_max_blk);
+	blk_queue_flag_set(QUEUE_FLAG_NONROT, rq);
+	blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, rq);
+
+	bdev->gendisk = alloc_disk(SCM_NR_PARTS);
+	if (!bdev->gendisk) {
+		ret = -ENOMEM;
+		goto out_queue;
+	}
+	rq->queuedata = scmdev;
+	bdev->gendisk->private_data = scmdev;
+	bdev->gendisk->fops = &scm_blk_devops;
+	bdev->gendisk->queue = rq;
+	bdev->gendisk->major = scm_major;
+	bdev->gendisk->first_minor = devindex * SCM_NR_PARTS;
+
+	len = snprintf(bdev->gendisk->disk_name, DISK_NAME_LEN, "scm");
+	if (devindex > 25) {
+		len += snprintf(bdev->gendisk->disk_name + len,
+				DISK_NAME_LEN - len, "%c",
+				'a' + (devindex / 26) - 1);
+		devindex = devindex % 26;
+	}
+	snprintf(bdev->gendisk->disk_name + len, DISK_NAME_LEN - len, "%c",
+		 'a' + devindex);
+
+	/* 512 byte sectors */
+	set_capacity(bdev->gendisk, scmdev->size >> 9);
+	device_add_disk(&scmdev->dev, bdev->gendisk);
+	return 0;
+
+out_queue:
+	blk_cleanup_queue(rq);
+out_tag:
+	blk_mq_free_tag_set(&bdev->tag_set);
+out:
+	atomic_dec(&nr_devices);
+	return ret;
+}
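
The disk-name code above implements the scma..scmz, scmaa..scmzz scheme referenced by the devindex > 701 check. The same math as a standalone sketch (scm_name() is a hypothetical helper, not a kernel function):

#include <stdio.h>

static void scm_name(char *buf, size_t len, unsigned int idx)
{
	int n = snprintf(buf, len, "scm");

	if (idx > 25) {	/* two-letter suffix: scmaa..scmzz */
		n += snprintf(buf + n, len - n, "%c", 'a' + (idx / 26) - 1);
		idx %= 26;
	}
	snprintf(buf + n, len - n, "%c", 'a' + idx);
}

int main(void)
{
	char name[16];
	unsigned int samples[] = { 0, 25, 26, 701 };

	for (int i = 0; i < 4; i++) {
		scm_name(name, sizeof(name), samples[i]);
		printf("%u -> %s\n", samples[i], name);
	}
	return 0;	/* scma, scmz, scmaa, scmzz */
}
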
+
+void scm_blk_dev_cleanup(struct scm_blk_dev *bdev)
+{
+	del_gendisk(bdev->gendisk);
+	blk_cleanup_queue(bdev->gendisk->queue);
+	blk_mq_free_tag_set(&bdev->tag_set);
+	put_disk(bdev->gendisk);
+}
+
+void scm_blk_set_available(struct scm_blk_dev *bdev)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&bdev->lock, flags);
+	if (bdev->state == SCM_WR_PROHIBIT)
+		pr_info("%lx: Write access to the SCM increment is restored\n",
+			(unsigned long) bdev->scmdev->address);
+	bdev->state = SCM_OPER;
+	spin_unlock_irqrestore(&bdev->lock, flags);
+}
+
+static bool __init scm_blk_params_valid(void)
+{
+	if (!nr_requests_per_io || nr_requests_per_io > 64)
+		return false;
+
+	return true;
+}
+
+static int __init scm_blk_init(void)
+{
+	int ret = -EINVAL;
+
+	if (!scm_blk_params_valid())
+		goto out;
+
+	ret = register_blkdev(0, "scm");
+	if (ret < 0)
+		goto out;
+
+	scm_major = ret;
+	ret = scm_alloc_rqs(nr_requests);
+	if (ret)
+		goto out_free;
+
+	scm_debug = debug_register("scm_log", 16, 1, 16);
+	if (!scm_debug) {
+		ret = -ENOMEM;
+		goto out_free;
+	}
+
+	debug_register_view(scm_debug, &debug_hex_ascii_view);
+	debug_set_level(scm_debug, 2);
+
+	ret = scm_drv_init();
+	if (ret)
+		goto out_dbf;
+
+	return ret;
+
+out_dbf:
+	debug_unregister(scm_debug);
+out_free:
+	scm_free_rqs();
+	unregister_blkdev(scm_major, "scm");
+out:
+	return ret;
+}
+module_init(scm_blk_init);
+
+static void __exit scm_blk_cleanup(void)
+{
+	scm_drv_cleanup();
+	debug_unregister(scm_debug);
+	scm_free_rqs();
+	unregister_blkdev(scm_major, "scm");
+}
+module_exit(scm_blk_cleanup);
diff --git a/drivers/s390/block/scm_blk.h b/drivers/s390/block/scm_blk.h
new file mode 100644
index 0000000..a05a429
--- /dev/null
+++ b/drivers/s390/block/scm_blk.h
@@ -0,0 +1,77 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef SCM_BLK_H
+#define SCM_BLK_H
+
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+#include <linux/blkdev.h>
+#include <linux/blk-mq.h>
+#include <linux/genhd.h>
+#include <linux/list.h>
+
+#include <asm/debug.h>
+#include <asm/eadm.h>
+
+#define SCM_NR_PARTS 8
+#define SCM_QUEUE_DELAY 5
+
+struct scm_blk_dev {
+	struct request_queue *rq;
+	struct gendisk *gendisk;
+	struct blk_mq_tag_set tag_set;
+	struct scm_device *scmdev;
+	spinlock_t lock;
+	atomic_t queued_reqs;
+	enum {SCM_OPER, SCM_WR_PROHIBIT} state;
+	struct list_head finished_requests;
+};
+
+struct scm_request {
+	struct scm_blk_dev *bdev;
+	struct aidaw *next_aidaw;
+	struct request **request;
+	struct aob *aob;
+	struct list_head list;
+	u8 retries;
+	blk_status_t error;
+};
+
+#define to_aobrq(rq) container_of((void *) rq, struct aob_rq_header, data)
+
+int scm_blk_dev_setup(struct scm_blk_dev *, struct scm_device *);
+void scm_blk_dev_cleanup(struct scm_blk_dev *);
+void scm_blk_set_available(struct scm_blk_dev *);
+void scm_blk_irq(struct scm_device *, void *, blk_status_t);
+
+struct aidaw *scm_aidaw_fetch(struct scm_request *scmrq, unsigned int bytes);
+
+int scm_drv_init(void);
+void scm_drv_cleanup(void);
+
+extern debug_info_t *scm_debug;
+
+#define SCM_LOG(imp, txt) do {					\
+		debug_text_event(scm_debug, imp, txt);		\
+	} while (0)
+
+static inline void SCM_LOG_HEX(int level, void *data, int length)
+{
+	debug_event(scm_debug, level, data, length);
+}
+
+static inline void SCM_LOG_STATE(int level, struct scm_device *scmdev)
+{
+	struct {
+		u64 address;
+		u8 oper_state;
+		u8 rank;
+	} __packed data = {
+		.address = scmdev->address,
+		.oper_state = scmdev->attrs.oper_state,
+		.rank = scmdev->attrs.rank,
+	};
+
+	SCM_LOG_HEX(level, &data, sizeof(data));
+}
+
+#endif /* SCM_BLK_H */
diff --git a/drivers/s390/block/scm_drv.c b/drivers/s390/block/scm_drv.c
new file mode 100644
index 0000000..3134fd6
--- /dev/null
+++ b/drivers/s390/block/scm_drv.c
@@ -0,0 +1,93 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Device driver for s390 storage class memory.
+ *
+ * Copyright IBM Corp. 2012
+ * Author(s): Sebastian Ott <sebott@linux.vnet.ibm.com>
+ */
+
+#define KMSG_COMPONENT "scm_block"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <asm/eadm.h>
+#include "scm_blk.h"
+
+static void scm_notify(struct scm_device *scmdev, enum scm_event event)
+{
+	struct scm_blk_dev *bdev = dev_get_drvdata(&scmdev->dev);
+
+	switch (event) {
+	case SCM_CHANGE:
+		pr_info("%lx: The capabilities of the SCM increment changed\n",
+			(unsigned long) scmdev->address);
+		SCM_LOG(2, "State changed");
+		SCM_LOG_STATE(2, scmdev);
+		break;
+	case SCM_AVAIL:
+		SCM_LOG(2, "Increment available");
+		SCM_LOG_STATE(2, scmdev);
+		scm_blk_set_available(bdev);
+		break;
+	}
+}
+
+static int scm_probe(struct scm_device *scmdev)
+{
+	struct scm_blk_dev *bdev;
+	int ret;
+
+	SCM_LOG(2, "probe");
+	SCM_LOG_STATE(2, scmdev);
+
+	if (scmdev->attrs.oper_state != OP_STATE_GOOD)
+		return -EINVAL;
+
+	bdev = kzalloc(sizeof(*bdev), GFP_KERNEL);
+	if (!bdev)
+		return -ENOMEM;
+
+	dev_set_drvdata(&scmdev->dev, bdev);
+	ret = scm_blk_dev_setup(bdev, scmdev);
+	if (ret) {
+		dev_set_drvdata(&scmdev->dev, NULL);
+		kfree(bdev);
+		goto out;
+	}
+
+out:
+	return ret;
+}
+
+static int scm_remove(struct scm_device *scmdev)
+{
+	struct scm_blk_dev *bdev = dev_get_drvdata(&scmdev->dev);
+
+	scm_blk_dev_cleanup(bdev);
+	dev_set_drvdata(&scmdev->dev, NULL);
+	kfree(bdev);
+
+	return 0;
+}
+
+static struct scm_driver scm_drv = {
+	.drv = {
+		.name = "scm_block",
+		.owner = THIS_MODULE,
+	},
+	.notify = scm_notify,
+	.probe = scm_probe,
+	.remove = scm_remove,
+	.handler = scm_blk_irq,
+};
+
+int __init scm_drv_init(void)
+{
+	return scm_driver_register(&scm_drv);
+}
+
+void scm_drv_cleanup(void)
+{
+	scm_driver_unregister(&scm_drv);
+}
diff --git a/drivers/s390/block/xpram.c b/drivers/s390/block/xpram.c
new file mode 100644
index 0000000..3df5d68
--- /dev/null
+++ b/drivers/s390/block/xpram.c
@@ -0,0 +1,481 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Xpram.c -- the S/390 expanded memory RAM-disk
+ *           
+ * significant parts of this code are based on
+ * the sbull device driver presented in
+ * A. Rubini: Linux Device Drivers
+ *
+ * Author of XPRAM specific coding: Reinhard Buendgen
+ *                                  buendgen@de.ibm.com
+ * Rewrite for 2.5: Martin Schwidefsky <schwidefsky@de.ibm.com>
+ *
+ * External interfaces:
+ *   Interfaces to linux kernel
+ *        xpram_setup: read kernel parameters
+ *   Device specific file operations
+ *        xpram_ioctl
+ *        xpram_open
+ *
+ * "ad-hoc" partitioning:
+ *    the expanded memory can be partitioned among several devices 
+ *    (with different minors). The partitioning setup can be
+ *    set by kernel or module parameters (int devs & int sizes[])
+ *
+ * Potential future improvements:
+ *   generic hard disk support to replace ad-hoc partitioning
+ */
+
+#define KMSG_COMPONENT "xpram"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/ctype.h>  /* isdigit, isxdigit */
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/blkdev.h>
+#include <linux/blkpg.h>
+#include <linux/hdreg.h>  /* HDIO_GETGEO */
+#include <linux/device.h>
+#include <linux/bio.h>
+#include <linux/suspend.h>
+#include <linux/platform_device.h>
+#include <linux/gfp.h>
+#include <linux/uaccess.h>
+
+#define XPRAM_NAME	"xpram"
+#define XPRAM_DEVS	1	/* one partition */
+#define XPRAM_MAX_DEVS	32	/* maximal number of devices (partitions) */
+
+typedef struct {
+	unsigned int	size;		/* size of xpram segment in pages */
+	unsigned int	offset;		/* start page of xpram segment */
+} xpram_device_t;
+
+static xpram_device_t xpram_devices[XPRAM_MAX_DEVS];
+static unsigned int xpram_sizes[XPRAM_MAX_DEVS];
+static struct gendisk *xpram_disks[XPRAM_MAX_DEVS];
+static struct request_queue *xpram_queues[XPRAM_MAX_DEVS];
+static unsigned int xpram_pages;
+static int xpram_devs;
+
+/*
+ * Parameter parsing functions.
+ */
+static int devs = XPRAM_DEVS;
+static char *sizes[XPRAM_MAX_DEVS];
+
+module_param(devs, int, 0);
+module_param_array(sizes, charp, NULL, 0);
+
+MODULE_PARM_DESC(devs, "number of devices (\"partitions\"), " \
+		 "the default is " __MODULE_STRING(XPRAM_DEVS) "\n");
+MODULE_PARM_DESC(sizes, "list of device (partition) sizes; " \
+		 "the defaults are 0s\n" \
+		 "All devices with size 0 equally partition the "
+		 "remaining space on the expanded storage not "
+		 "claimed by explicit sizes\n");
+MODULE_LICENSE("GPL");
+
+/*
+ * Copy expanded memory page (4kB) into main memory                  
+ * Arguments                                                         
+ *           page_addr:    address of target page                    
+ *           xpage_index:  index of expanded memory page
+ * Return value                                                      
+ *           0:            if operation succeeds
+ *           -EIO:         if pgin failed
+ *           -ENXIO:       if xpram has vanished
+ */
+static int xpram_page_in (unsigned long page_addr, unsigned int xpage_index)
+{
+	int cc = 2;	/* return unused cc 2 if pgin traps */
+
+	asm volatile(
+		"	.insn	rre,0xb22e0000,%1,%2\n"  /* pgin %1,%2 */
+		"0:	ipm	%0\n"
+		"	srl	%0,28\n"
+		"1:\n"
+		EX_TABLE(0b,1b)
+		: "+d" (cc) : "a" (__pa(page_addr)), "d" (xpage_index) : "cc");
+	if (cc == 3)
+		return -ENXIO;
+	if (cc == 2)
+		return -ENXIO;
+	if (cc == 1)
+		return -EIO;
+	return 0;
+}
+
+/*
+ * Copy a 4kB page of main memory to an expanded memory page          
+ * Arguments                                                          
+ *           page_addr:    address of source page                     
+ *           xpage_index:  index of expanded memory page
+ * Return value                                                       
+ *           0:            if operation succeeds
+ *           -EIO:         if pgout failed
+ *           -ENXIO:       if xpram has vanished
+ */
+static long xpram_page_out (unsigned long page_addr, unsigned int xpage_index)
+{
+	int cc = 2;	/* return unused cc 2 if pgout traps */
+
+	asm volatile(
+		"	.insn	rre,0xb22f0000,%1,%2\n"  /* pgout %1,%2 */
+		"0:	ipm	%0\n"
+		"	srl	%0,28\n"
+		"1:\n"
+		EX_TABLE(0b,1b)
+		: "+d" (cc) : "a" (__pa(page_addr)), "d" (xpage_index) : "cc");
+	if (cc == 3)
+		return -ENXIO;
+	if (cc == 2)
+		return -ENXIO;
+	if (cc == 1)
+		return -EIO;
+	return 0;
+}
+
+/*
+ * Check if xpram is available.
+ */
+static int xpram_present(void)
+{
+	unsigned long mem_page;
+	int rc;
+
+	mem_page = (unsigned long) __get_free_page(GFP_KERNEL);
+	if (!mem_page)
+		return -ENOMEM;
+	rc = xpram_page_in(mem_page, 0);
+	free_page(mem_page);
+	return rc ? -ENXIO : 0;
+}
+
+/*
+ * Return index of the last available xpram page.
+ */
+static unsigned long xpram_highest_page_index(void)
+{
+	unsigned int page_index, add_bit;
+	unsigned long mem_page;
+
+	mem_page = (unsigned long) __get_free_page(GFP_KERNEL);
+	if (!mem_page)
+		return 0;
+
+	page_index = 0;
+	add_bit = 1ULL << (sizeof(unsigned int)*8 - 1);
+	while (add_bit > 0) {
+		if (xpram_page_in(mem_page, page_index | add_bit) == 0)
+			page_index |= add_bit;
+		add_bit >>= 1;
+	}
+
+	free_page (mem_page);
+
+	return page_index;
+}
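
The probe walks the index bits from the most significant downward: whenever reading page (page_index | add_bit) succeeds, that bit belongs in the highest valid index. The same pattern with a mock probe standing in for xpram_page_in() (device size assumed for illustration):

#include <stdio.h>

#define XPRAM_PAGES 10000U	/* assumed device size, pages 0..9999 */

static int mock_page_in(unsigned int idx)
{
	return idx < XPRAM_PAGES ? 0 : -1;	/* 0 == read succeeded */
}

int main(void)
{
	unsigned int page_index = 0;
	unsigned int add_bit = 1U << (sizeof(unsigned int) * 8 - 1);

	while (add_bit > 0) {
		if (mock_page_in(page_index | add_bit) == 0)
			page_index |= add_bit;
		add_bit >>= 1;
	}
	printf("highest index: %u\n", page_index);	/* 9999 */
	return 0;
}
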
+
+/*
+ * Block device make request function.
+ */
+static blk_qc_t xpram_make_request(struct request_queue *q, struct bio *bio)
+{
+	xpram_device_t *xdev = bio->bi_disk->private_data;
+	struct bio_vec bvec;
+	struct bvec_iter iter;
+	unsigned int index;
+	unsigned long page_addr;
+	unsigned long bytes;
+
+	blk_queue_split(q, &bio);
+
+	if ((bio->bi_iter.bi_sector & 7) != 0 ||
+	    (bio->bi_iter.bi_size & 4095) != 0)
+		/* Request is not page-aligned. */
+		goto fail;
+	if ((bio->bi_iter.bi_size >> 12) > xdev->size)
+		/* Request exceeds size of the device. */
+		goto fail;
+	if ((bio->bi_iter.bi_sector >> 3) > 0xffffffffU - xdev->offset)
+		goto fail;
+	index = (bio->bi_iter.bi_sector >> 3) + xdev->offset;
+	bio_for_each_segment(bvec, bio, iter) {
+		page_addr = (unsigned long)
+			kmap(bvec.bv_page) + bvec.bv_offset;
+		bytes = bvec.bv_len;
+		if ((page_addr & 4095) != 0 || (bytes & 4095) != 0)
+			/* More paranoia. */
+			goto fail;
+		while (bytes > 0) {
+			if (bio_data_dir(bio) == READ) {
+				if (xpram_page_in(page_addr, index) != 0)
+					goto fail;
+			} else {
+				if (xpram_page_out(page_addr, index) != 0)
+					goto fail;
+			}
+			page_addr += 4096;
+			bytes -= 4096;
+			index++;
+		}
+	}
+	bio_endio(bio);
+	return BLK_QC_T_NONE;
+fail:
+	bio_io_error(bio);
+	return BLK_QC_T_NONE;
+}
+
+static int xpram_getgeo(struct block_device *bdev, struct hd_geometry *geo)
+{
+	unsigned long size;
+
+	/*
+	 * get geometry: we have to fake one. Trim the size to a
+	 * multiple of 64 (32k): claim 16 sectors and 4 heads,
+	 * whatever cylinders fit. Also tell that data starts at sector 4.
+	 */
+	size = (xpram_pages * 8) & ~0x3f;
+	geo->cylinders = size >> 6;
+	geo->heads = 4;
+	geo->sectors = 16;
+	geo->start = 4;
+	return 0;
+}
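
With 16 sectors and 4 heads, one fake cylinder holds 64 sectors (32 KiB), which is why the sector count is trimmed to a multiple of 64 and shifted right by 6. A worked example with an assumed expanded-memory size:

#include <stdio.h>

int main(void)
{
	unsigned long xpram_pages = 25000;	/* assumed, ~100 MB */
	unsigned long size = (xpram_pages * 8) & ~0x3fUL;	/* 512-b sectors */

	/* 4 heads * 16 sectors = 64 sectors (32 KiB) per cylinder */
	printf("cylinders=%lu heads=4 sectors=16\n", size >> 6);
	return 0;	/* cylinders=3125 */
}
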
+
+static const struct block_device_operations xpram_devops =
+{
+	.owner	= THIS_MODULE,
+	.getgeo	= xpram_getgeo,
+};
+
+/*
+ * Setup xpram_sizes array.
+ */
+static int __init xpram_setup_sizes(unsigned long pages)
+{
+	unsigned long mem_needed;
+	unsigned long mem_auto;
+	unsigned long long size;
+	char *sizes_end;
+	int mem_auto_no;
+	int i;
+
+	/* Check number of devices. */
+	if (devs <= 0 || devs > XPRAM_MAX_DEVS) {
+		pr_err("%d is not a valid number of XPRAM devices\n",devs);
+		return -EINVAL;
+	}
+	xpram_devs = devs;
+
+	/*
+	 * Copy sizes array to xpram_sizes and align partition
+	 * sizes to page boundary.
+	 */
+	mem_needed = 0;
+	mem_auto_no = 0;
+	for (i = 0; i < xpram_devs; i++) {
+		if (sizes[i]) {
+			size = simple_strtoull(sizes[i], &sizes_end, 0);
+			switch (*sizes_end) {
+			case 'g':
+			case 'G':
+				size <<= 20;
+				break;
+			case 'm':
+			case 'M':
+				size <<= 10;
+			}
+			xpram_sizes[i] = (size + 3) & -4UL;
+		}
+		if (xpram_sizes[i])
+			mem_needed += xpram_sizes[i];
+		else
+			mem_auto_no++;
+	}
+	
+	pr_info("  number of devices (partitions): %d \n", xpram_devs);
+	for (i = 0; i < xpram_devs; i++) {
+		if (xpram_sizes[i])
+			pr_info("  size of partition %d: %u kB\n",
+				i, xpram_sizes[i]);
+		else
+			pr_info("  size of partition %d to be set "
+				"automatically\n",i);
+	}
+	pr_info("  memory needed (for sized partitions): %lu kB\n",
+		mem_needed);
+	pr_info("  partitions to be sized automatically: %d\n",
+		mem_auto_no);
+
+	if (mem_needed > pages * 4) {
+		pr_err("Not enough expanded memory available\n");
+		return -EINVAL;
+	}
+
+	/*
+	 * partitioning:
+	 * xpram_sizes[i] != 0; partition i has size xpram_sizes[i] kB
+	 * else:             ; all partitions with zero xpram_sizes[i]
+	 *                     partition equally the remaining space
+	 */
+	if (mem_auto_no) {
+		mem_auto = ((pages - mem_needed / 4) / mem_auto_no) * 4;
+		pr_info("  automatically determined "
+			"partition size: %lu kB\n", mem_auto);
+		for (i = 0; i < xpram_devs; i++)
+			if (xpram_sizes[i] == 0)
+				xpram_sizes[i] = mem_auto;
+	}
+	return 0;
+}
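
Partition sizes are kept in KB, so the 'G' suffix shifts by 20 and 'M' by 10, and (size + 3) & -4UL rounds each value up to a whole 4 KB page. A standalone sketch of the suffix handling (not the kernel parser):

#include <stdio.h>
#include <stdlib.h>

static unsigned long long parse_kb(const char *s)
{
	char *end;
	unsigned long long size = strtoull(s, &end, 0);

	switch (*end) {
	case 'g': case 'G': size <<= 20; break;	/* GB -> KB */
	case 'm': case 'M': size <<= 10; break;	/* MB -> KB */
	}
	return (size + 3) & ~3ULL;	/* round up to 4 KB pages */
}

int main(void)
{
	printf("%llu %llu %llu\n",
	       parse_kb("16M"), parse_kb("1G"), parse_kb("1025"));
	return 0;	/* 16384 1048576 1028 */
}
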
+
+static int __init xpram_setup_blkdev(void)
+{
+	unsigned long offset;
+	int i, rc = -ENOMEM;
+
+	for (i = 0; i < xpram_devs; i++) {
+		xpram_disks[i] = alloc_disk(1);
+		if (!xpram_disks[i])
+			goto out;
+		xpram_queues[i] = blk_alloc_queue(GFP_KERNEL);
+		if (!xpram_queues[i]) {
+			put_disk(xpram_disks[i]);
+			goto out;
+		}
+		blk_queue_flag_set(QUEUE_FLAG_NONROT, xpram_queues[i]);
+		blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, xpram_queues[i]);
+		blk_queue_make_request(xpram_queues[i], xpram_make_request);
+		blk_queue_logical_block_size(xpram_queues[i], 4096);
+	}
+
+	/*
+	 * Register xpram major.
+	 */
+	rc = register_blkdev(XPRAM_MAJOR, XPRAM_NAME);
+	if (rc < 0)
+		goto out;
+
+	/*
+	 * Setup device structures.
+	 */
+	offset = 0;
+	for (i = 0; i < xpram_devs; i++) {
+		struct gendisk *disk = xpram_disks[i];
+
+		xpram_devices[i].size = xpram_sizes[i] / 4;
+		xpram_devices[i].offset = offset;
+		offset += xpram_devices[i].size;
+		disk->major = XPRAM_MAJOR;
+		disk->first_minor = i;
+		disk->fops = &xpram_devops;
+		disk->private_data = &xpram_devices[i];
+		disk->queue = xpram_queues[i];
+		sprintf(disk->disk_name, "slram%d", i);
+		set_capacity(disk, xpram_sizes[i] << 1);
+		add_disk(disk);
+	}
+
+	return 0;
+out:
+	while (i--) {
+		blk_cleanup_queue(xpram_queues[i]);
+		put_disk(xpram_disks[i]);
+	}
+	return rc;
+}
+
+/*
+ * Resume failed: Print error message and call panic.
+ */
+static void xpram_resume_error(const char *message)
+{
+	pr_err("Resuming the system failed: %s\n", message);
+	panic("xpram resume error\n");
+}
+
+/*
+ * Check if xpram setup changed between suspend and resume.
+ */
+static int xpram_restore(struct device *dev)
+{
+	if (!xpram_pages)
+		return 0;
+	if (xpram_present() != 0)
+		xpram_resume_error("xpram disappeared");
+	if (xpram_pages != xpram_highest_page_index() + 1)
+		xpram_resume_error("Size of xpram changed");
+	return 0;
+}
+
+static const struct dev_pm_ops xpram_pm_ops = {
+	.restore	= xpram_restore,
+};
+
+static struct platform_driver xpram_pdrv = {
+	.driver = {
+		.name	= XPRAM_NAME,
+		.pm	= &xpram_pm_ops,
+	},
+};
+
+static struct platform_device *xpram_pdev;
+
+/*
+ * Finally, the init/exit functions.
+ */
+static void __exit xpram_exit(void)
+{
+	int i;
+	for (i = 0; i < xpram_devs; i++) {
+		del_gendisk(xpram_disks[i]);
+		blk_cleanup_queue(xpram_queues[i]);
+		put_disk(xpram_disks[i]);
+	}
+	unregister_blkdev(XPRAM_MAJOR, XPRAM_NAME);
+	platform_device_unregister(xpram_pdev);
+	platform_driver_unregister(&xpram_pdrv);
+}
+
+static int __init xpram_init(void)
+{
+	int rc;
+
+	/* Find out size of expanded memory. */
+	if (xpram_present() != 0) {
+		pr_err("No expanded memory available\n");
+		return -ENODEV;
+	}
+	xpram_pages = xpram_highest_page_index() + 1;
+	pr_info("  %u pages expanded memory found (%lu KB).\n",
+		xpram_pages, (unsigned long) xpram_pages*4);
+	rc = xpram_setup_sizes(xpram_pages);
+	if (rc)
+		return rc;
+	rc = platform_driver_register(&xpram_pdrv);
+	if (rc)
+		return rc;
+	xpram_pdev = platform_device_register_simple(XPRAM_NAME, -1, NULL, 0);
+	if (IS_ERR(xpram_pdev)) {
+		rc = PTR_ERR(xpram_pdev);
+		goto fail_platform_driver_unregister;
+	}
+	rc = xpram_setup_blkdev();
+	if (rc)
+		goto fail_platform_device_unregister;
+	return 0;
+
+fail_platform_device_unregister:
+	platform_device_unregister(xpram_pdev);
+fail_platform_driver_unregister:
+	platform_driver_unregister(&xpram_pdrv);
+	return rc;
+}
+
+module_init(xpram_init);
+module_exit(xpram_exit);
diff --git a/drivers/s390/char/Kconfig b/drivers/s390/char/Kconfig
new file mode 100644
index 0000000..ab0b243
--- /dev/null
+++ b/drivers/s390/char/Kconfig
@@ -0,0 +1,208 @@
+# SPDX-License-Identifier: GPL-2.0
+comment "S/390 character device drivers"
+	depends on S390
+
+config TN3270
+	def_tristate y
+	prompt "Support for locally attached 3270 terminals"
+	depends on CCW
+	help
+	  Include support for IBM 3270 terminals.
+
+config TN3270_TTY
+	def_tristate y
+	prompt "Support for tty input/output on 3270 terminals"
+	depends on TN3270 && TTY
+	help
+	  Include support for using an IBM 3270 terminal as a Linux tty.
+
+config TN3270_FS
+	def_tristate m
+	prompt "Support for fullscreen applications on 3270 terminals"
+	depends on TN3270
+	help
+	  Include support for fullscreen applications on an IBM 3270 terminal.
+
+config TN3270_CONSOLE
+	def_bool y
+	prompt "Support for console on 3270 terminal"
+	depends on TN3270=y && TN3270_TTY=y
+	help
+	  Include support for using an IBM 3270 terminal as a Linux system
+	  console.  Available only if 3270 support is compiled in statically.
+
+config TN3215
+	def_bool y
+	prompt "Support for 3215 line mode terminal"
+	depends on CCW && TTY
+	help
+	  Include support for IBM 3215 line-mode terminals.
+
+config TN3215_CONSOLE
+	def_bool y
+	prompt "Support for console on 3215 line mode terminal"
+	depends on TN3215
+	help
+	  Include support for using an IBM 3215 line-mode terminal as a
+	  Linux system console.
+
+config CCW_CONSOLE
+	def_bool y if TN3215_CONSOLE || TN3270_CONSOLE
+
+config SCLP_TTY
+	def_bool y
+	prompt "Support for SCLP line mode terminal"
+	depends on S390 && TTY
+	help
+	  Include support for IBM SCLP line-mode terminals.
+
+config SCLP_CONSOLE
+	def_bool y
+	prompt "Support for console on SCLP line mode terminal"
+	depends on SCLP_TTY
+	help
+	  Include support for using an IBM HWC line-mode terminal as the Linux
+	  system console.
+
+config SCLP_VT220_TTY
+	def_bool y
+	prompt "Support for SCLP VT220-compatible terminal"
+	depends on S390 && TTY
+	help
+	  Include support for an IBM SCLP VT220-compatible terminal.
+
+config SCLP_VT220_CONSOLE
+	def_bool y
+	prompt "Support for console on SCLP VT220-compatible terminal"
+	depends on SCLP_VT220_TTY
+	help
+	  Include support for using an IBM SCLP VT220-compatible terminal as a
+	  Linux system console.
+
+config SCLP_ASYNC
+	def_tristate m
+	prompt "Support for Call Home via Asynchronous SCLP Records"
+	depends on S390
+	help
+	  This option enables the call home function, which is able to inform
+	  the service element and connected organisations about a kernel panic.
+	  You should only select this option if you know what you are doing,
+	  want to inform other people about your kernel panics,
+	  need this feature and intend to run your kernel in LPAR.
+
+config SCLP_ASYNC_ID
+       string "Component ID for Call Home"
+       depends on SCLP_ASYNC
+       default "000000000"
+       help
+	 The Component ID for Call Home is used to identify the correct
+	 problem reporting queue the call home records should be sent to.
+
+	 If you are unsure, please use the default value "000000000".
+
+config HMC_DRV
+	def_tristate m
+	prompt "Support for file transfers from HMC drive CD/DVD-ROM"
+	depends on S390
+	select CRC16
+	help
+	  This option enables support for file transfers from a Hardware
+	  Management Console (HMC) drive CD/DVD-ROM. It is available as a
+	  module, called 'hmcdrv', and also as kernel built-in. There is one
+	  optional parameter for this module: cachesize=N, which modifies the
+	  transfer cache size from its default value 0.5MB to N bytes. If N
+	  is zero, then no caching is performed.
+
+config SCLP_OFB
+	def_bool n
+	prompt "Support for Open-for-Business SCLP Event"
+	depends on S390
+	help
+	  This option enables the Open-for-Business interface to the s390
+	  Service Element.
+
+config S390_TAPE
+	def_tristate m
+	prompt "S/390 tape device support"
+	depends on CCW
+	help
+	  Select this option if you want to access channel-attached tape
+	  devices on IBM S/390 or zSeries.
+	  If you select this option you will also want to select at
+	  least one of the tape interface options and one of the tape
+	  hardware options in order to access a tape device.
+	  This option is also available as a module. The module will be
+	  called tape390 and include all selected interfaces and
+	  hardware drivers.
+
+comment "S/390 tape hardware support"
+	depends on S390_TAPE
+
+config S390_TAPE_34XX
+	def_tristate m
+	prompt "Support for 3480/3490 tape hardware"
+	depends on S390_TAPE
+	help
+	  Select this option if you want to access IBM 3480/3490 magnetic
+	  tape subsystems and 100% compatibles.
+	  It is safe to say "Y" here.
+
+config S390_TAPE_3590
+	def_tristate m
+	prompt "Support for 3590 tape hardware"
+	depends on S390_TAPE
+	help
+	  Select this option if you want to access IBM 3590 magnetic
+	  tape subsystems and 100% compatibles.
+	  It is safe to say "Y" here.
+
+config VMLOGRDR
+	def_tristate m
+	prompt "Support for the z/VM recording system services (VM only)"
+	depends on IUCV
+	help
+	  Select this option if you want to be able to receive records collected
+	  by the z/VM recording system services, e.g. from *LOGREC, *ACCOUNT or
+	  *SYMPTOM.
+	  This driver depends on the IUCV support driver.
+
+config VMCP
+	def_bool y
+	prompt "Support for the z/VM CP interface"
+	depends on S390
+	select CMA
+	help
+	  Select this option if you want to be able to interact with the control
+	  program on z/VM.
+
+config VMCP_CMA_SIZE
+	int "Memory in MiB reserved for z/VM CP interface"
+	default "4"
+	depends on VMCP
+	help
+	  Specify the default amount of memory in MiB reserved for the z/VM CP
+	  interface. If needed this memory is used for large contiguous memory
+	  allocations. The default can be changed with the kernel command line
+	  parameter "vmcp_cma".
+
+config MONREADER
+	def_tristate m
+	prompt "API for reading z/VM monitor service records"
+	depends on IUCV
+	help
+	  Character device driver for reading z/VM monitor service records
+
+config MONWRITER
+	def_tristate m
+	prompt "API for writing z/VM monitor service records"
+	depends on S390
+	help
+	  Character device driver for writing z/VM monitor service records
+
+config S390_VMUR
+	def_tristate m
+	prompt "z/VM unit record device driver"
+	depends on S390
+	help
+	  Character device driver for z/VM reader, puncher and printer.
+
diff --git a/drivers/s390/char/Makefile b/drivers/s390/char/Makefile
new file mode 100644
index 0000000..c6ab34f
--- /dev/null
+++ b/drivers/s390/char/Makefile
@@ -0,0 +1,54 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# S/390 character devices
+#
+
+ifdef CONFIG_FUNCTION_TRACER
+# Do not trace early setup code
+CFLAGS_REMOVE_sclp_early_core.o	= $(CC_FLAGS_FTRACE)
+endif
+
+GCOV_PROFILE_sclp_early_core.o		:= n
+KCOV_INSTRUMENT_sclp_early_core.o	:= n
+UBSAN_SANITIZE_sclp_early_core.o	:= n
+
+CFLAGS_sclp_early_core.o		+= -D__NO_FORTIFY
+
+CFLAGS_REMOVE_sclp_early_core.o	+= $(CC_FLAGS_EXPOLINE)
+
+obj-y += ctrlchar.o keyboard.o defkeymap.o sclp.o sclp_rw.o sclp_quiesce.o \
+	 sclp_cmd.o sclp_config.o sclp_cpi_sys.o sclp_ocf.o sclp_ctl.o \
+	 sclp_early.o sclp_early_core.o sclp_sd.o
+
+obj-$(CONFIG_TN3270) += raw3270.o
+obj-$(CONFIG_TN3270_CONSOLE) += con3270.o
+obj-$(CONFIG_TN3270_TTY) += tty3270.o
+obj-$(CONFIG_TN3270_FS) += fs3270.o
+
+obj-$(CONFIG_TN3215) += con3215.o
+
+obj-$(CONFIG_SCLP_TTY) += sclp_tty.o
+obj-$(CONFIG_SCLP_CONSOLE) += sclp_con.o
+obj-$(CONFIG_SCLP_VT220_TTY) += sclp_vt220.o
+obj-$(CONFIG_SCLP_ASYNC) += sclp_async.o
+
+obj-$(CONFIG_PCI) += sclp_pci.o
+
+obj-$(CONFIG_VMLOGRDR) += vmlogrdr.o
+obj-$(CONFIG_VMCP) += vmcp.o
+
+tape-$(CONFIG_PROC_FS) += tape_proc.o
+tape-objs := tape_core.o tape_std.o tape_char.o $(tape-y)
+obj-$(CONFIG_S390_TAPE) += tape.o tape_class.o
+obj-$(CONFIG_S390_TAPE_34XX) += tape_34xx.o
+obj-$(CONFIG_S390_TAPE_3590) += tape_3590.o
+obj-$(CONFIG_MONREADER) += monreader.o
+obj-$(CONFIG_MONWRITER) += monwriter.o
+obj-$(CONFIG_S390_VMUR) += vmur.o
+obj-$(CONFIG_CRASH_DUMP) += sclp_sdias.o zcore.o
+
+hmcdrv-objs := hmcdrv_mod.o hmcdrv_dev.o hmcdrv_ftp.o hmcdrv_cache.o diag_ftp.o sclp_ftp.o
+obj-$(CONFIG_HMC_DRV) += hmcdrv.o
+
+chkbss := sclp_early_core.o
+include $(srctree)/arch/s390/scripts/Makefile.chkbss
diff --git a/drivers/s390/char/con3215.c b/drivers/s390/char/con3215.c
new file mode 100644
index 0000000..8c9d412
--- /dev/null
+++ b/drivers/s390/char/con3215.c
@@ -0,0 +1,1218 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * 3215 line mode terminal driver.
+ *
+ * Copyright IBM Corp. 1999, 2009
+ * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
+ *
+ * Updated:
+ *  Aug-2000: Added tab support
+ *	      Dan Morrison, IBM Corporation <dmorriso@cse.buffalo.edu>
+ */
+
+#include <linux/types.h>
+#include <linux/kdev_t.h>
+#include <linux/tty.h>
+#include <linux/tty_flip.h>
+#include <linux/vt_kern.h>
+#include <linux/init.h>
+#include <linux/console.h>
+#include <linux/interrupt.h>
+#include <linux/err.h>
+#include <linux/reboot.h>
+#include <linux/serial.h> /* ASYNC_* flags */
+#include <linux/slab.h>
+#include <asm/ccwdev.h>
+#include <asm/cio.h>
+#include <asm/io.h>
+#include <asm/ebcdic.h>
+#include <linux/uaccess.h>
+#include <asm/delay.h>
+#include <asm/cpcmd.h>
+#include <asm/setup.h>
+
+#include "ctrlchar.h"
+
+#define NR_3215		    1
+#define NR_3215_REQ	    (4*NR_3215)
+#define RAW3215_BUFFER_SIZE 65536     /* output buffer size */
+#define RAW3215_INBUF_SIZE  256	      /* input buffer size */
+#define RAW3215_MIN_SPACE   128	      /* minimum free space for wakeup */
+#define RAW3215_MIN_WRITE   1024      /* min. length for immediate output */
+#define RAW3215_MAX_BYTES   3968      /* max. bytes to write with one ssch */
+#define RAW3215_MAX_NEWLINE 50	      /* max. lines to write with one ssch */
+#define RAW3215_NR_CCWS	    3
+#define RAW3215_TIMEOUT	    HZ/10     /* time for delayed output */
+
+#define RAW3215_FIXED	    1	      /* 3215 console device is not to be freed */
+#define RAW3215_WORKING	    4	      /* set if a request is being worked on */
+#define RAW3215_THROTTLED   8	      /* set if reading is disabled */
+#define RAW3215_STOPPED	    16	      /* set if writing is disabled */
+#define RAW3215_TIMER_RUNS  64	      /* set if the output delay timer is on */
+#define RAW3215_FLUSHING    128	      /* set to flush buffer (no delay) */
+
+#define TAB_STOP_SIZE	    8	      /* tab stop size */
+
+/*
+ * Request types for a 3215 device
+ */
+enum raw3215_type {
+	RAW3215_FREE, RAW3215_READ, RAW3215_WRITE
+};
+
+/*
+ * Request structure for a 3215 device
+ */
+struct raw3215_req {
+	enum raw3215_type type;	      /* type of the request */
+	int start, len;		      /* start index & len in output buffer */
+	int delayable;		      /* indication to wait for more data */
+	int residual;		      /* residual count for read request */
+	struct ccw1 ccws[RAW3215_NR_CCWS]; /* space for the channel program */
+	struct raw3215_info *info;    /* pointer to main structure */
+	struct raw3215_req *next;     /* pointer to next request */
+} __attribute__ ((aligned(8)));
+
+struct raw3215_info {
+	struct tty_port port;
+	struct ccw_device *cdev;      /* device for tty driver */
+	spinlock_t *lock;	      /* pointer to irq lock */
+	int flags;		      /* state flags */
+	char *buffer;		      /* pointer to output buffer */
+	char *inbuf;		      /* pointer to input buffer */
+	int head;		      /* first free byte in output buffer */
+	int count;		      /* number of bytes in output buffer */
+	int written;		      /* number of bytes in write requests */
+	struct raw3215_req *queued_read; /* pointer to queued read requests */
+	struct raw3215_req *queued_write;/* pointer to queued write requests */
+	struct tasklet_struct tlet;   /* tasklet to invoke tty_wakeup */
+	wait_queue_head_t empty_wait; /* wait queue for flushing */
+	struct timer_list timer;      /* timer for delayed output */
+	int line_pos;		      /* position on the line (for tabs) */
+	char ubuffer[80];	      /* copy_from_user buffer */
+};
+
+/* array of 3215 devices structures */
+static struct raw3215_info *raw3215[NR_3215];
+/* spinlock to protect the raw3215 array */
+static DEFINE_SPINLOCK(raw3215_device_lock);
+/* list of free request structures */
+static struct raw3215_req *raw3215_freelist;
+/* spinlock to protect free list */
+static spinlock_t raw3215_freelist_lock;
+
+static struct tty_driver *tty3215_driver;
+
+/*
+ * Get a request structure from the free list
+ */
+static inline struct raw3215_req *raw3215_alloc_req(void)
+{
+	struct raw3215_req *req;
+	unsigned long flags;
+
+	spin_lock_irqsave(&raw3215_freelist_lock, flags);
+	req = raw3215_freelist;
+	raw3215_freelist = req->next;
+	spin_unlock_irqrestore(&raw3215_freelist_lock, flags);
+	return req;
+}
+
+/*
+ * Put a request structure back to the free list
+ */
+static inline void raw3215_free_req(struct raw3215_req *req)
+{
+	unsigned long flags;
+
+	if (req->type == RAW3215_FREE)
+		return;		/* don't free a free request */
+	req->type = RAW3215_FREE;
+	spin_lock_irqsave(&raw3215_freelist_lock, flags);
+	req->next = raw3215_freelist;
+	raw3215_freelist = req;
+	spin_unlock_irqrestore(&raw3215_freelist_lock, flags);
+}
+
+/*
+ * Set up a read request that reads up to 160 bytes from the 3215 device.
+ * If there is a queued read request it is used, but that shouldn't happen
+ * because a 3215 terminal won't accept a new read before the old one is
+ * completed.
+ */
+static void raw3215_mk_read_req(struct raw3215_info *raw)
+{
+	struct raw3215_req *req;
+	struct ccw1 *ccw;
+
+	/* there can only be ONE read request at a time */
+	req = raw->queued_read;
+	if (req == NULL) {
+		/* no queued read request, use new req structure */
+		req = raw3215_alloc_req();
+		req->type = RAW3215_READ;
+		req->info = raw;
+		raw->queued_read = req;
+	}
+
+	ccw = req->ccws;
+	ccw->cmd_code = 0x0A; /* read inquiry */
+	ccw->flags = 0x20;    /* ignore incorrect length */
+	ccw->count = 160;
+	ccw->cda = (__u32) __pa(raw->inbuf);
+}
+
+/*
+ * Set up a write request with the information from the main structure.
+ * A ccw chain is created that writes as much as possible from the output
+ * buffer to the 3215 device. If a queued write exists it is replaced by
+ * the new, probably lengthened request.
+ */
+static void raw3215_mk_write_req(struct raw3215_info *raw)
+{
+	struct raw3215_req *req;
+	struct ccw1 *ccw;
+	int len, count, ix, lines;
+
+	if (raw->count <= raw->written)
+		return;
+	/* check if there is a queued write request */
+	req = raw->queued_write;
+	if (req == NULL) {
+		/* no queued write request, use new req structure */
+		req = raw3215_alloc_req();
+		req->type = RAW3215_WRITE;
+		req->info = raw;
+		raw->queued_write = req;
+	} else {
+		raw->written -= req->len;
+	}
+
+	ccw = req->ccws;
+	req->start = (raw->head - raw->count + raw->written) &
+		     (RAW3215_BUFFER_SIZE - 1);
+	/*
+	 * now we have to count newlines. We can at max accept
+	 * RAW3215_MAX_NEWLINE newlines in a single ssch due to
+	 * a restriction in VM
+	 */
+	lines = 0;
+	ix = req->start;
+	while (lines < RAW3215_MAX_NEWLINE && ix != raw->head) {
+		if (raw->buffer[ix] == 0x15)
+			lines++;
+		ix = (ix + 1) & (RAW3215_BUFFER_SIZE - 1);
+	}
+	len = ((ix - 1 - req->start) & (RAW3215_BUFFER_SIZE - 1)) + 1;
+	if (len > RAW3215_MAX_BYTES)
+		len = RAW3215_MAX_BYTES;
+	req->len = len;
+	raw->written += len;
+
+	/* set the indication if we should try to enlarge this request */
+	req->delayable = (ix == raw->head) && (len < RAW3215_MIN_WRITE);
+
+	ix = req->start;
+	while (len > 0) {
+		if (ccw > req->ccws)
+			ccw[-1].flags |= 0x40; /* use command chaining */
+		ccw->cmd_code = 0x01; /* write, auto carrier return */
+		ccw->flags = 0x20;    /* ignore incorrect length ind.  */
+		ccw->cda =
+			(__u32) __pa(raw->buffer + ix);
+		count = len;
+		if (ix + count > RAW3215_BUFFER_SIZE)
+			count = RAW3215_BUFFER_SIZE - ix;
+		ccw->count = count;
+		len -= count;
+		ix = (ix + count) & (RAW3215_BUFFER_SIZE - 1);
+		ccw++;
+	}
+	/*
+	 * Add a NOP to the channel program. 3215 devices are purely
+	 * emulated and its much better to avoid the channel end
+	 * interrupt in this case.
+	 */
+	if (ccw > req->ccws)
+		ccw[-1].flags |= 0x40; /* use command chaining */
+	ccw->cmd_code = 0x03; /* NOP */
+	ccw->flags = 0;
+	ccw->cda = 0;
+	ccw->count = 1;
+}
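
The output buffer is a power-of-two ring, so indices wrap with & (RAW3215_BUFFER_SIZE - 1), and the length formula ((ix - 1 - start) & mask) + 1 stays correct even when the data wraps past the end of the buffer. A small demonstration:

#include <stdio.h>

#define BUF_SIZE 65536	/* RAW3215_BUFFER_SIZE, a power of two */

int main(void)
{
	int start = 65530;	/* request begins near the end... */
	int ix = 10;		/* ...and the head has wrapped around */

	int len = ((ix - 1 - start) & (BUF_SIZE - 1)) + 1;
	printf("len=%d\n", len);	/* 16: 6 bytes + 10 bytes */
	return 0;
}
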
+
+/*
+ * Start a read or a write request
+ */
+static void raw3215_start_io(struct raw3215_info *raw)
+{
+	struct raw3215_req *req;
+	int res;
+
+	req = raw->queued_read;
+	if (req != NULL &&
+	    !(raw->flags & (RAW3215_WORKING | RAW3215_THROTTLED))) {
+		/* dequeue request */
+		raw->queued_read = NULL;
+		res = ccw_device_start(raw->cdev, req->ccws,
+				       (unsigned long) req, 0, 0);
+		if (res != 0) {
+			/* do_IO failed, put request back to queue */
+			raw->queued_read = req;
+		} else {
+			raw->flags |= RAW3215_WORKING;
+		}
+	}
+	req = raw->queued_write;
+	if (req != NULL &&
+	    !(raw->flags & (RAW3215_WORKING | RAW3215_STOPPED))) {
+		/* dequeue request */
+		raw->queued_write = NULL;
+		res = ccw_device_start(raw->cdev, req->ccws,
+				       (unsigned long) req, 0, 0);
+		if (res != 0) {
+			/* do_IO failed, put request back to queue */
+			raw->queued_write = req;
+		} else {
+			raw->flags |= RAW3215_WORKING;
+		}
+	}
+}
+
+/*
+ * Function to start a delayed output after RAW3215_TIMEOUT jiffies (1/10 second)
+ */
+static void raw3215_timeout(struct timer_list *t)
+{
+	struct raw3215_info *raw = from_timer(raw, t, timer);
+	unsigned long flags;
+
+	spin_lock_irqsave(get_ccwdev_lock(raw->cdev), flags);
+	raw->flags &= ~RAW3215_TIMER_RUNS;
+	if (!tty_port_suspended(&raw->port)) {
+		raw3215_mk_write_req(raw);
+		raw3215_start_io(raw);
+		if ((raw->queued_read || raw->queued_write) &&
+		    !(raw->flags & RAW3215_WORKING) &&
+		    !(raw->flags & RAW3215_TIMER_RUNS)) {
+			raw->timer.expires = RAW3215_TIMEOUT + jiffies;
+			add_timer(&raw->timer);
+			raw->flags |= RAW3215_TIMER_RUNS;
+		}
+	}
+	spin_unlock_irqrestore(get_ccwdev_lock(raw->cdev), flags);
+}
+
+/*
+ * Function to conditionally start an IO. A read is started immediately,
+ * a write is only started immediately if the flush flag is on or the
+ * amount of data is bigger than RAW3215_MIN_WRITE. If a write is not
+ * done immediately a timer is started with a delay of RAW3215_TIMEOUT.
+ */
+static inline void raw3215_try_io(struct raw3215_info *raw)
+{
+	if (!tty_port_initialized(&raw->port) || tty_port_suspended(&raw->port))
+		return;
+	if (raw->queued_read != NULL)
+		raw3215_start_io(raw);
+	else if (raw->queued_write != NULL) {
+		if ((raw->queued_write->delayable == 0) ||
+		    (raw->flags & RAW3215_FLUSHING)) {
+			/* execute write requests bigger than minimum size */
+			raw3215_start_io(raw);
+		}
+	}
+	if ((raw->queued_read || raw->queued_write) &&
+	    !(raw->flags & RAW3215_WORKING) &&
+	    !(raw->flags & RAW3215_TIMER_RUNS)) {
+		raw->timer.expires = RAW3215_TIMEOUT + jiffies;
+		add_timer(&raw->timer);
+		raw->flags |= RAW3215_TIMER_RUNS;
+	}
+}
+
+/*
+ * Call tty_wakeup from tasklet context
+ */
+static void raw3215_wakeup(unsigned long data)
+{
+	struct raw3215_info *raw = (struct raw3215_info *) data;
+	struct tty_struct *tty;
+
+	tty = tty_port_tty_get(&raw->port);
+	if (tty) {
+		tty_wakeup(tty);
+		tty_kref_put(tty);
+	}
+}
+
+/*
+ * Try to start the next IO and wake up processes waiting on the tty.
+ */
+static void raw3215_next_io(struct raw3215_info *raw, struct tty_struct *tty)
+{
+	raw3215_mk_write_req(raw);
+	raw3215_try_io(raw);
+	if (tty && RAW3215_BUFFER_SIZE - raw->count >= RAW3215_MIN_SPACE)
+		tasklet_schedule(&raw->tlet);
+}
+
+/*
+ * Interrupt routine, called from common io layer
+ */
+static void raw3215_irq(struct ccw_device *cdev, unsigned long intparm,
+			struct irb *irb)
+{
+	struct raw3215_info *raw;
+	struct raw3215_req *req;
+	struct tty_struct *tty;
+	int cstat, dstat;
+	int count;
+
+	raw = dev_get_drvdata(&cdev->dev);
+	req = (struct raw3215_req *) intparm;
+	tty = tty_port_tty_get(&raw->port);
+	cstat = irb->scsw.cmd.cstat;
+	dstat = irb->scsw.cmd.dstat;
+	if (cstat != 0)
+		raw3215_next_io(raw, tty);
+	if (dstat & 0x01) { /* we got a unit exception */
+		dstat &= ~0x01;	 /* we can ignore it */
+	}
+	switch (dstat) {
+	case 0x80:
+		if (cstat != 0)
+			break;
+		/* Attention interrupt, someone hit the enter key */
+		raw3215_mk_read_req(raw);
+		raw3215_next_io(raw, tty);
+		break;
+	case 0x08:
+	case 0x0C:
+		/* Channel end interrupt. */
+		if ((raw = req->info) == NULL)
+			goto put_tty;	     /* That shouldn't happen ... */
+		if (req->type == RAW3215_READ) {
+			/* store residual count, then wait for device end */
+			req->residual = irb->scsw.cmd.count;
+		}
+		if (dstat == 0x08)
+			break;
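+		/* fall through - dstat 0x0c means channel end and device end */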
+	case 0x04:
+		/* Device end interrupt. */
+		if ((raw = req->info) == NULL)
+			goto put_tty;	     /* That shouldn't happen ... */
+		if (req->type == RAW3215_READ && tty != NULL) {
+			unsigned int cchar;
+
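+			/* the read request transfers at most 160 bytes; the
+			 * residual count yields the number actually read */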
+			count = 160 - req->residual;
+			EBCASC(raw->inbuf, count);
+			cchar = ctrlchar_handle(raw->inbuf, count, tty);
+			switch (cchar & CTRLCHAR_MASK) {
+			case CTRLCHAR_SYSRQ:
+				break;
+
+			case CTRLCHAR_CTRL:
+				tty_insert_flip_char(&raw->port, cchar,
+						TTY_NORMAL);
+				tty_flip_buffer_push(&raw->port);
+				break;
+
+			case CTRLCHAR_NONE:
+				if (count < 2 ||
+				    (strncmp(raw->inbuf+count-2, "\252n", 2) &&
+				     strncmp(raw->inbuf+count-2, "^n", 2)) ) {
+					/* add the auto \n */
+					raw->inbuf[count] = '\n';
+					count++;
+				} else
+					count -= 2;
+				tty_insert_flip_string(&raw->port, raw->inbuf,
+						count);
+				tty_flip_buffer_push(&raw->port);
+				break;
+			}
+		} else if (req->type == RAW3215_WRITE) {
+			raw->count -= req->len;
+			raw->written -= req->len;
+		}
+		raw->flags &= ~RAW3215_WORKING;
+		raw3215_free_req(req);
+		/* check for empty wait */
+		if (waitqueue_active(&raw->empty_wait) &&
+		    raw->queued_write == NULL &&
+		    raw->queued_read == NULL) {
+			wake_up_interruptible(&raw->empty_wait);
+		}
+		raw3215_next_io(raw, tty);
+		break;
+	default:
+		/* Strange interrupt, I'll do my best to clean up */
+		if (req != NULL && req->type != RAW3215_FREE) {
+			if (req->type == RAW3215_WRITE) {
+				raw->count -= req->len;
+				raw->written -= req->len;
+			}
+			raw->flags &= ~RAW3215_WORKING;
+			raw3215_free_req(req);
+		}
+		raw3215_next_io(raw, tty);
+	}
+put_tty:
+	tty_kref_put(tty);
+}
+
+/*
+ * Drop the oldest line from the output buffer.
+ */
+static void raw3215_drop_line(struct raw3215_info *raw)
+{
+	int ix;
+	char ch;
+
+	BUG_ON(raw->written != 0);
+	ix = (raw->head - raw->count) & (RAW3215_BUFFER_SIZE - 1);
+	while (raw->count > 0) {
+		ch = raw->buffer[ix];
+		ix = (ix + 1) & (RAW3215_BUFFER_SIZE - 1);
+		raw->count--;
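+		/* 0x15 is the EBCDIC "new line" character */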
+		if (ch == 0x15)
+			break;
+	}
+	raw->head = ix;
+}
+
+/*
+ * Wait until length bytes are available in the output buffer.
+ * Has to be called with the ccw device lock held. May be called
+ * with interrupts disabled.
+ */
+static void raw3215_make_room(struct raw3215_info *raw, unsigned int length)
+{
+	while (RAW3215_BUFFER_SIZE - raw->count < length) {
+		/* While the console is frozen for suspend we have no other
+		 * choice but to drop messages from the buffer to make
+		 * room for even more messages. */
+		if (tty_port_suspended(&raw->port)) {
+			raw3215_drop_line(raw);
+			continue;
+		}
+		/* there might be a request pending */
+		raw->flags |= RAW3215_FLUSHING;
+		raw3215_mk_write_req(raw);
+		raw3215_try_io(raw);
+		raw->flags &= ~RAW3215_FLUSHING;
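+		/* the console may run with interrupts disabled, so poll
+		 * the device until the started IO has completed */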
+#ifdef CONFIG_TN3215_CONSOLE
+		ccw_device_wait_idle(raw->cdev);
+#endif
+		/* Enough room freed up ? */
+		if (RAW3215_BUFFER_SIZE - raw->count >= length)
+			break;
+		/* there might be another cpu waiting for the lock */
+		spin_unlock(get_ccwdev_lock(raw->cdev));
+		udelay(100);
+		spin_lock(get_ccwdev_lock(raw->cdev));
+	}
+}
+
+/*
+ * String write routine for 3215 devices
+ */
+static void raw3215_write(struct raw3215_info *raw, const char *str,
+			  unsigned int length)
+{
+	unsigned long flags;
+	int c, count;
+
+	while (length > 0) {
+		spin_lock_irqsave(get_ccwdev_lock(raw->cdev), flags);
+		count = (length > RAW3215_BUFFER_SIZE) ?
+					     RAW3215_BUFFER_SIZE : length;
+		length -= count;
+
+		raw3215_make_room(raw, count);
+
+		/* copy string to output buffer and convert it to EBCDIC */
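+		/* the buffer may wrap around, so copy in at most two chunks */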
+		while (1) {
+			c = min_t(int, count,
+				  min(RAW3215_BUFFER_SIZE - raw->count,
+				      RAW3215_BUFFER_SIZE - raw->head));
+			if (c <= 0)
+				break;
+			memcpy(raw->buffer + raw->head, str, c);
+			ASCEBC(raw->buffer + raw->head, c);
+			raw->head = (raw->head + c) & (RAW3215_BUFFER_SIZE - 1);
+			raw->count += c;
+			raw->line_pos += c;
+			str += c;
+			count -= c;
+		}
+		if (!(raw->flags & RAW3215_WORKING)) {
+			raw3215_mk_write_req(raw);
+			/* start or queue request */
+			raw3215_try_io(raw);
+		}
+		spin_unlock_irqrestore(get_ccwdev_lock(raw->cdev), flags);
+	}
+}
+
+/*
+ * Put character routine for 3215 devices
+ */
+static void raw3215_putchar(struct raw3215_info *raw, unsigned char ch)
+{
+	unsigned long flags;
+	unsigned int length, i;
+
+	spin_lock_irqsave(get_ccwdev_lock(raw->cdev), flags);
+	if (ch == '\t') {
+		length = TAB_STOP_SIZE - (raw->line_pos%TAB_STOP_SIZE);
+		raw->line_pos += length;
+		ch = ' ';
+	} else if (ch == '\n') {
+		length = 1;
+		raw->line_pos = 0;
+	} else {
+		length = 1;
+		raw->line_pos++;
+	}
+	raw3215_make_room(raw, length);
+
+	for (i = 0; i < length; i++) {
+		raw->buffer[raw->head] = (char) _ascebc[(int) ch];
+		raw->head = (raw->head + 1) & (RAW3215_BUFFER_SIZE - 1);
+		raw->count++;
+	}
+	if (!(raw->flags & RAW3215_WORKING)) {
+		raw3215_mk_write_req(raw);
+		/* start or queue request */
+		raw3215_try_io(raw);
+	}
+	spin_unlock_irqrestore(get_ccwdev_lock(raw->cdev), flags);
+}
+
+/*
+ * Flush routine: set the flush flag and try to start pending IO.
+ */
+static void raw3215_flush_buffer(struct raw3215_info *raw)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(get_ccwdev_lock(raw->cdev), flags);
+	if (raw->count > 0) {
+		raw->flags |= RAW3215_FLUSHING;
+		raw3215_try_io(raw);
+		raw->flags &= ~RAW3215_FLUSHING;
+	}
+	spin_unlock_irqrestore(get_ccwdev_lock(raw->cdev), flags);
+}
+
+/*
+ * Fire up a 3215 device.
+ */
+static int raw3215_startup(struct raw3215_info *raw)
+{
+	unsigned long flags;
+
+	if (tty_port_initialized(&raw->port))
+		return 0;
+	raw->line_pos = 0;
+	tty_port_set_initialized(&raw->port, 1);
+	spin_lock_irqsave(get_ccwdev_lock(raw->cdev), flags);
+	raw3215_try_io(raw);
+	spin_unlock_irqrestore(get_ccwdev_lock(raw->cdev), flags);
+
+	return 0;
+}
+
+/*
+ * Shutdown a 3215 device.
+ */
+static void raw3215_shutdown(struct raw3215_info *raw)
+{
+	DECLARE_WAITQUEUE(wait, current);
+	unsigned long flags;
+
+	if (!tty_port_initialized(&raw->port) || (raw->flags & RAW3215_FIXED))
+		return;
+	/* Wait for outstanding requests to complete */
+	spin_lock_irqsave(get_ccwdev_lock(raw->cdev), flags);
+	if ((raw->flags & RAW3215_WORKING) ||
+	    raw->queued_write != NULL ||
+	    raw->queued_read != NULL) {
+		add_wait_queue(&raw->empty_wait, &wait);
+		set_current_state(TASK_INTERRUPTIBLE);
+		spin_unlock_irqrestore(get_ccwdev_lock(raw->cdev), flags);
+		schedule();
+		spin_lock_irqsave(get_ccwdev_lock(raw->cdev), flags);
+		remove_wait_queue(&raw->empty_wait, &wait);
+		set_current_state(TASK_RUNNING);
+		tty_port_set_initialized(&raw->port, 1);
+	}
+	spin_unlock_irqrestore(get_ccwdev_lock(raw->cdev), flags);
+}
+
+static struct raw3215_info *raw3215_alloc_info(void)
+{
+	struct raw3215_info *info;
+
+	info = kzalloc(sizeof(struct raw3215_info), GFP_KERNEL | GFP_DMA);
+	if (!info)
+		return NULL;
+
+	info->buffer = kzalloc(RAW3215_BUFFER_SIZE, GFP_KERNEL | GFP_DMA);
+	info->inbuf = kzalloc(RAW3215_INBUF_SIZE, GFP_KERNEL | GFP_DMA);
+	if (!info->buffer || !info->inbuf) {
+		kfree(info->inbuf);
+		kfree(info->buffer);
+		kfree(info);
+		return NULL;
+	}
+
+	timer_setup(&info->timer, raw3215_timeout, 0);
+	init_waitqueue_head(&info->empty_wait);
+	tasklet_init(&info->tlet, raw3215_wakeup, (unsigned long)info);
+	tty_port_init(&info->port);
+
+	return info;
+}
+
+static void raw3215_free_info(struct raw3215_info *raw)
+{
+	kfree(raw->inbuf);
+	kfree(raw->buffer);
+	tty_port_destroy(&raw->port);
+	kfree(raw);
+}
+
+static int raw3215_probe (struct ccw_device *cdev)
+{
+	struct raw3215_info *raw;
+	int line;
+
+	/* Console is special. */
+	if (raw3215[0] && (raw3215[0] == dev_get_drvdata(&cdev->dev)))
+		return 0;
+
+	raw = raw3215_alloc_info();
+	if (raw == NULL)
+		return -ENOMEM;
+
+	raw->cdev = cdev;
+	dev_set_drvdata(&cdev->dev, raw);
+	cdev->handler = raw3215_irq;
+
+	spin_lock(&raw3215_device_lock);
+	for (line = 0; line < NR_3215; line++) {
+		if (!raw3215[line]) {
+			raw3215[line] = raw;
+			break;
+		}
+	}
+	spin_unlock(&raw3215_device_lock);
+	if (line == NR_3215) {
+		raw3215_free_info(raw);
+		return -ENODEV;
+	}
+
+	return 0;
+}
+
+static void raw3215_remove (struct ccw_device *cdev)
+{
+	struct raw3215_info *raw;
+	unsigned int line;
+
+	ccw_device_set_offline(cdev);
+	raw = dev_get_drvdata(&cdev->dev);
+	if (raw) {
+		spin_lock(&raw3215_device_lock);
+		for (line = 0; line < NR_3215; line++)
+			if (raw3215[line] == raw)
+				break;
+		raw3215[line] = NULL;
+		spin_unlock(&raw3215_device_lock);
+		dev_set_drvdata(&cdev->dev, NULL);
+		raw3215_free_info(raw);
+	}
+}
+
+static int raw3215_set_online (struct ccw_device *cdev)
+{
+	struct raw3215_info *raw;
+
+	raw = dev_get_drvdata(&cdev->dev);
+	if (!raw)
+		return -ENODEV;
+
+	return raw3215_startup(raw);
+}
+
+static int raw3215_set_offline (struct ccw_device *cdev)
+{
+	struct raw3215_info *raw;
+
+	raw = dev_get_drvdata(&cdev->dev);
+	if (!raw)
+		return -ENODEV;
+
+	raw3215_shutdown(raw);
+
+	return 0;
+}
+
+static int raw3215_pm_stop(struct ccw_device *cdev)
+{
+	struct raw3215_info *raw;
+	unsigned long flags;
+
+	/* Empty the output buffer, then prevent new I/O. */
+	raw = dev_get_drvdata(&cdev->dev);
+	spin_lock_irqsave(get_ccwdev_lock(raw->cdev), flags);
+	raw3215_make_room(raw, RAW3215_BUFFER_SIZE);
+	tty_port_set_suspended(&raw->port, 1);
+	spin_unlock_irqrestore(get_ccwdev_lock(raw->cdev), flags);
+	return 0;
+}
+
+static int raw3215_pm_start(struct ccw_device *cdev)
+{
+	struct raw3215_info *raw;
+	unsigned long flags;
+
+	/* Allow I/O again and flush output buffer. */
+	raw = dev_get_drvdata(&cdev->dev);
+	spin_lock_irqsave(get_ccwdev_lock(raw->cdev), flags);
+	tty_port_set_suspended(&raw->port, 0);
+	raw->flags |= RAW3215_FLUSHING;
+	raw3215_try_io(raw);
+	raw->flags &= ~RAW3215_FLUSHING;
+	spin_unlock_irqrestore(get_ccwdev_lock(raw->cdev), flags);
+	return 0;
+}
+
+static struct ccw_device_id raw3215_id[] = {
+	{ CCW_DEVICE(0x3215, 0) },
+	{ /* end of list */ },
+};
+
+static struct ccw_driver raw3215_ccw_driver = {
+	.driver = {
+		.name	= "3215",
+		.owner	= THIS_MODULE,
+	},
+	.ids		= raw3215_id,
+	.probe		= &raw3215_probe,
+	.remove		= &raw3215_remove,
+	.set_online	= &raw3215_set_online,
+	.set_offline	= &raw3215_set_offline,
+	.freeze		= &raw3215_pm_stop,
+	.thaw		= &raw3215_pm_start,
+	.restore	= &raw3215_pm_start,
+	.int_class	= IRQIO_C15,
+};
+
+#ifdef CONFIG_TN3215_CONSOLE
+/*
+ * Write a string to the 3215 console
+ */
+static void con3215_write(struct console *co, const char *str,
+			  unsigned int count)
+{
+	struct raw3215_info *raw;
+	int i;
+
+	if (count <= 0)
+		return;
+	raw = raw3215[0];	/* console 3215 is the first one */
+	while (count > 0) {
+		for (i = 0; i < count; i++)
+			if (str[i] == '\t' || str[i] == '\n')
+				break;
+		raw3215_write(raw, str, i);
+		count -= i;
+		str += i;
+		if (count > 0) {
+			raw3215_putchar(raw, *str);
+			count--;
+			str++;
+		}
+	}
+}
+
+static struct tty_driver *con3215_device(struct console *c, int *index)
+{
+	*index = c->index;
+	return tty3215_driver;
+}
+
+/*
+ * panic() calls con3215_flush through a panic_notifier
+ * before the system enters a disabled, endless loop.
+ */
+static void con3215_flush(void)
+{
+	struct raw3215_info *raw;
+	unsigned long flags;
+
+	raw = raw3215[0];  /* console 3215 is the first one */
+	if (tty_port_suspended(&raw->port))
+		/* The console is still frozen for suspend. */
+		if (ccw_device_force_console(raw->cdev))
+			/* Forcing didn't work, no panic message .. */
+			return;
+	spin_lock_irqsave(get_ccwdev_lock(raw->cdev), flags);
+	raw3215_make_room(raw, RAW3215_BUFFER_SIZE);
+	spin_unlock_irqrestore(get_ccwdev_lock(raw->cdev), flags);
+}
+
+static int con3215_notify(struct notifier_block *self,
+			  unsigned long event, void *data)
+{
+	con3215_flush();
+	return NOTIFY_OK;
+}
+
+static struct notifier_block on_panic_nb = {
+	.notifier_call = con3215_notify,
+	.priority = 0,
+};
+
+static struct notifier_block on_reboot_nb = {
+	.notifier_call = con3215_notify,
+	.priority = 0,
+};
+
+/*
+ *  The console structure for the 3215 console
+ */
+static struct console con3215 = {
+	.name	 = "ttyS",
+	.write	 = con3215_write,
+	.device	 = con3215_device,
+	.flags	 = CON_PRINTBUFFER,
+};
+
+/*
+ * 3215 console initialization code called from console_init().
+ */
+static int __init con3215_init(void)
+{
+	struct ccw_device *cdev;
+	struct raw3215_info *raw;
+	struct raw3215_req *req;
+	int i;
+
+	/* Check if 3215 is to be the console */
+	if (!CONSOLE_IS_3215)
+		return -ENODEV;
+
+	/* Set the console mode for VM */
+	if (MACHINE_IS_VM) {
+		cpcmd("TERM CONMODE 3215", NULL, 0, NULL);
+		cpcmd("TERM AUTOCR OFF", NULL, 0, NULL);
+	}
+
+	/* allocate 3215 request structures */
+	raw3215_freelist = NULL;
+	spin_lock_init(&raw3215_freelist_lock);
+	for (i = 0; i < NR_3215_REQ; i++) {
+		req = kzalloc(sizeof(struct raw3215_req), GFP_KERNEL | GFP_DMA);
+		if (!req)
+			return -ENOMEM;
+		req->next = raw3215_freelist;
+		raw3215_freelist = req;
+	}
+
+	cdev = ccw_device_create_console(&raw3215_ccw_driver);
+	if (IS_ERR(cdev))
+		return -ENODEV;
+
+	raw3215[0] = raw = raw3215_alloc_info();
+	raw->cdev = cdev;
+	dev_set_drvdata(&cdev->dev, raw);
+	cdev->handler = raw3215_irq;
+
+	raw->flags |= RAW3215_FIXED;
+	if (ccw_device_enable_console(cdev)) {
+		ccw_device_destroy_console(cdev);
+		raw3215_free_info(raw);
+		raw3215[0] = NULL;
+		return -ENODEV;
+	}
+
+	/* Request the console irq */
+	if (raw3215_startup(raw) != 0) {
+		raw3215_free_info(raw);
+		raw3215[0] = NULL;
+		return -ENODEV;
+	}
+	atomic_notifier_chain_register(&panic_notifier_list, &on_panic_nb);
+	register_reboot_notifier(&on_reboot_nb);
+	register_console(&con3215);
+	return 0;
+}
+console_initcall(con3215_init);
+#endif
+
+static int tty3215_install(struct tty_driver *driver, struct tty_struct *tty)
+{
+	struct raw3215_info *raw;
+
+	raw = raw3215[tty->index];
+	if (raw == NULL)
+		return -ENODEV;
+
+	tty->driver_data = raw;
+
+	return tty_port_install(&raw->port, driver, tty);
+}
+
+/*
+ * tty3215_open
+ *
+ * This routine is called whenever a 3215 tty is opened.
+ */
+static int tty3215_open(struct tty_struct *tty, struct file * filp)
+{
+	struct raw3215_info *raw = tty->driver_data;
+	int retval;
+
+	tty_port_tty_set(&raw->port, tty);
+
+	raw->port.low_latency = 0; /* don't use bottom half for pushing chars */
+	/*
+	 * Start up 3215 device
+	 */
+	retval = raw3215_startup(raw);
+	if (retval)
+		return retval;
+
+	return 0;
+}
+
+/*
+ * tty3215_close()
+ *
+ * This routine is called when the 3215 tty is closed. We wait
+ * for the remaining request to be completed. Then we clean up.
+ */
+static void tty3215_close(struct tty_struct *tty, struct file * filp)
+{
+	struct raw3215_info *raw;
+
+	raw = (struct raw3215_info *) tty->driver_data;
+	if (raw == NULL || tty->count > 1)
+		return;
+	tty->closing = 1;
+	/* Shutdown the terminal */
+	raw3215_shutdown(raw);
+	tasklet_kill(&raw->tlet);
+	tty->closing = 0;
+	tty_port_tty_set(&raw->port, NULL);
+}
+
+/*
+ * Returns the amount of free space in the output buffer.
+ */
+static int tty3215_write_room(struct tty_struct *tty)
+{
+	struct raw3215_info *raw;
+
+	raw = (struct raw3215_info *) tty->driver_data;
+
+	/* Subtract TAB_STOP_SIZE to always leave room for a full tab;
+	 * 8 bytes are negligible compared to the 64K buffer. */
+	if ((RAW3215_BUFFER_SIZE - raw->count - TAB_STOP_SIZE) >= 0)
+		return RAW3215_BUFFER_SIZE - raw->count - TAB_STOP_SIZE;
+	else
+		return 0;
+}
+
+/*
+ * String write routine for 3215 ttys
+ */
+static int tty3215_write(struct tty_struct * tty,
+			 const unsigned char *buf, int count)
+{
+	struct raw3215_info *raw;
+	int i, written;
+
+	if (!tty)
+		return 0;
+	raw = (struct raw3215_info *) tty->driver_data;
+	written = count;
+	while (count > 0) {
+		for (i = 0; i < count; i++)
+			if (buf[i] == '\t' || buf[i] == '\n')
+				break;
+		raw3215_write(raw, buf, i);
+		count -= i;
+		buf += i;
+		if (count > 0) {
+			raw3215_putchar(raw, *buf);
+			count--;
+			buf++;
+		}
+	}
+	return written;
+}
+
+/*
+ * Put character routine for 3215 ttys
+ */
+static int tty3215_put_char(struct tty_struct *tty, unsigned char ch)
+{
+	struct raw3215_info *raw;
+
+	if (!tty)
+		return 0;
+	raw = (struct raw3215_info *) tty->driver_data;
+	raw3215_putchar(raw, ch);
+	return 1;
+}
+
+static void tty3215_flush_chars(struct tty_struct *tty)
+{
+}
+
+/*
+ * Returns the number of characters in the output buffer
+ */
+static int tty3215_chars_in_buffer(struct tty_struct *tty)
+{
+	struct raw3215_info *raw;
+
+	raw = (struct raw3215_info *) tty->driver_data;
+	return raw->count;
+}
+
+static void tty3215_flush_buffer(struct tty_struct *tty)
+{
+	struct raw3215_info *raw;
+
+	raw = (struct raw3215_info *) tty->driver_data;
+	raw3215_flush_buffer(raw);
+	tty_wakeup(tty);
+}
+
+/*
+ * Disable reading from a 3215 tty
+ */
+static void tty3215_throttle(struct tty_struct * tty)
+{
+	struct raw3215_info *raw;
+
+	raw = (struct raw3215_info *) tty->driver_data;
+	raw->flags |= RAW3215_THROTTLED;
+}
+
+/*
+ * Enable reading from a 3215 tty
+ */
+static void tty3215_unthrottle(struct tty_struct * tty)
+{
+	struct raw3215_info *raw;
+	unsigned long flags;
+
+	raw = (struct raw3215_info *) tty->driver_data;
+	if (raw->flags & RAW3215_THROTTLED) {
+		spin_lock_irqsave(get_ccwdev_lock(raw->cdev), flags);
+		raw->flags &= ~RAW3215_THROTTLED;
+		raw3215_try_io(raw);
+		spin_unlock_irqrestore(get_ccwdev_lock(raw->cdev), flags);
+	}
+}
+
+/*
+ * Disable writing to a 3215 tty
+ */
+static void tty3215_stop(struct tty_struct *tty)
+{
+	struct raw3215_info *raw;
+
+	raw = (struct raw3215_info *) tty->driver_data;
+	raw->flags |= RAW3215_STOPPED;
+}
+
+/*
+ * Enable writing to a 3215 tty
+ */
+static void tty3215_start(struct tty_struct *tty)
+{
+	struct raw3215_info *raw;
+	unsigned long flags;
+
+	raw = (struct raw3215_info *) tty->driver_data;
+	if (raw->flags & RAW3215_STOPPED) {
+		spin_lock_irqsave(get_ccwdev_lock(raw->cdev), flags);
+		raw->flags &= ~RAW3215_STOPPED;
+		raw3215_try_io(raw);
+		spin_unlock_irqrestore(get_ccwdev_lock(raw->cdev), flags);
+	}
+}
+
+static const struct tty_operations tty3215_ops = {
+	.install = tty3215_install,
+	.open = tty3215_open,
+	.close = tty3215_close,
+	.write = tty3215_write,
+	.put_char = tty3215_put_char,
+	.flush_chars = tty3215_flush_chars,
+	.write_room = tty3215_write_room,
+	.chars_in_buffer = tty3215_chars_in_buffer,
+	.flush_buffer = tty3215_flush_buffer,
+	.throttle = tty3215_throttle,
+	.unthrottle = tty3215_unthrottle,
+	.stop = tty3215_stop,
+	.start = tty3215_start,
+};
+
+/*
+ * 3215 tty registration code called from tty_init().
+ * Most kernel services (incl. kmalloc) are available at this point.
+ */
+static int __init tty3215_init(void)
+{
+	struct tty_driver *driver;
+	int ret;
+
+	if (!CONSOLE_IS_3215)
+		return 0;
+
+	driver = alloc_tty_driver(NR_3215);
+	if (!driver)
+		return -ENOMEM;
+
+	ret = ccw_driver_register(&raw3215_ccw_driver);
+	if (ret) {
+		put_tty_driver(driver);
+		return ret;
+	}
+	/*
+	 * Initialize the tty_driver structure
+	 * Entries in tty3215_driver that are NOT initialized:
+	 * proc_entry, set_termios, flush_buffer, set_ldisc, write_proc
+	 */
+
+	driver->driver_name = "tty3215";
+	driver->name = "ttyS";
+	driver->major = TTY_MAJOR;
+	driver->minor_start = 64;
+	driver->type = TTY_DRIVER_TYPE_SYSTEM;
+	driver->subtype = SYSTEM_TYPE_TTY;
+	driver->init_termios = tty_std_termios;
+	driver->init_termios.c_iflag = IGNBRK | IGNPAR;
+	driver->init_termios.c_oflag = ONLCR;
+	driver->init_termios.c_lflag = ISIG;
+	driver->flags = TTY_DRIVER_REAL_RAW;
+	tty_set_operations(driver, &tty3215_ops);
+	ret = tty_register_driver(driver);
+	if (ret) {
+		put_tty_driver(driver);
+		return ret;
+	}
+	tty3215_driver = driver;
+	return 0;
+}
+device_initcall(tty3215_init);
diff --git a/drivers/s390/char/con3270.c b/drivers/s390/char/con3270.c
new file mode 100644
index 0000000..fd2146b
--- /dev/null
+++ b/drivers/s390/char/con3270.c
@@ -0,0 +1,649 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * IBM/3270 Driver - console view.
+ *
+ * Author(s):
+ *   Original 3270 Code for 2.4 written by Richard Hitt (UTS Global)
+ *   Rewritten for 2.5 by Martin Schwidefsky <schwidefsky@de.ibm.com>
+ *     Copyright IBM Corp. 2003, 2009
+ */
+
+#include <linux/module.h>
+#include <linux/console.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/list.h>
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <linux/reboot.h>
+
+#include <asm/ccwdev.h>
+#include <asm/cio.h>
+#include <asm/cpcmd.h>
+#include <asm/ebcdic.h>
+
+#include "raw3270.h"
+#include "tty3270.h"
+#include "ctrlchar.h"
+
+#define CON3270_OUTPUT_BUFFER_SIZE 1024
+#define CON3270_STRING_PAGES 4
+
+static struct raw3270_fn con3270_fn;
+
+static bool auto_update = true;
+module_param(auto_update, bool, 0);
+
+/*
+ * Main 3270 console view data structure.
+ */
+struct con3270 {
+	struct raw3270_view view;
+	struct list_head freemem;	/* list of free memory for strings. */
+
+	/* Output stuff. */
+	struct list_head lines;		/* list of lines. */
+	struct list_head update;	/* list of lines to update. */
+	int line_nr;			/* line number for next update. */
+	int nr_lines;			/* # lines in list. */
+	int nr_up;			/* # lines up in history. */
+	unsigned long update_flags;	/* Update indication bits. */
+	struct string *cline;		/* current output line. */
+	struct string *status;		/* last line of display. */
+	struct raw3270_request *write;	/* single write request. */
+	struct timer_list timer;
+
+	/* Input stuff. */
+	struct string *input;		/* input string for read request. */
+	struct raw3270_request *read;	/* single read request. */
+	struct raw3270_request *kreset;	/* single keyboard reset request. */
+	struct tasklet_struct readlet;	/* tasklet to issue read request. */
+};
+
+static struct con3270 *condev;
+
+/* con3270->update_flags. See con3270_update for details. */
+#define CON_UPDATE_ERASE	1	/* Use EWRITEA instead of WRITE. */
+#define CON_UPDATE_LIST		2	/* Update lines in con3270->update. */
+#define CON_UPDATE_STATUS	4	/* Update status line. */
+#define CON_UPDATE_ALL		8	/* Recreate screen. */
+
+static void con3270_update(struct timer_list *);
+
+/*
+ * Setup timeout for a device. On timeout trigger an update.
+ */
+static void con3270_set_timer(struct con3270 *cp, int expires)
+{
+	if (expires == 0)
+		del_timer(&cp->timer);
+	else
+		mod_timer(&cp->timer, jiffies + expires);
+}
+
+/*
+ * The status line is the last line of the screen. It shows the string
+ * "console view" in the lower left corner and "Running" or "History"
+ * in the lower right corner of the screen.
+ */
+static void
+con3270_update_status(struct con3270 *cp)
+{
+	char *str;
+
+	str = (cp->nr_up != 0) ? "History" : "Running";
+	memcpy(cp->status->string + 24, str, 7);
+	codepage_convert(cp->view.ascebc, cp->status->string + 24, 7);
+	cp->update_flags |= CON_UPDATE_STATUS;
+}
+
+static void
+con3270_create_status(struct con3270 *cp)
+{
+	static const unsigned char blueprint[] =
+		{ TO_SBA, 0, 0, TO_SF,TF_LOG,TO_SA,TAT_COLOR, TAC_GREEN,
+		  'c','o','n','s','o','l','e',' ','v','i','e','w',
+		  TO_RA,0,0,0,'R','u','n','n','i','n','g',TO_SF,TF_LOG };
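+	/*
+	 * Layout of the blueprint: bytes 1-2 take the SBA buffer address,
+	 * bytes 21-22 the TO_RA buffer address; the texts at offset 8
+	 * ("console view") and offset 24 ("Running") are converted to
+	 * EBCDIC below.
+	 */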
+
+	cp->status = alloc_string(&cp->freemem, sizeof(blueprint));
+	/* Copy blueprint to status line */
+	memcpy(cp->status->string, blueprint, sizeof(blueprint));
+	/* Set TO_RA addresses. */
+	raw3270_buffer_address(cp->view.dev, cp->status->string + 1,
+			       cp->view.cols * (cp->view.rows - 1));
+	raw3270_buffer_address(cp->view.dev, cp->status->string + 21,
+			       cp->view.cols * cp->view.rows - 8);
+	/* Convert strings to ebcdic. */
+	codepage_convert(cp->view.ascebc, cp->status->string + 8, 12);
+	codepage_convert(cp->view.ascebc, cp->status->string + 24, 7);
+}
+
+/*
+ * Set output offsets to 3270 datastream fragment of a console string.
+ */
+static void
+con3270_update_string(struct con3270 *cp, struct string *s, int nr)
+{
+	if (s->len < 4) {
+		/* This indicates a bug, but printing a warning would
+		 * cause a deadlock. */
+		return;
+	}
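+	/* a finished line ends with TO_RA, a two byte buffer address
+	 * and a fill byte; patch the address for the given screen line */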
+	if (s->string[s->len - 4] != TO_RA)
+		return;
+	raw3270_buffer_address(cp->view.dev, s->string + s->len - 3,
+			       cp->view.cols * (nr + 1));
+}
+
+/*
+ * Rebuild update list to print all lines.
+ */
+static void
+con3270_rebuild_update(struct con3270 *cp)
+{
+	struct string *s, *n;
+	int nr;
+
+	/*
+	 * Throw away update list and create a new one,
+	 * containing all lines that will fit on the screen.
+	 */
+	list_for_each_entry_safe(s, n, &cp->update, update)
+		list_del_init(&s->update);
+	nr = cp->view.rows - 2 + cp->nr_up;
+	list_for_each_entry_reverse(s, &cp->lines, list) {
+		if (nr < cp->view.rows - 1)
+			list_add(&s->update, &cp->update);
+		if (--nr < 0)
+			break;
+	}
+	cp->line_nr = 0;
+	cp->update_flags |= CON_UPDATE_LIST;
+}
+
+/*
+ * Alloc string for size bytes. Free strings from history if necessary.
+ */
+static struct string *
+con3270_alloc_string(struct con3270 *cp, size_t size)
+{
+	struct string *s, *n;
+
+	s = alloc_string(&cp->freemem, size);
+	if (s)
+		return s;
+	list_for_each_entry_safe(s, n, &cp->lines, list) {
+		list_del(&s->list);
+		if (!list_empty(&s->update))
+			list_del(&s->update);
+		cp->nr_lines--;
+		if (free_string(&cp->freemem, s) >= size)
+			break;
+	}
+	s = alloc_string(&cp->freemem, size);
+	BUG_ON(!s);
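+	/* dropping old lines may leave the history offset past the end;
+	 * clamp it and redraw in that case */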
+	if (cp->nr_up != 0 && cp->nr_up + cp->view.rows > cp->nr_lines) {
+		cp->nr_up = cp->nr_lines - cp->view.rows + 1;
+		con3270_rebuild_update(cp);
+		con3270_update_status(cp);
+	}
+	return s;
+}
+
+/*
+ * Write completion callback.
+ */
+static void
+con3270_write_callback(struct raw3270_request *rq, void *data)
+{
+	raw3270_request_reset(rq);
+	xchg(&((struct con3270 *) rq->view)->write, rq);
+}
+
+/*
+ * Update console display.
+ */
+static void
+con3270_update(struct timer_list *t)
+{
+	struct con3270 *cp = from_timer(cp, t, timer);
+	struct raw3270_request *wrq;
+	char wcc, prolog[6];
+	unsigned long flags;
+	unsigned long updated;
+	struct string *s, *n;
+	int rc;
+
+	if (!auto_update && !raw3270_view_active(&cp->view))
+		return;
+	if (cp->view.dev)
+		raw3270_activate_view(&cp->view);
+
+	wrq = xchg(&cp->write, 0);
+	if (!wrq) {
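+		/* the single write request is still in flight, retry via timer */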
+		con3270_set_timer(cp, 1);
+		return;
+	}
+
+	spin_lock_irqsave(&cp->view.lock, flags);
+	updated = 0;
+	if (cp->update_flags & CON_UPDATE_ALL) {
+		con3270_rebuild_update(cp);
+		con3270_update_status(cp);
+		cp->update_flags = CON_UPDATE_ERASE | CON_UPDATE_LIST |
+			CON_UPDATE_STATUS;
+	}
+	if (cp->update_flags & CON_UPDATE_ERASE) {
+		/* Use erase write alternate to initialize display. */
+		raw3270_request_set_cmd(wrq, TC_EWRITEA);
+		updated |= CON_UPDATE_ERASE;
+	} else
+		raw3270_request_set_cmd(wrq, TC_WRITE);
+
+	wcc = TW_NONE;
+	raw3270_request_add_data(wrq, &wcc, 1);
+
+	/*
+	 * Update status line.
+	 */
+	if (cp->update_flags & CON_UPDATE_STATUS)
+		if (raw3270_request_add_data(wrq, cp->status->string,
+					     cp->status->len) == 0)
+			updated |= CON_UPDATE_STATUS;
+
+	if (cp->update_flags & CON_UPDATE_LIST) {
+		prolog[0] = TO_SBA;
+		prolog[3] = TO_SA;
+		prolog[4] = TAT_COLOR;
+		prolog[5] = TAC_TURQ;
+		raw3270_buffer_address(cp->view.dev, prolog + 1,
+				       cp->view.cols * cp->line_nr);
+		raw3270_request_add_data(wrq, prolog, 6);
+		/* Write strings in the update list to the screen. */
+		list_for_each_entry_safe(s, n, &cp->update, update) {
+			if (s != cp->cline)
+				con3270_update_string(cp, s, cp->line_nr);
+			if (raw3270_request_add_data(wrq, s->string,
+						     s->len) != 0)
+				break;
+			list_del_init(&s->update);
+			if (s != cp->cline)
+				cp->line_nr++;
+		}
+		if (list_empty(&cp->update))
+			updated |= CON_UPDATE_LIST;
+	}
+	wrq->callback = con3270_write_callback;
+	rc = raw3270_start(&cp->view, wrq);
+	if (rc == 0) {
+		cp->update_flags &= ~updated;
+		if (cp->update_flags)
+			con3270_set_timer(cp, 1);
+	} else {
+		raw3270_request_reset(wrq);
+		xchg(&cp->write, wrq);
+	}
+	spin_unlock_irqrestore(&cp->view.lock, flags);
+}
+
+/*
+ * Read tasklet.
+ */
+static void
+con3270_read_tasklet(struct raw3270_request *rrq)
+{
+	static char kreset_data = TW_KR;
+	struct con3270 *cp;
+	unsigned long flags;
+	int nr_up, deactivate;
+
+	cp = (struct con3270 *) rrq->view;
+	spin_lock_irqsave(&cp->view.lock, flags);
+	nr_up = cp->nr_up;
+	deactivate = 0;
+	/* Check aid byte. */
+	switch (cp->input->string[0]) {
+	case 0x7d:	/* enter: jump to bottom. */
+		nr_up = 0;
+		break;
+	case 0xf3:	/* PF3: deactivate the console view. */
+		deactivate = 1;
+		break;
+	case 0x6d:	/* clear: start from scratch. */
+		cp->update_flags = CON_UPDATE_ALL;
+		con3270_set_timer(cp, 1);
+		break;
+	case 0xf7:	/* PF7: do a page up in the console log. */
+		nr_up += cp->view.rows - 2;
+		if (nr_up + cp->view.rows - 1 > cp->nr_lines) {
+			nr_up = cp->nr_lines - cp->view.rows + 1;
+			if (nr_up < 0)
+				nr_up = 0;
+		}
+		break;
+	case 0xf8:	/* PF8: do a page down in the console log. */
+		nr_up -= cp->view.rows - 2;
+		if (nr_up < 0)
+			nr_up = 0;
+		break;
+	}
+	if (nr_up != cp->nr_up) {
+		cp->nr_up = nr_up;
+		con3270_rebuild_update(cp);
+		con3270_update_status(cp);
+		con3270_set_timer(cp, 1);
+	}
+	spin_unlock_irqrestore(&cp->view.lock, flags);
+
+	/* Start keyboard reset command. */
+	raw3270_request_reset(cp->kreset);
+	raw3270_request_set_cmd(cp->kreset, TC_WRITE);
+	raw3270_request_add_data(cp->kreset, &kreset_data, 1);
+	raw3270_start(&cp->view, cp->kreset);
+
+	if (deactivate)
+		raw3270_deactivate_view(&cp->view);
+
+	raw3270_request_reset(rrq);
+	xchg(&cp->read, rrq);
+	raw3270_put_view(&cp->view);
+}
+
+/*
+ * Read request completion callback.
+ */
+static void
+con3270_read_callback(struct raw3270_request *rq, void *data)
+{
+	raw3270_get_view(rq->view);
+	/* Schedule tasklet to pass input to tty. */
+	tasklet_schedule(&((struct con3270 *) rq->view)->readlet);
+}
+
+/*
+ * Issue a read request. Called only from interrupt function.
+ */
+static void
+con3270_issue_read(struct con3270 *cp)
+{
+	struct raw3270_request *rrq;
+	int rc;
+
+	rrq = xchg(&cp->read, 0);
+	if (!rrq)
+		/* Read already scheduled. */
+		return;
+	rrq->callback = con3270_read_callback;
+	rrq->callback_data = cp;
+	raw3270_request_set_cmd(rrq, TC_READMOD);
+	raw3270_request_set_data(rrq, cp->input->string, cp->input->len);
+	/* Issue the read modified request. */
+	rc = raw3270_start_irq(&cp->view, rrq);
+	if (rc)
+		raw3270_request_reset(rrq);
+}
+
+/*
+ * Switch to the console view.
+ */
+static int
+con3270_activate(struct raw3270_view *view)
+{
+	struct con3270 *cp;
+
+	cp = (struct con3270 *) view;
+	cp->update_flags = CON_UPDATE_ALL;
+	con3270_set_timer(cp, 1);
+	return 0;
+}
+
+static void
+con3270_deactivate(struct raw3270_view *view)
+{
+	struct con3270 *cp;
+
+	cp = (struct con3270 *) view;
+	del_timer(&cp->timer);
+}
+
+static void
+con3270_irq(struct con3270 *cp, struct raw3270_request *rq, struct irb *irb)
+{
+	/* Handle ATTN. Schedule tasklet to read aid. */
+	if (irb->scsw.cmd.dstat & DEV_STAT_ATTENTION)
+		con3270_issue_read(cp);
+
+	if (rq) {
+		if (irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK)
+			rq->rc = -EIO;
+		else
+			/* Normal end. Copy residual count. */
+			rq->rescnt = irb->scsw.cmd.count;
+	} else if (irb->scsw.cmd.dstat & DEV_STAT_DEV_END) {
+		/* Interrupt without an outstanding request -> update all */
+		cp->update_flags = CON_UPDATE_ALL;
+		con3270_set_timer(cp, 1);
+	}
+}
+
+/* Console view to a 3270 device. */
+static struct raw3270_fn con3270_fn = {
+	.activate = con3270_activate,
+	.deactivate = con3270_deactivate,
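+	/* the handler takes a struct con3270 * as its view, hence the cast */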
+	.intv = (void *) con3270_irq
+};
+
+static inline void
+con3270_cline_add(struct con3270 *cp)
+{
+	if (!list_empty(&cp->cline->list))
+		/* Already added. */
+		return;
+	list_add_tail(&cp->cline->list, &cp->lines);
+	cp->nr_lines++;
+	con3270_rebuild_update(cp);
+}
+
+static inline void
+con3270_cline_insert(struct con3270 *cp, unsigned char c)
+{
+	cp->cline->string[cp->cline->len++] =
+		cp->view.ascebc[(c < ' ') ? ' ' : c];
+	if (list_empty(&cp->cline->update)) {
+		list_add_tail(&cp->cline->update, &cp->update);
+		cp->update_flags |= CON_UPDATE_LIST;
+	}
+}
+
+static inline void
+con3270_cline_end(struct con3270 *cp)
+{
+	struct string *s;
+	unsigned int size;
+
+	/* Copy cline. */
+	size = (cp->cline->len < cp->view.cols - 5) ?
+		cp->cline->len + 4 : cp->view.cols;
+	s = con3270_alloc_string(cp, size);
+	memcpy(s->string, cp->cline->string, cp->cline->len);
+	if (cp->cline->len < cp->view.cols - 5) {
+		s->string[s->len - 4] = TO_RA;
+		s->string[s->len - 1] = 0;
+	} else {
+		while (--size >= cp->cline->len)
+			s->string[size] = cp->view.ascebc[' '];
+	}
+	/* Replace cline with allocated line s and reset cline. */
+	list_add(&s->list, &cp->cline->list);
+	list_del_init(&cp->cline->list);
+	if (!list_empty(&cp->cline->update)) {
+		list_add(&s->update, &cp->cline->update);
+		list_del_init(&cp->cline->update);
+	}
+	cp->cline->len = 0;
+}
+
+/*
+ * Write a string to the 3270 console
+ */
+static void
+con3270_write(struct console *co, const char *str, unsigned int count)
+{
+	struct con3270 *cp;
+	unsigned long flags;
+	unsigned char c;
+
+	cp = condev;
+	spin_lock_irqsave(&cp->view.lock, flags);
+	while (count-- > 0) {
+		c = *str++;
+		if (cp->cline->len == 0)
+			con3270_cline_add(cp);
+		if (c != '\n')
+			con3270_cline_insert(cp, c);
+		if (c == '\n' || cp->cline->len >= cp->view.cols)
+			con3270_cline_end(cp);
+	}
+	/* Setup timer to output current console buffer after 1/10 second */
+	cp->nr_up = 0;
+	if (cp->view.dev && !timer_pending(&cp->timer))
+		con3270_set_timer(cp, HZ/10);
+	spin_unlock_irqrestore(&cp->view.lock, flags);
+}
+
+static struct tty_driver *
+con3270_device(struct console *c, int *index)
+{
+	*index = c->index;
+	return tty3270_driver;
+}
+
+/*
+ * Wait for end of write request.
+ */
+static void
+con3270_wait_write(struct con3270 *cp)
+{
+	while (!cp->write) {
+		raw3270_wait_cons_dev(cp->view.dev);
+		barrier();
+	}
+}
+
+/*
+ * panic() calls con3270_flush through a panic_notifier
+ * before the system enters a disabled, endless loop.
+ */
+static void
+con3270_flush(void)
+{
+	struct con3270 *cp;
+	unsigned long flags;
+
+	cp = condev;
+	if (!cp->view.dev)
+		return;
+	raw3270_pm_unfreeze(&cp->view);
+	raw3270_activate_view(&cp->view);
+	spin_lock_irqsave(&cp->view.lock, flags);
+	con3270_wait_write(cp);
+	cp->nr_up = 0;
+	con3270_rebuild_update(cp);
+	con3270_update_status(cp);
+	while (cp->update_flags != 0) {
+		spin_unlock_irqrestore(&cp->view.lock, flags);
+		con3270_update(&cp->timer);
+		spin_lock_irqsave(&cp->view.lock, flags);
+		con3270_wait_write(cp);
+	}
+	spin_unlock_irqrestore(&cp->view.lock, flags);
+}
+
+static int con3270_notify(struct notifier_block *self,
+			  unsigned long event, void *data)
+{
+	con3270_flush();
+	return NOTIFY_OK;
+}
+
+static struct notifier_block on_panic_nb = {
+	.notifier_call = con3270_notify,
+	.priority = 0,
+};
+
+static struct notifier_block on_reboot_nb = {
+	.notifier_call = con3270_notify,
+	.priority = 0,
+};
+
+/*
+ *  The console structure for the 3270 console
+ */
+static struct console con3270 = {
+	.name	 = "tty3270",
+	.write	 = con3270_write,
+	.device	 = con3270_device,
+	.flags	 = CON_PRINTBUFFER,
+};
+
+/*
+ * 3270 console initialization code called from console_init().
+ */
+static int __init
+con3270_init(void)
+{
+	struct raw3270 *rp;
+	void *cbuf;
+	int i;
+
+	/* Check if 3270 is to be the console */
+	if (!CONSOLE_IS_3270)
+		return -ENODEV;
+
+	/* Set the console mode for VM */
+	if (MACHINE_IS_VM) {
+		cpcmd("TERM CONMODE 3270", NULL, 0, NULL);
+		cpcmd("TERM AUTOCR OFF", NULL, 0, NULL);
+	}
+
+	rp = raw3270_setup_console();
+	if (IS_ERR(rp))
+		return PTR_ERR(rp);
+
+	condev = kzalloc(sizeof(struct con3270), GFP_KERNEL | GFP_DMA);
+	if (!condev)
+		return -ENOMEM;
+	condev->view.dev = rp;
+
+	condev->read = raw3270_request_alloc(0);
+	condev->read->callback = con3270_read_callback;
+	condev->read->callback_data = condev;
+	condev->write = raw3270_request_alloc(CON3270_OUTPUT_BUFFER_SIZE);
+	condev->kreset = raw3270_request_alloc(1);
+
+	INIT_LIST_HEAD(&condev->lines);
+	INIT_LIST_HEAD(&condev->update);
+	timer_setup(&condev->timer, con3270_update, 0);
+	tasklet_init(&condev->readlet, 
+		     (void (*)(unsigned long)) con3270_read_tasklet,
+		     (unsigned long) condev->read);
+
+	raw3270_add_view(&condev->view, &con3270_fn, 1);
+
+	INIT_LIST_HEAD(&condev->freemem);
+	for (i = 0; i < CON3270_STRING_PAGES; i++) {
+		cbuf = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
+		add_string_memory(&condev->freemem, cbuf, PAGE_SIZE);
+	}
+	condev->cline = alloc_string(&condev->freemem, condev->view.cols);
+	condev->cline->len = 0;
+	con3270_create_status(condev);
+	condev->input = alloc_string(&condev->freemem, 80);
+	atomic_notifier_chain_register(&panic_notifier_list, &on_panic_nb);
+	register_reboot_notifier(&on_reboot_nb);
+	register_console(&con3270);
+	return 0;
+}
+
+console_initcall(con3270_init);
diff --git a/drivers/s390/char/ctrlchar.c b/drivers/s390/char/ctrlchar.c
new file mode 100644
index 0000000..e1686a6
--- /dev/null
+++ b/drivers/s390/char/ctrlchar.c
@@ -0,0 +1,79 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ *  Unified handling of special chars.
+ *
+ *    Copyright IBM Corp. 2001
+ *    Author(s): Fritz Elfert <felfert@millenux.com> <elfert@de.ibm.com>
+ *
+ */
+
+#include <linux/stddef.h>
+#include <asm/errno.h>
+#include <linux/sysrq.h>
+#include <linux/ctype.h>
+
+#include "ctrlchar.h"
+
+#ifdef CONFIG_MAGIC_SYSRQ
+static struct sysrq_work ctrlchar_sysrq;
+
+static void
+ctrlchar_handle_sysrq(struct work_struct *work)
+{
+	struct sysrq_work *sysrq = container_of(work, struct sysrq_work, work);
+
+	handle_sysrq(sysrq->key);
+}
+
+void schedule_sysrq_work(struct sysrq_work *sw)
+{
+	INIT_WORK(&sw->work, ctrlchar_handle_sysrq);
+	schedule_work(&sw->work);
+}
+#endif
+
+
+/**
+ * ctrlchar_handle - check for special chars at start of input
+ * @buf: console input buffer
+ * @len: length of valid data in buffer
+ * @tty: the tty struct for this console
+ *
+ * Return: CTRLCHAR_NONE if nothing matched,
+ *         CTRLCHAR_SYSRQ if sysrq was encountered,
+ *         otherwise the char to be inserted, logically or'ed
+ *         with CTRLCHAR_CTRL.
+ */
+unsigned int
+ctrlchar_handle(const unsigned char *buf, int len, struct tty_struct *tty)
+{
+	if ((len < 2) || (len > 3))
+		return CTRLCHAR_NONE;
+
+	/* hat is 0xb1 in codepage 037 (US etc.) and thus */
+	/* converted to 0x5e in ascii ('^') */
+	if ((buf[0] != '^') && (buf[0] != '\252'))
+		return CTRLCHAR_NONE;
+
+#ifdef CONFIG_MAGIC_SYSRQ
+	/* racy - a concurrent caller may overwrite ctrlchar_sysrq.key */
+	if (len == 3 && buf[1] == '-') {
+		ctrlchar_sysrq.key = buf[2];
+		schedule_sysrq_work(&ctrlchar_sysrq);
+		return CTRLCHAR_SYSRQ;
+	}
+#endif
+
+	if (len != 2)
+		return CTRLCHAR_NONE;
+
+	switch (tolower(buf[1])) {
+	case 'c':
+		return INTR_CHAR(tty) | CTRLCHAR_CTRL;
+	case 'd':
+		return EOF_CHAR(tty)  | CTRLCHAR_CTRL;
+	case 'z':
+		return SUSP_CHAR(tty) | CTRLCHAR_CTRL;
+	}
+	return CTRLCHAR_NONE;
+}
diff --git a/drivers/s390/char/ctrlchar.h b/drivers/s390/char/ctrlchar.h
new file mode 100644
index 0000000..e52afa3
--- /dev/null
+++ b/drivers/s390/char/ctrlchar.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ *  Unified handling of special chars.
+ *
+ *    Copyright IBM Corp. 2001
+ *    Author(s): Fritz Elfert <felfert@millenux.com> <elfert@de.ibm.com>
+ *
+ */
+
+#include <linux/tty.h>
+#include <linux/sysrq.h>
+#include <linux/workqueue.h>
+
+extern unsigned int
+ctrlchar_handle(const unsigned char *buf, int len, struct tty_struct *tty);
+
+
+#define CTRLCHAR_NONE  (1 << 8)
+#define CTRLCHAR_CTRL  (2 << 8)
+#define CTRLCHAR_SYSRQ (3 << 8)
+
+#define CTRLCHAR_MASK (~0xffu)
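+
+/*
+ * Typical usage (cf. con3215.c): mask the result with CTRLCHAR_MASK and
+ * dispatch on CTRLCHAR_SYSRQ / CTRLCHAR_CTRL / CTRLCHAR_NONE; for
+ * CTRLCHAR_CTRL the low byte holds the control character, e.g.:
+ *
+ *	cchar = ctrlchar_handle(buf, count, tty);
+ *	switch (cchar & CTRLCHAR_MASK) {
+ *	case CTRLCHAR_CTRL:
+ *		tty_insert_flip_char(port, cchar, TTY_NORMAL);
+ *		break;
+ *	}
+ */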
+
+
+#ifdef CONFIG_MAGIC_SYSRQ
+struct sysrq_work {
+	int key;
+	struct work_struct work;
+};
+
+void schedule_sysrq_work(struct sysrq_work *sw);
+#endif
diff --git a/drivers/s390/char/defkeymap.c b/drivers/s390/char/defkeymap.c
new file mode 100644
index 0000000..60845d4
--- /dev/null
+++ b/drivers/s390/char/defkeymap.c
@@ -0,0 +1,161 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/* Do not edit this file! It was automatically generated by   */
+/*    loadkeys --mktable defkeymap.map > defkeymap.c          */
+
+#include <linux/types.h>
+#include <linux/keyboard.h>
+#include <linux/kd.h>
+#include <linux/kbd_kern.h>
+#include <linux/kbd_diacr.h>
+
+#include "keyboard.h"
+
+u_short ebc_plain_map[NR_KEYS] = {
+	0xf000,	0xf000,	0xf000,	0xf000,	0xf000,	0xf000,	0xf000,	0xf000,
+	0xf000,	0xf000,	0xf000,	0xf000,	0xf000,	0xf000,	0xf000,	0xf000,
+	0xf000,	0xf000,	0xf000,	0xf000,	0xf000,	0xf000,	0xf000,	0xf000,
+	0xf000,	0xf000,	0xf000,	0xf000,	0xf000,	0xf000,	0xf000,	0xf000,
+	0xf000,	0xf000,	0xf000,	0xf000,	0xf000,	0xf000,	0xf000,	0xf000,
+	0xf000,	0xf000,	0xf000,	0xf000,	0xf000,	0xf000,	0xf000,	0xf000,
+	0xf000,	0xf000,	0xf000,	0xf000,	0xf000,	0xf000,	0xf000,	0xf000,
+	0xf000,	0xf000,	0xf000,	0xf000,	0xf000,	0xf000,	0xf000,	0xf000,
+	0xf020,	0xf000,	0xf0e2,	0xf0e4,	0xf0e0,	0xf0e1,	0xf0e3,	0xf0e5,
+	0xf0e7,	0xf0f1,	0xf0a2,	0xf02e,	0xf03c,	0xf028,	0xf02b,	0xf07c,
+	0xf026,	0xf0e9,	0xf0e2,	0xf0eb,	0xf0e8,	0xf0ed,	0xf0ee,	0xf0ef,
+	0xf0ec,	0xf0df,	0xf021,	0xf024,	0xf02a,	0xf029,	0xf03b,	0xf0ac,
+	0xf02d,	0xf02f,	0xf0c2,	0xf0c4,	0xf0c0,	0xf0c1,	0xf0c3,	0xf0c5,
+	0xf0c7,	0xf0d1,	0xf0a6,	0xf02c,	0xf025,	0xf05f,	0xf03e,	0xf03f,
+	0xf0f8,	0xf0c9,	0xf0ca,	0xf0cb,	0xf0c8,	0xf0cd,	0xf0ce,	0xf0cf,
+	0xf0cc,	0xf060,	0xf03a,	0xf023,	0xf040,	0xf027,	0xf03d,	0xf022,
+};
+
+static u_short shift_map[NR_KEYS] = {
+	0xf0d8,	0xf061,	0xf062,	0xf063,	0xf064,	0xf065,	0xf066,	0xf067,
+	0xf068,	0xf069,	0xf0ab,	0xf0bb,	0xf0f0,	0xf0fd,	0xf0fe,	0xf0b1,
+	0xf0b0,	0xf06a,	0xf06b,	0xf06c,	0xf06d,	0xf06e,	0xf06f,	0xf070,
+	0xf071,	0xf072,	0xf000,	0xf000,	0xf0e6,	0xf0b8,	0xf0c6,	0xf0a4,
+	0xf0b5,	0xf07e,	0xf073,	0xf074,	0xf075,	0xf076,	0xf077,	0xf078,
+	0xf079,	0xf07a,	0xf0a1,	0xf0bf,	0xf0d0,	0xf0dd,	0xf0de,	0xf0ae,
+	0xf402,	0xf0a3,	0xf0a5,	0xf0b7,	0xf0a9,	0xf0a7,	0xf0b6,	0xf0bc,
+	0xf0bd,	0xf0be,	0xf05b,	0xf05d,	0xf000,	0xf0a8,	0xf0b4,	0xf0d7,
+	0xf07b,	0xf041,	0xf042,	0xf043,	0xf044,	0xf045,	0xf046,	0xf047,
+	0xf048,	0xf049,	0xf000,	0xf0f4,	0xf0f6,	0xf0f2,	0xf0f3,	0xf0f5,
+	0xf07d,	0xf04a,	0xf04b,	0xf04c,	0xf04d,	0xf04e,	0xf04f,	0xf050,
+	0xf051,	0xf052,	0xf0b9,	0xf0fb,	0xf0fc,	0xf0f9,	0xf0fa,	0xf0ff,
+	0xf05c,	0xf0f7,	0xf053,	0xf054,	0xf055,	0xf056,	0xf057,	0xf058,
+	0xf059,	0xf05a,	0xf0b2,	0xf0d4,	0xf0d6,	0xf0d2,	0xf0d3,	0xf0d5,
+	0xf030,	0xf031,	0xf032,	0xf033,	0xf034,	0xf035,	0xf036,	0xf037,
+	0xf038,	0xf039,	0xf0b3,	0xf0db,	0xf0dc,	0xf0d9,	0xf0da,	0xf000,
+};
+
+static u_short ctrl_map[NR_KEYS] = {
+	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,
+	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,
+	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,
+	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,
+	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,
+	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,
+	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,
+	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,
+	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,
+	0xf200,	0xf200,	0xf11f,	0xf120,	0xf121,	0xf200,	0xf200,	0xf200,
+	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,
+	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,
+	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,
+	0xf200,	0xf200,	0xf200,	0xf01a,	0xf003,	0xf212,	0xf004,	0xf200,
+	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,
+	0xf200,	0xf200,	0xf109,	0xf10a,	0xf206,	0xf00a,	0xf200,	0xf200,
+};
+
+static u_short shift_ctrl_map[NR_KEYS] = {
+	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,
+	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,
+	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,
+	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,
+	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,
+	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,
+	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,
+	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,
+	0xf200,	0xf10c,	0xf10d,	0xf10e,	0xf10f,	0xf110,	0xf111,	0xf112,
+	0xf113,	0xf11e,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,
+	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,
+	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,
+	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,
+	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,
+	0xf200,	0xf100,	0xf101,	0xf211,	0xf103,	0xf104,	0xf105,	0xf20b,
+	0xf20a,	0xf108,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,
+};
+
+ushort *ebc_key_maps[MAX_NR_KEYMAPS] = {
+	ebc_plain_map, shift_map, NULL, NULL,
+	ctrl_map, shift_ctrl_map, NULL,
+};
+
+unsigned int ebc_keymap_count = 4;
+
+
+/*
+ * Philosophy: most people do not define more strings, but those who do
+ * often want quite a lot of string space. So, we statically allocate
+ * the default and allocate dynamically in chunks of 512 bytes.
+ */
+
+char ebc_func_buf[] = {
+	'\033', '[', '[', 'A', 0, 
+	'\033', '[', '[', 'B', 0, 
+	'\033', '[', '[', 'C', 0, 
+	'\033', '[', '[', 'D', 0, 
+	'\033', '[', '[', 'E', 0, 
+	'\033', '[', '1', '7', '~', 0, 
+	'\033', '[', '1', '8', '~', 0, 
+	'\033', '[', '1', '9', '~', 0, 
+	'\033', '[', '2', '0', '~', 0, 
+	'\033', '[', '2', '1', '~', 0, 
+	'\033', '[', '2', '3', '~', 0, 
+	'\033', '[', '2', '4', '~', 0, 
+	'\033', '[', '2', '5', '~', 0, 
+	'\033', '[', '2', '6', '~', 0, 
+	'\033', '[', '2', '8', '~', 0, 
+	'\033', '[', '2', '9', '~', 0, 
+	'\033', '[', '3', '1', '~', 0, 
+	'\033', '[', '3', '2', '~', 0, 
+	'\033', '[', '3', '3', '~', 0, 
+	'\033', '[', '3', '4', '~', 0, 
+};
+
+
+char *ebc_funcbufptr = ebc_func_buf;
+int ebc_funcbufsize = sizeof(ebc_func_buf);
+int ebc_funcbufleft;		/* space left */
+
+char *ebc_func_table[MAX_NR_FUNC] = {
+	ebc_func_buf + 0,
+	ebc_func_buf + 5,
+	ebc_func_buf + 10,
+	ebc_func_buf + 15,
+	ebc_func_buf + 20,
+	ebc_func_buf + 25,
+	ebc_func_buf + 31,
+	ebc_func_buf + 37,
+	ebc_func_buf + 43,
+	ebc_func_buf + 49,
+	ebc_func_buf + 55,
+	ebc_func_buf + 61,
+	ebc_func_buf + 67,
+	ebc_func_buf + 73,
+	ebc_func_buf + 79,
+	ebc_func_buf + 85,
+	ebc_func_buf + 91,
+	ebc_func_buf + 97,
+	ebc_func_buf + 103,
+	ebc_func_buf + 109,
+	NULL,
+};
+
+struct kbdiacruc ebc_accent_table[MAX_DIACR] = {
+	{'^', 'c', 0003},	{'^', 'd', 0004},
+	{'^', 'z', 0032},	{'^', 0012, 0000},
+};
+
+unsigned int ebc_accent_table_size = 4;
diff --git a/drivers/s390/char/defkeymap.map b/drivers/s390/char/defkeymap.map
new file mode 100644
index 0000000..f4c0956
--- /dev/null
+++ b/drivers/s390/char/defkeymap.map
@@ -0,0 +1,192 @@
+# SPDX-License-Identifier: GPL-2.0
+# Default keymap for 3270 (ebcdic codepage 037).
+keymaps 0-1,4-5
+
+keycode   0 = nul		Oslash
+keycode   1 = nul		a
+keycode   2 = nul		b
+keycode   3 = nul		c
+keycode   4 = nul		d
+keycode   5 = nul		e
+keycode   6 = nul		f
+keycode   7 = nul		g
+keycode   8 = nul		h
+keycode   9 = nul		i
+keycode  10 = nul		guillemotleft
+keycode  11 = nul		guillemotright
+keycode  12 = nul		eth
+keycode  13 = nul		yacute
+keycode  14 = nul		thorn
+keycode  15 = nul		plusminus
+keycode  16 = nul		degree
+keycode  17 = nul		j
+keycode  18 = nul		k
+keycode  19 = nul		l
+keycode  20 = nul		m
+keycode  21 = nul		n
+keycode  22 = nul		o
+keycode  23 = nul		p
+keycode  24 = nul		q
+keycode  25 = nul		r
+keycode  26 = nul		nul
+keycode  27 = nul		nul
+keycode  28 = nul		ae
+keycode  29 = nul		cedilla
+keycode  30 = nul		AE
+keycode  31 = nul		currency
+keycode  32 = nul		mu
+keycode  33 = nul		tilde
+keycode  34 = nul		s
+keycode  35 = nul		t
+keycode  36 = nul		u
+keycode  37 = nul		v
+keycode  38 = nul		w
+keycode  39 = nul		x
+keycode  40 = nul		y
+keycode  41 = nul		z
+keycode  42 = nul		exclamdown
+keycode  43 = nul		questiondown
+keycode  44 = nul		ETH
+keycode  45 = nul		Yacute
+keycode  46 = nul		THORN
+keycode  47 = nul		registered
+keycode  48 = nul		dead_circumflex
+keycode  49 = nul		sterling
+keycode  50 = nul		yen
+keycode  51 = nul		periodcentered
+keycode  52 = nul		copyright
+keycode  53 = nul		section
+keycode  54 = nul		paragraph
+keycode  55 = nul		onequarter
+keycode  56 = nul		onehalf
+keycode  57 = nul		threequarters
+keycode  58 = nul		bracketleft
+keycode  59 = nul		bracketright
+keycode  60 = nul		nul
+keycode  61 = nul		diaeresis
+keycode  62 = nul		acute
+keycode  63 = nul		multiply
+keycode  64 = space		braceleft
+keycode  65 = nul		A
+keycode  66 = acircumflex	B
+keycode  67 = adiaeresis	C
+keycode  68 = agrave		D
+keycode  69 = aacute		E
+keycode  70 = atilde		F
+keycode  71 = aring		G
+keycode  72 = ccedilla		H
+keycode  73 = ntilde		I
+keycode  74 = cent		nul
+keycode  75 = period		ocircumflex
+keycode  76 = less		odiaeresis
+keycode  77 = parenleft		ograve
+keycode  78 = plus		oacute
+keycode  79 = bar		otilde
+keycode  80 = ampersand		braceright
+keycode  81 = eacute		J
+keycode  82 = acircumflex	K
+keycode  83 = ediaeresis	L
+keycode  84 = egrave		M
+keycode  85 = iacute		N
+keycode  86 = icircumflex	O
+keycode  87 = idiaeresis	P
+keycode  88 = igrave		Q
+keycode  89 = ssharp		R
+keycode  90 = exclam		onesuperior
+keycode  91 = dollar		ucircumflex
+keycode  92 = asterisk		udiaeresis
+keycode  93 = parenright	ugrave
+keycode  94 = semicolon		uacute
+keycode  95 = notsign		ydiaeresis
+keycode  96 = minus		backslash
+keycode  97 = slash		division
+keycode  98 = Acircumflex	S
+keycode  99 = Adiaeresis	T
+keycode 100 = Agrave		U
+keycode 101 = Aacute		V
+keycode 102 = Atilde		W
+keycode 103 = Aring		X
+keycode 104 = Ccedilla		Y
+keycode 105 = Ntilde		Z
+keycode 106 = brokenbar		twosuperior
+keycode 107 = comma		Ocircumflex
+keycode 108 = percent		Odiaeresis
+keycode 109 = underscore	Ograve
+keycode 110 = greater		Oacute
+keycode 111 = question		Otilde
+keycode 112 = oslash		zero
+keycode 113 = Eacute		one
+keycode 114 = Ecircumflex	two
+keycode 115 = Ediaeresis	three
+keycode 116 = Egrave		four
+keycode 117 = Iacute		five
+keycode 118 = Icircumflex	six
+keycode 119 = Idiaeresis	seven
+keycode 120 = Igrave		eight
+keycode 121 = grave		nine
+keycode 122 = colon		threesuperior
+keycode 123 = numbersign	Ucircumflex
+keycode 124 = at		Udiaeresis
+keycode 125 = apostrophe	Ugrave
+keycode 126 = equal		Uacute
+keycode 127 = quotedbl		nul
+
+# AID keys
+control keycode  74 = F22
+control keycode  75 = F23
+control keycode  76 = F24
+control keycode 107 = Control_z		# PA3
+control keycode 108 = Control_c		# PA1
+control keycode 109 = KeyboardSignal	# Clear
+control keycode 110 = Control_d		# PA2
+control keycode 122 = F10
+control keycode 123 = F11		# F11
+control keycode 124 = Last_Console	# F12
+control keycode 125 = Linefeed
+shift control keycode  65 = F13
+shift control keycode  66 = F14
+shift control keycode  67 = F15
+shift control keycode  68 = F16
+shift control keycode  69 = F17
+shift control keycode  70 = F18
+shift control keycode  71 = F19
+shift control keycode  72 = F20
+shift control keycode  73 = F21
+shift control keycode 113 = F1
+shift control keycode 114 = F2
+shift control keycode 115 = Incr_Console
+shift control keycode 116 = F4
+shift control keycode 117 = F5
+shift control keycode 118 = F6
+shift control keycode 119 = Scroll_Backward
+shift control keycode 120 = Scroll_Forward
+shift control keycode 121 = F9
+
+string F1 = "\033[[A"
+string F2 = "\033[[B"
+string F3 = "\033[[C"
+string F4 = "\033[[D"
+string F5 = "\033[[E"
+string F6 = "\033[17~"
+string F7 = "\033[18~"
+string F8 = "\033[19~"
+string F9 = "\033[20~"
+string F10 = "\033[21~"
+string F11 = "\033[23~"
+string F12 = "\033[24~"
+string F13 = "\033[25~"
+string F14 = "\033[26~"
+string F15 = "\033[28~"
+string F16 = "\033[29~"
+string F17 = "\033[31~"
+string F18 = "\033[32~"
+string F19 = "\033[33~"
+string F20 = "\033[34~"
+# string F21 ??
+# string F22 ??
+# string F23 ??
+# string F24 ??
+compose '^' 'c' to Control_c
+compose '^' 'd' to Control_d
+compose '^' 'z' to Control_z
+compose '^' '\012' to nul
diff --git a/drivers/s390/char/diag_ftp.c b/drivers/s390/char/diag_ftp.c
new file mode 100644
index 0000000..6bf1058
--- /dev/null
+++ b/drivers/s390/char/diag_ftp.c
@@ -0,0 +1,240 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ *    DIAGNOSE X'2C4' instruction based HMC FTP services, usable on z/VM
+ *
+ *    Copyright IBM Corp. 2013
+ *    Author(s): Ralf Hoppe (rhoppe@de.ibm.com)
+ *
+ */
+
+#define KMSG_COMPONENT "hmcdrv"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/irq.h>
+#include <linux/wait.h>
+#include <linux/string.h>
+#include <asm/ctl_reg.h>
+#include <asm/diag.h>
+
+#include "hmcdrv_ftp.h"
+#include "diag_ftp.h"
+
+/* DIAGNOSE X'2C4' return codes in Ry */
+#define DIAG_FTP_RET_OK	0 /* HMC FTP started successfully */
+#define DIAG_FTP_RET_EBUSY	4 /* HMC FTP service currently busy */
+#define DIAG_FTP_RET_EIO	8 /* HMC FTP service I/O error */
+/* and an artificial extension */
+#define DIAG_FTP_RET_EPERM	2 /* HMC FTP service privilege error */
+
+/* FTP service status codes (after INTR at guest real location 133) */
+#define DIAG_FTP_STAT_OK	0U /* request completed successfully */
+#define DIAG_FTP_STAT_PGCC	4U /* program check condition */
+#define DIAG_FTP_STAT_PGIOE	8U /* paging I/O error */
+#define DIAG_FTP_STAT_TIMEOUT	12U /* timeout */
+#define DIAG_FTP_STAT_EBASE	16U /* base of error codes from SCLP */
+#define DIAG_FTP_STAT_LDFAIL	(DIAG_FTP_STAT_EBASE + 1U) /* failed */
+#define DIAG_FTP_STAT_LDNPERM	(DIAG_FTP_STAT_EBASE + 2U) /* not allowed */
+#define DIAG_FTP_STAT_LDRUNS	(DIAG_FTP_STAT_EBASE + 3U) /* runs */
+#define DIAG_FTP_STAT_LDNRUNS	(DIAG_FTP_STAT_EBASE + 4U) /* not runs */
+
+/**
+ * struct diag_ftp_ldfpl - load file FTP parameter list (LDFPL)
+ * @bufaddr: real buffer address (at 4k boundary)
+ * @buflen: length of buffer
+ * @offset: dir/file offset
+ * @intparm: interruption parameter (unused)
+ * @transferred: bytes transferred
+ * @fsize: file size, filled on GET
+ * @failaddr: failing address
+ * @spare: padding
+ * @fident: file name - ASCII
+ */
+struct diag_ftp_ldfpl {
+	u64 bufaddr;
+	u64 buflen;
+	u64 offset;
+	u64 intparm;
+	u64 transferred;
+	u64 fsize;
+	u64 failaddr;
+	u64 spare;
+	u8 fident[HMCDRV_FTP_FIDENT_MAX];
+} __packed;
+
+static DECLARE_COMPLETION(diag_ftp_rx_complete);
+static int diag_ftp_subcode;
+
+/**
+ * diag_ftp_handler() - FTP services IRQ handler
+ * @extirq: external interrupt (sub-) code
+ * @param32: 32-bit interruption parameter from &struct diag_ftp_ldfpl
+ * @param64: unused (for 64-bit interrupt parameters)
+ */
+static void diag_ftp_handler(struct ext_code extirq,
+			     unsigned int param32,
+			     unsigned long param64)
+{
+	if ((extirq.subcode >> 8) != 8)
+		return; /* not an FTP services sub-code */
+
+	inc_irq_stat(IRQEXT_FTP);
+	diag_ftp_subcode = extirq.subcode & 0xffU;
+	complete(&diag_ftp_rx_complete);
+}
+
+/**
+ * diag_ftp_2c4() - DIAGNOSE X'2C4' service call
+ * @fpl: pointer to prepared LDFPL
+ * @cmd: FTP command to be executed
+ *
+ * Performs a DIAGNOSE X'2C4' call with (input/output) FTP parameter list
+ * @fpl and FTP function code @cmd. In case of an error the function does
+ * nothing and returns a (negative) error code.
+ *
+ * Notes:
+ * 1. This function only initiates a transfer, so the caller must wait
+ *    for completion (asynchronous execution).
+ * 2. The FTP parameter list @fpl must be aligned to a double-word boundary.
+ * 3. fpl->bufaddr must be a real address, 4k aligned
+ */
+static int diag_ftp_2c4(struct diag_ftp_ldfpl *fpl,
+			enum hmcdrv_ftp_cmdid cmd)
+{
+	int rc;
+
+	diag_stat_inc(DIAG_STAT_X2C4);
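+	/*
+	 * If the diag instruction fails with a program check (e.g. the
+	 * required authorization is missing), the EX_TABLE fixup resumes
+	 * execution at label 1, which returns the artificial
+	 * DIAG_FTP_RET_EPERM code.
+	 */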
+	asm volatile(
+		"	diag	%[addr],%[cmd],0x2c4\n"
+		"0:	j	2f\n"
+		"1:	la	%[rc],%[err]\n"
+		"2:\n"
+		EX_TABLE(0b, 1b)
+		: [rc] "=d" (rc), "+m" (*fpl)
+		: [cmd] "0" (cmd), [addr] "d" (virt_to_phys(fpl)),
+		  [err] "i" (DIAG_FTP_RET_EPERM)
+		: "cc");
+
+	switch (rc) {
+	case DIAG_FTP_RET_OK:
+		return 0;
+	case DIAG_FTP_RET_EBUSY:
+		return -EBUSY;
+	case DIAG_FTP_RET_EPERM:
+		return -EPERM;
+	case DIAG_FTP_RET_EIO:
+	default:
+		return -EIO;
+	}
+}
+
+/**
+ * diag_ftp_cmd() - executes a DIAG X'2C4' FTP command, targeting a HMC
+ * @ftp: pointer to FTP command specification
+ * @fsize: return of file size (or NULL if undesirable)
+ *
+ * Attention: This function is not reentrant, so the caller must
+ * ensure proper locking.
+ *
+ * Return: number of bytes read/written or a (negative) error code
+ */
+ssize_t diag_ftp_cmd(const struct hmcdrv_ftp_cmdspec *ftp, size_t *fsize)
+{
+	struct diag_ftp_ldfpl *ldfpl;
+	ssize_t len;
+#ifdef DEBUG
+	unsigned long start_jiffies;
+
+	pr_debug("starting DIAG X'2C4' on '%s', requesting %zd bytes\n",
+		 ftp->fname, ftp->len);
+	start_jiffies = jiffies;
+#endif
+	init_completion(&diag_ftp_rx_complete);
+
+	ldfpl = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
+	if (!ldfpl) {
+		len = -ENOMEM;
+		goto out;
+	}
+
+	len = strlcpy(ldfpl->fident, ftp->fname, sizeof(ldfpl->fident));
+	if (len >= HMCDRV_FTP_FIDENT_MAX) {
+		len = -EINVAL;
+		goto out_free;
+	}
+
+	ldfpl->transferred = 0;
+	ldfpl->fsize = 0;
+	ldfpl->offset = ftp->ofs;
+	ldfpl->buflen = ftp->len;
+	ldfpl->bufaddr = virt_to_phys(ftp->buf);
+
+	len = diag_ftp_2c4(ldfpl, ftp->id);
+	if (len)
+		goto out_free;
+
+	/*
+	 * There is no way to cancel a running diag X'2C4'; the code
+	 * needs to wait unconditionally until the transfer is complete.
+	 */
+	wait_for_completion(&diag_ftp_rx_complete);
+
+#ifdef DEBUG
+	pr_debug("completed DIAG X'2C4' after %lu ms\n",
+		 (jiffies - start_jiffies) * 1000 / HZ);
+	pr_debug("status of DIAG X'2C4' is %u, with %lld/%lld bytes\n",
+		 diag_ftp_subcode, ldfpl->transferred, ldfpl->fsize);
+#endif
+
+	switch (diag_ftp_subcode) {
+	case DIAG_FTP_STAT_OK: /* success */
+		len = ldfpl->transferred;
+		if (fsize)
+			*fsize = ldfpl->fsize;
+		break;
+	case DIAG_FTP_STAT_LDNPERM:
+		len = -EPERM;
+		break;
+	case DIAG_FTP_STAT_LDRUNS:
+		len = -EBUSY;
+		break;
+	case DIAG_FTP_STAT_LDFAIL:
+		len = -ENOENT; /* no such file or media */
+		break;
+	default:
+		len = -EIO;
+		break;
+	}
+
+out_free:
+	free_page((unsigned long) ldfpl);
+out:
+	return len;
+}
+
+/**
+ * diag_ftp_startup() - startup of FTP services when running on z/VM
+ *
+ * Return: 0 on success, else a (negative) error code
+ */
+int diag_ftp_startup(void)
+{
+	int rc;
+
+	rc = register_external_irq(EXT_IRQ_CP_SERVICE, diag_ftp_handler);
+	if (rc)
+		return rc;
+
+	irq_subclass_register(IRQ_SUBCLASS_SERVICE_SIGNAL);
+	return 0;
+}
+
+/**
+ * diag_ftp_shutdown() - shutdown of FTP services when running on z/VM
+ */
+void diag_ftp_shutdown(void)
+{
+	irq_subclass_unregister(IRQ_SUBCLASS_SERVICE_SIGNAL);
+	unregister_external_irq(EXT_IRQ_CP_SERVICE, diag_ftp_handler);
+}
diff --git a/drivers/s390/char/diag_ftp.h b/drivers/s390/char/diag_ftp.h
new file mode 100644
index 0000000..5d036ba
--- /dev/null
+++ b/drivers/s390/char/diag_ftp.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ *    DIAGNOSE X'2C4' instruction-based SE/HMC FTP Services, usable on z/VM
+ *
+ *    Notice that all functions exported here are not reentrant.
+ *    So usage should be exclusive, ensured by the caller (e.g. using a
+ *    mutex).
+ *
+ *    Copyright IBM Corp. 2013
+ *    Author(s): Ralf Hoppe (rhoppe@de.ibm.com)
+ */
+
+#ifndef __DIAG_FTP_H__
+#define __DIAG_FTP_H__
+
+#include "hmcdrv_ftp.h"
+
+int diag_ftp_startup(void);
+void diag_ftp_shutdown(void);
+ssize_t diag_ftp_cmd(const struct hmcdrv_ftp_cmdspec *ftp, size_t *fsize);
+
+#endif	 /* __DIAG_FTP_H__ */
diff --git a/drivers/s390/char/fs3270.c b/drivers/s390/char/fs3270.c
new file mode 100644
index 0000000..16a4e85
--- /dev/null
+++ b/drivers/s390/char/fs3270.c
@@ -0,0 +1,575 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * IBM/3270 Driver - fullscreen driver.
+ *
+ * Author(s):
+ *   Original 3270 Code for 2.4 written by Richard Hitt (UTS Global)
+ *   Rewritten for 2.5/2.6 by Martin Schwidefsky <schwidefsky@de.ibm.com>
+ *     Copyright IBM Corp. 2003, 2009
+ */
+
+#include <linux/bootmem.h>
+#include <linux/console.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/compat.h>
+#include <linux/sched/signal.h>
+#include <linux/module.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+
+#include <asm/ccwdev.h>
+#include <asm/cio.h>
+#include <asm/ebcdic.h>
+#include <asm/idals.h>
+
+#include "raw3270.h"
+#include "ctrlchar.h"
+
+static struct raw3270_fn fs3270_fn;
+
+struct fs3270 {
+	struct raw3270_view view;
+	struct pid *fs_pid;		/* Pid of controlling program. */
+	int read_command;		/* ccw command to use for reads. */
+	int write_command;		/* ccw command to use for writes. */
+	int attention;			/* Got attention. */
+	int active;			/* Fullscreen view is active. */
+	struct raw3270_request *init;	/* single init request. */
+	wait_queue_head_t wait;		/* Init & attention wait queue. */
+	struct idal_buffer *rdbuf;	/* full-screen-deactivate buffer */
+	size_t rdbuf_size;		/* size of data returned by RDBUF */
+};
+
+static DEFINE_MUTEX(fs3270_mutex);
+
+static void
+fs3270_wake_up(struct raw3270_request *rq, void *data)
+{
+	wake_up((wait_queue_head_t *) data);
+}
+
+static inline int
+fs3270_working(struct fs3270 *fp)
+{
+	/*
+	 * The fullscreen view is in working order if the view
+	 * has been activated AND the initial request is finished.
+	 */
+	return fp->active && raw3270_request_final(fp->init);
+}
+
+static int
+fs3270_do_io(struct raw3270_view *view, struct raw3270_request *rq)
+{
+	struct fs3270 *fp;
+	int rc;
+
+	fp = (struct fs3270 *) view;
+	rq->callback = fs3270_wake_up;
+	rq->callback_data = &fp->wait;
+
+	do {
+		if (!fs3270_working(fp)) {
+			/* Fullscreen view isn't ready yet. */
+			rc = wait_event_interruptible(fp->wait,
+						      fs3270_working(fp));
+			if (rc != 0)
+				break;
+		}
+		rc = raw3270_start(view, rq);
+		if (rc == 0) {
+			/* Started successfully. Now wait for completion. */
+			wait_event(fp->wait, raw3270_request_final(rq));
+		}
+	} while (rc == -EACCES);
+	return rc;
+}
+
+/*
+ * Switch to the fullscreen view.
+ */
+static void
+fs3270_reset_callback(struct raw3270_request *rq, void *data)
+{
+	struct fs3270 *fp;
+
+	fp = (struct fs3270 *) rq->view;
+	raw3270_request_reset(rq);
+	wake_up(&fp->wait);
+}
+
+static void
+fs3270_restore_callback(struct raw3270_request *rq, void *data)
+{
+	struct fs3270 *fp;
+
+	fp = (struct fs3270 *) rq->view;
+	if (rq->rc != 0 || rq->rescnt != 0) {
+		if (fp->fs_pid)
+			kill_pid(fp->fs_pid, SIGHUP, 1);
+	}
+	fp->rdbuf_size = 0;
+	raw3270_request_reset(rq);
+	wake_up(&fp->wait);
+}
+
+static int
+fs3270_activate(struct raw3270_view *view)
+{
+	struct fs3270 *fp;
+	char *cp;
+	int rc;
+
+	fp = (struct fs3270 *) view;
+
+	/* If an old init command is still running just return. */
+	if (!raw3270_request_final(fp->init))
+		return 0;
+
+	if (fp->rdbuf_size == 0) {
+		/* No saved buffer. Just clear the screen. */
+		raw3270_request_set_cmd(fp->init, TC_EWRITEA);
+		fp->init->callback = fs3270_reset_callback;
+	} else {
+		/* Restore fullscreen buffer saved by fs3270_deactivate. */
+		raw3270_request_set_cmd(fp->init, TC_EWRITEA);
+		raw3270_request_set_idal(fp->init, fp->rdbuf);
+		fp->init->ccw.count = fp->rdbuf_size;
+		cp = fp->rdbuf->data[0];
+		cp[0] = TW_KR;
+		cp[1] = TO_SBA;
+		cp[2] = cp[6];
+		cp[3] = cp[7];
+		cp[4] = TO_IC;
+		cp[5] = TO_SBA;
+		cp[6] = 0x40;
+		cp[7] = 0x40;
+		fp->init->rescnt = 0;
+		fp->init->callback = fs3270_restore_callback;
+	}
+	rc = fp->init->rc = raw3270_start_locked(view, fp->init);
+	if (rc)
+		fp->init->callback(fp->init, NULL);
+	else
+		fp->active = 1;
+	return rc;
+}
+
+/*
+ * Shutdown fullscreen view.
+ */
+static void
+fs3270_save_callback(struct raw3270_request *rq, void *data)
+{
+	struct fs3270 *fp;
+
+	fp = (struct fs3270 *) rq->view;
+
+	/* Correct idal buffer element 0 address. */
+	fp->rdbuf->data[0] -= 5;
+	fp->rdbuf->size += 5;
+
+	/*
+	 * If the rdbuf command failed or the idal buffer is
+	 * too small for the amount of data returned by the
+	 * rdbuf command, then we have no choice but to send
+	 * a SIGHUP to the application.
+	 */
+	if (rq->rc != 0 || rq->rescnt == 0) {
+		if (fp->fs_pid)
+			kill_pid(fp->fs_pid, SIGHUP, 1);
+		fp->rdbuf_size = 0;
+	} else
+		fp->rdbuf_size = fp->rdbuf->size - rq->rescnt;
+	raw3270_request_reset(rq);
+	wake_up(&fp->wait);
+}
+
+static void
+fs3270_deactivate(struct raw3270_view *view)
+{
+	struct fs3270 *fp;
+
+	fp = (struct fs3270 *) view;
+	fp->active = 0;
+
+	/* If an old init command is still running just return. */
+	if (!raw3270_request_final(fp->init))
+		return;
+
+	/* Prepare read-buffer request. */
+	raw3270_request_set_cmd(fp->init, TC_RDBUF);
+	/*
+	 * Hackish: skip first 5 bytes of the idal buffer to make
+	 * room for the TW_KR/TO_SBA/<address>/<address>/TO_IC sequence
+	 * in the activation command.
+	 */
+	fp->rdbuf->data[0] += 5;
+	fp->rdbuf->size -= 5;
+	raw3270_request_set_idal(fp->init, fp->rdbuf);
+	fp->init->rescnt = 0;
+	fp->init->callback = fs3270_save_callback;
+
+	/* Start I/O to read in the 3270 buffer. */
+	fp->init->rc = raw3270_start_locked(view, fp->init);
+	if (fp->init->rc)
+		fp->init->callback(fp->init, NULL);
+}
+
+static void
+fs3270_irq(struct fs3270 *fp, struct raw3270_request *rq, struct irb *irb)
+{
+	/* Handle ATTN. Set indication and wake waiters for attention. */
+	if (irb->scsw.cmd.dstat & DEV_STAT_ATTENTION) {
+		fp->attention = 1;
+		wake_up(&fp->wait);
+	}
+
+	if (rq) {
+		if (irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK)
+			rq->rc = -EIO;
+		else
+			/* Normal end. Copy residual count. */
+			rq->rescnt = irb->scsw.cmd.count;
+	}
+}
+
+/*
+ * Process reads from fullscreen 3270.
+ */
+static ssize_t
+fs3270_read(struct file *filp, char __user *data, size_t count, loff_t *off)
+{
+	struct fs3270 *fp;
+	struct raw3270_request *rq;
+	struct idal_buffer *ib;
+	ssize_t rc;
+
+	if (count == 0 || count > 65535)
+		return -EINVAL;
+	fp = filp->private_data;
+	if (!fp)
+		return -ENODEV;
+	ib = idal_buffer_alloc(count, 0);
+	if (IS_ERR(ib))
+		return -ENOMEM;
+	rq = raw3270_request_alloc(0);
+	if (!IS_ERR(rq)) {
+		if (fp->read_command == 0 && fp->write_command != 0)
+			fp->read_command = 6;
+		raw3270_request_set_cmd(rq, fp->read_command ? : 2);
+		raw3270_request_set_idal(rq, ib);
+		rc = wait_event_interruptible(fp->wait, fp->attention);
+		fp->attention = 0;
+		if (rc == 0) {
+			rc = fs3270_do_io(&fp->view, rq);
+			if (rc == 0) {
+				count -= rq->rescnt;
+				if (idal_buffer_to_user(ib, data, count) != 0)
+					rc = -EFAULT;
+				else
+					rc = count;
+			}
+		}
+		raw3270_request_free(rq);
+	} else
+		rc = PTR_ERR(rq);
+	idal_buffer_free(ib);
+	return rc;
+}
+
+/*
+ * Process writes to fullscreen 3270.
+ */
+static ssize_t
+fs3270_write(struct file *filp, const char __user *data, size_t count, loff_t *off)
+{
+	struct fs3270 *fp;
+	struct raw3270_request *rq;
+	struct idal_buffer *ib;
+	int write_command;
+	ssize_t rc;
+
+	fp = filp->private_data;
+	if (!fp)
+		return -ENODEV;
+	ib = idal_buffer_alloc(count, 0);
+	if (IS_ERR(ib))
+		return -ENOMEM;
+	rq = raw3270_request_alloc(0);
+	if (!IS_ERR(rq)) {
+		if (idal_buffer_from_user(ib, data, count) == 0) {
+			write_command = fp->write_command ? : 1;
+			if (write_command == 5)
+				write_command = 13;
+			raw3270_request_set_cmd(rq, write_command);
+			raw3270_request_set_idal(rq, ib);
+			rc = fs3270_do_io(&fp->view, rq);
+			if (rc == 0)
+				rc = count - rq->rescnt;
+		} else
+			rc = -EFAULT;
+		raw3270_request_free(rq);
+	} else
+		rc = PTR_ERR(rq);
+	idal_buffer_free(ib);
+	return rc;
+}
+
+/*
+ * Process ioctl commands for the tube driver.
+ */
+static long
+fs3270_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+{
+	char __user *argp;
+	struct fs3270 *fp;
+	struct raw3270_iocb iocb;
+	int rc;
+
+	fp = filp->private_data;
+	if (!fp)
+		return -ENODEV;
+	if (is_compat_task())
+		argp = compat_ptr(arg);
+	else
+		argp = (char __user *)arg;
+	rc = 0;
+	mutex_lock(&fs3270_mutex);
+	switch (cmd) {
+	case TUBICMD:
+		fp->read_command = arg;
+		break;
+	case TUBOCMD:
+		fp->write_command = arg;
+		break;
+	case TUBGETI:
+		rc = put_user(fp->read_command, argp);
+		break;
+	case TUBGETO:
+		rc = put_user(fp->write_command, argp);
+		break;
+	case TUBGETMOD:
+		iocb.model = fp->view.model;
+		iocb.line_cnt = fp->view.rows;
+		iocb.col_cnt = fp->view.cols;
+		iocb.pf_cnt = 24;
+		iocb.re_cnt = 20;
+		iocb.map = 0;
+		if (copy_to_user(argp, &iocb, sizeof(struct raw3270_iocb)))
+			rc = -EFAULT;
+		break;
+	}
+	mutex_unlock(&fs3270_mutex);
+	return rc;
+}
+
+/*
+ * Allocate fs3270 structure.
+ */
+static struct fs3270 *
+fs3270_alloc_view(void)
+{
+	struct fs3270 *fp;
+
+	fp = kzalloc(sizeof(struct fs3270), GFP_KERNEL);
+	if (!fp)
+		return ERR_PTR(-ENOMEM);
+	fp->init = raw3270_request_alloc(0);
+	if (IS_ERR(fp->init)) {
+		kfree(fp);
+		return ERR_PTR(-ENOMEM);
+	}
+	return fp;
+}
+
+/*
+ * Free fs3270 structure.
+ */
+static void
+fs3270_free_view(struct raw3270_view *view)
+{
+	struct fs3270 *fp;
+
+	fp = (struct fs3270 *) view;
+	if (fp->rdbuf)
+		idal_buffer_free(fp->rdbuf);
+	raw3270_request_free(fp->init);
+	kfree(view);
+}
+
+/*
+ * Unlink fs3270 data structure from filp.
+ */
+static void
+fs3270_release(struct raw3270_view *view)
+{
+	struct fs3270 *fp;
+
+	fp = (struct fs3270 *) view;
+	if (fp->fs_pid)
+		kill_pid(fp->fs_pid, SIGHUP, 1);
+}
+
+/* View to a 3270 device. Can be console, tty or fullscreen. */
+static struct raw3270_fn fs3270_fn = {
+	.activate = fs3270_activate,
+	.deactivate = fs3270_deactivate,
+	.intv = (void *) fs3270_irq,
+	.release = fs3270_release,
+	.free = fs3270_free_view
+};
+
+/*
+ * This routine is called whenever a 3270 fullscreen device is opened.
+ */
+static int
+fs3270_open(struct inode *inode, struct file *filp)
+{
+	struct fs3270 *fp;
+	struct idal_buffer *ib;
+	int minor, rc = 0;
+
+	if (imajor(file_inode(filp)) != IBM_FS3270_MAJOR)
+		return -ENODEV;
+	minor = iminor(file_inode(filp));
+	/* Check for minor 0 multiplexer. */
+	if (minor == 0) {
+		struct tty_struct *tty = get_current_tty();
+		if (!tty || tty->driver->major != IBM_TTY3270_MAJOR) {
+			tty_kref_put(tty);
+			return -ENODEV;
+		}
+		minor = tty->index;
+		tty_kref_put(tty);
+	}
+	mutex_lock(&fs3270_mutex);
+	/* Check if some other program is already using fullscreen mode. */
+	fp = (struct fs3270 *) raw3270_find_view(&fs3270_fn, minor);
+	if (!IS_ERR(fp)) {
+		raw3270_put_view(&fp->view);
+		rc = -EBUSY;
+		goto out;
+	}
+	/* Allocate fullscreen view structure. */
+	fp = fs3270_alloc_view();
+	if (IS_ERR(fp)) {
+		rc = PTR_ERR(fp);
+		goto out;
+	}
+
+	init_waitqueue_head(&fp->wait);
+	fp->fs_pid = get_pid(task_pid(current));
+	rc = raw3270_add_view(&fp->view, &fs3270_fn, minor);
+	if (rc) {
+		fs3270_free_view(&fp->view);
+		goto out;
+	}
+
+	/* Allocate idal-buffer. */
+	ib = idal_buffer_alloc(2 * fp->view.rows * fp->view.cols + 5, 0);
+	if (IS_ERR(ib)) {
+		raw3270_put_view(&fp->view);
+		raw3270_del_view(&fp->view);
+		rc = PTR_ERR(ib);
+		goto out;
+	}
+	fp->rdbuf = ib;
+
+	rc = raw3270_activate_view(&fp->view);
+	if (rc) {
+		raw3270_put_view(&fp->view);
+		raw3270_del_view(&fp->view);
+		goto out;
+	}
+	nonseekable_open(inode, filp);
+	filp->private_data = fp;
+out:
+	mutex_unlock(&fs3270_mutex);
+	return rc;
+}
+
+/*
+ * This routine is called when the 3270 fullscreen device is closed.
+ * We wait for the remaining request to complete, then clean up.
+ */
+static int
+fs3270_close(struct inode *inode, struct file *filp)
+{
+	struct fs3270 *fp;
+
+	fp = filp->private_data;
+	filp->private_data = NULL;
+	if (fp) {
+		put_pid(fp->fs_pid);
+		fp->fs_pid = NULL;
+		raw3270_reset(&fp->view);
+		raw3270_put_view(&fp->view);
+		raw3270_del_view(&fp->view);
+	}
+	return 0;
+}
+
+static const struct file_operations fs3270_fops = {
+	.owner		 = THIS_MODULE,		/* owner */
+	.read		 = fs3270_read,		/* read */
+	.write		 = fs3270_write,	/* write */
+	.unlocked_ioctl	 = fs3270_ioctl,	/* ioctl */
+	.compat_ioctl	 = fs3270_ioctl,	/* ioctl */
+	.open		 = fs3270_open,		/* open */
+	.release	 = fs3270_close,	/* release */
+	.llseek		= no_llseek,
+};
+
+static void fs3270_create_cb(int minor)
+{
+	__register_chrdev(IBM_FS3270_MAJOR, minor, 1, "tub", &fs3270_fops);
+	device_create(class3270, NULL, MKDEV(IBM_FS3270_MAJOR, minor),
+		      NULL, "3270/tub%d", minor);
+}
+
+static void fs3270_destroy_cb(int minor)
+{
+	device_destroy(class3270, MKDEV(IBM_FS3270_MAJOR, minor));
+	__unregister_chrdev(IBM_FS3270_MAJOR, minor, 1, "tub");
+}
+
+static struct raw3270_notifier fs3270_notifier =
+{
+	.create = fs3270_create_cb,
+	.destroy = fs3270_destroy_cb,
+};
+
+/*
+ * 3270 fullscreen driver initialization.
+ */
+static int __init
+fs3270_init(void)
+{
+	int rc;
+
+	rc = __register_chrdev(IBM_FS3270_MAJOR, 0, 1, "fs3270", &fs3270_fops);
+	if (rc)
+		return rc;
+	device_create(class3270, NULL, MKDEV(IBM_FS3270_MAJOR, 0),
+		      NULL, "3270/tub");
+	raw3270_register_notifier(&fs3270_notifier);
+	return 0;
+}
+
+static void __exit
+fs3270_exit(void)
+{
+	raw3270_unregister_notifier(&fs3270_notifier);
+	device_destroy(class3270, MKDEV(IBM_FS3270_MAJOR, 0));
+	__unregister_chrdev(IBM_FS3270_MAJOR, 0, 1, "fs3270");
+}
+
+MODULE_LICENSE("GPL");
+MODULE_ALIAS_CHARDEV_MAJOR(IBM_FS3270_MAJOR);
+
+module_init(fs3270_init);
+module_exit(fs3270_exit);
diff --git a/drivers/s390/char/hmcdrv_cache.c b/drivers/s390/char/hmcdrv_cache.c
new file mode 100644
index 0000000..1f5bdb2
--- /dev/null
+++ b/drivers/s390/char/hmcdrv_cache.c
@@ -0,0 +1,253 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ *    SE/HMC Drive (Read) Cache Functions
+ *
+ *    Copyright IBM Corp. 2013
+ *    Author(s): Ralf Hoppe (rhoppe@de.ibm.com)
+ *
+ */
+
+#define KMSG_COMPONENT "hmcdrv"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/jiffies.h>
+
+#include "hmcdrv_ftp.h"
+#include "hmcdrv_cache.h"
+
+#define HMCDRV_CACHE_TIMEOUT		30 /* aging timeout in seconds */
+
+/**
+ * struct hmcdrv_cache_entry - file cache (only used on read/dir)
+ * @id: FTP command ID
+ * @fname: file name
+ * @fsize: file size
+ * @ofs: start of content within file (-1 if no cached content)
+ * @timeout: cache timeout in jiffies
+ * @content: kernel-space buffer, 4k aligned
+ * @len: size of @content cache (0 if caching disabled)
+ *
+ * Notice that the first three members (id, fname, fsize) are cached on all
+ * read/dir requests. But content is cached only under some preconditions.
+ * Uncached content is signalled by a negative value of @ofs.
+ */
+struct hmcdrv_cache_entry {
+	enum hmcdrv_ftp_cmdid id;
+	char fname[HMCDRV_FTP_FIDENT_MAX];
+	size_t fsize;
+	loff_t ofs;
+	unsigned long timeout;
+	void *content;
+	size_t len;
+};
+
+static int hmcdrv_cache_order; /* cache allocated page order */
+
+static struct hmcdrv_cache_entry hmcdrv_cache_file = {
+	.fsize = SIZE_MAX,
+	.ofs = -1,
+	.len = 0,
+	.fname = {'\0'}
+};
+
+/**
+ * hmcdrv_cache_get() - looks for file data/content in read cache
+ * @ftp: pointer to FTP command specification
+ *
+ * Return: number of bytes read from cache or a negative number if nothing
+ * in content cache (for the file/cmd specified in @ftp)
+ */
+static ssize_t hmcdrv_cache_get(const struct hmcdrv_ftp_cmdspec *ftp)
+{
+	loff_t pos; /* position in cache (signed) */
+	ssize_t len;
+
+	if ((ftp->id != hmcdrv_cache_file.id) ||
+	    strcmp(hmcdrv_cache_file.fname, ftp->fname))
+		return -1;
+
+	if (ftp->ofs >= hmcdrv_cache_file.fsize) /* EOF ? */
+		return 0;
+
+	if ((hmcdrv_cache_file.ofs < 0) || /* has content? */
+	    time_after(jiffies, hmcdrv_cache_file.timeout))
+		return -1;
+
+	/* there seems to be cached content - calculate the maximum number
+	 * of bytes that can be returned (regarding file size and offset)
+	 */
+	len = hmcdrv_cache_file.fsize - ftp->ofs;
+
+	if (len > ftp->len)
+		len = ftp->len;
+
+	/* check if the requested chunk falls into our cache (which starts
+	 * at offset 'hmcdrv_cache_file.ofs' in the file of interest)
+	 */
+	pos = ftp->ofs - hmcdrv_cache_file.ofs;
+
+	if ((pos >= 0) &&
+	    ((pos + len) <= hmcdrv_cache_file.len)) {
+
+		memcpy(ftp->buf,
+		       hmcdrv_cache_file.content + pos,
+		       len);
+		pr_debug("using cached content of '%s', returning %zd/%zd bytes\n",
+			 hmcdrv_cache_file.fname, len,
+			 hmcdrv_cache_file.fsize);
+
+		return len;
+	}
+
+	return -1;
+}
+
+/**
+ * hmcdrv_cache_do() - do an HMC drive CD/DVD transfer with cache update
+ * @ftp: pointer to FTP command specification
+ * @func: FTP transfer function to be used
+ *
+ * Return: number of bytes read/written or a (negative) error code
+ */
+static ssize_t hmcdrv_cache_do(const struct hmcdrv_ftp_cmdspec *ftp,
+			       hmcdrv_cache_ftpfunc func)
+{
+	ssize_t len;
+
+	/* only cache content if the read/dir cache really exists
+	 * (hmcdrv_cache_file.len > 0), is large enough to handle the
+	 * request (hmcdrv_cache_file.len >= ftp->len) and there is a need
+	 * to do so (ftp->len > 0)
+	 */
+	if ((ftp->len > 0) && (hmcdrv_cache_file.len >= ftp->len)) {
+
+		/* because the cache is not located at ftp->buf, we have to
+		 * assemble a new HMC drive FTP cmd specification (pointing
+		 * to our cache, and using the increased size)
+		 */
+		struct hmcdrv_ftp_cmdspec cftp = *ftp; /* make a copy */
+		cftp.buf = hmcdrv_cache_file.content;  /* and update */
+		cftp.len = hmcdrv_cache_file.len;      /* buffer data */
+
+		len = func(&cftp, &hmcdrv_cache_file.fsize); /* now do */
+
+		if (len > 0) {
+			pr_debug("caching %zd bytes content for '%s'\n",
+				 len, ftp->fname);
+
+			if (len > ftp->len)
+				len = ftp->len;
+
+			hmcdrv_cache_file.ofs = ftp->ofs;
+			hmcdrv_cache_file.timeout = jiffies +
+				HMCDRV_CACHE_TIMEOUT * HZ;
+			memcpy(ftp->buf, hmcdrv_cache_file.content, len);
+		}
+	} else {
+		len = func(ftp, &hmcdrv_cache_file.fsize);
+		hmcdrv_cache_file.ofs = -1; /* invalidate content */
+	}
+
+	if (len > 0) {
+		/* cache some file info (FTP command, file name and file
+		 * size) unconditionally
+		 */
+		strlcpy(hmcdrv_cache_file.fname, ftp->fname,
+			HMCDRV_FTP_FIDENT_MAX);
+		hmcdrv_cache_file.id = ftp->id;
+		pr_debug("caching cmd %d, file size %zu for '%s'\n",
+			 ftp->id, hmcdrv_cache_file.fsize, ftp->fname);
+	}
+
+	return len;
+}
+
+/**
+ * hmcdrv_cache_cmd() - perform a cached HMC drive CD/DVD transfer
+ * @ftp: pointer to FTP command specification
+ * @func: FTP transfer function to be used
+ *
+ * Attention: this function is not reentrant, so the caller must
+ * ensure exclusive execution.
+ *
+ * Return: number of bytes read/written or a (negative) error code
+ */
+ssize_t hmcdrv_cache_cmd(const struct hmcdrv_ftp_cmdspec *ftp,
+			 hmcdrv_cache_ftpfunc func)
+{
+	ssize_t len;
+
+	if ((ftp->id == HMCDRV_FTP_DIR) || /* read cache */
+	    (ftp->id == HMCDRV_FTP_NLIST) ||
+	    (ftp->id == HMCDRV_FTP_GET)) {
+
+		len = hmcdrv_cache_get(ftp);
+
+		if (len >= 0) /* got it from cache ? */
+			return len; /* yes */
+
+		len = hmcdrv_cache_do(ftp, func);
+
+		if (len >= 0)
+			return len;
+
+	} else {
+		len = func(ftp, NULL); /* simply do original command */
+	}
+
+	/* invalidate the (read) cache in case there was a write operation
+	 * or an error on read/dir
+	 */
+	hmcdrv_cache_file.id = HMCDRV_FTP_NOOP;
+	hmcdrv_cache_file.fsize = LLONG_MAX;
+	hmcdrv_cache_file.ofs = -1;
+
+	return len;
+}
+
+/**
+ * hmcdrv_cache_startup() - startup of HMC drive cache
+ * @cachesize: cache size
+ *
+ * Return: 0 on success, else a (negative) error code
+ */
+int hmcdrv_cache_startup(size_t cachesize)
+{
+	if (cachesize > 0) { /* perform caching ? */
+		hmcdrv_cache_order = get_order(cachesize);
+		hmcdrv_cache_file.content =
+			(void *) __get_free_pages(GFP_KERNEL | GFP_DMA,
+						  hmcdrv_cache_order);
+
+		if (!hmcdrv_cache_file.content) {
+			pr_err("Allocating the requested cache size of %zu bytes failed\n",
+			       cachesize);
+			return -ENOMEM;
+		}
+
+		pr_debug("content cache enabled, size is %zu bytes\n",
+			 cachesize);
+	}
+
+	hmcdrv_cache_file.len = cachesize;
+	return 0;
+}
+
+/**
+ * hmcdrv_cache_shutdown() - shutdown of HMC drive cache
+ */
+void hmcdrv_cache_shutdown(void)
+{
+	if (hmcdrv_cache_file.content) {
+		free_pages((unsigned long) hmcdrv_cache_file.content,
+			   hmcdrv_cache_order);
+		hmcdrv_cache_file.content = NULL;
+	}
+
+	hmcdrv_cache_file.id = HMCDRV_FTP_NOOP;
+	hmcdrv_cache_file.fsize = LLONG_MAX;
+	hmcdrv_cache_file.ofs = -1;
+	hmcdrv_cache_file.len = 0; /* no cache */
+}
diff --git a/drivers/s390/char/hmcdrv_cache.h b/drivers/s390/char/hmcdrv_cache.h
new file mode 100644
index 0000000..d69f9fe
--- /dev/null
+++ b/drivers/s390/char/hmcdrv_cache.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ *    SE/HMC Drive (Read) Cache Functions
+ *
+ *    Copyright IBM Corp. 2013
+ *    Author(s): Ralf Hoppe (rhoppe@de.ibm.com)
+ */
+
+#ifndef __HMCDRV_CACHE_H__
+#define __HMCDRV_CACHE_H__
+
+#include <linux/mmzone.h>
+#include "hmcdrv_ftp.h"
+
+#define HMCDRV_CACHE_SIZE_DFLT	(MAX_ORDER_NR_PAGES * PAGE_SIZE / 2UL)
+
+typedef ssize_t (*hmcdrv_cache_ftpfunc)(const struct hmcdrv_ftp_cmdspec *ftp,
+					size_t *fsize);
+
+ssize_t hmcdrv_cache_cmd(const struct hmcdrv_ftp_cmdspec *ftp,
+			 hmcdrv_cache_ftpfunc func);
+int hmcdrv_cache_startup(size_t cachesize);
+void hmcdrv_cache_shutdown(void);
+
+#endif	 /* __HMCDRV_CACHE_H__ */
diff --git a/drivers/s390/char/hmcdrv_dev.c b/drivers/s390/char/hmcdrv_dev.c
new file mode 100644
index 0000000..20e9cd5
--- /dev/null
+++ b/drivers/s390/char/hmcdrv_dev.c
@@ -0,0 +1,369 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ *    HMC Drive CD/DVD Device
+ *
+ *    Copyright IBM Corp. 2013
+ *    Author(s): Ralf Hoppe (rhoppe@de.ibm.com)
+ *
+ *    This file provides a Linux "misc" character device for access to an
+ *    assigned HMC drive CD/DVD-ROM. It works as follows: First create the
+ *    device by calling hmcdrv_dev_init(). After open(), an lseek(fd, 0,
+ *    SEEK_END) indicates that a new FTP command follows (not needed on the
+ *    first command after open). Then write() the FTP command ASCII string
+ *    to it, e.g. "dir /" or "nls <directory>" or "get <filename>". At the
+ *    end read() the response.
+ */
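+
+/*
+ * An illustrative user-space sketch of this protocol (the buffer and
+ * file name are examples only, not part of the driver):
+ *
+ *	int fd = open("/dev/hmcdrv", O_RDWR);
+ *	write(fd, "dir /", 5);		    (issue an FTP command)
+ *	n = read(fd, buf, sizeof(buf));	    (read back the response)
+ *	lseek(fd, 0, SEEK_END);		    (discard it, start a new command)
+ *	write(fd, "get <filename>", 14);
+ *	n = read(fd, buf, sizeof(buf));
+ */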
+
+#define KMSG_COMPONENT "hmcdrv"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/fs.h>
+#include <linux/cdev.h>
+#include <linux/miscdevice.h>
+#include <linux/device.h>
+#include <linux/capability.h>
+#include <linux/delay.h>
+#include <linux/uaccess.h>
+
+#include "hmcdrv_dev.h"
+#include "hmcdrv_ftp.h"
+
+/* If the following macro is defined, then the HMC device creates its own
+ * separate device class (and dynamically assigns a major number). If not
+ * defined, the HMC device is registered as a "misc" class device.
+ *
+#define HMCDRV_DEV_CLASS "hmcftp"
+ */
+
+#define HMCDRV_DEV_NAME  "hmcdrv"
+#define HMCDRV_DEV_BUSY_DELAY	 500 /* delay between -EBUSY trials in ms */
+#define HMCDRV_DEV_BUSY_RETRIES  3   /* number of retries on -EBUSY */
+
+struct hmcdrv_dev_node {
+
+#ifdef HMCDRV_DEV_CLASS
+	struct cdev dev; /* character device structure */
+	umode_t mode;	 /* mode of device node (unused, zero) */
+#else
+	struct miscdevice dev; /* "misc" device structure */
+#endif
+
+};
+
+static int hmcdrv_dev_open(struct inode *inode, struct file *fp);
+static int hmcdrv_dev_release(struct inode *inode, struct file *fp);
+static loff_t hmcdrv_dev_seek(struct file *fp, loff_t pos, int whence);
+static ssize_t hmcdrv_dev_read(struct file *fp, char __user *ubuf,
+			       size_t len, loff_t *pos);
+static ssize_t hmcdrv_dev_write(struct file *fp, const char __user *ubuf,
+				size_t len, loff_t *pos);
+static ssize_t hmcdrv_dev_transfer(char __kernel *cmd, loff_t offset,
+				   char __user *buf, size_t len);
+
+/*
+ * device operations
+ */
+static const struct file_operations hmcdrv_dev_fops = {
+	.open = hmcdrv_dev_open,
+	.llseek = hmcdrv_dev_seek,
+	.release = hmcdrv_dev_release,
+	.read = hmcdrv_dev_read,
+	.write = hmcdrv_dev_write,
+};
+
+static struct hmcdrv_dev_node hmcdrv_dev; /* HMC device struct (static) */
+
+#ifdef HMCDRV_DEV_CLASS
+
+static struct class *hmcdrv_dev_class; /* device class pointer */
+static dev_t hmcdrv_dev_no; /* device number (major/minor) */
+
+/**
+ * hmcdrv_dev_name() - provides a naming hint for a device node in /dev
+ * @dev: device for which the naming/mode hint is requested
+ * @mode: file mode for device node created in /dev
+ *
+ * See: devtmpfs.c, function devtmpfs_create_node()
+ *
+ * Return: recommended device file name in /dev
+ */
+static char *hmcdrv_dev_name(struct device *dev, umode_t *mode)
+{
+	char *nodename = NULL;
+	const char *devname = dev_name(dev); /* kernel device name */
+
+	if (devname)
+		nodename = kasprintf(GFP_KERNEL, "%s", devname);
+
+	/* on device destroy (rmmod) the mode pointer may be NULL
+	 */
+	if (mode)
+		*mode = hmcdrv_dev.mode;
+
+	return nodename;
+}
+
+#endif	/* HMCDRV_DEV_CLASS */
+
+/*
+ * open()
+ */
+static int hmcdrv_dev_open(struct inode *inode, struct file *fp)
+{
+	int rc;
+
+	/* check for non-blocking access, which is not supported */
+	if (fp->f_flags & O_NONBLOCK)
+		return -EINVAL;
+
+	/* Because it makes no sense to open this device read-only (an FTP
+	 * command could not be issued then), we respond with an error.
+	 */
+	if ((fp->f_flags & O_ACCMODE) == O_RDONLY)
+		return -EINVAL;
+
+	/* prevent unloading this module as long as anyone holds the
+	 * device file open - so increment the reference count here
+	 */
+	if (!try_module_get(THIS_MODULE))
+		return -ENODEV;
+
+	fp->private_data = NULL; /* no command yet */
+	rc = hmcdrv_ftp_startup();
+	if (rc)
+		module_put(THIS_MODULE);
+
+	pr_debug("open file '/dev/%pD' with return code %d\n", fp, rc);
+	return rc;
+}
+
+/*
+ * release()
+ */
+static int hmcdrv_dev_release(struct inode *inode, struct file *fp)
+{
+	pr_debug("closing file '/dev/%pD'\n", fp);
+	kfree(fp->private_data);
+	fp->private_data = NULL;
+	hmcdrv_ftp_shutdown();
+	module_put(THIS_MODULE);
+	return 0;
+}
+
+/*
+ * lseek()
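+ *
+ * SEEK_SET and SEEK_CUR merely reposition within the current response,
+ * while SEEK_END is (ab)used to discard the stored FTP command so that
+ * a new one can be written (see the comment in the switch below).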
+ */
+static loff_t hmcdrv_dev_seek(struct file *fp, loff_t pos, int whence)
+{
+	switch (whence) {
+	case SEEK_CUR: /* relative to current file position */
+		pos += fp->f_pos; /* new position stored in 'pos' */
+		break;
+
+	case SEEK_SET: /* absolute (relative to beginning of file) */
+		break; /* SEEK_SET */
+
+		/* We use SEEK_END as a special indicator for a SEEK_SET
+		 * (set absolute position), combined with a FTP command
+		 * clear.
+		 */
+	case SEEK_END:
+		if (fp->private_data) {
+			kfree(fp->private_data);
+			fp->private_data = NULL;
+		}
+
+		break; /* SEEK_END */
+
+	default: /* SEEK_DATA, SEEK_HOLE: unsupported */
+		return -EINVAL;
+	}
+
+	if (pos < 0)
+		return -EINVAL;
+
+	if (fp->f_pos != pos)
+		++fp->f_version;
+
+	fp->f_pos = pos;
+	return pos;
+}
+
+/*
+ * transfer (helper function)
+ */
+static ssize_t hmcdrv_dev_transfer(char __kernel *cmd, loff_t offset,
+				   char __user *buf, size_t len)
+{
+	ssize_t retlen;
+	unsigned trials = HMCDRV_DEV_BUSY_RETRIES;
+
+	do {
+		retlen = hmcdrv_ftp_cmd(cmd, offset, buf, len);
+
+		if (retlen != -EBUSY)
+			break;
+
+		msleep(HMCDRV_DEV_BUSY_DELAY);
+
+	} while (--trials > 0);
+
+	return retlen;
+}
+
+/*
+ * read()
+ */
+static ssize_t hmcdrv_dev_read(struct file *fp, char __user *ubuf,
+			       size_t len, loff_t *pos)
+{
+	ssize_t retlen;
+
+	if (((fp->f_flags & O_ACCMODE) == O_WRONLY) ||
+	    (fp->private_data == NULL)) { /* no FTP cmd defined ? */
+		return -EBADF;
+	}
+
+	retlen = hmcdrv_dev_transfer((char *) fp->private_data,
+				     *pos, ubuf, len);
+
+	pr_debug("read from file '/dev/%pD' at %lld returns %zd/%zu\n",
+		 fp, (long long) *pos, retlen, len);
+
+	if (retlen > 0)
+		*pos += retlen;
+
+	return retlen;
+}
+
+/*
+ * write()
+ */
+static ssize_t hmcdrv_dev_write(struct file *fp, const char __user *ubuf,
+				size_t len, loff_t *pos)
+{
+	ssize_t retlen;
+
+	pr_debug("writing file '/dev/%pD' at pos. %lld with length %zd\n",
+		 fp, (long long) *pos, len);
+
+	if (!fp->private_data) { /* first expect a cmd write */
+		fp->private_data = kmalloc(len + 1, GFP_KERNEL);
+
+		if (!fp->private_data)
+			return -ENOMEM;
+
+		if (!copy_from_user(fp->private_data, ubuf, len)) {
+			((char *)fp->private_data)[len] = '\0';
+			return len;
+		}
+
+		kfree(fp->private_data);
+		fp->private_data = NULL;
+		return -EFAULT;
+	}
+
+	retlen = hmcdrv_dev_transfer((char *) fp->private_data,
+				     *pos, (char __user *) ubuf, len);
+	if (retlen > 0)
+		*pos += retlen;
+
+	pr_debug("write to file '/dev/%pD' returned %zd\n", fp, retlen);
+
+	return retlen;
+}
+
+/**
+ * hmcdrv_dev_init() - creates an HMC drive CD/DVD device
+ *
+ * This function creates an HMC drive CD/DVD kernel device and an associated
+ * device under /dev, using a dynamically allocated major number.
+ *
+ * Return: 0 on success, else an error code.
+ */
+int hmcdrv_dev_init(void)
+{
+	int rc;
+
+#ifdef HMCDRV_DEV_CLASS
+	struct device *dev;
+
+	rc = alloc_chrdev_region(&hmcdrv_dev_no, 0, 1, HMCDRV_DEV_NAME);
+
+	if (rc)
+		goto out_err;
+
+	cdev_init(&hmcdrv_dev.dev, &hmcdrv_dev_fops);
+	hmcdrv_dev.dev.owner = THIS_MODULE;
+	rc = cdev_add(&hmcdrv_dev.dev, hmcdrv_dev_no, 1);
+
+	if (rc)
+		goto out_unreg;
+
+	/* At this point the character device exists in the kernel (see
+	 * /proc/devices), but not under /dev nor /sys/devices/virtual. So
+	 * we have to create an associated class (see /sys/class).
+	 */
+	hmcdrv_dev_class = class_create(THIS_MODULE, HMCDRV_DEV_CLASS);
+
+	if (IS_ERR(hmcdrv_dev_class)) {
+		rc = PTR_ERR(hmcdrv_dev_class);
+		goto out_devdel;
+	}
+
+	/* Finally a device node in /dev has to be established (as 'mknod'
+	 * does from the command line). Notice that assignment of a device
+	 * node name/mode function is optional (only for mode != 0600).
+	 */
+	hmcdrv_dev.mode = 0; /* "unset" */
+	hmcdrv_dev_class->devnode = hmcdrv_dev_name;
+
+	dev = device_create(hmcdrv_dev_class, NULL, hmcdrv_dev_no, NULL,
+			    "%s", HMCDRV_DEV_NAME);
+	if (!IS_ERR(dev))
+		return 0;
+
+	rc = PTR_ERR(dev);
+	class_destroy(hmcdrv_dev_class);
+	hmcdrv_dev_class = NULL;
+
+out_devdel:
+	cdev_del(&hmcdrv_dev.dev);
+
+out_unreg:
+	unregister_chrdev_region(hmcdrv_dev_no, 1);
+
+out_err:
+
+#else  /* !HMCDRV_DEV_CLASS */
+	hmcdrv_dev.dev.minor = MISC_DYNAMIC_MINOR;
+	hmcdrv_dev.dev.name = HMCDRV_DEV_NAME;
+	hmcdrv_dev.dev.fops = &hmcdrv_dev_fops;
+	hmcdrv_dev.dev.mode = 0; /* finally produces 0600 */
+	rc = misc_register(&hmcdrv_dev.dev);
+#endif	/* HMCDRV_DEV_CLASS */
+
+	return rc;
+}
+
+/**
+ * hmcdrv_dev_exit() - destroys an HMC drive CD/DVD device
+ */
+void hmcdrv_dev_exit(void)
+{
+#ifdef HMCDRV_DEV_CLASS
+	if (!IS_ERR_OR_NULL(hmcdrv_dev_class)) {
+		device_destroy(hmcdrv_dev_class, hmcdrv_dev_no);
+		class_destroy(hmcdrv_dev_class);
+	}
+
+	cdev_del(&hmcdrv_dev.dev);
+	unregister_chrdev_region(hmcdrv_dev_no, 1);
+#else  /* !HMCDRV_DEV_CLASS */
+	misc_deregister(&hmcdrv_dev.dev);
+#endif	/* HMCDRV_DEV_CLASS */
+}
diff --git a/drivers/s390/char/hmcdrv_dev.h b/drivers/s390/char/hmcdrv_dev.h
new file mode 100644
index 0000000..558eba9
--- /dev/null
+++ b/drivers/s390/char/hmcdrv_dev.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ *    SE/HMC Drive FTP Device
+ *
+ *    Copyright IBM Corp. 2013
+ *    Author(s): Ralf Hoppe (rhoppe@de.ibm.com)
+ */
+
+#ifndef __HMCDRV_DEV_H__
+#define __HMCDRV_DEV_H__
+
+int hmcdrv_dev_init(void);
+void hmcdrv_dev_exit(void);
+
+#endif	 /* __HMCDRV_DEV_H__ */
diff --git a/drivers/s390/char/hmcdrv_ftp.c b/drivers/s390/char/hmcdrv_ftp.c
new file mode 100644
index 0000000..0e70397
--- /dev/null
+++ b/drivers/s390/char/hmcdrv_ftp.c
@@ -0,0 +1,344 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ *    HMC Drive FTP Services
+ *
+ *    Copyright IBM Corp. 2013
+ *    Author(s): Ralf Hoppe (rhoppe@de.ibm.com)
+ */
+
+#define KMSG_COMPONENT "hmcdrv"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <linux/export.h>
+
+#include <linux/ctype.h>
+#include <linux/crc16.h>
+
+#include "hmcdrv_ftp.h"
+#include "hmcdrv_cache.h"
+#include "sclp_ftp.h"
+#include "diag_ftp.h"
+
+/**
+ * struct hmcdrv_ftp_ops - HMC drive FTP operations
+ * @startup: startup function
+ * @shutdown: shutdown function
+ * @cmd: FTP transfer function
+ */
+struct hmcdrv_ftp_ops {
+	int (*startup)(void);
+	void (*shutdown)(void);
+	ssize_t (*transfer)(const struct hmcdrv_ftp_cmdspec *ftp,
+			    size_t *fsize);
+};
+
+static enum hmcdrv_ftp_cmdid hmcdrv_ftp_cmd_getid(const char *cmd, int len);
+static int hmcdrv_ftp_parse(char *cmd, struct hmcdrv_ftp_cmdspec *ftp);
+
+static const struct hmcdrv_ftp_ops *hmcdrv_ftp_funcs; /* current operations */
+static DEFINE_MUTEX(hmcdrv_ftp_mutex); /* mutex for hmcdrv_ftp_funcs */
+static unsigned hmcdrv_ftp_refcnt; /* start/shutdown reference counter */
+
+/**
+ * hmcdrv_ftp_cmd_getid() - determine FTP command ID from a command string
+ * @cmd: FTP command string (NOT zero-terminated)
+ * @len: length of FTP command string in @cmd
+ */
+static enum hmcdrv_ftp_cmdid hmcdrv_ftp_cmd_getid(const char *cmd, int len)
+{
+	/* HMC FTP command descriptor */
+	struct hmcdrv_ftp_cmd_desc {
+		const char *str;	   /* command string */
+		enum hmcdrv_ftp_cmdid cmd; /* associated command as enum */
+	};
+
+	/* Description of all HMC drive FTP commands
+	 *
+	 * Notes:
+	 * 1. Array size should be a prime number.
+	 * 2. Do not change the order of commands in table (because the
+	 *    index is determined by CRC % ARRAY_SIZE).
+	 * 3. Original command 'nlist' was renamed, else the CRC would
+	 *    collide with 'append' (see point 2).
+	 */
+	static const struct hmcdrv_ftp_cmd_desc ftpcmds[7] = {
+
+		{.str = "get", /* [0] get (CRC = 0x68eb) */
+		 .cmd = HMCDRV_FTP_GET},
+		{.str = "dir", /* [1] dir (CRC = 0x6a9e) */
+		 .cmd = HMCDRV_FTP_DIR},
+		{.str = "delete", /* [2] delete (CRC = 0x53ae) */
+		 .cmd = HMCDRV_FTP_DELETE},
+		{.str = "nls", /* [3] nls (CRC = 0xf87c) */
+		 .cmd = HMCDRV_FTP_NLIST},
+		{.str = "put", /* [4] put (CRC = 0xac56) */
+		 .cmd = HMCDRV_FTP_PUT},
+		{.str = "append", /* [5] append (CRC = 0xf56e) */
+		 .cmd = HMCDRV_FTP_APPEND},
+		{.str = NULL} /* [6] unused */
+	};
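+
+	/*
+	 * For reference: crc16(0xffff, str, len) % 7 selects the slot,
+	 * e.g. "get" 0x68eb % 7 = 0, "dir" 0x6a9e % 7 = 1,
+	 * "delete" 0x53ae % 7 = 2, "nls" 0xf87c % 7 = 3,
+	 * "put" 0xac56 % 7 = 4, "append" 0xf56e % 7 = 5,
+	 * matching the table positions above.
+	 */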
+
+	const struct hmcdrv_ftp_cmd_desc *pdesc;
+
+	u16 crc = 0xffffU;
+
+	if (len == 0)
+		return HMCDRV_FTP_NOOP; /* error indicator */
+
+	crc = crc16(crc, cmd, len);
+	pdesc = ftpcmds + (crc % ARRAY_SIZE(ftpcmds));
+	pr_debug("FTP command '%s' has CRC 0x%04x, at table pos. %lu\n",
+		 cmd, crc, (crc % ARRAY_SIZE(ftpcmds)));
+
+	if (!pdesc->str || strncmp(pdesc->str, cmd, len))
+		return HMCDRV_FTP_NOOP;
+
+	pr_debug("FTP command '%s' found, with ID %d\n",
+		 pdesc->str, pdesc->cmd);
+
+	return pdesc->cmd;
+}
+
+/**
+ * hmcdrv_ftp_parse() - HMC drive FTP command parser
+ * @cmd: FTP command string "<cmd> <filename>"
+ * @ftp: Pointer to FTP command specification buffer (output)
+ *
+ * Return: 0 on success, else a (negative) error code
+ */
+static int hmcdrv_ftp_parse(char *cmd, struct hmcdrv_ftp_cmdspec *ftp)
+{
+	char *start;
+	int argc = 0;
+
+	ftp->id = HMCDRV_FTP_NOOP;
+	ftp->fname = NULL;
+
+	while (*cmd != '\0') {
+
+		while (isspace(*cmd))
+			++cmd;
+
+		if (*cmd == '\0')
+			break;
+
+		start = cmd;
+
+		switch (argc) {
+		case 0: /* 1st argument (FTP command) */
+			while ((*cmd != '\0') && !isspace(*cmd))
+				++cmd;
+			ftp->id = hmcdrv_ftp_cmd_getid(start, cmd - start);
+			break;
+		case 1: /* 2nd / last argument (rest of line) */
+			while ((*cmd != '\0') && !iscntrl(*cmd))
+				++cmd;
+			ftp->fname = start;
+			/* fall through */
+		default:
+			*cmd = '\0';
+			break;
+		} /* switch */
+
+		++argc;
+	} /* while */
+
+	if (!ftp->fname || (ftp->id == HMCDRV_FTP_NOOP))
+		return -EINVAL;
+
+	return 0;
+}
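+
+/*
+ * Example: hmcdrv_ftp_parse("get <filename>", &ftp) sets ftp.id to
+ * HMCDRV_FTP_GET and ftp.fname to point at "<filename>" (the rest of
+ * the line, terminated in place at the first control character).
+ */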
+
+/**
+ * hmcdrv_ftp_do() - perform an HMC drive FTP, with data from kernel-space
+ * @ftp: pointer to FTP command specification
+ *
+ * Return: number of bytes read/written or a negative error code
+ */
+ssize_t hmcdrv_ftp_do(const struct hmcdrv_ftp_cmdspec *ftp)
+{
+	ssize_t len;
+
+	mutex_lock(&hmcdrv_ftp_mutex);
+
+	if (hmcdrv_ftp_funcs && hmcdrv_ftp_refcnt) {
+		pr_debug("starting transfer, cmd %d for '%s' at %lld with %zd bytes\n",
+			 ftp->id, ftp->fname, (long long) ftp->ofs, ftp->len);
+		len = hmcdrv_cache_cmd(ftp, hmcdrv_ftp_funcs->transfer);
+	} else {
+		len = -ENXIO;
+	}
+
+	mutex_unlock(&hmcdrv_ftp_mutex);
+	return len;
+}
+EXPORT_SYMBOL(hmcdrv_ftp_do);
+
+/**
+ * hmcdrv_ftp_probe() - probe for the HMC drive FTP service
+ *
+ * Return: 0 if service is available, else a (negative) error code
+ */
+int hmcdrv_ftp_probe(void)
+{
+	int rc;
+
+	struct hmcdrv_ftp_cmdspec ftp = {
+		.id = HMCDRV_FTP_NOOP,
+		.ofs = 0,
+		.fname = "",
+		.len = PAGE_SIZE
+	};
+
+	ftp.buf = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
+
+	if (!ftp.buf)
+		return -ENOMEM;
+
+	rc = hmcdrv_ftp_startup();
+
+	if (rc)
+		goto out;
+
+	rc = hmcdrv_ftp_do(&ftp);
+	hmcdrv_ftp_shutdown();
+
+	switch (rc) {
+	case -ENOENT: /* no such file/media or currently busy, */
+	case -EBUSY:  /* but service seems to be available */
+		rc = 0;
+		break;
+	default: /* leave 'rc' as it is for [0, -EPERM, -E...] */
+		if (rc > 0)
+			rc = 0; /* clear length (success) */
+		break;
+	} /* switch */
+out:
+	free_page((unsigned long) ftp.buf);
+	return rc;
+}
+EXPORT_SYMBOL(hmcdrv_ftp_probe);
+
+/**
+ * hmcdrv_ftp_cmd() - Perform an HMC drive FTP, with data from user-space
+ *
+ * @cmd: FTP command string "<cmd> <filename>"
+ * @offset: file position to read/write
+ * @buf: user-space buffer for read/written directory/file
+ * @len: size of @buf (read/dir) or number of bytes to write
+ *
+ * This function must not be called before hmcdrv_ftp_startup() has been
+ * called.
+ *
+ * Return: number of bytes read/written or a negative error code
+ */
+ssize_t hmcdrv_ftp_cmd(char __kernel *cmd, loff_t offset,
+		       char __user *buf, size_t len)
+{
+	int order;
+
+	struct hmcdrv_ftp_cmdspec ftp = {.len = len, .ofs = offset};
+	ssize_t retlen = hmcdrv_ftp_parse(cmd, &ftp);
+
+	if (retlen)
+		return retlen;
+
+	order = get_order(ftp.len);
+	ftp.buf = (void *) __get_free_pages(GFP_KERNEL | GFP_DMA, order);
+
+	if (!ftp.buf)
+		return -ENOMEM;
+
+	switch (ftp.id) {
+	case HMCDRV_FTP_DIR:
+	case HMCDRV_FTP_NLIST:
+	case HMCDRV_FTP_GET:
+		retlen = hmcdrv_ftp_do(&ftp);
+
+		if ((retlen >= 0) &&
+		    copy_to_user(buf, ftp.buf, retlen))
+			retlen = -EFAULT;
+		break;
+
+	case HMCDRV_FTP_PUT:
+	case HMCDRV_FTP_APPEND:
+		if (!copy_from_user(ftp.buf, buf, ftp.len))
+			retlen = hmcdrv_ftp_do(&ftp);
+		else
+			retlen = -EFAULT;
+		break;
+
+	case HMCDRV_FTP_DELETE:
+		retlen = hmcdrv_ftp_do(&ftp);
+		break;
+
+	default:
+		retlen = -EOPNOTSUPP;
+		break;
+	}
+
+	free_pages((unsigned long) ftp.buf, order);
+	return retlen;
+}
+
+/**
+ * hmcdrv_ftp_startup() - startup of HMC drive FTP functionality for a
+ * dedicated (owner) instance
+ *
+ * Return: 0 on success, else a (negative) error code
+ */
+int hmcdrv_ftp_startup(void)
+{
+	static const struct hmcdrv_ftp_ops hmcdrv_ftp_zvm = {
+		.startup = diag_ftp_startup,
+		.shutdown = diag_ftp_shutdown,
+		.transfer = diag_ftp_cmd
+	};
+
+	static const struct hmcdrv_ftp_ops hmcdrv_ftp_lpar = {
+		.startup = sclp_ftp_startup,
+		.shutdown = sclp_ftp_shutdown,
+		.transfer = sclp_ftp_cmd
+	};
+
+	int rc = 0;
+
+	mutex_lock(&hmcdrv_ftp_mutex); /* block transfers while start-up */
+
+	if (hmcdrv_ftp_refcnt == 0) {
+		if (MACHINE_IS_VM)
+			hmcdrv_ftp_funcs = &hmcdrv_ftp_zvm;
+		else if (MACHINE_IS_LPAR || MACHINE_IS_KVM)
+			hmcdrv_ftp_funcs = &hmcdrv_ftp_lpar;
+		else
+			rc = -EOPNOTSUPP;
+
+		if (hmcdrv_ftp_funcs)
+			rc = hmcdrv_ftp_funcs->startup();
+	}
+
+	if (!rc)
+		++hmcdrv_ftp_refcnt;
+
+	mutex_unlock(&hmcdrv_ftp_mutex);
+	return rc;
+}
+EXPORT_SYMBOL(hmcdrv_ftp_startup);
+
+/**
+ * hmcdrv_ftp_shutdown() - shutdown of HMC drive FTP functionality for a
+ * dedicated (owner) instance
+ */
+void hmcdrv_ftp_shutdown(void)
+{
+	mutex_lock(&hmcdrv_ftp_mutex);
+	--hmcdrv_ftp_refcnt;
+
+	if ((hmcdrv_ftp_refcnt == 0) && hmcdrv_ftp_funcs)
+		hmcdrv_ftp_funcs->shutdown();
+
+	mutex_unlock(&hmcdrv_ftp_mutex);
+}
+EXPORT_SYMBOL(hmcdrv_ftp_shutdown);
diff --git a/drivers/s390/char/hmcdrv_ftp.h b/drivers/s390/char/hmcdrv_ftp.h
new file mode 100644
index 0000000..d12ca12
--- /dev/null
+++ b/drivers/s390/char/hmcdrv_ftp.h
@@ -0,0 +1,64 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ *    SE/HMC Drive FTP Services
+ *
+ *    Copyright IBM Corp. 2013
+ *    Author(s): Ralf Hoppe (rhoppe@de.ibm.com)
+ */
+
+#ifndef __HMCDRV_FTP_H__
+#define __HMCDRV_FTP_H__
+
+#include <linux/types.h> /* size_t, loff_t */
+
+/*
+ * HMC drive FTP Service max. length of path (w/ EOS)
+ */
+#define HMCDRV_FTP_FIDENT_MAX 192
+
+/**
+ * enum hmcdrv_ftp_cmdid - HMC drive FTP commands
+ * @HMCDRV_FTP_NOOP: do nothing (only for probing)
+ * @HMCDRV_FTP_GET: read a file
+ * @HMCDRV_FTP_PUT: (over-) write a file
+ * @HMCDRV_FTP_APPEND: append to a file
+ * @HMCDRV_FTP_DIR: list directory long (ls -l)
+ * @HMCDRV_FTP_NLIST: list files, no directories (name list)
+ * @HMCDRV_FTP_DELETE: delete a file
+ * @HMCDRV_FTP_CANCEL: cancel operation (SCLP/LPAR only)
+ */
+enum hmcdrv_ftp_cmdid {
+	HMCDRV_FTP_NOOP = 0,
+	HMCDRV_FTP_GET = 1,
+	HMCDRV_FTP_PUT = 2,
+	HMCDRV_FTP_APPEND = 3,
+	HMCDRV_FTP_DIR = 4,
+	HMCDRV_FTP_NLIST = 5,
+	HMCDRV_FTP_DELETE = 6,
+	HMCDRV_FTP_CANCEL = 7
+};
+
+/**
+ * struct hmcdrv_ftp_cmdspec - FTP command specification
+ * @id: FTP command ID
+ * @ofs: offset in file
+ * @fname: filename (ASCII), null-terminated
+ * @buf: kernel-space transfer data buffer, 4k aligned
+ * @len: (max) number of bytes to transfer from/to @buf
+ */
+struct hmcdrv_ftp_cmdspec {
+	enum hmcdrv_ftp_cmdid id;
+	loff_t ofs;
+	const char *fname;
+	void __kernel *buf;
+	size_t len;
+};
+
+int hmcdrv_ftp_startup(void);
+void hmcdrv_ftp_shutdown(void);
+int hmcdrv_ftp_probe(void);
+ssize_t hmcdrv_ftp_do(const struct hmcdrv_ftp_cmdspec *ftp);
+ssize_t hmcdrv_ftp_cmd(char __kernel *cmd, loff_t offset,
+		       char __user *buf, size_t len);
+
+#endif	 /* __HMCDRV_FTP_H__ */
diff --git a/drivers/s390/char/hmcdrv_mod.c b/drivers/s390/char/hmcdrv_mod.c
new file mode 100644
index 0000000..1447d08
--- /dev/null
+++ b/drivers/s390/char/hmcdrv_mod.c
@@ -0,0 +1,64 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ *    HMC Drive DVD Module
+ *
+ *    Copyright IBM Corp. 2013
+ *    Author(s): Ralf Hoppe (rhoppe@de.ibm.com)
+ */
+
+#define KMSG_COMPONENT "hmcdrv"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/stat.h>
+
+#include "hmcdrv_ftp.h"
+#include "hmcdrv_dev.h"
+#include "hmcdrv_cache.h"
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Copyright 2013 IBM Corporation");
+MODULE_DESCRIPTION("HMC drive DVD access");
+
+/*
+ * module parameter 'cachesize'
+ */
+static size_t hmcdrv_mod_cachesize = HMCDRV_CACHE_SIZE_DFLT;
+module_param_named(cachesize, hmcdrv_mod_cachesize, ulong, S_IRUGO);
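+/* e.g. "modprobe hmcdrv cachesize=65536"; a value of 0 disables the read
+ * cache entirely (see hmcdrv_cache_startup())
+ */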
+
+/**
+ * hmcdrv_mod_init() - module init function
+ */
+static int __init hmcdrv_mod_init(void)
+{
+	int rc = hmcdrv_ftp_probe(); /* perform w/o cache */
+
+	if (rc)
+		return rc;
+
+	rc = hmcdrv_cache_startup(hmcdrv_mod_cachesize);
+
+	if (rc)
+		return rc;
+
+	rc = hmcdrv_dev_init();
+
+	if (rc)
+		hmcdrv_cache_shutdown();
+
+	return rc;
+}
+
+/**
+ * hmcdrv_mod_exit() - module exit function
+ */
+static void __exit hmcdrv_mod_exit(void)
+{
+	hmcdrv_dev_exit();
+	hmcdrv_cache_shutdown();
+}
+
+module_init(hmcdrv_mod_init);
+module_exit(hmcdrv_mod_exit);
diff --git a/drivers/s390/char/keyboard.c b/drivers/s390/char/keyboard.c
new file mode 100644
index 0000000..567aedc
--- /dev/null
+++ b/drivers/s390/char/keyboard.c
@@ -0,0 +1,579 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ *    ebcdic keycode functions for s390 console drivers
+ *
+ *  S390 version
+ *    Copyright IBM Corp. 2003
+ *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
+ */
+
+#include <linux/module.h>
+#include <linux/sched/signal.h>
+#include <linux/slab.h>
+#include <linux/sysrq.h>
+
+#include <linux/consolemap.h>
+#include <linux/kbd_kern.h>
+#include <linux/kbd_diacr.h>
+#include <linux/uaccess.h>
+
+#include "keyboard.h"
+
+/*
+ * Handler Tables.
+ */
+#define K_HANDLERS\
+	k_self,		k_fn,		k_spec,		k_ignore,\
+	k_dead,		k_ignore,	k_ignore,	k_ignore,\
+	k_ignore,	k_ignore,	k_ignore,	k_ignore,\
+	k_ignore,	k_ignore,	k_ignore,	k_ignore
+
+typedef void (k_handler_fn)(struct kbd_data *, unsigned char);
+static k_handler_fn K_HANDLERS;
+static k_handler_fn *k_handler[16] = { K_HANDLERS };
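+/*
+ * k_handler[] is indexed by the key type (KTYP) of a keysym: slot 0
+ * (KT_LATIN) is k_self, slot 1 (KT_FN) is k_fn, slot 2 (KT_SPEC) is
+ * k_spec and slot 4 (KT_DEAD) is k_dead; all remaining slots ignore
+ * the key.
+ */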
+
+/* maximum values each key_handler can handle */
+static const int kbd_max_vals[] = {
+	255, ARRAY_SIZE(func_table) - 1, NR_FN_HANDLER - 1, 0,
+	NR_DEAD - 1, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+static const int KBD_NR_TYPES = ARRAY_SIZE(kbd_max_vals);
+
+static const unsigned char ret_diacr[NR_DEAD] = {
+	'`',	/* dead_grave */
+	'\'',	/* dead_acute */
+	'^',	/* dead_circumflex */
+	'~',	/* dead_tilde */
+	'"',	/* dead_diaeresis */
+	',',	/* dead_cedilla */
+	'_',	/* dead_macron */
+	'U',	/* dead_breve */
+	'.',	/* dead_abovedot */
+	'*',	/* dead_abovering */
+	'=',	/* dead_doubleacute */
+	'c',	/* dead_caron */
+	'k',	/* dead_ogonek */
+	'i',	/* dead_iota */
+	'#',	/* dead_voiced_sound */
+	'o',	/* dead_semivoiced_sound */
+	'!',	/* dead_belowdot */
+	'?',	/* dead_hook */
+	'+',	/* dead_horn */
+	'-',	/* dead_stroke */
+	')',	/* dead_abovecomma */
+	'(',	/* dead_abovereversedcomma */
+	':',	/* dead_doublegrave */
+	'n',	/* dead_invertedbreve */
+	';',	/* dead_belowcomma */
+	'$',	/* dead_currency */
+	'@',	/* dead_greek */
+};
+
+/*
+ * Alloc/free of kbd_data structures.
+ */
+struct kbd_data *
+kbd_alloc(void) {
+	struct kbd_data *kbd;
+	int i;
+
+	kbd = kzalloc(sizeof(struct kbd_data), GFP_KERNEL);
+	if (!kbd)
+		goto out;
+	kbd->key_maps = kzalloc(sizeof(ebc_key_maps), GFP_KERNEL);
+	if (!kbd->key_maps)
+		goto out_kbd;
+	for (i = 0; i < ARRAY_SIZE(ebc_key_maps); i++) {
+		if (ebc_key_maps[i]) {
+			kbd->key_maps[i] = kmemdup(ebc_key_maps[i],
+						   sizeof(u_short) * NR_KEYS,
+						   GFP_KERNEL);
+			if (!kbd->key_maps[i])
+				goto out_maps;
+		}
+	}
+	kbd->func_table = kzalloc(sizeof(ebc_func_table), GFP_KERNEL);
+	if (!kbd->func_table)
+		goto out_maps;
+	for (i = 0; i < ARRAY_SIZE(ebc_func_table); i++) {
+		if (ebc_func_table[i]) {
+			kbd->func_table[i] = kstrdup(ebc_func_table[i],
+						     GFP_KERNEL);
+			if (!kbd->func_table[i])
+				goto out_func;
+		}
+	}
+	kbd->fn_handler =
+		kcalloc(NR_FN_HANDLER, sizeof(fn_handler_fn *), GFP_KERNEL);
+	if (!kbd->fn_handler)
+		goto out_func;
+	kbd->accent_table = kmemdup(ebc_accent_table,
+				    sizeof(struct kbdiacruc) * MAX_DIACR,
+				    GFP_KERNEL);
+	if (!kbd->accent_table)
+		goto out_fn_handler;
+	kbd->accent_table_size = ebc_accent_table_size;
+	return kbd;
+
+out_fn_handler:
+	kfree(kbd->fn_handler);
+out_func:
+	for (i = 0; i < ARRAY_SIZE(ebc_func_table); i++)
+		kfree(kbd->func_table[i]);
+	kfree(kbd->func_table);
+out_maps:
+	for (i = 0; i < ARRAY_SIZE(ebc_key_maps); i++)
+		kfree(kbd->key_maps[i]);
+	kfree(kbd->key_maps);
+out_kbd:
+	kfree(kbd);
+out:
+	return NULL;
+}
+
+void
+kbd_free(struct kbd_data *kbd)
+{
+	int i;
+
+	kfree(kbd->accent_table);
+	kfree(kbd->fn_handler);
+	for (i = 0; i < ARRAY_SIZE(ebc_func_table); i++)
+		kfree(kbd->func_table[i]);
+	kfree(kbd->func_table);
+	for (i = 0; i < ARRAY_SIZE(ebc_key_maps); i++)
+		kfree(kbd->key_maps[i]);
+	kfree(kbd->key_maps);
+	kfree(kbd);
+}
+
+/*
+ * Generate ascii -> ebcdic translation table from kbd_data.
+ */
+void
+kbd_ascebc(struct kbd_data *kbd, unsigned char *ascebc)
+{
+	unsigned short *keymap, keysym;
+	int i, j, k;
+
+	memset(ascebc, 0x40, 256);
+	for (i = 0; i < ARRAY_SIZE(ebc_key_maps); i++) {
+		keymap = kbd->key_maps[i];
+		if (!keymap)
+			continue;
+		for (j = 0; j < NR_KEYS; j++) {
+			k = ((i & 1) << 7) + j;
+			keysym = keymap[j];
+			if (KTYP(keysym) == (KT_LATIN | 0xf0) ||
+			    KTYP(keysym) == (KT_LETTER | 0xf0))
+				ascebc[KVAL(keysym)] = k;
+			else if (KTYP(keysym) == (KT_DEAD | 0xf0))
+				ascebc[ret_diacr[KVAL(keysym)]] = k;
+		}
+	}
+}
+
+#if 0
+/*
+ * Generate ebcdic -> ascii translation table from kbd_data.
+ */
+void
+kbd_ebcasc(struct kbd_data *kbd, unsigned char *ebcasc)
+{
+	unsigned short *keymap, keysym;
+	int i, j, k;
+
+	memset(ebcasc, ' ', 256);
+	for (i = 0; i < ARRAY_SIZE(ebc_key_maps); i++) {
+		keymap = kbd->key_maps[i];
+		if (!keymap)
+			continue;
+		for (j = 0; j < NR_KEYS; j++) {
+			keysym = keymap[j];
+			k = ((i & 1) << 7) + j;
+			if (KTYP(keysym) == (KT_LATIN | 0xf0) ||
+			    KTYP(keysym) == (KT_LETTER | 0xf0))
+				ebcasc[k] = KVAL(keysym);
+			else if (KTYP(keysym) == (KT_DEAD | 0xf0))
+				ebcasc[k] = ret_diacr[KVAL(keysym)];
+		}
+	}
+}
+#endif
+
+/*
+ * We have a combining character DIACR here, followed by the character CH.
+ * If the combination occurs in the table, return the corresponding value.
+ * Otherwise, if CH is a space or equals DIACR, return DIACR.
+ * Otherwise, conclude that DIACR was not combining after all,
+ * queue it and return CH.
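+ *
+ * Example (assuming an accent table entry { '^', 'e', 'ê' }): a dead
+ * circumflex followed by 'e' yields 'ê', while '^' followed by 'x'
+ * queues the '^' and returns 'x' unchanged.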
+ */
+static unsigned int
+handle_diacr(struct kbd_data *kbd, unsigned int ch)
+{
+	int i, d;
+
+	d = kbd->diacr;
+	kbd->diacr = 0;
+
+	for (i = 0; i < kbd->accent_table_size; i++) {
+		if (kbd->accent_table[i].diacr == d &&
+		    kbd->accent_table[i].base == ch)
+			return kbd->accent_table[i].result;
+	}
+
+	if (ch == ' ' || ch == d)
+		return d;
+
+	kbd_put_queue(kbd->port, d);
+	return ch;
+}
+
+/*
+ * Handle dead key.
+ */
+static void
+k_dead(struct kbd_data *kbd, unsigned char value)
+{
+	value = ret_diacr[value];
+	kbd->diacr = (kbd->diacr ? handle_diacr(kbd, value) : value);
+}
+
+/*
+ * Normal character handler.
+ */
+static void
+k_self(struct kbd_data *kbd, unsigned char value)
+{
+	if (kbd->diacr)
+		value = handle_diacr(kbd, value);
+	kbd_put_queue(kbd->port, value);
+}
+
+/*
+ * Special key handlers
+ */
+static void
+k_ignore(struct kbd_data *kbd, unsigned char value)
+{
+}
+
+/*
+ * Function key handler.
+ */
+static void
+k_fn(struct kbd_data *kbd, unsigned char value)
+{
+	if (kbd->func_table[value])
+		kbd_puts_queue(kbd->port, kbd->func_table[value]);
+}
+
+static void
+k_spec(struct kbd_data *kbd, unsigned char value)
+{
+	if (value >= NR_FN_HANDLER)
+		return;
+	if (kbd->fn_handler[value])
+		kbd->fn_handler[value](kbd);
+}
+
+/*
+ * Put utf8 character to tty flip buffer.
+ * UTF-8 is defined for words of up to 31 bits,
+ * but we need only 16 bits here
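+ * (e.g. U+20AC, the euro sign, is emitted as the three bytes
+ * 0xe2 0x82 0xac, while plain ASCII passes through as a single byte)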
+ */
+static void
+to_utf8(struct tty_port *port, ushort c)
+{
+	if (c < 0x80)
+		/*  0******* */
+		kbd_put_queue(port, c);
+	else if (c < 0x800) {
+		/* 110***** 10****** */
+		kbd_put_queue(port, 0xc0 | (c >> 6));
+		kbd_put_queue(port, 0x80 | (c & 0x3f));
+	} else {
+		/* 1110**** 10****** 10****** */
+		kbd_put_queue(port, 0xe0 | (c >> 12));
+		kbd_put_queue(port, 0x80 | ((c >> 6) & 0x3f));
+		kbd_put_queue(port, 0x80 | (c & 0x3f));
+	}
+}
+
+/*
+ * Process keycode.
+ */
+void
+kbd_keycode(struct kbd_data *kbd, unsigned int keycode)
+{
+	unsigned short keysym;
+	unsigned char type, value;
+
+	if (!kbd)
+		return;
+
+	if (keycode >= 384)
+		keysym = kbd->key_maps[5][keycode - 384];
+	else if (keycode >= 256)
+		keysym = kbd->key_maps[4][keycode - 256];
+	else if (keycode >= 128)
+		keysym = kbd->key_maps[1][keycode - 128];
+	else
+		keysym = kbd->key_maps[0][keycode];
+
+	type = KTYP(keysym);
+	if (type >= 0xf0) {
+		type -= 0xf0;
+		if (type == KT_LETTER)
+			type = KT_LATIN;
+		value = KVAL(keysym);
+#ifdef CONFIG_MAGIC_SYSRQ	       /* Handle the SysRq Hack */
+		if (kbd->sysrq) {
+			if (kbd->sysrq == K(KT_LATIN, '-')) {
+				kbd->sysrq = 0;
+				handle_sysrq(value);
+				return;
+			}
+			if (value == '-') {
+				kbd->sysrq = K(KT_LATIN, '-');
+				return;
+			}
+			/* Incomplete sysrq sequence. */
+			(*k_handler[KTYP(kbd->sysrq)])(kbd, KVAL(kbd->sysrq));
+			kbd->sysrq = 0;
+		} else if ((type == KT_LATIN && value == '^') ||
+			   (type == KT_DEAD && ret_diacr[value] == '^')) {
+			kbd->sysrq = K(type, value);
+			return;
+		}
+#endif
+		(*k_handler[type])(kbd, value);
+	} else
+		to_utf8(kbd->port, keysym);
+}
+
+/*
+ * Ioctl stuff.
+ */
+static int
+do_kdsk_ioctl(struct kbd_data *kbd, struct kbentry __user *user_kbe,
+	      int cmd, int perm)
+{
+	struct kbentry tmp;
+	unsigned long kb_index, kb_table;
+	ushort *key_map, val, ov;
+
+	if (copy_from_user(&tmp, user_kbe, sizeof(struct kbentry)))
+		return -EFAULT;
+	kb_index = (unsigned long) tmp.kb_index;
+#if NR_KEYS < 256
+	if (kb_index >= NR_KEYS)
+		return -EINVAL;
+#endif
+	kb_table = (unsigned long) tmp.kb_table;
+#if MAX_NR_KEYMAPS < 256
+	if (kb_table >= MAX_NR_KEYMAPS)
+		return -EINVAL;
+	kb_table = array_index_nospec(kb_table, MAX_NR_KEYMAPS);
+#endif
+
+	switch (cmd) {
+	case KDGKBENT:
+		key_map = kbd->key_maps[kb_table];
+		if (key_map) {
+		    val = U(key_map[kb_index]);
+		    if (KTYP(val) >= KBD_NR_TYPES)
+			val = K_HOLE;
+		} else
+		    val = (kb_index ? K_HOLE : K_NOSUCHMAP);
+		return put_user(val, &user_kbe->kb_value);
+	case KDSKBENT:
+		if (!perm)
+			return -EPERM;
+		if (!kb_index && tmp.kb_value == K_NOSUCHMAP) {
+			/* disallocate map */
+			key_map = kbd->key_maps[kb_table];
+			if (key_map) {
+				kbd->key_maps[kb_table] = NULL;
+				kfree(key_map);
+			}
+			break;
+		}
+
+		if (KTYP(tmp.kb_value) >= KBD_NR_TYPES)
+			return -EINVAL;
+		if (KVAL(tmp.kb_value) > kbd_max_vals[KTYP(tmp.kb_value)])
+			return -EINVAL;
+
+		if (!(key_map = kbd->key_maps[kb_table])) {
+			int j;
+
+			key_map = kmalloc(sizeof(plain_map), GFP_KERNEL);
+			if (!key_map)
+				return -ENOMEM;
+			kbd->key_maps[kb_table] = key_map;
+			for (j = 0; j < NR_KEYS; j++)
+				key_map[j] = U(K_HOLE);
+		}
+		ov = U(key_map[kb_index]);
+		if (tmp.kb_value == ov)
+			break;	/* nothing to do */
+		/*
+		 * Attention Key.
+		 */
+		if (((ov == K_SAK) || (tmp.kb_value == K_SAK)) &&
+		    !capable(CAP_SYS_ADMIN))
+			return -EPERM;
+		key_map[kb_index] = U(tmp.kb_value);
+		break;
+	}
+	return 0;
+}
+
+static int
+do_kdgkb_ioctl(struct kbd_data *kbd, struct kbsentry __user *u_kbs,
+	       int cmd, int perm)
+{
+	unsigned char kb_func;
+	char *p;
+	int len;
+
+	/* Get u_kbs->kb_func. */
+	if (get_user(kb_func, &u_kbs->kb_func))
+		return -EFAULT;
+#if MAX_NR_FUNC < 256
+	if (kb_func >= MAX_NR_FUNC)
+		return -EINVAL;
+#endif
+
+	switch (cmd) {
+	case KDGKBSENT:
+		p = kbd->func_table[kb_func];
+		if (p) {
+			len = strlen(p);
+			if (len >= sizeof(u_kbs->kb_string))
+				len = sizeof(u_kbs->kb_string) - 1;
+			if (copy_to_user(u_kbs->kb_string, p, len))
+				return -EFAULT;
+		} else
+			len = 0;
+		if (put_user('\0', u_kbs->kb_string + len))
+			return -EFAULT;
+		break;
+	case KDSKBSENT:
+		if (!perm)
+			return -EPERM;
+		p = strndup_user(u_kbs->kb_string, sizeof(u_kbs->kb_string));
+		if (IS_ERR(p))
+			return PTR_ERR(p);
+		kfree(kbd->func_table[kb_func]);
+		kbd->func_table[kb_func] = p;
+		break;
+	}
+	return 0;
+}
+
+int kbd_ioctl(struct kbd_data *kbd, unsigned int cmd, unsigned long arg)
+{
+	struct tty_struct *tty;
+	void __user *argp;
+	unsigned int ct;
+	int perm;
+
+	argp = (void __user *)arg;
+
+	/*
+	 * To have permissions to do most of the vt ioctls, we either have
+	 * to be the owner of the tty, or have CAP_SYS_TTY_CONFIG.
+	 */
+	tty = tty_port_tty_get(kbd->port);
+	/* FIXME this test is pretty racy */
+	perm = current->signal->tty == tty || capable(CAP_SYS_TTY_CONFIG);
+	tty_kref_put(tty);
+	switch (cmd) {
+	case KDGKBTYPE:
+		return put_user(KB_101, (char __user *)argp);
+	case KDGKBENT:
+	case KDSKBENT:
+		return do_kdsk_ioctl(kbd, argp, cmd, perm);
+	case KDGKBSENT:
+	case KDSKBSENT:
+		return do_kdgkb_ioctl(kbd, argp, cmd, perm);
+	case KDGKBDIACR:
+	{
+		struct kbdiacrs __user *a = argp;
+		struct kbdiacr diacr;
+		int i;
+
+		if (put_user(kbd->accent_table_size, &a->kb_cnt))
+			return -EFAULT;
+		for (i = 0; i < kbd->accent_table_size; i++) {
+			diacr.diacr = kbd->accent_table[i].diacr;
+			diacr.base = kbd->accent_table[i].base;
+			diacr.result = kbd->accent_table[i].result;
+			if (copy_to_user(a->kbdiacr + i, &diacr,
+					 sizeof(struct kbdiacr)))
+				return -EFAULT;
+		}
+		return 0;
+	}
+	case KDGKBDIACRUC:
+	{
+		struct kbdiacrsuc __user *a = argp;
+
+		ct = kbd->accent_table_size;
+		if (put_user(ct, &a->kb_cnt))
+			return -EFAULT;
+		if (copy_to_user(a->kbdiacruc, kbd->accent_table,
+				 ct * sizeof(struct kbdiacruc)))
+			return -EFAULT;
+		return 0;
+	}
+	case KDSKBDIACR:
+	{
+		struct kbdiacrs __user *a = argp;
+		struct kbdiacr diacr;
+		int i;
+
+		if (!perm)
+			return -EPERM;
+		if (get_user(ct, &a->kb_cnt))
+			return -EFAULT;
+		if (ct >= MAX_DIACR)
+			return -EINVAL;
+		kbd->accent_table_size = ct;
+		for (i = 0; i < ct; i++) {
+			if (copy_from_user(&diacr, a->kbdiacr + i, sizeof(struct kbdiacr)))
+				return -EFAULT;
+			kbd->accent_table[i].diacr = diacr.diacr;
+			kbd->accent_table[i].base = diacr.base;
+			kbd->accent_table[i].result = diacr.result;
+		}
+		return 0;
+	}
+	case KDSKBDIACRUC:
+	{
+		struct kbdiacrsuc __user *a = argp;
+
+		if (!perm)
+			return -EPERM;
+		if (get_user(ct, &a->kb_cnt))
+			return -EFAULT;
+		if (ct >= MAX_DIACR)
+			return -EINVAL;
+		kbd->accent_table_size = ct;
+		if (copy_from_user(kbd->accent_table, a->kbdiacruc,
+				   ct * sizeof(struct kbdiacruc)))
+			return -EFAULT;
+		return 0;
+	}
+	default:
+		return -ENOIOCTLCMD;
+	}
+}
+
+EXPORT_SYMBOL(kbd_ioctl);
+EXPORT_SYMBOL(kbd_ascebc);
+EXPORT_SYMBOL(kbd_free);
+EXPORT_SYMBOL(kbd_alloc);
+EXPORT_SYMBOL(kbd_keycode);
diff --git a/drivers/s390/char/keyboard.h b/drivers/s390/char/keyboard.h
new file mode 100644
index 0000000..c467589
--- /dev/null
+++ b/drivers/s390/char/keyboard.h
@@ -0,0 +1,68 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ *    ebcdic keycode functions for s390 console drivers
+ *
+ *    Copyright IBM Corp. 2003
+ *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
+ */
+
+#include <linux/tty.h>
+#include <linux/tty_flip.h>
+#include <linux/keyboard.h>
+
+#define NR_FN_HANDLER	20
+
+struct kbd_data;
+
+extern int ebc_funcbufsize, ebc_funcbufleft;
+extern char *ebc_func_table[MAX_NR_FUNC];
+extern char ebc_func_buf[];
+extern char *ebc_funcbufptr;
+extern unsigned int ebc_keymap_count;
+
+extern struct kbdiacruc ebc_accent_table[];
+extern unsigned int ebc_accent_table_size;
+extern unsigned short *ebc_key_maps[MAX_NR_KEYMAPS];
+extern unsigned short ebc_plain_map[NR_KEYS];
+
+typedef void (fn_handler_fn)(struct kbd_data *);
+
+/*
+ * FIXME: explain key_maps tricks.
+ */
+
+struct kbd_data {
+	struct tty_port *port;
+	unsigned short **key_maps;
+	char **func_table;
+	fn_handler_fn **fn_handler;
+	struct kbdiacruc *accent_table;
+	unsigned int accent_table_size;
+	unsigned int diacr;
+	unsigned short sysrq;
+};
+
+struct kbd_data *kbd_alloc(void);
+void kbd_free(struct kbd_data *);
+void kbd_ascebc(struct kbd_data *, unsigned char *);
+
+void kbd_keycode(struct kbd_data *, unsigned int);
+int kbd_ioctl(struct kbd_data *, unsigned int, unsigned long);
+
+/*
+ * Helper Functions.
+ */
+static inline void
+kbd_put_queue(struct tty_port *port, int ch)
+{
+	tty_insert_flip_char(port, ch, 0);
+	tty_schedule_flip(port);
+}
+
+static inline void
+kbd_puts_queue(struct tty_port *port, char *cp)
+{
+	while (*cp)
+		tty_insert_flip_char(port, *cp++, 0);
+	tty_schedule_flip(port);
+}
diff --git a/drivers/s390/char/monreader.c b/drivers/s390/char/monreader.c
new file mode 100644
index 0000000..7bc616b
--- /dev/null
+++ b/drivers/s390/char/monreader.c
@@ -0,0 +1,652 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Character device driver for reading z/VM *MONITOR service records.
+ *
+ * Copyright IBM Corp. 2004, 2009
+ *
+ * Author: Gerald Schaefer <gerald.schaefer@de.ibm.com>
+ */
+
+#define KMSG_COMPONENT "monreader"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/miscdevice.h>
+#include <linux/ctype.h>
+#include <linux/spinlock.h>
+#include <linux/interrupt.h>
+#include <linux/poll.h>
+#include <linux/device.h>
+#include <linux/slab.h>
+#include <net/iucv/iucv.h>
+#include <linux/uaccess.h>
+#include <asm/ebcdic.h>
+#include <asm/extmem.h>
+
+
+#define MON_COLLECT_SAMPLE 0x80
+#define MON_COLLECT_EVENT  0x40
+#define MON_SERVICE	   "*MONITOR"
+#define MON_IN_USE	   0x01
+#define MON_MSGLIM	   255
+
+static char mon_dcss_name[9] = "MONDCSS\0";
+
+struct mon_msg {
+	u32 pos;
+	u32 mca_offset;
+	struct iucv_message msg;
+	char msglim_reached;
+	char replied_msglim;
+};
+
+struct mon_private {
+	struct iucv_path *path;
+	struct mon_msg *msg_array[MON_MSGLIM];
+	unsigned int   write_index;
+	unsigned int   read_index;
+	atomic_t msglim_count;
+	atomic_t read_ready;
+	atomic_t iucv_connected;
+	atomic_t iucv_severed;
+};
+
+static unsigned long mon_in_use;
+
+static unsigned long mon_dcss_start;
+static unsigned long mon_dcss_end;
+
+static DECLARE_WAIT_QUEUE_HEAD(mon_read_wait_queue);
+static DECLARE_WAIT_QUEUE_HEAD(mon_conn_wait_queue);
+
+static u8 user_data_connect[16] = {
+	/* Version code, must be 0x01 for shared mode */
+	0x01,
+	/* what to collect */
+	MON_COLLECT_SAMPLE | MON_COLLECT_EVENT,
+	/* DCSS name in EBCDIC, 8 bytes padded with blanks */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+};
+
+static u8 user_data_sever[16] = {
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+};
+
+static struct device *monreader_device;
+
+/******************************************************************************
+ *                             helper functions                               *
+ *****************************************************************************/
+/*
+ * Create the 8 bytes EBCDIC DCSS segment name from
+ * an ASCII name, incl. padding
+ */
+static void dcss_mkname(char *ascii_name, char *ebcdic_name)
+{
+	int i;
+
+	for (i = 0; i < 8; i++) {
+		if (ascii_name[i] == '\0')
+			break;
+		ebcdic_name[i] = toupper(ascii_name[i]);
+	}
+	for (; i < 8; i++)
+		ebcdic_name[i] = ' ';
+	ASCEBC(ebcdic_name, 8);
+}
+
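+/*
+ * MCA layout notes (derived from the accessors below): the 8-byte
+ * in-line IUCV message (rmmsg) carries the start and end address of
+ * the message control area (MCA) inside the DCSS.  Each 12-byte MCA
+ * entry holds a type byte at offset 0 and the start and end addresses
+ * of the described monitor records at offsets 4 and 8.
+ */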
+static inline unsigned long mon_mca_start(struct mon_msg *monmsg)
+{
+	return *(u32 *) &monmsg->msg.rmmsg;
+}
+
+static inline unsigned long mon_mca_end(struct mon_msg *monmsg)
+{
+	return *(u32 *) &monmsg->msg.rmmsg[4];
+}
+
+static inline u8 mon_mca_type(struct mon_msg *monmsg, u8 index)
+{
+	return *((u8 *) mon_mca_start(monmsg) + monmsg->mca_offset + index);
+}
+
+static inline u32 mon_mca_size(struct mon_msg *monmsg)
+{
+	return mon_mca_end(monmsg) - mon_mca_start(monmsg) + 1;
+}
+
+static inline u32 mon_rec_start(struct mon_msg *monmsg)
+{
+	return *((u32 *) (mon_mca_start(monmsg) + monmsg->mca_offset + 4));
+}
+
+static inline u32 mon_rec_end(struct mon_msg *monmsg)
+{
+	return *((u32 *) (mon_mca_start(monmsg) + monmsg->mca_offset + 8));
+}
+
+static int mon_check_mca(struct mon_msg *monmsg)
+{
+	if ((mon_rec_end(monmsg) <= mon_rec_start(monmsg)) ||
+	    (mon_rec_start(monmsg) < mon_dcss_start) ||
+	    (mon_rec_end(monmsg) > mon_dcss_end) ||
+	    (mon_mca_type(monmsg, 0) == 0) ||
+	    (mon_mca_size(monmsg) % 12 != 0) ||
+	    (mon_mca_end(monmsg) <= mon_mca_start(monmsg)) ||
+	    (mon_mca_end(monmsg) > mon_dcss_end) ||
+	    (mon_mca_start(monmsg) < mon_dcss_start) ||
+	    ((mon_mca_type(monmsg, 1) == 0) && (mon_mca_type(monmsg, 2) == 0)))
+		return -EINVAL;
+	return 0;
+}
+
+static int mon_send_reply(struct mon_msg *monmsg,
+			  struct mon_private *monpriv)
+{
+	int rc;
+
+	rc = iucv_message_reply(monpriv->path, &monmsg->msg,
+				IUCV_IPRMDATA, NULL, 0);
+	atomic_dec(&monpriv->msglim_count);
+	if (likely(!monmsg->msglim_reached)) {
+		monmsg->pos = 0;
+		monmsg->mca_offset = 0;
+		monpriv->read_index = (monpriv->read_index + 1) %
+				      MON_MSGLIM;
+		atomic_dec(&monpriv->read_ready);
+	} else
+		monmsg->replied_msglim = 1;
+	if (rc) {
+		pr_err("Reading monitor data failed with rc=%i\n", rc);
+		return -EIO;
+	}
+	return 0;
+}
+
+static void mon_free_mem(struct mon_private *monpriv)
+{
+	int i;
+
+	for (i = 0; i < MON_MSGLIM; i++)
+		kfree(monpriv->msg_array[i]);
+	kfree(monpriv);
+}
+
+static struct mon_private *mon_alloc_mem(void)
+{
+	int i;
+	struct mon_private *monpriv;
+
+	monpriv = kzalloc(sizeof(struct mon_private), GFP_KERNEL);
+	if (!monpriv)
+		return NULL;
+	for (i = 0; i < MON_MSGLIM; i++) {
+		monpriv->msg_array[i] = kzalloc(sizeof(struct mon_msg),
+						    GFP_KERNEL);
+		if (!monpriv->msg_array[i]) {
+			mon_free_mem(monpriv);
+			return NULL;
+		}
+	}
+	return monpriv;
+}
+
+static inline void mon_next_mca(struct mon_msg *monmsg)
+{
+	if (likely((mon_mca_size(monmsg) - monmsg->mca_offset) == 12))
+		return;
+	monmsg->mca_offset += 12;
+	monmsg->pos = 0;
+}
+
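+/*
+ * Fetch the next message for read().  If the previous message was
+ * received while the IUCV message limit was exhausted, report
+ * -EOVERFLOW once so userspace learns that monitor data may have
+ * been lost, then resume with the following message.
+ */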
+static struct mon_msg *mon_next_message(struct mon_private *monpriv)
+{
+	struct mon_msg *monmsg;
+
+	if (!atomic_read(&monpriv->read_ready))
+		return NULL;
+	monmsg = monpriv->msg_array[monpriv->read_index];
+	if (unlikely(monmsg->replied_msglim)) {
+		monmsg->replied_msglim = 0;
+		monmsg->msglim_reached = 0;
+		monmsg->pos = 0;
+		monmsg->mca_offset = 0;
+		monpriv->read_index = (monpriv->read_index + 1) %
+				      MON_MSGLIM;
+		atomic_dec(&monpriv->read_ready);
+		return ERR_PTR(-EOVERFLOW);
+	}
+	return monmsg;
+}
+
+
+/******************************************************************************
+ *                               IUCV handler                                 *
+ *****************************************************************************/
+static void mon_iucv_path_complete(struct iucv_path *path, u8 *ipuser)
+{
+	struct mon_private *monpriv = path->private;
+
+	atomic_set(&monpriv->iucv_connected, 1);
+	wake_up(&mon_conn_wait_queue);
+}
+
+static void mon_iucv_path_severed(struct iucv_path *path, u8 *ipuser)
+{
+	struct mon_private *monpriv = path->private;
+
+	pr_err("z/VM *MONITOR system service disconnected with rc=%i\n",
+	       ipuser[0]);
+	iucv_path_sever(path, NULL);
+	atomic_set(&monpriv->iucv_severed, 1);
+	wake_up(&mon_conn_wait_queue);
+	wake_up_interruptible(&mon_read_wait_queue);
+}
+
+static void mon_iucv_message_pending(struct iucv_path *path,
+				     struct iucv_message *msg)
+{
+	struct mon_private *monpriv = path->private;
+
+	memcpy(&monpriv->msg_array[monpriv->write_index]->msg,
+	       msg, sizeof(*msg));
+	if (atomic_inc_return(&monpriv->msglim_count) == MON_MSGLIM) {
+		pr_warn("The read queue for monitor data is full\n");
+		monpriv->msg_array[monpriv->write_index]->msglim_reached = 1;
+	}
+	monpriv->write_index = (monpriv->write_index + 1) % MON_MSGLIM;
+	atomic_inc(&monpriv->read_ready);
+	wake_up_interruptible(&mon_read_wait_queue);
+}
+
+static struct iucv_handler monreader_iucv_handler = {
+	.path_complete	 = mon_iucv_path_complete,
+	.path_severed	 = mon_iucv_path_severed,
+	.message_pending = mon_iucv_message_pending,
+};
+
+/******************************************************************************
+ *                               file operations                              *
+ *****************************************************************************/
+static int mon_open(struct inode *inode, struct file *filp)
+{
+	struct mon_private *monpriv;
+	int rc;
+
+	/*
+	 * only one user allowed
+	 */
+	rc = -EBUSY;
+	if (test_and_set_bit(MON_IN_USE, &mon_in_use))
+		goto out;
+
+	rc = -ENOMEM;
+	monpriv = mon_alloc_mem();
+	if (!monpriv)
+		goto out_use;
+
+	/*
+	 * Connect to *MONITOR service
+	 */
+	monpriv->path = iucv_path_alloc(MON_MSGLIM, IUCV_IPRMDATA, GFP_KERNEL);
+	if (!monpriv->path)
+		goto out_priv;
+	rc = iucv_path_connect(monpriv->path, &monreader_iucv_handler,
+			       MON_SERVICE, NULL, user_data_connect, monpriv);
+	if (rc) {
+		pr_err("Connecting to the z/VM *MONITOR system service "
+		       "failed with rc=%i\n", rc);
+		rc = -EIO;
+		goto out_path;
+	}
+	/*
+	 * Wait for connection confirmation
+	 */
+	wait_event(mon_conn_wait_queue,
+		   atomic_read(&monpriv->iucv_connected) ||
+		   atomic_read(&monpriv->iucv_severed));
+	if (atomic_read(&monpriv->iucv_severed)) {
+		atomic_set(&monpriv->iucv_severed, 0);
+		atomic_set(&monpriv->iucv_connected, 0);
+		rc = -EIO;
+		goto out_path;
+	}
+	filp->private_data = monpriv;
+	dev_set_drvdata(monreader_device, monpriv);
+	return nonseekable_open(inode, filp);
+
+out_path:
+	iucv_path_free(monpriv->path);
+out_priv:
+	mon_free_mem(monpriv);
+out_use:
+	clear_bit(MON_IN_USE, &mon_in_use);
+out:
+	return rc;
+}
+
+static int mon_close(struct inode *inode, struct file *filp)
+{
+	int rc, i;
+	struct mon_private *monpriv = filp->private_data;
+
+	/*
+	 * Close IUCV connection and unregister
+	 */
+	if (monpriv->path) {
+		rc = iucv_path_sever(monpriv->path, user_data_sever);
+		if (rc)
+			pr_warn("Disconnecting the z/VM *MONITOR system service failed with rc=%i\n",
+				rc);
+		iucv_path_free(monpriv->path);
+	}
+
+	atomic_set(&monpriv->iucv_severed, 0);
+	atomic_set(&monpriv->iucv_connected, 0);
+	atomic_set(&monpriv->read_ready, 0);
+	atomic_set(&monpriv->msglim_count, 0);
+	monpriv->write_index  = 0;
+	monpriv->read_index   = 0;
+	dev_set_drvdata(monreader_device, NULL);
+
+	for (i = 0; i < MON_MSGLIM; i++)
+		kfree(monpriv->msg_array[i]);
+	kfree(monpriv);
+	clear_bit(MON_IN_USE, &mon_in_use);
+	return 0;
+}
+
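+/*
+ * Read flow: each *MONITOR message is handed to userspace as the
+ * 12-byte monitor control element followed by the monitor records it
+ * describes; once a message is exhausted, it is acknowledged via
+ * mon_send_reply() and that read() call returns 0.
+ */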
+static ssize_t mon_read(struct file *filp, char __user *data,
+			size_t count, loff_t *ppos)
+{
+	struct mon_private *monpriv = filp->private_data;
+	struct mon_msg *monmsg;
+	int ret;
+	u32 mce_start;
+
+	monmsg = mon_next_message(monpriv);
+	if (IS_ERR(monmsg))
+		return PTR_ERR(monmsg);
+
+	if (!monmsg) {
+		if (filp->f_flags & O_NONBLOCK)
+			return -EAGAIN;
+		ret = wait_event_interruptible(mon_read_wait_queue,
+					atomic_read(&monpriv->read_ready) ||
+					atomic_read(&monpriv->iucv_severed));
+		if (ret)
+			return ret;
+		if (unlikely(atomic_read(&monpriv->iucv_severed)))
+			return -EIO;
+		monmsg = monpriv->msg_array[monpriv->read_index];
+	}
+
+	if (!monmsg->pos)
+		monmsg->pos = mon_mca_start(monmsg) + monmsg->mca_offset;
+	if (mon_check_mca(monmsg))
+		goto reply;
+
+	/* read monitor control element (12 bytes) first */
+	mce_start = mon_mca_start(monmsg) + monmsg->mca_offset;
+	if ((monmsg->pos >= mce_start) && (monmsg->pos < mce_start + 12)) {
+		count = min(count, (size_t) mce_start + 12 - monmsg->pos);
+		ret = copy_to_user(data, (void *) (unsigned long) monmsg->pos,
+				   count);
+		if (ret)
+			return -EFAULT;
+		monmsg->pos += count;
+		if (monmsg->pos == mce_start + 12)
+			monmsg->pos = mon_rec_start(monmsg);
+		goto out_copy;
+	}
+
+	/* read records */
+	if (monmsg->pos <= mon_rec_end(monmsg)) {
+		count = min(count, (size_t) mon_rec_end(monmsg) - monmsg->pos
+					    + 1);
+		ret = copy_to_user(data, (void *) (unsigned long) monmsg->pos,
+				   count);
+		if (ret)
+			return -EFAULT;
+		monmsg->pos += count;
+		if (monmsg->pos > mon_rec_end(monmsg))
+			mon_next_mca(monmsg);
+		goto out_copy;
+	}
+reply:
+	ret = mon_send_reply(monmsg, monpriv);
+	return ret;
+
+out_copy:
+	*ppos += count;
+	return count;
+}
+
+static __poll_t mon_poll(struct file *filp, struct poll_table_struct *p)
+{
+	struct mon_private *monpriv = filp->private_data;
+
+	poll_wait(filp, &mon_read_wait_queue, p);
+	if (unlikely(atomic_read(&monpriv->iucv_severed)))
+		return EPOLLERR;
+	if (atomic_read(&monpriv->read_ready))
+		return EPOLLIN | EPOLLRDNORM;
+	return 0;
+}
+
+static const struct file_operations mon_fops = {
+	.owner   = THIS_MODULE,
+	.open    = &mon_open,
+	.release = &mon_close,
+	.read    = &mon_read,
+	.poll    = &mon_poll,
+	.llseek  = noop_llseek,
+};
+
+static struct miscdevice mon_dev = {
+	.name       = "monreader",
+	.fops       = &mon_fops,
+	.minor      = MISC_DYNAMIC_MINOR,
+};
+
+
+/******************************************************************************
+ *				suspend / resume			      *
+ *****************************************************************************/
+static int monreader_freeze(struct device *dev)
+{
+	struct mon_private *monpriv = dev_get_drvdata(dev);
+	int rc;
+
+	if (!monpriv)
+		return 0;
+	if (monpriv->path) {
+		rc = iucv_path_sever(monpriv->path, user_data_sever);
+		if (rc)
+			pr_warn("Disconnecting the z/VM *MONITOR system service failed with rc=%i\n",
+				rc);
+		iucv_path_free(monpriv->path);
+	}
+	atomic_set(&monpriv->iucv_severed, 0);
+	atomic_set(&monpriv->iucv_connected, 0);
+	atomic_set(&monpriv->read_ready, 0);
+	atomic_set(&monpriv->msglim_count, 0);
+	monpriv->write_index  = 0;
+	monpriv->read_index   = 0;
+	monpriv->path = NULL;
+	return 0;
+}
+
+static int monreader_thaw(struct device *dev)
+{
+	struct mon_private *monpriv = dev_get_drvdata(dev);
+	int rc;
+
+	if (!monpriv)
+		return 0;
+	rc = -ENOMEM;
+	monpriv->path = iucv_path_alloc(MON_MSGLIM, IUCV_IPRMDATA, GFP_KERNEL);
+	if (!monpriv->path)
+		goto out;
+	rc = iucv_path_connect(monpriv->path, &monreader_iucv_handler,
+			       MON_SERVICE, NULL, user_data_connect, monpriv);
+	if (rc) {
+		pr_err("Connecting to the z/VM *MONITOR system service "
+		       "failed with rc=%i\n", rc);
+		goto out_path;
+	}
+	wait_event(mon_conn_wait_queue,
+		   atomic_read(&monpriv->iucv_connected) ||
+		   atomic_read(&monpriv->iucv_severed));
+	if (atomic_read(&monpriv->iucv_severed))
+		goto out_path;
+	return 0;
+out_path:
+	rc = -EIO;
+	iucv_path_free(monpriv->path);
+	monpriv->path = NULL;
+out:
+	atomic_set(&monpriv->iucv_severed, 1);
+	return rc;
+}
+
+static int monreader_restore(struct device *dev)
+{
+	int rc;
+
+	segment_unload(mon_dcss_name);
+	rc = segment_load(mon_dcss_name, SEGMENT_SHARED,
+			  &mon_dcss_start, &mon_dcss_end);
+	if (rc < 0) {
+		segment_warning(rc, mon_dcss_name);
+		panic("fatal monreader resume error: no monitor dcss\n");
+	}
+	return monreader_thaw(dev);
+}
+
+static const struct dev_pm_ops monreader_pm_ops = {
+	.freeze  = monreader_freeze,
+	.thaw	 = monreader_thaw,
+	.restore = monreader_restore,
+};
+
+static struct device_driver monreader_driver = {
+	.name = "monreader",
+	.bus  = &iucv_bus,
+	.pm   = &monreader_pm_ops,
+};
+
+
+/******************************************************************************
+ *                              module init/exit                              *
+ *****************************************************************************/
+static int __init mon_init(void)
+{
+	int rc;
+
+	if (!MACHINE_IS_VM) {
+		pr_err("The z/VM *MONITOR record device driver cannot be "
+		       "loaded without z/VM\n");
+		return -ENODEV;
+	}
+
+	/*
+	 * Register with IUCV and connect to *MONITOR service
+	 */
+	rc = iucv_register(&monreader_iucv_handler, 1);
+	if (rc) {
+		pr_err("The z/VM *MONITOR record device driver failed to "
+		       "register with IUCV\n");
+		return rc;
+	}
+
+	rc = driver_register(&monreader_driver);
+	if (rc)
+		goto out_iucv;
+	monreader_device = kzalloc(sizeof(struct device), GFP_KERNEL);
+	if (!monreader_device) {
+		rc = -ENOMEM;
+		goto out_driver;
+	}
+
+	dev_set_name(monreader_device, "monreader-dev");
+	monreader_device->bus = &iucv_bus;
+	monreader_device->parent = iucv_root;
+	monreader_device->driver = &monreader_driver;
+	monreader_device->release = (void (*)(struct device *))kfree;
+	rc = device_register(monreader_device);
+	if (rc) {
+		put_device(monreader_device);
+		goto out_driver;
+	}
+
+	rc = segment_type(mon_dcss_name);
+	if (rc < 0) {
+		segment_warning(rc, mon_dcss_name);
+		goto out_device;
+	}
+	if (rc != SEG_TYPE_SC) {
+		pr_err("The specified *MONITOR DCSS %s does not have the "
+		       "required type SC\n", mon_dcss_name);
+		rc = -EINVAL;
+		goto out_device;
+	}
+
+	rc = segment_load(mon_dcss_name, SEGMENT_SHARED,
+			  &mon_dcss_start, &mon_dcss_end);
+	if (rc < 0) {
+		segment_warning(rc, mon_dcss_name);
+		rc = -EINVAL;
+		goto out_device;
+	}
+	dcss_mkname(mon_dcss_name, &user_data_connect[8]);
+
+	/*
+	 * misc_register() has to be the last action in module_init(), because
+	 * file operations will be available right after this.
+	 */
+	rc = misc_register(&mon_dev);
+	if (rc < 0)
+		goto out;
+	return 0;
+
+out:
+	segment_unload(mon_dcss_name);
+out_device:
+	device_unregister(monreader_device);
+out_driver:
+	driver_unregister(&monreader_driver);
+out_iucv:
+	iucv_unregister(&monreader_iucv_handler, 1);
+	return rc;
+}
+
+static void __exit mon_exit(void)
+{
+	segment_unload(mon_dcss_name);
+	misc_deregister(&mon_dev);
+	device_unregister(monreader_device);
+	driver_unregister(&monreader_driver);
+	iucv_unregister(&monreader_iucv_handler, 1);
+	return;
+}
+
+
+module_init(mon_init);
+module_exit(mon_exit);
+
+module_param_string(mondcss, mon_dcss_name, 9, 0444);
+MODULE_PARM_DESC(mondcss, "Name of DCSS segment to be used for *MONITOR "
+		 "service, max. 8 chars. Default is MONDCSS");
+
+MODULE_AUTHOR("Gerald Schaefer <geraldsc@de.ibm.com>");
+MODULE_DESCRIPTION("Character device driver for reading z/VM "
+		   "monitor service records.");
+MODULE_LICENSE("GPL");
diff --git a/drivers/s390/char/monwriter.c b/drivers/s390/char/monwriter.c
new file mode 100644
index 0000000..4f1a69c
--- /dev/null
+++ b/drivers/s390/char/monwriter.c
@@ -0,0 +1,399 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Character device driver for writing z/VM *MONITOR service records.
+ *
+ * Copyright IBM Corp. 2006, 2009
+ *
+ * Author(s): Melissa Howland <Melissa.Howland@us.ibm.com>
+ */
+
+#define KMSG_COMPONENT "monwriter"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/miscdevice.h>
+#include <linux/ctype.h>
+#include <linux/poll.h>
+#include <linux/mutex.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <asm/ebcdic.h>
+#include <asm/io.h>
+#include <asm/appldata.h>
+#include <asm/monwriter.h>
+
+#define MONWRITE_MAX_DATALEN	4010
+
+static int mon_max_bufs = 255;
+static int mon_buf_count;
+
+struct mon_buf {
+	struct list_head list;
+	struct monwrite_hdr hdr;
+	int diag_done;
+	char *data;
+};
+
+static LIST_HEAD(mon_priv_list);
+
+struct mon_private {
+	struct list_head priv_list;
+	struct list_head list;
+	struct monwrite_hdr hdr;
+	size_t hdr_to_read;
+	size_t data_to_read;
+	struct mon_buf *current_buf;
+	struct mutex thread_mutex;
+};
+
+/*
+ * helper functions
+ */
+
+static int monwrite_diag(struct monwrite_hdr *myhdr, char *buffer, int fcn)
+{
+	struct appldata_product_id id;
+	int rc;
+
+	memcpy(id.prod_nr, "LNXAPPL", 7);
+	id.prod_fn = myhdr->applid;
+	id.record_nr = myhdr->record_num;
+	id.version_nr = myhdr->version;
+	id.release_nr = myhdr->release;
+	id.mod_lvl = myhdr->mod_level;
+	rc = appldata_asm(&id, fcn, (void *) buffer, myhdr->datalen);
+	if (rc <= 0)
+		return rc;
+	pr_err("Writing monitor data failed with rc=%i\n", rc);
+	if (rc == 5)
+		return -EPERM;
+	return -EINVAL;
+}
+
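+/*
+ * Look up the buffer a header refers to.  A MONWRITE_STOP_INTERVAL
+ * header matches a buffer regardless of the function that started it,
+ * so an interval can be stopped without repeating the start function.
+ */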
+static struct mon_buf *monwrite_find_hdr(struct mon_private *monpriv,
+					 struct monwrite_hdr *monhdr)
+{
+	struct mon_buf *entry, *next;
+
+	list_for_each_entry_safe(entry, next, &monpriv->list, list)
+		if ((entry->hdr.mon_function == monhdr->mon_function ||
+		     monhdr->mon_function == MONWRITE_STOP_INTERVAL) &&
+		    entry->hdr.applid == monhdr->applid &&
+		    entry->hdr.record_num == monhdr->record_num &&
+		    entry->hdr.version == monhdr->version &&
+		    entry->hdr.release == monhdr->release &&
+		    entry->hdr.mod_level == monhdr->mod_level)
+			return entry;
+
+	return NULL;
+}
+
+static int monwrite_new_hdr(struct mon_private *monpriv)
+{
+	struct monwrite_hdr *monhdr = &monpriv->hdr;
+	struct mon_buf *monbuf;
+	int rc = 0;
+
+	if (monhdr->datalen > MONWRITE_MAX_DATALEN ||
+	    monhdr->mon_function > MONWRITE_START_CONFIG ||
+	    monhdr->hdrlen != sizeof(struct monwrite_hdr))
+		return -EINVAL;
+	monbuf = NULL;
+	if (monhdr->mon_function != MONWRITE_GEN_EVENT)
+		monbuf = monwrite_find_hdr(monpriv, monhdr);
+	if (monbuf) {
+		if (monhdr->mon_function == MONWRITE_STOP_INTERVAL) {
+			monhdr->datalen = monbuf->hdr.datalen;
+			rc = monwrite_diag(monhdr, monbuf->data,
+					   APPLDATA_STOP_REC);
+			list_del(&monbuf->list);
+			mon_buf_count--;
+			kfree(monbuf->data);
+			kfree(monbuf);
+			monbuf = NULL;
+		}
+	} else if (monhdr->mon_function != MONWRITE_STOP_INTERVAL) {
+		if (mon_buf_count >= mon_max_bufs)
+			return -ENOSPC;
+		monbuf = kzalloc(sizeof(struct mon_buf), GFP_KERNEL);
+		if (!monbuf)
+			return -ENOMEM;
+		monbuf->data = kzalloc(monhdr->datalen,
+				       GFP_KERNEL | GFP_DMA);
+		if (!monbuf->data) {
+			kfree(monbuf);
+			return -ENOMEM;
+		}
+		monbuf->hdr = *monhdr;
+		list_add_tail(&monbuf->list, &monpriv->list);
+		if (monhdr->mon_function != MONWRITE_GEN_EVENT)
+			mon_buf_count++;
+	}
+	monpriv->current_buf = monbuf;
+	return rc;
+}
+
+static int monwrite_new_data(struct mon_private *monpriv)
+{
+	struct monwrite_hdr *monhdr = &monpriv->hdr;
+	struct mon_buf *monbuf = monpriv->current_buf;
+	int rc = 0;
+
+	switch (monhdr->mon_function) {
+	case MONWRITE_START_INTERVAL:
+		if (!monbuf->diag_done) {
+			rc = monwrite_diag(monhdr, monbuf->data,
+					   APPLDATA_START_INTERVAL_REC);
+			monbuf->diag_done = 1;
+		}
+		break;
+	case MONWRITE_START_CONFIG:
+		if (!monbuf->diag_done) {
+			rc = monwrite_diag(monhdr, monbuf->data,
+					   APPLDATA_START_CONFIG_REC);
+			monbuf->diag_done = 1;
+		}
+		break;
+	case MONWRITE_GEN_EVENT:
+		rc = monwrite_diag(monhdr, monbuf->data,
+				   APPLDATA_GEN_EVENT_REC);
+		list_del(&monpriv->current_buf->list);
+		kfree(monpriv->current_buf->data);
+		kfree(monpriv->current_buf);
+		monpriv->current_buf = NULL;
+		break;
+	default:
+		/* monhdr->mon_function is checked in monwrite_new_hdr */
+		BUG();
+	}
+	return rc;
+}
+
+/*
+ * file operations
+ */
+
+static int monwrite_open(struct inode *inode, struct file *filp)
+{
+	struct mon_private *monpriv;
+
+	monpriv = kzalloc(sizeof(struct mon_private), GFP_KERNEL);
+	if (!monpriv)
+		return -ENOMEM;
+	INIT_LIST_HEAD(&monpriv->list);
+	monpriv->hdr_to_read = sizeof(monpriv->hdr);
+	mutex_init(&monpriv->thread_mutex);
+	filp->private_data = monpriv;
+	list_add_tail(&monpriv->priv_list, &mon_priv_list);
+	return nonseekable_open(inode, filp);
+}
+
+static int monwrite_close(struct inode *inode, struct file *filp)
+{
+	struct mon_private *monpriv = filp->private_data;
+	struct mon_buf *entry, *next;
+
+	list_for_each_entry_safe(entry, next, &monpriv->list, list) {
+		if (entry->hdr.mon_function != MONWRITE_GEN_EVENT)
+			monwrite_diag(&entry->hdr, entry->data,
+				      APPLDATA_STOP_REC);
+		mon_buf_count--;
+		list_del(&entry->list);
+		kfree(entry->data);
+		kfree(entry);
+	}
+	list_del(&monpriv->priv_list);
+	kfree(monpriv);
+	return 0;
+}
+
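+/*
+ * Write protocol (as implemented below): userspace writes a stream of
+ * struct monwrite_hdr records, each followed by hdr.datalen bytes of
+ * payload.  Writes may be partial: hdr_to_read/data_to_read track how
+ * much of the current header or payload is still outstanding across
+ * write() calls.
+ */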
+static ssize_t monwrite_write(struct file *filp, const char __user *data,
+			      size_t count, loff_t *ppos)
+{
+	struct mon_private *monpriv = filp->private_data;
+	size_t len, written;
+	void *to;
+	int rc;
+
+	mutex_lock(&monpriv->thread_mutex);
+	for (written = 0; written < count; ) {
+		if (monpriv->hdr_to_read) {
+			len = min(count - written, monpriv->hdr_to_read);
+			to = (char *) &monpriv->hdr +
+				sizeof(monpriv->hdr) - monpriv->hdr_to_read;
+			if (copy_from_user(to, data + written, len)) {
+				rc = -EFAULT;
+				goto out_error;
+			}
+			monpriv->hdr_to_read -= len;
+			written += len;
+			if (monpriv->hdr_to_read > 0)
+				continue;
+			rc = monwrite_new_hdr(monpriv);
+			if (rc)
+				goto out_error;
+			monpriv->data_to_read = monpriv->current_buf ?
+				monpriv->current_buf->hdr.datalen : 0;
+		}
+
+		if (monpriv->data_to_read) {
+			len = min(count - written, monpriv->data_to_read);
+			to = monpriv->current_buf->data +
+				monpriv->hdr.datalen - monpriv->data_to_read;
+			if (copy_from_user(to, data + written, len)) {
+				rc = -EFAULT;
+				goto out_error;
+			}
+			monpriv->data_to_read -= len;
+			written += len;
+			if (monpriv->data_to_read > 0)
+				continue;
+			rc = monwrite_new_data(monpriv);
+			if (rc)
+				goto out_error;
+		}
+		monpriv->hdr_to_read = sizeof(monpriv->hdr);
+	}
+	mutex_unlock(&monpriv->thread_mutex);
+	return written;
+
+out_error:
+	monpriv->data_to_read = 0;
+	monpriv->hdr_to_read = sizeof(struct monwrite_hdr);
+	mutex_unlock(&monpriv->thread_mutex);
+	return rc;
+}
+
+static const struct file_operations monwrite_fops = {
+	.owner	 = THIS_MODULE,
+	.open	 = &monwrite_open,
+	.release = &monwrite_close,
+	.write	 = &monwrite_write,
+	.llseek  = noop_llseek,
+};
+
+static struct miscdevice mon_dev = {
+	.name	= "monwriter",
+	.fops	= &monwrite_fops,
+	.minor	= MISC_DYNAMIC_MINOR,
+};
+
+/*
+ * suspend/resume
+ */
+
+static int monwriter_freeze(struct device *dev)
+{
+	struct mon_private *monpriv;
+	struct mon_buf *monbuf;
+
+	list_for_each_entry(monpriv, &mon_priv_list, priv_list) {
+		list_for_each_entry(monbuf, &monpriv->list, list) {
+			if (monbuf->hdr.mon_function != MONWRITE_GEN_EVENT)
+				monwrite_diag(&monbuf->hdr, monbuf->data,
+					      APPLDATA_STOP_REC);
+		}
+	}
+	return 0;
+}
+
+static int monwriter_restore(struct device *dev)
+{
+	struct mon_private *monpriv;
+	struct mon_buf *monbuf;
+
+	list_for_each_entry(monpriv, &mon_priv_list, priv_list) {
+		list_for_each_entry(monbuf, &monpriv->list, list) {
+			if (monbuf->hdr.mon_function == MONWRITE_START_INTERVAL)
+				monwrite_diag(&monbuf->hdr, monbuf->data,
+					      APPLDATA_START_INTERVAL_REC);
+			if (monbuf->hdr.mon_function == MONWRITE_START_CONFIG)
+				monwrite_diag(&monbuf->hdr, monbuf->data,
+					      APPLDATA_START_CONFIG_REC);
+		}
+	}
+	return 0;
+}
+
+static int monwriter_thaw(struct device *dev)
+{
+	return monwriter_restore(dev);
+}
+
+static const struct dev_pm_ops monwriter_pm_ops = {
+	.freeze		= monwriter_freeze,
+	.thaw		= monwriter_thaw,
+	.restore	= monwriter_restore,
+};
+
+static struct platform_driver monwriter_pdrv = {
+	.driver = {
+		.name	= "monwriter",
+		.pm	= &monwriter_pm_ops,
+	},
+};
+
+static struct platform_device *monwriter_pdev;
+
+/*
+ * module init/exit
+ */
+
+static int __init mon_init(void)
+{
+	int rc;
+
+	if (!MACHINE_IS_VM)
+		return -ENODEV;
+
+	rc = platform_driver_register(&monwriter_pdrv);
+	if (rc)
+		return rc;
+
+	monwriter_pdev = platform_device_register_simple("monwriter", -1, NULL,
+							0);
+	if (IS_ERR(monwriter_pdev)) {
+		rc = PTR_ERR(monwriter_pdev);
+		goto out_driver;
+	}
+
+	/*
+	 * misc_register() has to be the last action in module_init(), because
+	 * file operations will be available right after this.
+	 */
+	rc = misc_register(&mon_dev);
+	if (rc)
+		goto out_device;
+	return 0;
+
+out_device:
+	platform_device_unregister(monwriter_pdev);
+out_driver:
+	platform_driver_unregister(&monwriter_pdrv);
+	return rc;
+}
+
+static void __exit mon_exit(void)
+{
+	misc_deregister(&mon_dev);
+	platform_device_unregister(monwriter_pdev);
+	platform_driver_unregister(&monwriter_pdrv);
+}
+
+module_init(mon_init);
+module_exit(mon_exit);
+
+module_param_named(max_bufs, mon_max_bufs, int, 0644);
+MODULE_PARM_DESC(max_bufs, "Maximum number of sample monitor data buffers "
+		 "that can be active at one time");
+
+MODULE_AUTHOR("Melissa Howland <Melissa.Howland@us.ibm.com>");
+MODULE_DESCRIPTION("Character device driver for writing z/VM "
+		   "APPLDATA monitor records.");
+MODULE_LICENSE("GPL");
diff --git a/drivers/s390/char/raw3270.c b/drivers/s390/char/raw3270.c
new file mode 100644
index 0000000..f8cd293
--- /dev/null
+++ b/drivers/s390/char/raw3270.c
@@ -0,0 +1,1357 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * IBM/3270 Driver - core functions.
+ *
+ * Author(s):
+ *   Original 3270 Code for 2.4 written by Richard Hitt (UTS Global)
+ *   Rewritten for 2.5 by Martin Schwidefsky <schwidefsky@de.ibm.com>
+ *     Copyright IBM Corp. 2003, 2009
+ */
+
+#include <linux/module.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/wait.h>
+
+#include <asm/ccwdev.h>
+#include <asm/cio.h>
+#include <asm/ebcdic.h>
+#include <asm/diag.h>
+
+#include "raw3270.h"
+
+#include <linux/major.h>
+#include <linux/kdev_t.h>
+#include <linux/device.h>
+#include <linux/mutex.h>
+
+struct class *class3270;
+
+/* The main 3270 data structure. */
+struct raw3270 {
+	struct list_head list;
+	struct ccw_device *cdev;
+	int minor;
+
+	short model, rows, cols;
+	unsigned int state;
+	unsigned long flags;
+
+	struct list_head req_queue;	/* Request queue. */
+	struct list_head view_list;	/* List of available views. */
+	struct raw3270_view *view;	/* Active view. */
+
+	struct timer_list timer;	/* Device timer. */
+
+	unsigned char *ascebc;		/* ascii -> ebcdic table */
+
+	struct raw3270_view init_view;
+	struct raw3270_request init_reset;
+	struct raw3270_request init_readpart;
+	struct raw3270_request init_readmod;
+	unsigned char init_data[256];
+};
+
+/* raw3270->state */
+#define RAW3270_STATE_INIT	0	/* Initial state */
+#define RAW3270_STATE_RESET	1	/* Reset command is pending */
+#define RAW3270_STATE_W4ATTN	2	/* Wait for attention interrupt */
+#define RAW3270_STATE_READMOD	3	/* Read partition is pending */
+#define RAW3270_STATE_READY	4	/* Device is usable by views */
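+
+/*
+ * State transitions: INIT -> RESET when the initial reset is started.
+ * On z/VM the screen size comes from diag 0x210 and the device goes
+ * straight to READY; otherwise RESET -> W4ATTN (a 'read partition'
+ * query is written), W4ATTN -> READMOD on the attention interrupt,
+ * and READMOD -> READY once the query reply has been read.
+ */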
+
+/* raw3270->flags */
+#define RAW3270_FLAGS_14BITADDR	0	/* 14-bit buffer addresses */
+#define RAW3270_FLAGS_BUSY	1	/* Device busy, leave it alone */
+#define RAW3270_FLAGS_CONSOLE	2	/* Device is the console. */
+#define RAW3270_FLAGS_FROZEN	3	/* set if 3270 is frozen for suspend */
+
+/* Semaphore to protect global data of raw3270 (devices, views, etc). */
+static DEFINE_MUTEX(raw3270_mutex);
+
+/* List of 3270 devices. */
+static LIST_HEAD(raw3270_devices);
+
+/*
+ * Flag to indicate if the driver has been registered. Some operations
+ * like waiting for the end of i/o need to be done differently as long
+ * as the kernel is still starting up (console support).
+ */
+static int raw3270_registered;
+
+/* Module parameters */
+static bool tubxcorrect;
+module_param(tubxcorrect, bool, 0);
+
+/*
+ * Wait queue for device init/delete, view delete.
+ */
+DECLARE_WAIT_QUEUE_HEAD(raw3270_wait_queue);
+
+static void __raw3270_disconnect(struct raw3270 *rp);
+
+/*
+ * Encode array for 12 bit 3270 addresses.
+ */
+static unsigned char raw3270_ebcgraf[64] =	{
+	0x40, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7,
+	0xc8, 0xc9, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f,
+	0x50, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7,
+	0xd8, 0xd9, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f,
+	0x60, 0x61, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7,
+	0xe8, 0xe9, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f,
+	0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7,
+	0xf8, 0xf9, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f
+};
+
+static inline int raw3270_state_ready(struct raw3270 *rp)
+{
+	return rp->state == RAW3270_STATE_READY;
+}
+
+static inline int raw3270_state_final(struct raw3270 *rp)
+{
+	return rp->state == RAW3270_STATE_INIT ||
+		rp->state == RAW3270_STATE_READY;
+}
+
+void
+raw3270_buffer_address(struct raw3270 *rp, char *cp, unsigned short addr)
+{
+	if (test_bit(RAW3270_FLAGS_14BITADDR, &rp->flags)) {
+		cp[0] = (addr >> 8) & 0x3f;
+		cp[1] = addr & 0xff;
+	} else {
+		cp[0] = raw3270_ebcgraf[(addr >> 6) & 0x3f];
+		cp[1] = raw3270_ebcgraf[addr & 0x3f];
+	}
+}
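+
+/*
+ * Worked example (illustrative only): buffer address 80 in 12-bit
+ * mode splits into the 6-bit halves 1 and 16 and encodes through
+ * raw3270_ebcgraf[] as 0xc1 0x50; in 14-bit mode the same address is
+ * emitted directly as 0x00 0x50.
+ */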
+
+/*
+ * Allocate a new 3270 ccw request
+ */
+struct raw3270_request *
+raw3270_request_alloc(size_t size)
+{
+	struct raw3270_request *rq;
+
+	/* Allocate request structure */
+	rq = kzalloc(sizeof(struct raw3270_request), GFP_KERNEL | GFP_DMA);
+	if (!rq)
+		return ERR_PTR(-ENOMEM);
+
+	/* alloc output buffer. */
+	if (size > 0) {
+		rq->buffer = kmalloc(size, GFP_KERNEL | GFP_DMA);
+		if (!rq->buffer) {
+			kfree(rq);
+			return ERR_PTR(-ENOMEM);
+		}
+	}
+	rq->size = size;
+	INIT_LIST_HEAD(&rq->list);
+
+	/*
+	 * Setup ccw.
+	 */
+	rq->ccw.cda = __pa(rq->buffer);
+	rq->ccw.flags = CCW_FLAG_SLI;
+
+	return rq;
+}
+
+/*
+ * Free 3270 ccw request
+ */
+void
+raw3270_request_free(struct raw3270_request *rq)
+{
+	kfree(rq->buffer);
+	kfree(rq);
+}
+
+/*
+ * Reset request to initial state.
+ */
+void
+raw3270_request_reset(struct raw3270_request *rq)
+{
+	BUG_ON(!list_empty(&rq->list));
+	rq->ccw.cmd_code = 0;
+	rq->ccw.count = 0;
+	rq->ccw.cda = __pa(rq->buffer);
+	rq->ccw.flags = CCW_FLAG_SLI;
+	rq->rescnt = 0;
+	rq->rc = 0;
+}
+
+/*
+ * Set command code to ccw of a request.
+ */
+void
+raw3270_request_set_cmd(struct raw3270_request *rq, u8 cmd)
+{
+	rq->ccw.cmd_code = cmd;
+}
+
+/*
+ * Add data fragment to output buffer.
+ */
+int
+raw3270_request_add_data(struct raw3270_request *rq, void *data, size_t size)
+{
+	if (size + rq->ccw.count > rq->size)
+		return -E2BIG;
+	memcpy(rq->buffer + rq->ccw.count, data, size);
+	rq->ccw.count += size;
+	return 0;
+}
+
+/*
+ * Set address/length pair to ccw of a request.
+ */
+void
+raw3270_request_set_data(struct raw3270_request *rq, void *data, size_t size)
+{
+	rq->ccw.cda = __pa(data);
+	rq->ccw.count = size;
+}
+
+/*
+ * Set idal buffer to ccw of a request.
+ */
+void
+raw3270_request_set_idal(struct raw3270_request *rq, struct idal_buffer *ib)
+{
+	rq->ccw.cda = __pa(ib->data);
+	rq->ccw.count = ib->size;
+	rq->ccw.flags |= CCW_FLAG_IDA;
+}
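+
+/*
+ * Typical request usage (illustrative sketch, error handling omitted):
+ *
+ *	rq = raw3270_request_alloc(256);
+ *	raw3270_request_set_cmd(rq, TC_WRITE);
+ *	raw3270_request_add_data(rq, buf, len);
+ *	raw3270_start(view, rq);
+ *
+ * If a callback is set on the request, it is invoked from the
+ * interrupt handler once the request completes.
+ */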
+
+/*
+ * Add the request to the request queue, try to start it if the
+ * 3270 device is idle. Return without waiting for end of i/o.
+ */
+static int
+__raw3270_start(struct raw3270 *rp, struct raw3270_view *view,
+		struct raw3270_request *rq)
+{
+	rq->view = view;
+	raw3270_get_view(view);
+	if (list_empty(&rp->req_queue) &&
+	    !test_bit(RAW3270_FLAGS_BUSY, &rp->flags)) {
+		/* No other requests are on the queue. Start this one. */
+		rq->rc = ccw_device_start(rp->cdev, &rq->ccw,
+					       (unsigned long) rq, 0, 0);
+		if (rq->rc) {
+			raw3270_put_view(view);
+			return rq->rc;
+		}
+	}
+	list_add_tail(&rq->list, &rp->req_queue);
+	return 0;
+}
+
+int
+raw3270_view_active(struct raw3270_view *view)
+{
+	struct raw3270 *rp = view->dev;
+
+	return rp && rp->view == view &&
+		!test_bit(RAW3270_FLAGS_FROZEN, &rp->flags);
+}
+
+int
+raw3270_start(struct raw3270_view *view, struct raw3270_request *rq)
+{
+	unsigned long flags;
+	struct raw3270 *rp;
+	int rc;
+
+	spin_lock_irqsave(get_ccwdev_lock(view->dev->cdev), flags);
+	rp = view->dev;
+	if (!rp || rp->view != view ||
+	    test_bit(RAW3270_FLAGS_FROZEN, &rp->flags))
+		rc = -EACCES;
+	else if (!raw3270_state_ready(rp))
+		rc = -EBUSY;
+	else
+		rc = __raw3270_start(rp, view, rq);
+	spin_unlock_irqrestore(get_ccwdev_lock(view->dev->cdev), flags);
+	return rc;
+}
+
+int
+raw3270_start_locked(struct raw3270_view *view, struct raw3270_request *rq)
+{
+	struct raw3270 *rp;
+	int rc;
+
+	rp = view->dev;
+	if (!rp || rp->view != view ||
+	    test_bit(RAW3270_FLAGS_FROZEN, &rp->flags))
+		rc = -EACCES;
+	else if (!raw3270_state_ready(rp))
+		rc = -EBUSY;
+	else
+		rc = __raw3270_start(rp, view, rq);
+	return rc;
+}
+
+int
+raw3270_start_irq(struct raw3270_view *view, struct raw3270_request *rq)
+{
+	struct raw3270 *rp;
+
+	rp = view->dev;
+	rq->view = view;
+	raw3270_get_view(view);
+	list_add_tail(&rq->list, &rp->req_queue);
+	return 0;
+}
+
+/*
+ * 3270 interrupt routine, called from the ccw_device layer
+ */
+static void
+raw3270_irq(struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
+{
+	struct raw3270 *rp;
+	struct raw3270_view *view;
+	struct raw3270_request *rq;
+
+	rp = dev_get_drvdata(&cdev->dev);
+	if (!rp)
+		return;
+	rq = (struct raw3270_request *) intparm;
+	view = rq ? rq->view : rp->view;
+
+	if (!IS_ERR(irb)) {
+		/* Handle CE-DE-UE and subsequent UDE */
+		if (irb->scsw.cmd.dstat & DEV_STAT_DEV_END)
+			clear_bit(RAW3270_FLAGS_BUSY, &rp->flags);
+		if (irb->scsw.cmd.dstat == (DEV_STAT_CHN_END |
+					    DEV_STAT_DEV_END |
+					    DEV_STAT_UNIT_EXCEP))
+			set_bit(RAW3270_FLAGS_BUSY, &rp->flags);
+		/* Handle disconnected devices */
+		if ((irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK) &&
+		    (irb->ecw[0] & SNS0_INTERVENTION_REQ)) {
+			set_bit(RAW3270_FLAGS_BUSY, &rp->flags);
+			if (rp->state > RAW3270_STATE_RESET)
+				__raw3270_disconnect(rp);
+		}
+		/* Call interrupt handler of the view */
+		if (view)
+			view->fn->intv(view, rq, irb);
+	}
+
+	if (test_bit(RAW3270_FLAGS_BUSY, &rp->flags))
+		/* Device busy, do not start I/O */
+		return;
+
+	if (rq && !list_empty(&rq->list)) {
+		/* The request completed, remove from queue and do callback. */
+		list_del_init(&rq->list);
+		if (rq->callback)
+			rq->callback(rq, rq->callback_data);
+		/* Do put_device for get_device in raw3270_start. */
+		raw3270_put_view(view);
+	}
+
+	/*
+	 * Try to start each request on the request queue until one is
+	 * started successfully.
+	 */
+	while (!list_empty(&rp->req_queue)) {
+		rq = list_entry(rp->req_queue.next,
+				struct raw3270_request, list);
+		rq->rc = ccw_device_start(rp->cdev, &rq->ccw,
+					  (unsigned long) rq, 0, 0);
+		if (rq->rc == 0)
+			break;
+		/* Start failed. Remove request and do callback. */
+		list_del_init(&rq->list);
+		if (rq->callback)
+			rq->callback(rq, rq->callback_data);
+		/* Do put_device for get_device in raw3270_start. */
+		raw3270_put_view(view);
+	}
+}
+
+/*
+ * To determine the size of the 3270 device we need to do:
+ * 1) send a 'read partition' data stream to the device
+ * 2) wait for the attn interrupt that precedes the query reply
+ * 3) do a read modified to get the query reply
+ * To make things worse we have to cope with intervention
+ * required (3270 device switched to 'stand-by') and command
+ * rejects (old devices that can't do 'read partition').
+ */
+struct raw3270_ua {	/* Query Reply structure for Usable Area */
+	struct {	/* Usable Area Query Reply Base */
+		short l;	/* Length of this structured field */
+		char  sfid;	/* 0x81 if Query Reply */
+		char  qcode;	/* 0x81 if Usable Area */
+		char  flags0;
+		char  flags1;
+		short w;	/* Width of usable area */
+		short h;	/* Height of usable area */
+		char  units;	/* 0x00:in; 0x01:mm */
+		int   xr;
+		int   yr;
+		char  aw;
+		char  ah;
+		short buffsz;	/* Character buffer size, bytes */
+		char  xmin;
+		char  ymin;
+		char  xmax;
+		char  ymax;
+	} __attribute__ ((packed)) uab;
+	struct {	/* Alternate Usable Area Self-Defining Parameter */
+		char  l;	/* Length of this Self-Defining Parm */
+		char  sdpid;	/* 0x02 if Alternate Usable Area */
+		char  res;
+		char  auaid;	/* 0x01 is the id for the AUA */
+		short wauai;	/* Width of AUAi */
+		short hauai;	/* Height of AUAi */
+		char  auaunits;	/* 0x00:in, 0x01:mm */
+		int   auaxr;
+		int   auayr;
+		char  awauai;
+		char  ahauai;
+	} __attribute__ ((packed)) aua;
+} __attribute__ ((packed));
+
+static void
+raw3270_size_device_vm(struct raw3270 *rp)
+{
+	int rc, model;
+	struct ccw_dev_id dev_id;
+	struct diag210 diag_data;
+
+	ccw_device_get_id(rp->cdev, &dev_id);
+	diag_data.vrdcdvno = dev_id.devno;
+	diag_data.vrdclen = sizeof(struct diag210);
+	rc = diag210(&diag_data);
+	model = diag_data.vrdccrmd;
+	/* Use default model 2 if the size could not be detected */
+	if (rc || model < 2 || model > 5)
+		model = 2;
+	switch (model) {
+	case 2:
+		rp->model = model;
+		rp->rows = 24;
+		rp->cols = 80;
+		break;
+	case 3:
+		rp->model = model;
+		rp->rows = 32;
+		rp->cols = 80;
+		break;
+	case 4:
+		rp->model = model;
+		rp->rows = 43;
+		rp->cols = 80;
+		break;
+	case 5:
+		rp->model = model;
+		rp->rows = 27;
+		rp->cols = 132;
+		break;
+	}
+}
+
+static void
+raw3270_size_device(struct raw3270 *rp)
+{
+	struct raw3270_ua *uap;
+
+	/* Got a Query Reply */
+	uap = (struct raw3270_ua *) (rp->init_data + 1);
+	/* Paranoia check. */
+	if (rp->init_readmod.rc || rp->init_data[0] != 0x88 ||
+	    uap->uab.qcode != 0x81) {
+		/* Couldn't detect size. Use default model 2. */
+		rp->model = 2;
+		rp->rows = 24;
+		rp->cols = 80;
+		return;
+	}
+	/* Copy rows/columns of default Usable Area */
+	rp->rows = uap->uab.h;
+	rp->cols = uap->uab.w;
+	/* Check for 14 bit addressing */
+	if ((uap->uab.flags0 & 0x0d) == 0x01)
+		set_bit(RAW3270_FLAGS_14BITADDR, &rp->flags);
+	/* Check for Alternate Usable Area */
+	if (uap->uab.l == sizeof(struct raw3270_ua) &&
+	    uap->aua.sdpid == 0x02) {
+		rp->rows = uap->aua.hauai;
+		rp->cols = uap->aua.wauai;
+	}
+	/* Try to find a model. */
+	rp->model = 0;
+	if (rp->rows == 24 && rp->cols == 80)
+		rp->model = 2;
+	if (rp->rows == 32 && rp->cols == 80)
+		rp->model = 3;
+	if (rp->rows == 43 && rp->cols == 80)
+		rp->model = 4;
+	if (rp->rows == 27 && rp->cols == 132)
+		rp->model = 5;
+}
+
+static void
+raw3270_size_device_done(struct raw3270 *rp)
+{
+	struct raw3270_view *view;
+
+	rp->view = NULL;
+	rp->state = RAW3270_STATE_READY;
+	/* Notify views about new size */
+	list_for_each_entry(view, &rp->view_list, list)
+		if (view->fn->resize)
+			view->fn->resize(view, rp->model, rp->rows, rp->cols);
+	/* Setup processing done, now activate a view */
+	list_for_each_entry(view, &rp->view_list, list) {
+		rp->view = view;
+		if (view->fn->activate(view) == 0)
+			break;
+		rp->view = NULL;
+	}
+}
+
+static void
+raw3270_read_modified_cb(struct raw3270_request *rq, void *data)
+{
+	struct raw3270 *rp = rq->view->dev;
+
+	raw3270_size_device(rp);
+	raw3270_size_device_done(rp);
+}
+
+static void
+raw3270_read_modified(struct raw3270 *rp)
+{
+	if (rp->state != RAW3270_STATE_W4ATTN)
+		return;
+	/* Use 'read modified' to get the result of a read partition. */
+	memset(&rp->init_readmod, 0, sizeof(rp->init_readmod));
+	memset(&rp->init_data, 0, sizeof(rp->init_data));
+	rp->init_readmod.ccw.cmd_code = TC_READMOD;
+	rp->init_readmod.ccw.flags = CCW_FLAG_SLI;
+	rp->init_readmod.ccw.count = sizeof(rp->init_data);
+	rp->init_readmod.ccw.cda = (__u32) __pa(rp->init_data);
+	rp->init_readmod.callback = raw3270_read_modified_cb;
+	rp->state = RAW3270_STATE_READMOD;
+	raw3270_start_irq(&rp->init_view, &rp->init_readmod);
+}
+
+static void
+raw3270_writesf_readpart(struct raw3270 *rp)
+{
+	static const unsigned char wbuf[] =
+		{ 0x00, 0x07, 0x01, 0xff, 0x03, 0x00, 0x81 };
+
+	/* Store 'read partition' data stream to init_data */
+	memset(&rp->init_readpart, 0, sizeof(rp->init_readpart));
+	memset(&rp->init_data, 0, sizeof(rp->init_data));
+	memcpy(&rp->init_data, wbuf, sizeof(wbuf));
+	rp->init_readpart.ccw.cmd_code = TC_WRITESF;
+	rp->init_readpart.ccw.flags = CCW_FLAG_SLI;
+	rp->init_readpart.ccw.count = sizeof(wbuf);
+	rp->init_readpart.ccw.cda = (__u32) __pa(&rp->init_data);
+	rp->state = RAW3270_STATE_W4ATTN;
+	raw3270_start_irq(&rp->init_view, &rp->init_readpart);
+}
+
+/*
+ * Device reset
+ */
+static void
+raw3270_reset_device_cb(struct raw3270_request *rq, void *data)
+{
+	struct raw3270 *rp = rq->view->dev;
+
+	if (rp->state != RAW3270_STATE_RESET)
+		return;
+	if (rq->rc) {
+		/* Reset command failed. */
+		rp->state = RAW3270_STATE_INIT;
+	} else if (MACHINE_IS_VM) {
+		raw3270_size_device_vm(rp);
+		raw3270_size_device_done(rp);
+	} else
+		raw3270_writesf_readpart(rp);
+	memset(&rp->init_reset, 0, sizeof(rp->init_reset));
+}
+
+static int
+__raw3270_reset_device(struct raw3270 *rp)
+{
+	int rc;
+
+	/* Check if reset is already pending */
+	if (rp->init_reset.view)
+		return -EBUSY;
+	/* Store reset data stream to init_data/init_reset */
+	rp->init_data[0] = TW_KR;
+	rp->init_reset.ccw.cmd_code = TC_EWRITEA;
+	rp->init_reset.ccw.flags = CCW_FLAG_SLI;
+	rp->init_reset.ccw.count = 1;
+	rp->init_reset.ccw.cda = (__u32) __pa(rp->init_data);
+	rp->init_reset.callback = raw3270_reset_device_cb;
+	rc = __raw3270_start(rp, &rp->init_view, &rp->init_reset);
+	if (rc == 0 && rp->state == RAW3270_STATE_INIT)
+		rp->state = RAW3270_STATE_RESET;
+	return rc;
+}
+
+static int
+raw3270_reset_device(struct raw3270 *rp)
+{
+	unsigned long flags;
+	int rc;
+
+	spin_lock_irqsave(get_ccwdev_lock(rp->cdev), flags);
+	rc = __raw3270_reset_device(rp);
+	spin_unlock_irqrestore(get_ccwdev_lock(rp->cdev), flags);
+	return rc;
+}
+
+int
+raw3270_reset(struct raw3270_view *view)
+{
+	struct raw3270 *rp;
+	int rc;
+
+	rp = view->dev;
+	if (!rp || rp->view != view ||
+	    test_bit(RAW3270_FLAGS_FROZEN, &rp->flags))
+		rc = -EACCES;
+	else if (!raw3270_state_ready(rp))
+		rc = -EBUSY;
+	else
+		rc = raw3270_reset_device(view->dev);
+	return rc;
+}
+
+static void
+__raw3270_disconnect(struct raw3270 *rp)
+{
+	struct raw3270_request *rq;
+	struct raw3270_view *view;
+
+	rp->state = RAW3270_STATE_INIT;
+	rp->view = &rp->init_view;
+	/* Cancel all queued requests */
+	while (!list_empty(&rp->req_queue)) {
+		rq = list_entry(rp->req_queue.next,
+				struct raw3270_request, list);
+		view = rq->view;
+		rq->rc = -EACCES;
+		list_del_init(&rq->list);
+		if (rq->callback)
+			rq->callback(rq, rq->callback_data);
+		raw3270_put_view(view);
+	}
+	/* Start from scratch */
+	__raw3270_reset_device(rp);
+}
+
+static void
+raw3270_init_irq(struct raw3270_view *view, struct raw3270_request *rq,
+		 struct irb *irb)
+{
+	struct raw3270 *rp;
+
+	if (rq) {
+		if (irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK) {
+			if (irb->ecw[0] & SNS0_CMD_REJECT)
+				rq->rc = -EOPNOTSUPP;
+			else
+				rq->rc = -EIO;
+		}
+	}
+	if (irb->scsw.cmd.dstat & DEV_STAT_ATTENTION) {
+		/* Queue read modified after attention interrupt */
+		rp = view->dev;
+		raw3270_read_modified(rp);
+	}
+}
+
+static struct raw3270_fn raw3270_init_fn = {
+	.intv = raw3270_init_irq
+};
+
+/*
+ * Setup new 3270 device.
+ */
+static int
+raw3270_setup_device(struct ccw_device *cdev, struct raw3270 *rp, char *ascebc)
+{
+	struct list_head *l;
+	struct raw3270 *tmp;
+	int minor;
+
+	memset(rp, 0, sizeof(struct raw3270));
+	/* Copy ebcdic -> ascii translation table. */
+	memcpy(ascebc, _ascebc, 256);
+	if (tubxcorrect) {
+		/* correct brackets and circumflex */
+		ascebc['['] = 0xad;
+		ascebc[']'] = 0xbd;
+		ascebc['^'] = 0xb0;
+	}
+	rp->ascebc = ascebc;
+
+	/* Set defaults. */
+	rp->rows = 24;
+	rp->cols = 80;
+
+	INIT_LIST_HEAD(&rp->req_queue);
+	INIT_LIST_HEAD(&rp->view_list);
+
+	rp->init_view.dev = rp;
+	rp->init_view.fn = &raw3270_init_fn;
+	rp->view = &rp->init_view;
+
+	/*
+	 * Add device to list and find the smallest unused minor
+	 * number for it. Note: there is no device with minor 0,
+	 * see special case for fs3270.c:fs3270_open().
+	 */
+	mutex_lock(&raw3270_mutex);
+	/* Keep the list sorted. */
+	minor = RAW3270_FIRSTMINOR;
+	rp->minor = -1;
+	list_for_each(l, &raw3270_devices) {
+		tmp = list_entry(l, struct raw3270, list);
+		if (tmp->minor > minor) {
+			rp->minor = minor;
+			__list_add(&rp->list, l->prev, l);
+			break;
+		}
+		minor++;
+	}
+	if (rp->minor == -1 && minor < RAW3270_MAXDEVS + RAW3270_FIRSTMINOR) {
+		rp->minor = minor;
+		list_add_tail(&rp->list, &raw3270_devices);
+	}
+	mutex_unlock(&raw3270_mutex);
+	/* No free minor number? Then give up. */
+	if (rp->minor == -1)
+		return -EUSERS;
+	rp->cdev = cdev;
+	dev_set_drvdata(&cdev->dev, rp);
+	cdev->handler = raw3270_irq;
+	return 0;
+}
+
+#ifdef CONFIG_TN3270_CONSOLE
+/* Tentative definition - see below for actual definition. */
+static struct ccw_driver raw3270_ccw_driver;
+
+/*
+ * Setup 3270 device configured as console.
+ */
+struct raw3270 __init *raw3270_setup_console(void)
+{
+	struct ccw_device *cdev;
+	unsigned long flags;
+	struct raw3270 *rp;
+	char *ascebc;
+	int rc;
+
+	cdev = ccw_device_create_console(&raw3270_ccw_driver);
+	if (IS_ERR(cdev))
+		return ERR_CAST(cdev);
+
+	rp = kzalloc(sizeof(struct raw3270), GFP_KERNEL | GFP_DMA);
+	ascebc = kzalloc(256, GFP_KERNEL);
+	rc = raw3270_setup_device(cdev, rp, ascebc);
+	if (rc)
+		return ERR_PTR(rc);
+	set_bit(RAW3270_FLAGS_CONSOLE, &rp->flags);
+
+	rc = ccw_device_enable_console(cdev);
+	if (rc) {
+		ccw_device_destroy_console(cdev);
+		return ERR_PTR(rc);
+	}
+
+	spin_lock_irqsave(get_ccwdev_lock(rp->cdev), flags);
+	do {
+		__raw3270_reset_device(rp);
+		while (!raw3270_state_final(rp)) {
+			ccw_device_wait_idle(rp->cdev);
+			barrier();
+		}
+	} while (rp->state != RAW3270_STATE_READY);
+	spin_unlock_irqrestore(get_ccwdev_lock(rp->cdev), flags);
+	return rp;
+}
+
+void
+raw3270_wait_cons_dev(struct raw3270 *rp)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(get_ccwdev_lock(rp->cdev), flags);
+	ccw_device_wait_idle(rp->cdev);
+	spin_unlock_irqrestore(get_ccwdev_lock(rp->cdev), flags);
+}
+
+#endif
+
+/*
+ * Create a 3270 device structure.
+ */
+static struct raw3270 *
+raw3270_create_device(struct ccw_device *cdev)
+{
+	struct raw3270 *rp;
+	char *ascebc;
+	int rc;
+
+	rp = kzalloc(sizeof(struct raw3270), GFP_KERNEL | GFP_DMA);
+	if (!rp)
+		return ERR_PTR(-ENOMEM);
+	ascebc = kmalloc(256, GFP_KERNEL);
+	if (!ascebc) {
+		kfree(rp);
+		return ERR_PTR(-ENOMEM);
+	}
+	rc = raw3270_setup_device(cdev, rp, ascebc);
+	if (rc) {
+		kfree(rp->ascebc);
+		kfree(rp);
+		rp = ERR_PTR(rc);
+	}
+	/* Get reference to ccw_device structure. */
+	get_device(&cdev->dev);
+	return rp;
+}
+
+/*
+ * Activate a view.
+ */
+int
+raw3270_activate_view(struct raw3270_view *view)
+{
+	struct raw3270 *rp;
+	struct raw3270_view *oldview, *nv;
+	unsigned long flags;
+	int rc;
+
+	rp = view->dev;
+	if (!rp)
+		return -ENODEV;
+	spin_lock_irqsave(get_ccwdev_lock(rp->cdev), flags);
+	if (rp->view == view)
+		rc = 0;
+	else if (!raw3270_state_ready(rp))
+		rc = -EBUSY;
+	else if (test_bit(RAW3270_FLAGS_FROZEN, &rp->flags))
+		rc = -EACCES;
+	else {
+		oldview = NULL;
+		if (rp->view && rp->view->fn->deactivate) {
+			oldview = rp->view;
+			oldview->fn->deactivate(oldview);
+		}
+		rp->view = view;
+		rc = view->fn->activate(view);
+		if (rc) {
+			/* Didn't work. Try to reactivate the old view. */
+			rp->view = oldview;
+			if (!oldview || oldview->fn->activate(oldview) != 0) {
+				/* Didn't work as well. Try any other view. */
+				list_for_each_entry(nv, &rp->view_list, list)
+					if (nv != view && nv != oldview) {
+						rp->view = nv;
+						if (nv->fn->activate(nv) == 0)
+							break;
+						rp->view = NULL;
+					}
+			}
+		}
+	}
+	spin_unlock_irqrestore(get_ccwdev_lock(rp->cdev), flags);
+	return rc;
+}
+
+/*
+ * Deactivate current view.
+ */
+void
+raw3270_deactivate_view(struct raw3270_view *view)
+{
+	unsigned long flags;
+	struct raw3270 *rp;
+
+	rp = view->dev;
+	if (!rp)
+		return;
+	spin_lock_irqsave(get_ccwdev_lock(rp->cdev), flags);
+	if (rp->view == view) {
+		view->fn->deactivate(view);
+		rp->view = NULL;
+		/* Move deactivated view to end of list. */
+		list_del_init(&view->list);
+		list_add_tail(&view->list, &rp->view_list);
+		/* Try to activate another view. */
+		if (raw3270_state_ready(rp) &&
+		    !test_bit(RAW3270_FLAGS_FROZEN, &rp->flags)) {
+			list_for_each_entry(view, &rp->view_list, list) {
+				rp->view = view;
+				if (view->fn->activate(view) == 0)
+					break;
+				rp->view = NULL;
+			}
+		}
+	}
+	spin_unlock_irqrestore(get_ccwdev_lock(rp->cdev), flags);
+}
+
+/*
+ * Add view to device with minor "minor".
+ */
+int
+raw3270_add_view(struct raw3270_view *view, struct raw3270_fn *fn, int minor)
+{
+	unsigned long flags;
+	struct raw3270 *rp;
+	int rc;
+
+	if (minor <= 0)
+		return -ENODEV;
+	mutex_lock(&raw3270_mutex);
+	rc = -ENODEV;
+	list_for_each_entry(rp, &raw3270_devices, list) {
+		if (rp->minor != minor)
+			continue;
+		spin_lock_irqsave(get_ccwdev_lock(rp->cdev), flags);
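+		/*
+		 * The view starts with two references: raw3270_del_view
+		 * drops one and then waits for the remaining holders
+		 * (e.g. raw3270_find_view callers) to drop theirs.
+		 */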
+		atomic_set(&view->ref_count, 2);
+		view->dev = rp;
+		view->fn = fn;
+		view->model = rp->model;
+		view->rows = rp->rows;
+		view->cols = rp->cols;
+		view->ascebc = rp->ascebc;
+		spin_lock_init(&view->lock);
+		list_add(&view->list, &rp->view_list);
+		rc = 0;
+		spin_unlock_irqrestore(get_ccwdev_lock(rp->cdev), flags);
+		break;
+	}
+	mutex_unlock(&raw3270_mutex);
+	return rc;
+}
+
+/*
+ * Find specific view of device with minor "minor".
+ */
+struct raw3270_view *
+raw3270_find_view(struct raw3270_fn *fn, int minor)
+{
+	struct raw3270 *rp;
+	struct raw3270_view *view, *tmp;
+	unsigned long flags;
+
+	mutex_lock(&raw3270_mutex);
+	view = ERR_PTR(-ENODEV);
+	list_for_each_entry(rp, &raw3270_devices, list) {
+		if (rp->minor != minor)
+			continue;
+		spin_lock_irqsave(get_ccwdev_lock(rp->cdev), flags);
+		list_for_each_entry(tmp, &rp->view_list, list) {
+			if (tmp->fn == fn) {
+				raw3270_get_view(tmp);
+				view = tmp;
+				break;
+			}
+		}
+		spin_unlock_irqrestore(get_ccwdev_lock(rp->cdev), flags);
+		break;
+	}
+	mutex_unlock(&raw3270_mutex);
+	return view;
+}
+
+/*
+ * Remove view from device and free view structure via call to view->fn->free.
+ */
+void
+raw3270_del_view(struct raw3270_view *view)
+{
+	unsigned long flags;
+	struct raw3270 *rp;
+	struct raw3270_view *nv;
+
+	rp = view->dev;
+	spin_lock_irqsave(get_ccwdev_lock(rp->cdev), flags);
+	if (rp->view == view) {
+		view->fn->deactivate(view);
+		rp->view = NULL;
+	}
+	list_del_init(&view->list);
+	if (!rp->view && raw3270_state_ready(rp) &&
+	    !test_bit(RAW3270_FLAGS_FROZEN, &rp->flags)) {
+		/* Try to activate another view. */
+		list_for_each_entry(nv, &rp->view_list, list) {
+			if (nv->fn->activate(nv) == 0) {
+				rp->view = nv;
+				break;
+			}
+		}
+	}
+	spin_unlock_irqrestore(get_ccwdev_lock(rp->cdev), flags);
+	/* Wait for reference counter to drop to zero. */
+	atomic_dec(&view->ref_count);
+	wait_event(raw3270_wait_queue, atomic_read(&view->ref_count) == 0);
+	if (view->fn->free)
+		view->fn->free(view);
+}
+
+/*
+ * Remove a 3270 device structure.
+ */
+static void
+raw3270_delete_device(struct raw3270 *rp)
+{
+	struct ccw_device *cdev;
+
+	/* Remove from device chain. */
+	mutex_lock(&raw3270_mutex);
+	list_del_init(&rp->list);
+	mutex_unlock(&raw3270_mutex);
+
+	/* Disconnect from ccw_device. */
+	cdev = rp->cdev;
+	rp->cdev = NULL;
+	dev_set_drvdata(&cdev->dev, NULL);
+	cdev->handler = NULL;
+
+	/* Put ccw_device structure. */
+	put_device(&cdev->dev);
+
+	/* Now free raw3270 structure. */
+	kfree(rp->ascebc);
+	kfree(rp);
+}
+
+static int
+raw3270_probe(struct ccw_device *cdev)
+{
+	return 0;
+}
+
+/*
+ * Additional attributes for a 3270 device
+ */
+static ssize_t
+raw3270_model_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "%i\n",
+			((struct raw3270 *) dev_get_drvdata(dev))->model);
+}
+static DEVICE_ATTR(model, 0444, raw3270_model_show, NULL);
+
+static ssize_t
+raw3270_rows_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "%i\n",
+			((struct raw3270 *) dev_get_drvdata(dev))->rows);
+}
+static DEVICE_ATTR(rows, 0444, raw3270_rows_show, NULL);
+
+static ssize_t
+raw3270_columns_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "%i\n",
+			((struct raw3270 *) dev_get_drvdata(dev))->cols);
+}
+static DEVICE_ATTR(columns, 0444, raw3270_columns_show, NULL);
+
+static struct attribute *raw3270_attrs[] = {
+	&dev_attr_model.attr,
+	&dev_attr_rows.attr,
+	&dev_attr_columns.attr,
+	NULL,
+};
+
+static const struct attribute_group raw3270_attr_group = {
+	.attrs = raw3270_attrs,
+};
+
+static int raw3270_create_attributes(struct raw3270 *rp)
+{
+	return sysfs_create_group(&rp->cdev->dev.kobj, &raw3270_attr_group);
+}
+
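+/*
+ * The attributes appear as read-only sysfs files below the ccw device,
+ * e.g. (the bus id 0.0.0190 is just an illustration):
+ *
+ *	$ cat /sys/bus/ccw/devices/0.0.0190/model
+ *	2
+ */
+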
+/*
+ * Notifier for device addition/removal
+ */
+static LIST_HEAD(raw3270_notifier);
+
+int raw3270_register_notifier(struct raw3270_notifier *notifier)
+{
+	struct raw3270 *rp;
+
+	mutex_lock(&raw3270_mutex);
+	list_add_tail(&notifier->list, &raw3270_notifier);
+	list_for_each_entry(rp, &raw3270_devices, list)
+		notifier->create(rp->minor);
+	mutex_unlock(&raw3270_mutex);
+	return 0;
+}
+
+void raw3270_unregister_notifier(struct raw3270_notifier *notifier)
+{
+	struct raw3270 *rp;
+
+	mutex_lock(&raw3270_mutex);
+	list_for_each_entry(rp, &raw3270_devices, list)
+		notifier->destroy(rp->minor);
+	list_del(&notifier->list);
+	mutex_unlock(&raw3270_mutex);
+}
+
+/*
+ * Set 3270 device online.
+ */
+static int
+raw3270_set_online(struct ccw_device *cdev)
+{
+	struct raw3270_notifier *np;
+	struct raw3270 *rp;
+	int rc;
+
+	rp = raw3270_create_device(cdev);
+	if (IS_ERR(rp))
+		return PTR_ERR(rp);
+	rc = raw3270_create_attributes(rp);
+	if (rc)
+		goto failure;
+	raw3270_reset_device(rp);
+	mutex_lock(&raw3270_mutex);
+	list_for_each_entry(np, &raw3270_notifier, list)
+		np->create(rp->minor);
+	mutex_unlock(&raw3270_mutex);
+	return 0;
+
+failure:
+	raw3270_delete_device(rp);
+	return rc;
+}
+
+/*
+ * Remove 3270 device structure.
+ */
+static void
+raw3270_remove(struct ccw_device *cdev)
+{
+	unsigned long flags;
+	struct raw3270 *rp;
+	struct raw3270_view *v;
+	struct raw3270_notifier *np;
+
+	rp = dev_get_drvdata(&cdev->dev);
+	/*
+	 * _remove is the opposite of _probe; it's probe that
+	 * should set up rp.  raw3270_remove gets entered for
+	 * devices even if they haven't been varied online.
+	 * Thus, rp may validly be NULL here.
+	 */
+	if (rp == NULL)
+		return;
+
+	sysfs_remove_group(&cdev->dev.kobj, &raw3270_attr_group);
+
+	/* Deactivate current view and remove all views. */
+	spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
+	if (rp->view) {
+		if (rp->view->fn->deactivate)
+			rp->view->fn->deactivate(rp->view);
+		rp->view = NULL;
+	}
+	while (!list_empty(&rp->view_list)) {
+		v = list_entry(rp->view_list.next, struct raw3270_view, list);
+		if (v->fn->release)
+			v->fn->release(v);
+		spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
+		raw3270_del_view(v);
+		spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
+	}
+	spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
+
+	mutex_lock(&raw3270_mutex);
+	list_for_each_entry(np, &raw3270_notifier, list)
+		np->destroy(rp->minor);
+	mutex_unlock(&raw3270_mutex);
+
+	/* Reset 3270 device. */
+	raw3270_reset_device(rp);
+	/* And finally remove it. */
+	raw3270_delete_device(rp);
+}
+
+/*
+ * Set 3270 device offline.
+ */
+static int
+raw3270_set_offline(struct ccw_device *cdev)
+{
+	struct raw3270 *rp;
+
+	rp = dev_get_drvdata(&cdev->dev);
+	if (test_bit(RAW3270_FLAGS_CONSOLE, &rp->flags))
+		return -EBUSY;
+	raw3270_remove(cdev);
+	return 0;
+}
+
+static int raw3270_pm_stop(struct ccw_device *cdev)
+{
+	struct raw3270 *rp;
+	struct raw3270_view *view;
+	unsigned long flags;
+
+	rp = dev_get_drvdata(&cdev->dev);
+	if (!rp)
+		return 0;
+	spin_lock_irqsave(get_ccwdev_lock(rp->cdev), flags);
+	if (rp->view && rp->view->fn->deactivate)
+		rp->view->fn->deactivate(rp->view);
+	if (!test_bit(RAW3270_FLAGS_CONSOLE, &rp->flags)) {
+		/*
+		 * Release tty and fullscreen for all non-console
+		 * devices.
+		 */
+		list_for_each_entry(view, &rp->view_list, list) {
+			if (view->fn->release)
+				view->fn->release(view);
+		}
+	}
+	set_bit(RAW3270_FLAGS_FROZEN, &rp->flags);
+	spin_unlock_irqrestore(get_ccwdev_lock(rp->cdev), flags);
+	return 0;
+}
+
+static int raw3270_pm_start(struct ccw_device *cdev)
+{
+	struct raw3270 *rp;
+	unsigned long flags;
+
+	rp = dev_get_drvdata(&cdev->dev);
+	if (!rp)
+		return 0;
+	spin_lock_irqsave(get_ccwdev_lock(rp->cdev), flags);
+	clear_bit(RAW3270_FLAGS_FROZEN, &rp->flags);
+	if (rp->view && rp->view->fn->activate)
+		rp->view->fn->activate(rp->view);
+	spin_unlock_irqrestore(get_ccwdev_lock(rp->cdev), flags);
+	return 0;
+}
+
+void raw3270_pm_unfreeze(struct raw3270_view *view)
+{
+#ifdef CONFIG_TN3270_CONSOLE
+	struct raw3270 *rp;
+
+	rp = view->dev;
+	if (rp && test_bit(RAW3270_FLAGS_FROZEN, &rp->flags))
+		ccw_device_force_console(rp->cdev);
+#endif
+}
+
+static struct ccw_device_id raw3270_id[] = {
+	{ CCW_DEVICE(0x3270, 0) },
+	{ CCW_DEVICE(0x3271, 0) },
+	{ CCW_DEVICE(0x3272, 0) },
+	{ CCW_DEVICE(0x3273, 0) },
+	{ CCW_DEVICE(0x3274, 0) },
+	{ CCW_DEVICE(0x3275, 0) },
+	{ CCW_DEVICE(0x3276, 0) },
+	{ CCW_DEVICE(0x3277, 0) },
+	{ CCW_DEVICE(0x3278, 0) },
+	{ CCW_DEVICE(0x3279, 0) },
+	{ CCW_DEVICE(0x3174, 0) },
+	{ /* end of list */ },
+};
+
+static struct ccw_driver raw3270_ccw_driver = {
+	.driver = {
+		.name	= "3270",
+		.owner	= THIS_MODULE,
+	},
+	.ids		= raw3270_id,
+	.probe		= &raw3270_probe,
+	.remove		= &raw3270_remove,
+	.set_online	= &raw3270_set_online,
+	.set_offline	= &raw3270_set_offline,
+	.freeze		= &raw3270_pm_stop,
+	.thaw		= &raw3270_pm_start,
+	.restore	= &raw3270_pm_start,
+	.int_class	= IRQIO_C70,
+};
+
+static int
+raw3270_init(void)
+{
+	struct raw3270 *rp;
+	int rc;
+
+	if (raw3270_registered)
+		return 0;
+	raw3270_registered = 1;
+	rc = ccw_driver_register(&raw3270_ccw_driver);
+	if (rc == 0) {
+		/* Create attributes for early (= console) device. */
+		mutex_lock(&raw3270_mutex);
+		class3270 = class_create(THIS_MODULE, "3270");
+		list_for_each_entry(rp, &raw3270_devices, list) {
+			get_device(&rp->cdev->dev);
+			raw3270_create_attributes(rp);
+		}
+		mutex_unlock(&raw3270_mutex);
+	}
+	return rc;
+}
+
+static void
+raw3270_exit(void)
+{
+	ccw_driver_unregister(&raw3270_ccw_driver);
+	class_destroy(class3270);
+}
+
+MODULE_LICENSE("GPL");
+
+module_init(raw3270_init);
+module_exit(raw3270_exit);
+
+EXPORT_SYMBOL(class3270);
+EXPORT_SYMBOL(raw3270_request_alloc);
+EXPORT_SYMBOL(raw3270_request_free);
+EXPORT_SYMBOL(raw3270_request_reset);
+EXPORT_SYMBOL(raw3270_request_set_cmd);
+EXPORT_SYMBOL(raw3270_request_add_data);
+EXPORT_SYMBOL(raw3270_request_set_data);
+EXPORT_SYMBOL(raw3270_request_set_idal);
+EXPORT_SYMBOL(raw3270_buffer_address);
+EXPORT_SYMBOL(raw3270_add_view);
+EXPORT_SYMBOL(raw3270_del_view);
+EXPORT_SYMBOL(raw3270_find_view);
+EXPORT_SYMBOL(raw3270_activate_view);
+EXPORT_SYMBOL(raw3270_deactivate_view);
+EXPORT_SYMBOL(raw3270_start);
+EXPORT_SYMBOL(raw3270_start_locked);
+EXPORT_SYMBOL(raw3270_start_irq);
+EXPORT_SYMBOL(raw3270_reset);
+EXPORT_SYMBOL(raw3270_register_notifier);
+EXPORT_SYMBOL(raw3270_unregister_notifier);
+EXPORT_SYMBOL(raw3270_wait_queue);
diff --git a/drivers/s390/char/raw3270.h b/drivers/s390/char/raw3270.h
new file mode 100644
index 0000000..114ca7c
--- /dev/null
+++ b/drivers/s390/char/raw3270.h
@@ -0,0 +1,283 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * IBM/3270 Driver
+ *
+ * Author(s):
+ *   Original 3270 Code for 2.4 written by Richard Hitt (UTS Global)
+ *   Rewritten for 2.5 by Martin Schwidefsky <schwidefsky@de.ibm.com>
+ *     Copyright IBM Corp. 2003, 2009
+ */
+
+#include <asm/idals.h>
+#include <asm/ioctl.h>
+
+/* ioctls for fullscreen 3270 */
+#define TUBICMD		_IO('3', 3)	/* set ccw command for fs reads. */
+#define TUBOCMD		_IO('3', 4)	/* set ccw command for fs writes. */
+#define TUBGETI		_IO('3', 7)	/* get ccw command for fs reads. */
+#define TUBGETO		_IO('3', 8)	/* get ccw command for fs writes. */
+#define TUBSETMOD	_IO('3', 12)	/* FIXME: what does it do? */
+#define TUBGETMOD	_IO('3', 13)	/* FIXME: what does it do? */
+
+/* Local Channel Commands */
+#define TC_WRITE	0x01		/* Write */
+#define TC_RDBUF	0x02		/* Read Buffer */
+#define TC_EWRITE	0x05		/* Erase write */
+#define TC_READMOD	0x06		/* Read modified */
+#define TC_EWRITEA	0x0d		/* Erase write alternate */
+#define TC_WRITESF	0x11		/* Write structured field */
+
+/* Buffer Control Orders */
+#define TO_SF		0x1d		/* Start field */
+#define TO_SBA		0x11		/* Set buffer address */
+#define TO_IC		0x13		/* Insert cursor */
+#define TO_PT		0x05		/* Program tab */
+#define TO_RA		0x3c		/* Repeat to address */
+#define TO_SFE		0x29		/* Start field extended */
+#define TO_EUA		0x12		/* Erase unprotected to address */
+#define TO_MF		0x2c		/* Modify field */
+#define TO_SA		0x28		/* Set attribute */
+
+/* Field Attribute Bytes */
+#define TF_INPUT	0x40		/* Visible input */
+#define TF_INPUTN	0x4c		/* Invisible input */
+#define TF_INMDT	0xc1		/* Visible, Set-MDT */
+#define TF_LOG		0x60
+
+/* Character Attribute Bytes */
+#define TAT_RESET	0x00
+#define TAT_FIELD	0xc0
+#define TAT_EXTHI	0x41
+#define TAT_COLOR	0x42
+#define TAT_CHARS	0x43
+#define TAT_TRANS	0x46
+
+/* Extended-Highlighting Bytes */
+#define TAX_RESET	0x00
+#define TAX_BLINK	0xf1
+#define TAX_REVER	0xf2
+#define TAX_UNDER	0xf4
+
+/* Reset value */
+#define TAR_RESET	0x00
+
+/* Color values */
+#define TAC_RESET	0x00
+#define TAC_BLUE	0xf1
+#define TAC_RED		0xf2
+#define TAC_PINK	0xf3
+#define TAC_GREEN	0xf4
+#define TAC_TURQ	0xf5
+#define TAC_YELLOW	0xf6
+#define TAC_WHITE	0xf7
+#define TAC_DEFAULT	0x00
+
+/* Write Control Characters */
+#define TW_NONE		0x40		/* No particular action */
+#define TW_KR		0xc2		/* Keyboard restore */
+#define TW_PLUSALARM	0x04		/* Add this bit for alarm */
+
+#define RAW3270_FIRSTMINOR	1	/* First minor number */
+#define RAW3270_MAXDEVS		255	/* Max number of 3270 devices */
+
+/* For TUBGETMOD and TUBSETMOD; should live in an exported (uapi) header. */
+struct raw3270_iocb {
+	short model;
+	short line_cnt;
+	short col_cnt;
+	short pf_cnt;
+	short re_cnt;
+	short map;
+};
+
+struct raw3270;
+struct raw3270_view;
+extern struct class *class3270;
+
+/* 3270 CCW request */
+struct raw3270_request {
+	struct list_head list;		/* list head for request queueing. */
+	struct raw3270_view *view;	/* view of this request */
+	struct ccw1 ccw;		/* single ccw. */
+	void *buffer;			/* output buffer. */
+	size_t size;			/* size of output buffer. */
+	int rescnt;			/* residual count from devstat. */
+	int rc;				/* return code for this request. */
+
+	/* Callback for delivering final status. */
+	void (*callback)(struct raw3270_request *, void *);
+	void *callback_data;
+};
+
+struct raw3270_request *raw3270_request_alloc(size_t size);
+struct raw3270_request *raw3270_request_alloc_bootmem(size_t size);
+void raw3270_request_free(struct raw3270_request *);
+void raw3270_request_reset(struct raw3270_request *);
+void raw3270_request_set_cmd(struct raw3270_request *, u8 cmd);
+int  raw3270_request_add_data(struct raw3270_request *, void *, size_t);
+void raw3270_request_set_data(struct raw3270_request *, void *, size_t);
+void raw3270_request_set_idal(struct raw3270_request *, struct idal_buffer *);
+
+static inline int
+raw3270_request_final(struct raw3270_request *rq)
+{
+	return list_empty(&rq->list);
+}
+
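+/*
+ * Typical request life cycle (illustrative sketch only; "my_view",
+ * "my_callback" and the 16-byte size are made up for the example):
+ *
+ *	struct raw3270_request *rq;
+ *
+ *	rq = raw3270_request_alloc(16);		// room for 16 data bytes
+ *	if (IS_ERR(rq))
+ *		return PTR_ERR(rq);
+ *	raw3270_request_set_cmd(rq, TC_EWRITE);	// erase/write channel command
+ *	raw3270_request_add_data(rq, buf, len);	// copy payload into rq->buffer
+ *	rq->callback = my_callback;		// delivers final status
+ *	rc = raw3270_start(&my_view, rq);	// queue it on the device
+ *
+ * raw3270_request_reset() prepares the same request for reuse.
+ */
+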
+void raw3270_buffer_address(struct raw3270 *, char *, unsigned short);
+
+/*
+ * Functions of a 3270 view.
+ */
+struct raw3270_fn {
+	int  (*activate)(struct raw3270_view *);
+	void (*deactivate)(struct raw3270_view *);
+	void (*intv)(struct raw3270_view *,
+		     struct raw3270_request *, struct irb *);
+	void (*release)(struct raw3270_view *);
+	void (*free)(struct raw3270_view *);
+	void (*resize)(struct raw3270_view *, int, int, int);
+};
+
+/*
+ * View structure chaining. The raw3270_view structure is meant to
+ * be embedded at the start of the real view data structure, e.g.:
+ *   struct example {
+ *     struct raw3270_view view;
+ *     ...
+ *   };
+ */
+struct raw3270_view {
+	struct list_head list;
+	spinlock_t lock;
+	atomic_t ref_count;
+	struct raw3270 *dev;
+	struct raw3270_fn *fn;
+	unsigned int model;
+	unsigned int rows, cols;	/* # of rows & columns of the view */
+	unsigned char *ascebc;		/* ascii -> ebcdic table */
+};
+
+int raw3270_add_view(struct raw3270_view *, struct raw3270_fn *, int);
+int raw3270_activate_view(struct raw3270_view *);
+void raw3270_del_view(struct raw3270_view *);
+void raw3270_deactivate_view(struct raw3270_view *);
+struct raw3270_view *raw3270_find_view(struct raw3270_fn *, int);
+int raw3270_start(struct raw3270_view *, struct raw3270_request *);
+int raw3270_start_locked(struct raw3270_view *, struct raw3270_request *);
+int raw3270_start_irq(struct raw3270_view *, struct raw3270_request *);
+int raw3270_reset(struct raw3270_view *);
+struct raw3270_view *raw3270_view(struct raw3270_view *);
+int raw3270_view_active(struct raw3270_view *);
+
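+/*
+ * Minimal sketch of wiring up a view (hypothetical "example" driver;
+ * the callbacks and minor number are assumptions for illustration):
+ *
+ *	static struct raw3270_fn example_fn = {
+ *		.activate   = example_activate,
+ *		.deactivate = example_deactivate,
+ *		.intv       = example_irq,
+ *	};
+ *
+ *	struct example *ep = kzalloc(sizeof(*ep), GFP_KERNEL);
+ *	rc = raw3270_add_view(&ep->view, &example_fn, minor);
+ *	if (rc == 0)
+ *		rc = raw3270_activate_view(&ep->view);
+ *
+ * The view is torn down with raw3270_del_view(), which also invokes
+ * example_fn.free() if it is set.
+ */
+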
+/* Reference count inliner for view structures. */
+static inline void
+raw3270_get_view(struct raw3270_view *view)
+{
+	atomic_inc(&view->ref_count);
+}
+
+extern wait_queue_head_t raw3270_wait_queue;
+
+static inline void
+raw3270_put_view(struct raw3270_view *view)
+{
+	if (atomic_dec_return(&view->ref_count) == 0)
+		wake_up(&raw3270_wait_queue);
+}
+
+struct raw3270 *raw3270_setup_console(void);
+void raw3270_wait_cons_dev(struct raw3270 *);
+
+/* Notifier for device addition/removal */
+struct raw3270_notifier {
+	struct list_head list;
+	void (*create)(int minor);
+	void (*destroy)(int minor);
+};
+
+int raw3270_register_notifier(struct raw3270_notifier *);
+void raw3270_unregister_notifier(struct raw3270_notifier *);
+void raw3270_pm_unfreeze(struct raw3270_view *);
+
+/*
+ * Little memory allocator for string objects.
+ */
+struct string
+{
+	struct list_head list;
+	struct list_head update;
+	unsigned long size;
+	unsigned long len;
+	char string[0];
+} __attribute__ ((aligned(8)));
+
+static inline struct string *
+alloc_string(struct list_head *free_list, unsigned long len)
+{
+	struct string *cs, *tmp;
+	unsigned long size;
+
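+	/* Round the requested length up to a multiple of 8 bytes. */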
+	size = (len + 7L) & -8L;
+	list_for_each_entry(cs, free_list, list) {
+		if (cs->size < size)
+			continue;
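+		/*
+		 * Chunk is large enough to split: carve the new object
+		 * (header + payload) off its tail and shrink the chunk
+		 * in place.
+		 */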
+		if (cs->size > size + sizeof(struct string)) {
+			char *endaddr = (char *) (cs + 1) + cs->size;
+			tmp = (struct string *) (endaddr - size) - 1;
+			tmp->size = size;
+			cs->size -= size + sizeof(struct string);
+			cs = tmp;
+		} else
+			list_del(&cs->list);
+		cs->len = len;
+		INIT_LIST_HEAD(&cs->list);
+		INIT_LIST_HEAD(&cs->update);
+		return cs;
+	}
+	return NULL;
+}
+
+static inline unsigned long
+free_string(struct list_head *free_list, struct string *cs)
+{
+	struct string *tmp;
+	struct list_head *p, *left;
+
+	/* Find out the left neighbour in free memory list. */
+	left = free_list;
+	list_for_each(p, free_list) {
+		if (list_entry(p, struct string, list) > cs)
+			break;
+		left = p;
+	}
+	/* Try to merge with right neighbour = next element from left. */
+	if (left->next != free_list) {
+		tmp = list_entry(left->next, struct string, list);
+		if ((char *) (cs + 1) + cs->size == (char *) tmp) {
+			list_del(&tmp->list);
+			cs->size += tmp->size + sizeof(struct string);
+		}
+	}
+	/* Try to merge with left neighbour. */
+	if (left != free_list) {
+		tmp = list_entry(left, struct string, list);
+		if ((char *) (tmp + 1) + tmp->size == (char *) cs) {
+			tmp->size += cs->size + sizeof(struct string);
+			return tmp->size;
+		}
+	}
+	__list_add(&cs->list, left, left->next);
+	return cs->size;
+}
+
+static inline void
+add_string_memory(struct list_head *free_list, void *mem, unsigned long size)
+{
+	struct string *cs;
+
+	cs = (struct string *) mem;
+	cs->size = size - sizeof(struct string);
+	free_string(free_list, cs);
+}
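+
+/*
+ * Usage sketch (names are illustrative): seed the allocator with one
+ * block of memory, then carve strings out of it.
+ *
+ *	static LIST_HEAD(free_list);
+ *	static char pool[PAGE_SIZE] __aligned(8);
+ *
+ *	add_string_memory(&free_list, pool, sizeof(pool));
+ *	s = alloc_string(&free_list, 80);	// NULL if nothing fits
+ *	...
+ *	free_string(&free_list, s);		// coalesces with neighbours
+ */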
+
diff --git a/drivers/s390/char/sclp.c b/drivers/s390/char/sclp.c
new file mode 100644
index 0000000..e9aa71c
--- /dev/null
+++ b/drivers/s390/char/sclp.c
@@ -0,0 +1,1259 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * core function to access sclp interface
+ *
+ * Copyright IBM Corp. 1999, 2009
+ *
+ * Author(s): Martin Peschke <mpeschke@de.ibm.com>
+ *	      Martin Schwidefsky <schwidefsky@de.ibm.com>
+ */
+
+#include <linux/kernel_stat.h>
+#include <linux/module.h>
+#include <linux/err.h>
+#include <linux/spinlock.h>
+#include <linux/interrupt.h>
+#include <linux/timer.h>
+#include <linux/reboot.h>
+#include <linux/jiffies.h>
+#include <linux/init.h>
+#include <linux/suspend.h>
+#include <linux/completion.h>
+#include <linux/platform_device.h>
+#include <asm/types.h>
+#include <asm/irq.h>
+
+#include "sclp.h"
+
+#define SCLP_HEADER		"sclp: "
+
+/* Lock to protect internal data consistency. */
+static DEFINE_SPINLOCK(sclp_lock);
+
+/* Mask of events that we can send to the sclp interface, i.e. the
+ * interface's receive mask. */
+static sccb_mask_t sclp_receive_mask;
+
+/* Mask of events that we can receive from the sclp interface, i.e. the
+ * interface's send mask. */
+static sccb_mask_t sclp_send_mask;
+
+/* List of registered event listeners and senders. */
+static struct list_head sclp_reg_list;
+
+/* List of queued requests. */
+static struct list_head sclp_req_queue;
+
+/* Data for read and init requests. */
+static struct sclp_req sclp_read_req;
+static struct sclp_req sclp_init_req;
+static char sclp_read_sccb[PAGE_SIZE] __attribute__((__aligned__(PAGE_SIZE)));
+static char sclp_init_sccb[PAGE_SIZE] __attribute__((__aligned__(PAGE_SIZE)));
+
+/* Suspend request */
+static DECLARE_COMPLETION(sclp_request_queue_flushed);
+
+/* Number of console pages to allocate, used by sclp_con.c and sclp_vt220.c */
+int sclp_console_pages = SCLP_CONSOLE_PAGES;
+/* Flag to indicate if buffer pages are dropped on buffer full condition */
+int sclp_console_drop = 1;
+/* Number of times the console dropped buffer pages */
+unsigned long sclp_console_full;
+
+static void sclp_suspend_req_cb(struct sclp_req *req, void *data)
+{
+	complete(&sclp_request_queue_flushed);
+}
+
+static int __init sclp_setup_console_pages(char *str)
+{
+	int pages, rc;
+
+	rc = kstrtoint(str, 0, &pages);
+	if (!rc && pages >= SCLP_CONSOLE_PAGES)
+		sclp_console_pages = pages;
+	return 1;
+}
+
+__setup("sclp_con_pages=", sclp_setup_console_pages);
+
+static int __init sclp_setup_console_drop(char *str)
+{
+	int drop, rc;
+
+	rc = kstrtoint(str, 0, &drop);
+	if (!rc)
+		sclp_console_drop = drop;
+	return 1;
+}
+
+__setup("sclp_con_drop=", sclp_setup_console_drop);
+
+static struct sclp_req sclp_suspend_req;
+
+/* Timer for request retries. */
+static struct timer_list sclp_request_timer;
+
+/* Timer for queued requests. */
+static struct timer_list sclp_queue_timer;
+
+/* Internal state: is a request active at the sclp? */
+static volatile enum sclp_running_state_t {
+	sclp_running_state_idle,
+	sclp_running_state_running,
+	sclp_running_state_reset_pending
+} sclp_running_state = sclp_running_state_idle;
+
+/* Internal state: is a read request pending? */
+static volatile enum sclp_reading_state_t {
+	sclp_reading_state_idle,
+	sclp_reading_state_reading
+} sclp_reading_state = sclp_reading_state_idle;
+
+/* Internal state: is the driver currently serving requests? */
+static volatile enum sclp_activation_state_t {
+	sclp_activation_state_active,
+	sclp_activation_state_deactivating,
+	sclp_activation_state_inactive,
+	sclp_activation_state_activating
+} sclp_activation_state = sclp_activation_state_active;
+
+/* Internal state: is an init mask request pending? */
+static volatile enum sclp_mask_state_t {
+	sclp_mask_state_idle,
+	sclp_mask_state_initializing
+} sclp_mask_state = sclp_mask_state_idle;
+
+/* Internal state: is the driver suspended? */
+static enum sclp_suspend_state_t {
+	sclp_suspend_state_running,
+	sclp_suspend_state_suspended,
+} sclp_suspend_state = sclp_suspend_state_running;
+
+/* Maximum retry counts */
+#define SCLP_INIT_RETRY		3
+#define SCLP_MASK_RETRY		3
+
+/* Timeout intervals in seconds. */
+#define SCLP_BUSY_INTERVAL	10
+#define SCLP_RETRY_INTERVAL	30
+
+static void sclp_request_timeout(bool force_restart);
+static void sclp_process_queue(void);
+static void __sclp_make_read_req(void);
+static int sclp_init_mask(int calculate);
+static int sclp_init(void);
+
+static void
+__sclp_queue_read_req(void)
+{
+	if (sclp_reading_state == sclp_reading_state_idle) {
+		sclp_reading_state = sclp_reading_state_reading;
+		__sclp_make_read_req();
+		/* Add request to head of queue */
+		list_add(&sclp_read_req.list, &sclp_req_queue);
+	}
+}
+
+/* Set up request retry timer. Called while sclp_lock is locked. */
+static inline void
+__sclp_set_request_timer(unsigned long time, void (*cb)(struct timer_list *))
+{
+	del_timer(&sclp_request_timer);
+	sclp_request_timer.function = cb;
+	sclp_request_timer.expires = jiffies + time;
+	add_timer(&sclp_request_timer);
+}
+
+static void sclp_request_timeout_restart(struct timer_list *unused)
+{
+	sclp_request_timeout(true);
+}
+
+static void sclp_request_timeout_normal(struct timer_list *unused)
+{
+	sclp_request_timeout(false);
+}
+
+/* Request timeout handler. Restart the request queue. If force_restart,
+ * force restart of running request. */
+static void sclp_request_timeout(bool force_restart)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&sclp_lock, flags);
+	if (force_restart) {
+		if (sclp_running_state == sclp_running_state_running) {
+			/* Break running state and queue NOP read event request
+			 * to get a defined interface state. */
+			__sclp_queue_read_req();
+			sclp_running_state = sclp_running_state_idle;
+		}
+	} else {
+		__sclp_set_request_timer(SCLP_BUSY_INTERVAL * HZ,
+					 sclp_request_timeout_normal);
+	}
+	spin_unlock_irqrestore(&sclp_lock, flags);
+	sclp_process_queue();
+}
+
+/*
+ * Returns the expire value in jiffies of the next pending request timeout,
+ * if any. Needs to be called with sclp_lock.
+ */
+static unsigned long __sclp_req_queue_find_next_timeout(void)
+{
+	unsigned long expires_next = 0;
+	struct sclp_req *req;
+
+	list_for_each_entry(req, &sclp_req_queue, list) {
+		if (!req->queue_expires)
+			continue;
+		if (!expires_next ||
+		    time_before(req->queue_expires, expires_next))
+			expires_next = req->queue_expires;
+	}
+	return expires_next;
+}
+
+/*
+ * Returns expired request, if any, and removes it from the list.
+ */
+static struct sclp_req *__sclp_req_queue_remove_expired_req(void)
+{
+	unsigned long flags, now;
+	struct sclp_req *req;
+
+	spin_lock_irqsave(&sclp_lock, flags);
+	now = jiffies;
+	/* Don't need list_for_each_safe because we break out after list_del */
+	list_for_each_entry(req, &sclp_req_queue, list) {
+		if (!req->queue_expires)
+			continue;
+		if (time_before_eq(req->queue_expires, now)) {
+			if (req->status == SCLP_REQ_QUEUED) {
+				req->status = SCLP_REQ_QUEUED_TIMEOUT;
+				list_del(&req->list);
+				goto out;
+			}
+		}
+	}
+	req = NULL;
+out:
+	spin_unlock_irqrestore(&sclp_lock, flags);
+	return req;
+}
+
+/*
+ * Timeout handler for queued requests. Removes request from list and
+ * invokes callback. This timer can be set per request in situations where
+ * waiting too long would be harmful to the system, e.g. during SE reboot.
+ */
+static void sclp_req_queue_timeout(struct timer_list *unused)
+{
+	unsigned long flags, expires_next;
+	struct sclp_req *req;
+
+	do {
+		req = __sclp_req_queue_remove_expired_req();
+		if (req && req->callback)
+			req->callback(req, req->callback_data);
+	} while (req);
+
+	spin_lock_irqsave(&sclp_lock, flags);
+	expires_next = __sclp_req_queue_find_next_timeout();
+	if (expires_next)
+		mod_timer(&sclp_queue_timer, expires_next);
+	spin_unlock_irqrestore(&sclp_lock, flags);
+}
+
+/* Try to start a request. Return zero if the request was successfully
+ * started or if it will be started at a later time. Return non-zero otherwise.
+ * Called while sclp_lock is locked. */
+static int
+__sclp_start_request(struct sclp_req *req)
+{
+	int rc;
+
+	if (sclp_running_state != sclp_running_state_idle)
+		return 0;
+	del_timer(&sclp_request_timer);
+	rc = sclp_service_call(req->command, req->sccb);
+	req->start_count++;
+
+	if (rc == 0) {
+		/* Successfully started request */
+		req->status = SCLP_REQ_RUNNING;
+		sclp_running_state = sclp_running_state_running;
+		__sclp_set_request_timer(SCLP_RETRY_INTERVAL * HZ,
+					 sclp_request_timeout_restart);
+		return 0;
+	} else if (rc == -EBUSY) {
+		/* Try again later */
+		__sclp_set_request_timer(SCLP_BUSY_INTERVAL * HZ,
+					 sclp_request_timeout_normal);
+		return 0;
+	}
+	/* Request failed */
+	req->status = SCLP_REQ_FAILED;
+	return rc;
+}
+
+/* Try to start queued requests. */
+static void
+sclp_process_queue(void)
+{
+	struct sclp_req *req;
+	int rc;
+	unsigned long flags;
+
+	spin_lock_irqsave(&sclp_lock, flags);
+	if (sclp_running_state != sclp_running_state_idle) {
+		spin_unlock_irqrestore(&sclp_lock, flags);
+		return;
+	}
+	del_timer(&sclp_request_timer);
+	while (!list_empty(&sclp_req_queue)) {
+		req = list_entry(sclp_req_queue.next, struct sclp_req, list);
+		if (!req->sccb)
+			goto do_post;
+		rc = __sclp_start_request(req);
+		if (rc == 0)
+			break;
+		/* Request failed */
+		if (req->start_count > 1) {
+			/* Cannot abort already submitted request - could still
+			 * be active at the SCLP */
+			__sclp_set_request_timer(SCLP_BUSY_INTERVAL * HZ,
+						 sclp_request_timeout_normal);
+			break;
+		}
+do_post:
+		/* Post-processing for aborted request */
+		list_del(&req->list);
+		if (req->callback) {
+			spin_unlock_irqrestore(&sclp_lock, flags);
+			req->callback(req, req->callback_data);
+			spin_lock_irqsave(&sclp_lock, flags);
+		}
+	}
+	spin_unlock_irqrestore(&sclp_lock, flags);
+}
+
+static int __sclp_can_add_request(struct sclp_req *req)
+{
+	if (req == &sclp_suspend_req || req == &sclp_init_req)
+		return 1;
+	if (sclp_suspend_state != sclp_suspend_state_running)
+		return 0;
+	if (sclp_init_state != sclp_init_state_initialized)
+		return 0;
+	if (sclp_activation_state != sclp_activation_state_active)
+		return 0;
+	return 1;
+}
+
+/* Queue a new request. Return zero on success, non-zero otherwise. */
+int
+sclp_add_request(struct sclp_req *req)
+{
+	unsigned long flags;
+	int rc;
+
+	spin_lock_irqsave(&sclp_lock, flags);
+	if (!__sclp_can_add_request(req)) {
+		spin_unlock_irqrestore(&sclp_lock, flags);
+		return -EIO;
+	}
+	req->status = SCLP_REQ_QUEUED;
+	req->start_count = 0;
+	list_add_tail(&req->list, &sclp_req_queue);
+	rc = 0;
+	if (req->queue_timeout) {
+		req->queue_expires = jiffies + req->queue_timeout * HZ;
+		if (!timer_pending(&sclp_queue_timer) ||
+		    time_after(sclp_queue_timer.expires, req->queue_expires))
+			mod_timer(&sclp_queue_timer, req->queue_expires);
+	} else
+		req->queue_expires = 0;
+	/* Start if request is first in list */
+	if (sclp_running_state == sclp_running_state_idle &&
+	    req->list.prev == &sclp_req_queue) {
+		if (!req->sccb) {
+			list_del(&req->list);
+			rc = -ENODATA;
+			goto out;
+		}
+		rc = __sclp_start_request(req);
+		if (rc)
+			list_del(&req->list);
+	}
+out:
+	spin_unlock_irqrestore(&sclp_lock, flags);
+	return rc;
+}
+
+EXPORT_SYMBOL(sclp_add_request);
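+
+/*
+ * Caller pattern (sketch; "my_done" is a placeholder, and the command
+ * word and SCCB layout depend on the specific request):
+ *
+ *	static struct sclp_req req;
+ *	static char sccb[PAGE_SIZE] __aligned(PAGE_SIZE);
+ *
+ *	memset(&req, 0, sizeof(req));
+ *	req.command = SCLP_CMDW_READ_CPU_INFO;
+ *	req.sccb = sccb;		// caller fills in the sccb header
+ *	req.status = SCLP_REQ_FILLED;
+ *	req.callback = my_done;		// invoked from interrupt context
+ *	rc = sclp_add_request(&req);
+ */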
+
+/* Dispatch events found in request buffer to registered listeners. Return 0
+ * if all events were dispatched, non-zero otherwise. */
+static int
+sclp_dispatch_evbufs(struct sccb_header *sccb)
+{
+	unsigned long flags;
+	struct evbuf_header *evbuf;
+	struct list_head *l;
+	struct sclp_register *reg;
+	int offset;
+	int rc;
+
+	spin_lock_irqsave(&sclp_lock, flags);
+	rc = 0;
+	for (offset = sizeof(struct sccb_header); offset < sccb->length;
+	     offset += evbuf->length) {
+		evbuf = (struct evbuf_header *) ((addr_t) sccb + offset);
+		/* Check for malformed hardware response */
+		if (evbuf->length == 0)
+			break;
+		/* Search for event handler */
+		reg = NULL;
+		list_for_each(l, &sclp_reg_list) {
+			reg = list_entry(l, struct sclp_register, list);
+			if (reg->receive_mask & SCLP_EVTYP_MASK(evbuf->type))
+				break;
+			else
+				reg = NULL;
+		}
+		if (reg && reg->receiver_fn) {
+			spin_unlock_irqrestore(&sclp_lock, flags);
+			reg->receiver_fn(evbuf);
+			spin_lock_irqsave(&sclp_lock, flags);
+		} else if (reg == NULL)
+			rc = -EOPNOTSUPP;
+	}
+	spin_unlock_irqrestore(&sclp_lock, flags);
+	return rc;
+}
+
+/* Read event data request callback. */
+static void
+sclp_read_cb(struct sclp_req *req, void *data)
+{
+	unsigned long flags;
+	struct sccb_header *sccb;
+
+	sccb = (struct sccb_header *) req->sccb;
+	if (req->status == SCLP_REQ_DONE && (sccb->response_code == 0x20 ||
+	    sccb->response_code == 0x220))
+		sclp_dispatch_evbufs(sccb);
+	spin_lock_irqsave(&sclp_lock, flags);
+	sclp_reading_state = sclp_reading_state_idle;
+	spin_unlock_irqrestore(&sclp_lock, flags);
+}
+
+/* Prepare read event data request. Called while sclp_lock is locked. */
+static void __sclp_make_read_req(void)
+{
+	struct sccb_header *sccb;
+
+	sccb = (struct sccb_header *) sclp_read_sccb;
+	clear_page(sccb);
+	memset(&sclp_read_req, 0, sizeof(struct sclp_req));
+	sclp_read_req.command = SCLP_CMDW_READ_EVENT_DATA;
+	sclp_read_req.status = SCLP_REQ_QUEUED;
+	sclp_read_req.start_count = 0;
+	sclp_read_req.callback = sclp_read_cb;
+	sclp_read_req.sccb = sccb;
+	sccb->length = PAGE_SIZE;
+	sccb->function_code = 0;
+	sccb->control_mask[2] = 0x80;
+}
+
+/* Search request list for request with matching sccb. Return request if found,
+ * NULL otherwise. Called while sclp_lock is locked. */
+static inline struct sclp_req *
+__sclp_find_req(u32 sccb)
+{
+	struct list_head *l;
+	struct sclp_req *req;
+
+	list_for_each(l, &sclp_req_queue) {
+		req = list_entry(l, struct sclp_req, list);
+		if (sccb == (u32) (addr_t) req->sccb)
+			return req;
+	}
+	return NULL;
+}
+
+/* Handler for external interruption. Perform request post-processing.
+ * Prepare read event data request if necessary. Start processing of next
+ * request on queue. */
+static void sclp_interrupt_handler(struct ext_code ext_code,
+				   unsigned int param32, unsigned long param64)
+{
+	struct sclp_req *req;
+	u32 finished_sccb;
+	u32 evbuf_pending;
+
+	inc_irq_stat(IRQEXT_SCP);
+	spin_lock(&sclp_lock);
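+	/*
+	 * The interruption parameter holds the (8-byte aligned) address
+	 * of the finished SCCB in its upper bits; the two low-order bits
+	 * flag pending event buffers.
+	 */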
+	finished_sccb = param32 & 0xfffffff8;
+	evbuf_pending = param32 & 0x3;
+	if (finished_sccb) {
+		del_timer(&sclp_request_timer);
+		sclp_running_state = sclp_running_state_reset_pending;
+		req = __sclp_find_req(finished_sccb);
+		if (req) {
+			/* Request post-processing */
+			list_del(&req->list);
+			req->status = SCLP_REQ_DONE;
+			if (req->callback) {
+				spin_unlock(&sclp_lock);
+				req->callback(req, req->callback_data);
+				spin_lock(&sclp_lock);
+			}
+		}
+		sclp_running_state = sclp_running_state_idle;
+	}
+	if (evbuf_pending &&
+	    sclp_activation_state == sclp_activation_state_active)
+		__sclp_queue_read_req();
+	spin_unlock(&sclp_lock);
+	sclp_process_queue();
+}
+
+/* Convert interval in jiffies to TOD ticks. */
+static inline u64
+sclp_tod_from_jiffies(unsigned long jiffies)
+{
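+	/*
+	 * TOD clock bit 51 ticks every microsecond, so one second is
+	 * roughly 2^32 TOD units: convert to whole seconds and shift.
+	 */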
+	return (u64) (jiffies / HZ) << 32;
+}
+
+/* Wait until a currently running request has finished. Note: while this
+ * is running, no timers are served on the calling CPU. */
+void
+sclp_sync_wait(void)
+{
+	unsigned long long old_tick;
+	unsigned long flags;
+	unsigned long cr0, cr0_sync;
+	u64 timeout;
+	int irq_context;
+
+	/* We'll be disabling timer interrupts, so we need a custom timeout
+	 * mechanism */
+	timeout = 0;
+	if (timer_pending(&sclp_request_timer)) {
+		/* Get timeout TOD value */
+		timeout = get_tod_clock_fast() +
+			  sclp_tod_from_jiffies(sclp_request_timer.expires -
+						jiffies);
+	}
+	local_irq_save(flags);
+	/* Prevent bottom half from executing once we force interrupts open */
+	irq_context = in_interrupt();
+	if (!irq_context)
+		local_bh_disable();
+	/* Enable service-signal interruption, disable timer interrupts */
+	old_tick = local_tick_disable();
+	trace_hardirqs_on();
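+	/*
+	 * Mask all external-interrupt subclasses in CR0, then re-enable
+	 * only the service-signal subclass (big-endian CR0 bit 54).
+	 */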
+	__ctl_store(cr0, 0, 0);
+	cr0_sync = cr0 & ~CR0_IRQ_SUBCLASS_MASK;
+	cr0_sync |= 1UL << (63 - 54);
+	__ctl_load(cr0_sync, 0, 0);
+	__arch_local_irq_stosm(0x01);
+	/* Loop until driver state indicates finished request */
+	while (sclp_running_state != sclp_running_state_idle) {
+		/* Check for expired request timer */
+		if (timer_pending(&sclp_request_timer) &&
+		    get_tod_clock_fast() > timeout &&
+		    del_timer(&sclp_request_timer))
+			sclp_request_timer.function(&sclp_request_timer);
+		cpu_relax();
+	}
+	local_irq_disable();
+	__ctl_load(cr0, 0, 0);
+	if (!irq_context)
+		_local_bh_enable();
+	local_tick_enable(old_tick);
+	local_irq_restore(flags);
+}
+EXPORT_SYMBOL(sclp_sync_wait);
+
+/* Dispatch changes in send and receive mask to registered listeners. */
+static void
+sclp_dispatch_state_change(void)
+{
+	struct list_head *l;
+	struct sclp_register *reg;
+	unsigned long flags;
+	sccb_mask_t receive_mask;
+	sccb_mask_t send_mask;
+
+	do {
+		spin_lock_irqsave(&sclp_lock, flags);
+		reg = NULL;
+		list_for_each(l, &sclp_reg_list) {
+			reg = list_entry(l, struct sclp_register, list);
+			receive_mask = reg->send_mask & sclp_receive_mask;
+			send_mask = reg->receive_mask & sclp_send_mask;
+			if (reg->sclp_receive_mask != receive_mask ||
+			    reg->sclp_send_mask != send_mask) {
+				reg->sclp_receive_mask = receive_mask;
+				reg->sclp_send_mask = send_mask;
+				break;
+			} else
+				reg = NULL;
+		}
+		spin_unlock_irqrestore(&sclp_lock, flags);
+		if (reg && reg->state_change_fn)
+			reg->state_change_fn(reg);
+	} while (reg);
+}
+
+struct sclp_statechangebuf {
+	struct evbuf_header	header;
+	u8		validity_sclp_active_facility_mask : 1;
+	u8		validity_sclp_receive_mask : 1;
+	u8		validity_sclp_send_mask : 1;
+	u8		validity_read_data_function_mask : 1;
+	u16		_zeros : 12;
+	u16		mask_length;
+	u64		sclp_active_facility_mask;
+	u8		masks[2 * 1021 + 4];	/* variable length */
+	/*
+	 * u8		sclp_receive_mask[mask_length];
+	 * u8		sclp_send_mask[mask_length];
+	 * u32		read_data_function_mask;
+	 */
+} __attribute__((packed));
+
+
+/* State change event callback. Inform listeners of changes. */
+static void
+sclp_state_change_cb(struct evbuf_header *evbuf)
+{
+	unsigned long flags;
+	struct sclp_statechangebuf *scbuf;
+
+	BUILD_BUG_ON(sizeof(struct sclp_statechangebuf) > PAGE_SIZE);
+
+	scbuf = (struct sclp_statechangebuf *) evbuf;
+	spin_lock_irqsave(&sclp_lock, flags);
+	if (scbuf->validity_sclp_receive_mask)
+		sclp_receive_mask = sccb_get_recv_mask(scbuf);
+	if (scbuf->validity_sclp_send_mask)
+		sclp_send_mask = sccb_get_send_mask(scbuf);
+	spin_unlock_irqrestore(&sclp_lock, flags);
+	if (scbuf->validity_sclp_active_facility_mask)
+		sclp.facilities = scbuf->sclp_active_facility_mask;
+	sclp_dispatch_state_change();
+}
+
+static struct sclp_register sclp_state_change_event = {
+	.receive_mask = EVTYP_STATECHANGE_MASK,
+	.receiver_fn = sclp_state_change_cb
+};
+
+/* Calculate receive and send mask of currently registered listeners.
+ * Called while sclp_lock is locked. */
+static inline void
+__sclp_get_mask(sccb_mask_t *receive_mask, sccb_mask_t *send_mask)
+{
+	struct list_head *l;
+	struct sclp_register *t;
+
+	*receive_mask = 0;
+	*send_mask = 0;
+	list_for_each(l, &sclp_reg_list) {
+		t = list_entry(l, struct sclp_register, list);
+		*receive_mask |= t->receive_mask;
+		*send_mask |= t->send_mask;
+	}
+}
+
+/* Register event listener. Return 0 on success, non-zero otherwise. */
+int
+sclp_register(struct sclp_register *reg)
+{
+	unsigned long flags;
+	sccb_mask_t receive_mask;
+	sccb_mask_t send_mask;
+	int rc;
+
+	rc = sclp_init();
+	if (rc)
+		return rc;
+	spin_lock_irqsave(&sclp_lock, flags);
+	/* Check event mask for collisions */
+	__sclp_get_mask(&receive_mask, &send_mask);
+	if (reg->receive_mask & receive_mask || reg->send_mask & send_mask) {
+		spin_unlock_irqrestore(&sclp_lock, flags);
+		return -EBUSY;
+	}
+	/* Trigger initial state change callback */
+	reg->sclp_receive_mask = 0;
+	reg->sclp_send_mask = 0;
+	reg->pm_event_posted = 0;
+	list_add(&reg->list, &sclp_reg_list);
+	spin_unlock_irqrestore(&sclp_lock, flags);
+	rc = sclp_init_mask(1);
+	if (rc) {
+		spin_lock_irqsave(&sclp_lock, flags);
+		list_del(&reg->list);
+		spin_unlock_irqrestore(&sclp_lock, flags);
+	}
+	return rc;
+}
+
+EXPORT_SYMBOL(sclp_register);
+
+/* Unregister event listener. */
+void
+sclp_unregister(struct sclp_register *reg)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&sclp_lock, flags);
+	list_del(&reg->list);
+	spin_unlock_irqrestore(&sclp_lock, flags);
+	sclp_init_mask(1);
+}
+
+EXPORT_SYMBOL(sclp_unregister);
+
+/* Remove event buffers which are marked processed. Return the number of
+ * remaining event buffers. */
+int
+sclp_remove_processed(struct sccb_header *sccb)
+{
+	struct evbuf_header *evbuf;
+	int unprocessed;
+	u16 remaining;
+
+	evbuf = (struct evbuf_header *) (sccb + 1);
+	unprocessed = 0;
+	remaining = sccb->length - sizeof(struct sccb_header);
+	while (remaining > 0) {
+		remaining -= evbuf->length;
+		if (evbuf->flags & 0x80) {
+			sccb->length -= evbuf->length;
+			memcpy(evbuf, (void *) ((addr_t) evbuf + evbuf->length),
+			       remaining);
+		} else {
+			unprocessed++;
+			evbuf = (struct evbuf_header *)
+					((addr_t) evbuf + evbuf->length);
+		}
+	}
+	return unprocessed;
+}
+
+EXPORT_SYMBOL(sclp_remove_processed);
+
+/* Prepare init mask request. Called while sclp_lock is locked. */
+static inline void
+__sclp_make_init_req(sccb_mask_t receive_mask, sccb_mask_t send_mask)
+{
+	struct init_sccb *sccb;
+
+	sccb = (struct init_sccb *) sclp_init_sccb;
+	clear_page(sccb);
+	memset(&sclp_init_req, 0, sizeof(struct sclp_req));
+	sclp_init_req.command = SCLP_CMDW_WRITE_EVENT_MASK;
+	sclp_init_req.status = SCLP_REQ_FILLED;
+	sclp_init_req.start_count = 0;
+	sclp_init_req.callback = NULL;
+	sclp_init_req.callback_data = NULL;
+	sclp_init_req.sccb = sccb;
+	sccb->header.length = sizeof(*sccb);
+	if (sclp_mask_compat_mode)
+		sccb->mask_length = SCLP_MASK_SIZE_COMPAT;
+	else
+		sccb->mask_length = sizeof(sccb_mask_t);
+	sccb_set_recv_mask(sccb, receive_mask);
+	sccb_set_send_mask(sccb, send_mask);
+	sccb_set_sclp_recv_mask(sccb, 0);
+	sccb_set_sclp_send_mask(sccb, 0);
+}
+
+/* Start init mask request. If calculate is non-zero, calculate the mask as
+ * requested by registered listeners. Use zero mask otherwise. Return 0 on
+ * success, non-zero otherwise. */
+static int
+sclp_init_mask(int calculate)
+{
+	unsigned long flags;
+	struct init_sccb *sccb = (struct init_sccb *) sclp_init_sccb;
+	sccb_mask_t receive_mask;
+	sccb_mask_t send_mask;
+	int retry;
+	int rc;
+	unsigned long wait;
+
+	spin_lock_irqsave(&sclp_lock, flags);
+	/* Check if interface is in appropriate state */
+	if (sclp_mask_state != sclp_mask_state_idle) {
+		spin_unlock_irqrestore(&sclp_lock, flags);
+		return -EBUSY;
+	}
+	if (sclp_activation_state == sclp_activation_state_inactive) {
+		spin_unlock_irqrestore(&sclp_lock, flags);
+		return -EINVAL;
+	}
+	sclp_mask_state = sclp_mask_state_initializing;
+	/* Determine mask */
+	if (calculate)
+		__sclp_get_mask(&receive_mask, &send_mask);
+	else {
+		receive_mask = 0;
+		send_mask = 0;
+	}
+	rc = -EIO;
+	for (retry = 0; retry <= SCLP_MASK_RETRY; retry++) {
+		/* Prepare request */
+		__sclp_make_init_req(receive_mask, send_mask);
+		spin_unlock_irqrestore(&sclp_lock, flags);
+		if (sclp_add_request(&sclp_init_req)) {
+			/* Try again later */
+			wait = jiffies + SCLP_BUSY_INTERVAL * HZ;
+			while (time_before(jiffies, wait))
+				sclp_sync_wait();
+			spin_lock_irqsave(&sclp_lock, flags);
+			continue;
+		}
+		while (sclp_init_req.status != SCLP_REQ_DONE &&
+		       sclp_init_req.status != SCLP_REQ_FAILED)
+			sclp_sync_wait();
+		spin_lock_irqsave(&sclp_lock, flags);
+		if (sclp_init_req.status == SCLP_REQ_DONE &&
+		    sccb->header.response_code == 0x20) {
+			/* Successful request */
+			if (calculate) {
+				sclp_receive_mask = sccb_get_sclp_recv_mask(sccb);
+				sclp_send_mask = sccb_get_sclp_send_mask(sccb);
+			} else {
+				sclp_receive_mask = 0;
+				sclp_send_mask = 0;
+			}
+			spin_unlock_irqrestore(&sclp_lock, flags);
+			sclp_dispatch_state_change();
+			spin_lock_irqsave(&sclp_lock, flags);
+			rc = 0;
+			break;
+		}
+	}
+	sclp_mask_state = sclp_mask_state_idle;
+	spin_unlock_irqrestore(&sclp_lock, flags);
+	return rc;
+}
+
+/* Deactivate SCLP interface. On success, new requests will be rejected,
+ * events will no longer be dispatched. Return 0 on success, non-zero
+ * otherwise. */
+int
+sclp_deactivate(void)
+{
+	unsigned long flags;
+	int rc;
+
+	spin_lock_irqsave(&sclp_lock, flags);
+	/* Deactivate can only be called when active */
+	if (sclp_activation_state != sclp_activation_state_active) {
+		spin_unlock_irqrestore(&sclp_lock, flags);
+		return -EINVAL;
+	}
+	sclp_activation_state = sclp_activation_state_deactivating;
+	spin_unlock_irqrestore(&sclp_lock, flags);
+	rc = sclp_init_mask(0);
+	spin_lock_irqsave(&sclp_lock, flags);
+	if (rc == 0)
+		sclp_activation_state = sclp_activation_state_inactive;
+	else
+		sclp_activation_state = sclp_activation_state_active;
+	spin_unlock_irqrestore(&sclp_lock, flags);
+	return rc;
+}
+
+EXPORT_SYMBOL(sclp_deactivate);
+
+/* Reactivate SCLP interface after sclp_deactivate. On success, new
+ * requests will be accepted, events will be dispatched again. Return 0 on
+ * success, non-zero otherwise. */
+int
+sclp_reactivate(void)
+{
+	unsigned long flags;
+	int rc;
+
+	spin_lock_irqsave(&sclp_lock, flags);
+	/* Reactivate can only be called when inactive */
+	if (sclp_activation_state != sclp_activation_state_inactive) {
+		spin_unlock_irqrestore(&sclp_lock, flags);
+		return -EINVAL;
+	}
+	sclp_activation_state = sclp_activation_state_activating;
+	spin_unlock_irqrestore(&sclp_lock, flags);
+	rc = sclp_init_mask(1);
+	spin_lock_irqsave(&sclp_lock, flags);
+	if (rc == 0)
+		sclp_activation_state = sclp_activation_state_active;
+	else
+		sclp_activation_state = sclp_activation_state_inactive;
+	spin_unlock_irqrestore(&sclp_lock, flags);
+	return rc;
+}
+
+EXPORT_SYMBOL(sclp_reactivate);
+
+/* Handler for external interruption used during initialization. Modify
+ * request state to done. */
+static void sclp_check_handler(struct ext_code ext_code,
+			       unsigned int param32, unsigned long param64)
+{
+	u32 finished_sccb;
+
+	inc_irq_stat(IRQEXT_SCP);
+	finished_sccb = param32 & 0xfffffff8;
+	/* Is this the interrupt we are waiting for? */
+	if (finished_sccb == 0)
+		return;
+	if (finished_sccb != (u32) (addr_t) sclp_init_sccb)
+		panic("sclp: unsolicited interrupt for buffer at 0x%x\n",
+		      finished_sccb);
+	spin_lock(&sclp_lock);
+	if (sclp_running_state == sclp_running_state_running) {
+		sclp_init_req.status = SCLP_REQ_DONE;
+		sclp_running_state = sclp_running_state_idle;
+	}
+	spin_unlock(&sclp_lock);
+}
+
+/* Initial init mask request timed out. Modify request state to failed. */
+static void
+sclp_check_timeout(struct timer_list *unused)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&sclp_lock, flags);
+	if (sclp_running_state == sclp_running_state_running) {
+		sclp_init_req.status = SCLP_REQ_FAILED;
+		sclp_running_state = sclp_running_state_idle;
+	}
+	spin_unlock_irqrestore(&sclp_lock, flags);
+}
+
+/* Perform a check of the SCLP interface. Return zero if the interface is
+ * available and there are no pending requests from a previous instance.
+ * Return non-zero otherwise. */
+static int
+sclp_check_interface(void)
+{
+	struct init_sccb *sccb;
+	unsigned long flags;
+	int retry;
+	int rc;
+
+	spin_lock_irqsave(&sclp_lock, flags);
+	/* Prepare init mask command */
+	rc = register_external_irq(EXT_IRQ_SERVICE_SIG, sclp_check_handler);
+	if (rc) {
+		spin_unlock_irqrestore(&sclp_lock, flags);
+		return rc;
+	}
+	for (retry = 0; retry <= SCLP_INIT_RETRY; retry++) {
+		__sclp_make_init_req(0, 0);
+		sccb = (struct init_sccb *) sclp_init_req.sccb;
+		rc = sclp_service_call(sclp_init_req.command, sccb);
+		if (rc == -EIO)
+			break;
+		sclp_init_req.status = SCLP_REQ_RUNNING;
+		sclp_running_state = sclp_running_state_running;
+		__sclp_set_request_timer(SCLP_RETRY_INTERVAL * HZ,
+					 sclp_check_timeout);
+		spin_unlock_irqrestore(&sclp_lock, flags);
+		/* Enable service-signal interruption - needs to happen
+		 * with IRQs enabled. */
+		irq_subclass_register(IRQ_SUBCLASS_SERVICE_SIGNAL);
+		/* Wait for signal from interrupt or timeout */
+		sclp_sync_wait();
+		/* Disable service-signal interruption - needs to happen
+		 * with IRQs enabled. */
+		irq_subclass_unregister(IRQ_SUBCLASS_SERVICE_SIGNAL);
+		spin_lock_irqsave(&sclp_lock, flags);
+		del_timer(&sclp_request_timer);
+		rc = -EBUSY;
+		if (sclp_init_req.status == SCLP_REQ_DONE) {
+			if (sccb->header.response_code == 0x20) {
+				rc = 0;
+				break;
+			} else if (sccb->header.response_code == 0x74f0) {
+				if (!sclp_mask_compat_mode) {
+					sclp_mask_compat_mode = true;
+					retry = 0;
+				}
+			}
+		}
+	}
+	unregister_external_irq(EXT_IRQ_SERVICE_SIG, sclp_check_handler);
+	spin_unlock_irqrestore(&sclp_lock, flags);
+	return rc;
+}
+
+/* Reboot event handler. Reset send and receive mask to prevent pending SCLP
+ * events from interfering with rebooted system. */
+static int
+sclp_reboot_event(struct notifier_block *this, unsigned long event, void *ptr)
+{
+	sclp_deactivate();
+	return NOTIFY_DONE;
+}
+
+static struct notifier_block sclp_reboot_notifier = {
+	.notifier_call = sclp_reboot_event
+};
+
+/*
+ * Suspend/resume SCLP notifier implementation
+ */
+
+static void sclp_pm_event(enum sclp_pm_event sclp_pm_event, int rollback)
+{
+	struct sclp_register *reg;
+	unsigned long flags;
+
+	if (!rollback) {
+		spin_lock_irqsave(&sclp_lock, flags);
+		list_for_each_entry(reg, &sclp_reg_list, list)
+			reg->pm_event_posted = 0;
+		spin_unlock_irqrestore(&sclp_lock, flags);
+	}
+	do {
+		spin_lock_irqsave(&sclp_lock, flags);
+		list_for_each_entry(reg, &sclp_reg_list, list) {
+			if (rollback && reg->pm_event_posted)
+				goto found;
+			if (!rollback && !reg->pm_event_posted)
+				goto found;
+		}
+		spin_unlock_irqrestore(&sclp_lock, flags);
+		return;
+found:
+		spin_unlock_irqrestore(&sclp_lock, flags);
+		if (reg->pm_event_fn)
+			reg->pm_event_fn(reg, sclp_pm_event);
+		reg->pm_event_posted = rollback ? 0 : 1;
+	} while (1);
+}
+
+/*
+ * Suspend/resume callbacks for the platform device
+ */
+
+static int sclp_freeze(struct device *dev)
+{
+	unsigned long flags;
+	int rc;
+
+	sclp_pm_event(SCLP_PM_EVENT_FREEZE, 0);
+
+	spin_lock_irqsave(&sclp_lock, flags);
+	sclp_suspend_state = sclp_suspend_state_suspended;
+	spin_unlock_irqrestore(&sclp_lock, flags);
+
+	/* Init suspend request data */
+	memset(&sclp_suspend_req, 0, sizeof(sclp_suspend_req));
+	sclp_suspend_req.callback = sclp_suspend_req_cb;
+	sclp_suspend_req.status = SCLP_REQ_FILLED;
+	init_completion(&sclp_request_queue_flushed);
+
+	rc = sclp_add_request(&sclp_suspend_req);
+	if (rc == 0)
+		wait_for_completion(&sclp_request_queue_flushed);
+	else if (rc != -ENODATA)
+		goto fail_thaw;
+
+	rc = sclp_deactivate();
+	if (rc)
+		goto fail_thaw;
+	return 0;
+
+fail_thaw:
+	spin_lock_irqsave(&sclp_lock, flags);
+	sclp_suspend_state = sclp_suspend_state_running;
+	spin_unlock_irqrestore(&sclp_lock, flags);
+	sclp_pm_event(SCLP_PM_EVENT_THAW, 1);
+	return rc;
+}
+
+static int sclp_undo_suspend(enum sclp_pm_event event)
+{
+	unsigned long flags;
+	int rc;
+
+	rc = sclp_reactivate();
+	if (rc)
+		return rc;
+
+	spin_lock_irqsave(&sclp_lock, flags);
+	sclp_suspend_state = sclp_suspend_state_running;
+	spin_unlock_irqrestore(&sclp_lock, flags);
+
+	sclp_pm_event(event, 0);
+	return 0;
+}
+
+static int sclp_thaw(struct device *dev)
+{
+	return sclp_undo_suspend(SCLP_PM_EVENT_THAW);
+}
+
+static int sclp_restore(struct device *dev)
+{
+	return sclp_undo_suspend(SCLP_PM_EVENT_RESTORE);
+}
+
+static const struct dev_pm_ops sclp_pm_ops = {
+	.freeze		= sclp_freeze,
+	.thaw		= sclp_thaw,
+	.restore	= sclp_restore,
+};
+
+static ssize_t con_pages_show(struct device_driver *dev, char *buf)
+{
+	return sprintf(buf, "%i\n", sclp_console_pages);
+}
+
+static DRIVER_ATTR_RO(con_pages);
+
+static ssize_t con_drop_show(struct device_driver *dev, char *buf)
+{
+	return sprintf(buf, "%i\n", sclp_console_drop);
+}
+
+static DRIVER_ATTR_RO(con_drop);
+
+static ssize_t con_full_show(struct device_driver *dev, char *buf)
+{
+	return sprintf(buf, "%lu\n", sclp_console_full);
+}
+
+static DRIVER_ATTR_RO(con_full);
+
+static struct attribute *sclp_drv_attrs[] = {
+	&driver_attr_con_pages.attr,
+	&driver_attr_con_drop.attr,
+	&driver_attr_con_full.attr,
+	NULL,
+};
+static struct attribute_group sclp_drv_attr_group = {
+	.attrs = sclp_drv_attrs,
+};
+static const struct attribute_group *sclp_drv_attr_groups[] = {
+	&sclp_drv_attr_group,
+	NULL,
+};
+
+static struct platform_driver sclp_pdrv = {
+	.driver = {
+		.name	= "sclp",
+		.pm	= &sclp_pm_ops,
+		.groups = sclp_drv_attr_groups,
+	},
+};
+
+static struct platform_device *sclp_pdev;
+
+/* Initialize SCLP driver. Return zero if driver is operational, non-zero
+ * otherwise. */
+static int
+sclp_init(void)
+{
+	unsigned long flags;
+	int rc = 0;
+
+	spin_lock_irqsave(&sclp_lock, flags);
+	/* Check for previous or running initialization */
+	if (sclp_init_state != sclp_init_state_uninitialized)
+		goto fail_unlock;
+	sclp_init_state = sclp_init_state_initializing;
+	/* Set up variables */
+	INIT_LIST_HEAD(&sclp_req_queue);
+	INIT_LIST_HEAD(&sclp_reg_list);
+	list_add(&sclp_state_change_event.list, &sclp_reg_list);
+	timer_setup(&sclp_request_timer, NULL, 0);
+	timer_setup(&sclp_queue_timer, sclp_req_queue_timeout, 0);
+	/* Check interface */
+	spin_unlock_irqrestore(&sclp_lock, flags);
+	rc = sclp_check_interface();
+	spin_lock_irqsave(&sclp_lock, flags);
+	if (rc)
+		goto fail_init_state_uninitialized;
+	/* Register reboot handler */
+	rc = register_reboot_notifier(&sclp_reboot_notifier);
+	if (rc)
+		goto fail_init_state_uninitialized;
+	/* Register interrupt handler */
+	rc = register_external_irq(EXT_IRQ_SERVICE_SIG, sclp_interrupt_handler);
+	if (rc)
+		goto fail_unregister_reboot_notifier;
+	sclp_init_state = sclp_init_state_initialized;
+	spin_unlock_irqrestore(&sclp_lock, flags);
+	/* Enable service-signal external interruption - needs to happen with
+	 * IRQs enabled. */
+	irq_subclass_register(IRQ_SUBCLASS_SERVICE_SIGNAL);
+	sclp_init_mask(1);
+	return 0;
+
+fail_unregister_reboot_notifier:
+	unregister_reboot_notifier(&sclp_reboot_notifier);
+fail_init_state_uninitialized:
+	sclp_init_state = sclp_init_state_uninitialized;
+fail_unlock:
+	spin_unlock_irqrestore(&sclp_lock, flags);
+	return rc;
+}
+
+/*
+ * SCLP panic notifier: If we are suspended, we thaw SCLP in order to be able
+ * to print the panic message.
+ */
+static int sclp_panic_notify(struct notifier_block *self,
+			     unsigned long event, void *data)
+{
+	if (sclp_suspend_state == sclp_suspend_state_suspended)
+		sclp_undo_suspend(SCLP_PM_EVENT_THAW);
+	return NOTIFY_OK;
+}
+
+static struct notifier_block sclp_on_panic_nb = {
+	.notifier_call = sclp_panic_notify,
+	.priority = SCLP_PANIC_PRIO,
+};
+
+static __init int sclp_initcall(void)
+{
+	int rc;
+
+	rc = platform_driver_register(&sclp_pdrv);
+	if (rc)
+		return rc;
+
+	sclp_pdev = platform_device_register_simple("sclp", -1, NULL, 0);
+	rc = PTR_ERR_OR_ZERO(sclp_pdev);
+	if (rc)
+		goto fail_platform_driver_unregister;
+
+	rc = atomic_notifier_chain_register(&panic_notifier_list,
+					    &sclp_on_panic_nb);
+	if (rc)
+		goto fail_platform_device_unregister;
+
+	return sclp_init();
+
+fail_platform_device_unregister:
+	platform_device_unregister(sclp_pdev);
+fail_platform_driver_unregister:
+	platform_driver_unregister(&sclp_pdrv);
+	return rc;
+}
+
+arch_initcall(sclp_initcall);
diff --git a/drivers/s390/char/sclp.h b/drivers/s390/char/sclp.h
new file mode 100644
index 0000000..1fe4918
--- /dev/null
+++ b/drivers/s390/char/sclp.h
@@ -0,0 +1,348 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright IBM Corp. 1999,2012
+ *
+ * Author(s): Martin Peschke <mpeschke@de.ibm.com>
+ *	      Martin Schwidefsky <schwidefsky@de.ibm.com>
+ */
+
+#ifndef __SCLP_H__
+#define __SCLP_H__
+
+#include <linux/types.h>
+#include <linux/list.h>
+#include <asm/sclp.h>
+#include <asm/ebcdic.h>
+
+/* maximum number of pages used by our own memory management */
+#define MAX_KMEM_PAGES (sizeof(unsigned long) << 3)
+#define SCLP_CONSOLE_PAGES	6
+
+#define SCLP_EVTYP_MASK(T) (1UL << (sizeof(sccb_mask_t) * BITS_PER_BYTE - (T)))
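+/*
+ * Event type 1 maps to the most-significant mask bit, so with the
+ * 64-bit sccb_mask_t below, SCLP_EVTYP_MASK(EVTYP_MSG) is 1UL << 62.
+ */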
+
+#define EVTYP_OPCMD		0x01
+#define EVTYP_MSG		0x02
+#define EVTYP_CONFMGMDATA	0x04
+#define EVTYP_DIAG_TEST		0x07
+#define EVTYP_STATECHANGE	0x08
+#define EVTYP_PMSGCMD		0x09
+#define EVTYP_ASYNC		0x0A
+#define EVTYP_CTLPROGIDENT	0x0B
+#define EVTYP_STORE_DATA	0x0C
+#define EVTYP_ERRNOTIFY		0x18
+#define EVTYP_VT220MSG		0x1A
+#define EVTYP_SDIAS		0x1C
+#define EVTYP_SIGQUIESCE	0x1D
+#define EVTYP_OCF		0x1E
+
+#define EVTYP_OPCMD_MASK	SCLP_EVTYP_MASK(EVTYP_OPCMD)
+#define EVTYP_MSG_MASK		SCLP_EVTYP_MASK(EVTYP_MSG)
+#define EVTYP_CONFMGMDATA_MASK	SCLP_EVTYP_MASK(EVTYP_CONFMGMDATA)
+#define EVTYP_DIAG_TEST_MASK	SCLP_EVTYP_MASK(EVTYP_DIAG_TEST)
+#define EVTYP_STATECHANGE_MASK	SCLP_EVTYP_MASK(EVTYP_STATECHANGE)
+#define EVTYP_PMSGCMD_MASK	SCLP_EVTYP_MASK(EVTYP_PMSGCMD)
+#define EVTYP_ASYNC_MASK	SCLP_EVTYP_MASK(EVTYP_ASYNC)
+#define EVTYP_CTLPROGIDENT_MASK	SCLP_EVTYP_MASK(EVTYP_CTLPROGIDENT)
+#define EVTYP_STORE_DATA_MASK	SCLP_EVTYP_MASK(EVTYP_STORE_DATA)
+#define EVTYP_ERRNOTIFY_MASK	SCLP_EVTYP_MASK(EVTYP_ERRNOTIFY)
+#define EVTYP_VT220MSG_MASK	SCLP_EVTYP_MASK(EVTYP_VT220MSG)
+#define EVTYP_SDIAS_MASK	SCLP_EVTYP_MASK(EVTYP_SDIAS)
+#define EVTYP_SIGQUIESCE_MASK	SCLP_EVTYP_MASK(EVTYP_SIGQUIESCE)
+#define EVTYP_OCF_MASK		SCLP_EVTYP_MASK(EVTYP_OCF)
+
+#define GNRLMSGFLGS_DOM		0x8000
+#define GNRLMSGFLGS_SNDALRM	0x4000
+#define GNRLMSGFLGS_HOLDMSG	0x2000
+
+#define LNTPFLGS_CNTLTEXT	0x8000
+#define LNTPFLGS_LABELTEXT	0x4000
+#define LNTPFLGS_DATATEXT	0x2000
+#define LNTPFLGS_ENDTEXT	0x1000
+#define LNTPFLGS_PROMPTTEXT	0x0800
+
+typedef unsigned int sclp_cmdw_t;
+
+#define SCLP_CMDW_READ_CPU_INFO		0x00010001
+#define SCLP_CMDW_READ_EVENT_DATA	0x00770005
+#define SCLP_CMDW_WRITE_EVENT_DATA	0x00760005
+#define SCLP_CMDW_WRITE_EVENT_MASK	0x00780005
+
+#define GDS_ID_MDSMU		0x1310
+#define GDS_ID_MDSROUTEINFO	0x1311
+#define GDS_ID_AGUNWRKCORR	0x1549
+#define GDS_ID_SNACONDREPORT	0x1532
+#define GDS_ID_CPMSU		0x1212
+#define GDS_ID_ROUTTARGINSTR	0x154D
+#define GDS_ID_OPREQ		0x8070
+#define GDS_ID_TEXTCMD		0x1320
+
+#define GDS_KEY_SELFDEFTEXTMSG	0x31
+
+enum sclp_pm_event {
+	SCLP_PM_EVENT_FREEZE,
+	SCLP_PM_EVENT_THAW,
+	SCLP_PM_EVENT_RESTORE,
+};
+
+#define SCLP_PANIC_PRIO		1
+#define SCLP_PANIC_PRIO_CLIENT	0
+
+typedef u64 sccb_mask_t;
+
+struct sccb_header {
+	u16	length;
+	u8	function_code;
+	u8	control_mask[3];
+	u16	response_code;
+} __attribute__((packed));
+
+struct init_sccb {
+	struct sccb_header header;
+	u16 _reserved;
+	u16 mask_length;
+	u8 masks[4 * 1021];	/* variable length */
+	/*
+	 * u8 receive_mask[mask_length];
+	 * u8 send_mask[mask_length];
+	 * u8 sclp_receive_mask[mask_length];
+	 * u8 sclp_send_mask[mask_length];
+	 */
+} __attribute__((packed));
+
+#define SCLP_MASK_SIZE_COMPAT 4
+
+static inline sccb_mask_t sccb_get_mask(u8 *masks, size_t len, int i)
+{
+	sccb_mask_t res = 0;
+
+	memcpy(&res, masks + i * len, min(sizeof(res), len));
+	return res;
+}
+
+static inline void sccb_set_mask(u8 *masks, size_t len, int i, sccb_mask_t val)
+{
+	memset(masks + i * len, 0, len);
+	memcpy(masks + i * len, &val, min(sizeof(val), len));
+}
+
+#define sccb_get_generic_mask(sccb, i)					\
+({									\
+	__typeof__(sccb) __sccb = sccb;					\
+									\
+	sccb_get_mask(__sccb->masks, __sccb->mask_length, i);		\
+})
+#define sccb_get_recv_mask(sccb)	sccb_get_generic_mask(sccb, 0)
+#define sccb_get_send_mask(sccb)	sccb_get_generic_mask(sccb, 1)
+#define sccb_get_sclp_recv_mask(sccb)	sccb_get_generic_mask(sccb, 2)
+#define sccb_get_sclp_send_mask(sccb)	sccb_get_generic_mask(sccb, 3)
+
+#define sccb_set_generic_mask(sccb, i, val)				\
+({									\
+	__typeof__(sccb) __sccb = sccb;					\
+									\
+	sccb_set_mask(__sccb->masks, __sccb->mask_length, i, val);	\
+})
+#define sccb_set_recv_mask(sccb, val)	    sccb_set_generic_mask(sccb, 0, val)
+#define sccb_set_send_mask(sccb, val)	    sccb_set_generic_mask(sccb, 1, val)
+#define sccb_set_sclp_recv_mask(sccb, val)  sccb_set_generic_mask(sccb, 2, val)
+#define sccb_set_sclp_send_mask(sccb, val)  sccb_set_generic_mask(sccb, 3, val)
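+
+/*
+ * Illustrative use (hypothetical caller, not code from this driver):
+ * after a WRITE_EVENT_MASK request completes, the mask of event types
+ * the SCLP can deliver would be read as
+ *
+ *	sccb_mask_t mask = sccb_get_sclp_recv_mask(sccb);
+ *
+ * The min() in sccb_get_mask()/sccb_set_mask() makes this work for
+ * both the 4-byte compat mask length and the full 8-byte masks.
+ */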
+
+struct read_cpu_info_sccb {
+	struct	sccb_header header;
+	u16	nr_configured;
+	u16	offset_configured;
+	u16	nr_standby;
+	u16	offset_standby;
+	u8	reserved[4096 - 16];
+} __attribute__((packed, aligned(PAGE_SIZE)));
+
+static inline void sclp_fill_core_info(struct sclp_core_info *info,
+				       struct read_cpu_info_sccb *sccb)
+{
+	char *page = (char *) sccb;
+
+	memset(info, 0, sizeof(*info));
+	info->configured = sccb->nr_configured;
+	info->standby = sccb->nr_standby;
+	info->combined = sccb->nr_configured + sccb->nr_standby;
+	memcpy(&info->core, page + sccb->offset_configured,
+	       info->combined * sizeof(struct sclp_core_entry));
+}
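+
+/*
+ * Note that the single memcpy() above relies on the standby entries
+ * immediately following the configured entries in the SCCB, so copying
+ * "combined" entries starting at offset_configured covers both lists.
+ */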
+
+#define SCLP_HAS_CHP_INFO	(sclp.facilities & 0x8000000000000000ULL)
+#define SCLP_HAS_CHP_RECONFIG	(sclp.facilities & 0x2000000000000000ULL)
+#define SCLP_HAS_CPU_INFO	(sclp.facilities & 0x0800000000000000ULL)
+#define SCLP_HAS_CPU_RECONFIG	(sclp.facilities & 0x0400000000000000ULL)
+#define SCLP_HAS_PCI_RECONFIG	(sclp.facilities & 0x0000000040000000ULL)
+
+struct gds_subvector {
+	u8	length;
+	u8	key;
+} __attribute__((packed));
+
+struct gds_vector {
+	u16	length;
+	u16	gds_id;
+} __attribute__((packed));
+
+struct evbuf_header {
+	u16	length;
+	u8	type;
+	u8	flags;
+	u16	_reserved;
+} __attribute__((packed));
+
+struct sclp_req {
+	struct list_head list;		/* list_head for request queueing. */
+	sclp_cmdw_t command;		/* sclp command to execute */
+	void	*sccb;			/* pointer to the sccb to execute */
+	char	status;			/* status of this request */
+	int     start_count;		/* number of SVCs done for this req */
+	/* Callback that is called after reaching final status. */
+	void (*callback)(struct sclp_req *, void *data);
+	void *callback_data;
+	int queue_timeout;		/* request queue timeout (sec), set by
+					   caller of sclp_add_request(), if
+					   needed */
+	/* Internal fields */
+	unsigned long queue_expires;	/* request queue timeout (jiffies) */
+};
+
+#define SCLP_REQ_FILLED	  0x00	/* request is ready to be processed */
+#define SCLP_REQ_QUEUED	  0x01	/* request is queued to be processed */
+#define SCLP_REQ_RUNNING  0x02	/* request is currently running */
+#define SCLP_REQ_DONE	  0x03	/* request is completed successfully */
+#define SCLP_REQ_FAILED	  0x05	/* request is finally failed */
+#define SCLP_REQ_QUEUED_TIMEOUT 0x06	/* request on queue timed out */
+
+#define SCLP_QUEUE_INTERVAL 5	/* timeout interval for request queue */
+
+/*
+ * Function pointers that a high-level driver has to use to register
+ * the routines it wants to have called by the low-level driver.
+ */
+struct sclp_register {
+	struct list_head list;
+	/* User wants to receive: */
+	sccb_mask_t receive_mask;
+	/* User wants to send: */
+	sccb_mask_t send_mask;
+	/* H/W can receive: */
+	sccb_mask_t sclp_receive_mask;
+	/* H/W can send: */
+	sccb_mask_t sclp_send_mask;
+	/* called if event type availability changes */
+	void (*state_change_fn)(struct sclp_register *);
+	/* called for events in receive_mask/sclp_receive_mask */
+	void (*receiver_fn)(struct evbuf_header *);
+	/* called for power management events */
+	void (*pm_event_fn)(struct sclp_register *, enum sclp_pm_event);
+	/* pm event posted flag */
+	int pm_event_posted;
+};
+
+/* externals from sclp.c */
+int sclp_add_request(struct sclp_req *req);
+void sclp_sync_wait(void);
+int sclp_register(struct sclp_register *reg);
+void sclp_unregister(struct sclp_register *reg);
+int sclp_remove_processed(struct sccb_header *sccb);
+int sclp_deactivate(void);
+int sclp_reactivate(void);
+int sclp_sync_request(sclp_cmdw_t command, void *sccb);
+int sclp_sync_request_timeout(sclp_cmdw_t command, void *sccb, int timeout);
+
+int sclp_sdias_init(void);
+void sclp_sdias_exit(void);
+
+enum {
+	sclp_init_state_uninitialized,
+	sclp_init_state_initializing,
+	sclp_init_state_initialized
+};
+
+extern int sclp_init_state;
+extern int sclp_console_pages;
+extern int sclp_console_drop;
+extern unsigned long sclp_console_full;
+extern bool sclp_mask_compat_mode;
+
+extern char sclp_early_sccb[PAGE_SIZE];
+
+void sclp_early_wait_irq(void);
+int sclp_early_cmd(sclp_cmdw_t cmd, void *sccb);
+unsigned int sclp_early_con_check_linemode(struct init_sccb *sccb);
+unsigned int sclp_early_con_check_vt220(struct init_sccb *sccb);
+int sclp_early_set_event_mask(struct init_sccb *sccb,
+			      sccb_mask_t receive_mask,
+			      sccb_mask_t send_mask);
+
+/* useful inlines */
+
+/* Perform service call. Return 0 on success, non-zero otherwise. */
+static inline int sclp_service_call(sclp_cmdw_t command, void *sccb)
+{
+	int cc = 4; /* Initialize for program check handling */
+
+	asm volatile(
+		"0:	.insn	rre,0xb2200000,%1,%2\n"	 /* servc %1,%2 */
+		"1:	ipm	%0\n"
+		"	srl	%0,28\n"
+		"2:\n"
+		EX_TABLE(0b, 2b)
+		EX_TABLE(1b, 2b)
+		: "+&d" (cc) : "d" (command), "a" ((unsigned long)sccb)
+		: "cc", "memory");
+	if (cc == 4)
+		return -EINVAL;
+	if (cc == 3)
+		return -EIO;
+	if (cc == 2)
+		return -EBUSY;
+	return 0;
+}
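+
+/*
+ * A note on the mapping above: condition code 2 means the SCLP is
+ * still busy with a previous request, hence -EBUSY. The initial value
+ * of 4 is never set by the instruction itself; it survives only if
+ * servc program-checked (e.g. an invalid SCCB address), in which case
+ * the exception table entries branch past the ipm and -EINVAL is
+ * returned.
+ */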
+
+/* VM uses EBCDIC 037, LPAR+native(SE+HMC) use EBCDIC 500 */
+/* translate single character from ASCII to EBCDIC */
+static inline unsigned char
+sclp_ascebc(unsigned char ch)
+{
+	return (MACHINE_IS_VM) ? _ascebc[ch] : _ascebc_500[ch];
+}
+
+/* translate string from EBCDIC to ASCII */
+static inline void
+sclp_ebcasc_str(unsigned char *str, int nr)
+{
+	(MACHINE_IS_VM) ? EBCASC(str, nr) : EBCASC_500(str, nr);
+}
+
+/* translate string from ASCII to EBCDIC */
+static inline void
+sclp_ascebc_str(unsigned char *str, int nr)
+{
+	(MACHINE_IS_VM) ? ASCEBC(str, nr) : ASCEBC_500(str, nr);
+}
+
+static inline struct gds_vector *
+sclp_find_gds_vector(void *start, void *end, u16 id)
+{
+	struct gds_vector *v;
+
+	for (v = start; (void *) v < end; v = (void *) v + v->length)
+		if (v->gds_id == id)
+			return v;
+	return NULL;
+}
+
+static inline struct gds_subvector *
+sclp_find_gds_subvector(void *start, void *end, u8 key)
+{
+	struct gds_subvector *sv;
+
+	for (sv = start; (void *) sv < end; sv = (void *) sv + sv->length)
+		if (sv->key == key)
+			return sv;
+	return NULL;
+}
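+
+/*
+ * Sketch of the intended traversal (hypothetical caller): GDS vectors
+ * nest, so a consumer of e.g. an MDS message unit would locate the
+ * outer vector first and then search its payload for a subvector:
+ *
+ *	v = sclp_find_gds_vector(start, end, GDS_ID_MDSMU);
+ *	if (v)
+ *		sv = sclp_find_gds_subvector(v + 1,
+ *					     (void *) v + v->length,
+ *					     GDS_KEY_SELFDEFTEXTMSG);
+ */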
+
+#endif	 /* __SCLP_H__ */
diff --git a/drivers/s390/char/sclp_async.c b/drivers/s390/char/sclp_async.c
new file mode 100644
index 0000000..e69b12a
--- /dev/null
+++ b/drivers/s390/char/sclp_async.c
@@ -0,0 +1,189 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Enable Asynchronous Notification via SCLP.
+ *
+ * Copyright IBM Corp. 2009
+ * Author(s): Hans-Joachim Picht <hans@linux.vnet.ibm.com>
+ *
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/stat.h>
+#include <linux/string.h>
+#include <linux/slab.h>
+#include <linux/ctype.h>
+#include <linux/kmod.h>
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/proc_fs.h>
+#include <linux/sysctl.h>
+#include <linux/utsname.h>
+#include "sclp.h"
+
+static int callhome_enabled;
+static struct sclp_req *request;
+static struct sclp_async_sccb *sccb;
+static int sclp_async_send_wait(char *message);
+static struct ctl_table_header *callhome_sysctl_header;
+static DEFINE_SPINLOCK(sclp_async_lock);
+#define SCLP_NORMAL_WRITE	0x00
+
+struct async_evbuf {
+	struct evbuf_header header;
+	u64 reserved;
+	u8 rflags;
+	u8 empty;
+	u8 rtype;
+	u8 otype;
+	char comp_id[12];
+	char data[3000]; /* there is still some space left */
+} __attribute__((packed));
+
+struct sclp_async_sccb {
+	struct sccb_header header;
+	struct async_evbuf evbuf;
+} __attribute__((packed));
+
+static struct sclp_register sclp_async_register = {
+	.send_mask = EVTYP_ASYNC_MASK,
+};
+
+static int call_home_on_panic(struct notifier_block *self,
+			      unsigned long event, void *data)
+{
+	strncat(data, init_utsname()->nodename,
+		sizeof(init_utsname()->nodename));
+	sclp_async_send_wait(data);
+	return NOTIFY_DONE;
+}
+
+static struct notifier_block call_home_panic_nb = {
+	.notifier_call = call_home_on_panic,
+	.priority = INT_MAX,
+};
+
+static int zero;
+static int one = 1;
+
+static struct ctl_table callhome_table[] = {
+	{
+		.procname	= "callhome",
+		.data		= &callhome_enabled,
+		.maxlen		= sizeof(int),
+		.mode		= 0644,
+		.proc_handler	= proc_dointvec_minmax,
+		.extra1		= &zero,
+		.extra2		= &one,
+	},
+	{}
+};
+
+static struct ctl_table kern_dir_table[] = {
+	{
+		.procname	= "kernel",
+		.maxlen		= 0,
+		.mode		= 0555,
+		.child		= callhome_table,
+	},
+	{}
+};
+
+/*
+ * Transfer an asynchronous notification record and wait for send
+ * completion.
+ */
+static int sclp_async_send_wait(char *message)
+{
+	struct async_evbuf *evb;
+	int rc;
+	unsigned long flags;
+
+	if (!callhome_enabled)
+		return 0;
+	sccb->evbuf.header.type = EVTYP_ASYNC;
+	sccb->evbuf.rtype = 0xA5;
+	sccb->evbuf.otype = 0x00;
+	evb = &sccb->evbuf;
+	request->command = SCLP_CMDW_WRITE_EVENT_DATA;
+	request->sccb = sccb;
+	request->status = SCLP_REQ_FILLED;
+	strncpy(sccb->evbuf.data, message, sizeof(sccb->evbuf.data));
+	/*
+	 * Component ID for the RETAIN queue,
+	 * e.g. "5639CC140 500 Red Hat RHEL5 Linux for zSeries (RHEL AS)"
+	 */
+	strncpy(sccb->evbuf.comp_id, CONFIG_SCLP_ASYNC_ID,
+		sizeof(sccb->evbuf.comp_id));
+	sccb->evbuf.header.length = sizeof(sccb->evbuf);
+	sccb->header.length = sizeof(sccb->evbuf) + sizeof(sccb->header);
+	sccb->header.function_code = SCLP_NORMAL_WRITE;
+	rc = sclp_add_request(request);
+	if (rc)
+		return rc;
+	spin_lock_irqsave(&sclp_async_lock, flags);
+	while (request->status != SCLP_REQ_DONE &&
+		request->status != SCLP_REQ_FAILED) {
+		sclp_sync_wait();
+	}
+	spin_unlock_irqrestore(&sclp_async_lock, flags);
+	if (request->status != SCLP_REQ_DONE)
+		return -EIO;
+	rc = ((struct sclp_async_sccb *)
+	       request->sccb)->header.response_code;
+	if (rc != 0x0020)
+		return -EIO;
+	if (evb->header.flags != 0x80)
+		return -EIO;
+	return rc;
+}
+
+static int __init sclp_async_init(void)
+{
+	int rc;
+
+	rc = sclp_register(&sclp_async_register);
+	if (rc)
+		return rc;
+	rc = -EOPNOTSUPP;
+	if (!(sclp_async_register.sclp_receive_mask & EVTYP_ASYNC_MASK))
+		goto out_sclp;
+	rc = -ENOMEM;
+	callhome_sysctl_header = register_sysctl_table(kern_dir_table);
+	if (!callhome_sysctl_header)
+		goto out_sclp;
+	request = kzalloc(sizeof(struct sclp_req), GFP_KERNEL);
+	sccb = (struct sclp_async_sccb *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
+	if (!request || !sccb)
+		goto out_mem;
+	rc = atomic_notifier_chain_register(&panic_notifier_list,
+					    &call_home_panic_nb);
+	if (!rc)
+		goto out;
+out_mem:
+	kfree(request);
+	free_page((unsigned long) sccb);
+	unregister_sysctl_table(callhome_sysctl_header);
+out_sclp:
+	sclp_unregister(&sclp_async_register);
+out:
+	return rc;
+}
+module_init(sclp_async_init);
+
+static void __exit sclp_async_exit(void)
+{
+	atomic_notifier_chain_unregister(&panic_notifier_list,
+					 &call_home_panic_nb);
+	unregister_sysctl_table(callhome_sysctl_header);
+	sclp_unregister(&sclp_async_register);
+	free_page((unsigned long) sccb);
+	kfree(request);
+}
+module_exit(sclp_async_exit);
+
+MODULE_AUTHOR("Copyright IBM Corp. 2009");
+MODULE_AUTHOR("Hans-Joachim Picht <hans@linux.vnet.ibm.com>");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("SCLP Asynchronous Notification Records");
diff --git a/drivers/s390/char/sclp_cmd.c b/drivers/s390/char/sclp_cmd.c
new file mode 100644
index 0000000..d7686a6
--- /dev/null
+++ b/drivers/s390/char/sclp_cmd.c
@@ -0,0 +1,673 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright IBM Corp. 2007,2012
+ *
+ * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>,
+ *	      Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
+ */
+
+#define KMSG_COMPONENT "sclp_cmd"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <linux/completion.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/err.h>
+#include <linux/export.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/mm.h>
+#include <linux/mmzone.h>
+#include <linux/memory.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <asm/ctl_reg.h>
+#include <asm/chpid.h>
+#include <asm/setup.h>
+#include <asm/page.h>
+#include <asm/sclp.h>
+#include <asm/numa.h>
+
+#include "sclp.h"
+
+static void sclp_sync_callback(struct sclp_req *req, void *data)
+{
+	struct completion *completion = data;
+
+	complete(completion);
+}
+
+int sclp_sync_request(sclp_cmdw_t cmd, void *sccb)
+{
+	return sclp_sync_request_timeout(cmd, sccb, 0);
+}
+
+int sclp_sync_request_timeout(sclp_cmdw_t cmd, void *sccb, int timeout)
+{
+	struct completion completion;
+	struct sclp_req *request;
+	int rc;
+
+	request = kzalloc(sizeof(*request), GFP_KERNEL);
+	if (!request)
+		return -ENOMEM;
+	if (timeout)
+		request->queue_timeout = timeout;
+	request->command = cmd;
+	request->sccb = sccb;
+	request->status = SCLP_REQ_FILLED;
+	request->callback = sclp_sync_callback;
+	request->callback_data = &completion;
+	init_completion(&completion);
+
+	/* Perform sclp request. */
+	rc = sclp_add_request(request);
+	if (rc)
+		goto out;
+	wait_for_completion(&completion);
+
+	/* Check response. */
+	if (request->status != SCLP_REQ_DONE) {
+		pr_warn("sync request failed (cmd=0x%08x, status=0x%02x)\n",
+			cmd, request->status);
+		rc = -EIO;
+	}
+out:
+	kfree(request);
+	return rc;
+}
+
+/*
+ * CPU configuration related functions.
+ */
+
+#define SCLP_CMDW_CONFIGURE_CPU		0x00110001
+#define SCLP_CMDW_DECONFIGURE_CPU	0x00100001
+
+int _sclp_get_core_info(struct sclp_core_info *info)
+{
+	int rc;
+	struct read_cpu_info_sccb *sccb;
+
+	if (!SCLP_HAS_CPU_INFO)
+		return -EOPNOTSUPP;
+	sccb = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
+	if (!sccb)
+		return -ENOMEM;
+	sccb->header.length = sizeof(*sccb);
+	rc = sclp_sync_request_timeout(SCLP_CMDW_READ_CPU_INFO, sccb,
+				       SCLP_QUEUE_INTERVAL);
+	if (rc)
+		goto out;
+	if (sccb->header.response_code != 0x0010) {
+		pr_warn("readcpuinfo failed (response=0x%04x)\n",
+			sccb->header.response_code);
+		rc = -EIO;
+		goto out;
+	}
+	sclp_fill_core_info(info, sccb);
+out:
+	free_page((unsigned long) sccb);
+	return rc;
+}
+
+struct cpu_configure_sccb {
+	struct sccb_header header;
+} __attribute__((packed, aligned(8)));
+
+static int do_core_configure(sclp_cmdw_t cmd)
+{
+	struct cpu_configure_sccb *sccb;
+	int rc;
+
+	if (!SCLP_HAS_CPU_RECONFIG)
+		return -EOPNOTSUPP;
+	/*
+	 * This is not going to cross a page boundary since we force
+	 * kmalloc to have a minimum alignment of 8 bytes on s390.
+	 */
+	sccb = kzalloc(sizeof(*sccb), GFP_KERNEL | GFP_DMA);
+	if (!sccb)
+		return -ENOMEM;
+	sccb->header.length = sizeof(*sccb);
+	rc = sclp_sync_request_timeout(cmd, sccb, SCLP_QUEUE_INTERVAL);
+	if (rc)
+		goto out;
+	switch (sccb->header.response_code) {
+	case 0x0020:
+	case 0x0120:
+		break;
+	default:
+		pr_warn("configure cpu failed (cmd=0x%08x, response=0x%04x)\n",
+			cmd, sccb->header.response_code);
+		rc = -EIO;
+		break;
+	}
+out:
+	kfree(sccb);
+	return rc;
+}
+
+int sclp_core_configure(u8 core)
+{
+	return do_core_configure(SCLP_CMDW_CONFIGURE_CPU | core << 8);
+}
+
+int sclp_core_deconfigure(u8 core)
+{
+	return do_core_configure(SCLP_CMDW_DECONFIGURE_CPU | core << 8);
+}
+
+#ifdef CONFIG_MEMORY_HOTPLUG
+
+static DEFINE_MUTEX(sclp_mem_mutex);
+static LIST_HEAD(sclp_mem_list);
+static u8 sclp_max_storage_id;
+static DECLARE_BITMAP(sclp_storage_ids, 256);
+static int sclp_mem_state_changed;
+
+struct memory_increment {
+	struct list_head list;
+	u16 rn;
+	int standby;
+};
+
+struct assign_storage_sccb {
+	struct sccb_header header;
+	u16 rn;
+} __packed;
+
+int arch_get_memory_phys_device(unsigned long start_pfn)
+{
+	if (!sclp.rzm)
+		return 0;
+	return PFN_PHYS(start_pfn) >> ilog2(sclp.rzm);
+}
+
+static unsigned long long rn2addr(u16 rn)
+{
+	return (unsigned long long) (rn - 1) * sclp.rzm;
+}
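+
+/*
+ * Increment numbers are 1-based, so rn 1 maps to address 0 and each
+ * further increment adds sclp.rzm bytes. With an (illustrative)
+ * increment size of 256MB, rn 3 would map to address 512MB.
+ */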
+
+static int do_assign_storage(sclp_cmdw_t cmd, u16 rn)
+{
+	struct assign_storage_sccb *sccb;
+	int rc;
+
+	sccb = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
+	if (!sccb)
+		return -ENOMEM;
+	sccb->header.length = PAGE_SIZE;
+	sccb->rn = rn;
+	rc = sclp_sync_request_timeout(cmd, sccb, SCLP_QUEUE_INTERVAL);
+	if (rc)
+		goto out;
+	switch (sccb->header.response_code) {
+	case 0x0020:
+	case 0x0120:
+		break;
+	default:
+		pr_warn("assign storage failed (cmd=0x%08x, response=0x%04x, rn=0x%04x)\n",
+			cmd, sccb->header.response_code, rn);
+		rc = -EIO;
+		break;
+	}
+out:
+	free_page((unsigned long) sccb);
+	return rc;
+}
+
+static int sclp_assign_storage(u16 rn)
+{
+	unsigned long long start;
+	int rc;
+
+	rc = do_assign_storage(0x000d0001, rn);
+	if (rc)
+		return rc;
+	start = rn2addr(rn);
+	storage_key_init_range(start, start + sclp.rzm);
+	return 0;
+}
+
+static int sclp_unassign_storage(u16 rn)
+{
+	return do_assign_storage(0x000c0001, rn);
+}
+
+struct attach_storage_sccb {
+	struct sccb_header header;
+	u16 :16;
+	u16 assigned;
+	u32 :32;
+	u32 entries[0];
+} __packed;
+
+static int sclp_attach_storage(u8 id)
+{
+	struct attach_storage_sccb *sccb;
+	int rc;
+	int i;
+
+	sccb = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
+	if (!sccb)
+		return -ENOMEM;
+	sccb->header.length = PAGE_SIZE;
+	sccb->header.function_code = 0x40;
+	rc = sclp_sync_request_timeout(0x00080001 | id << 8, sccb,
+				       SCLP_QUEUE_INTERVAL);
+	if (rc)
+		goto out;
+	switch (sccb->header.response_code) {
+	case 0x0020:
+		set_bit(id, sclp_storage_ids);
+		for (i = 0; i < sccb->assigned; i++) {
+			if (sccb->entries[i])
+				sclp_unassign_storage(sccb->entries[i] >> 16);
+		}
+		break;
+	default:
+		rc = -EIO;
+		break;
+	}
+out:
+	free_page((unsigned long) sccb);
+	return rc;
+}
+
+static int sclp_mem_change_state(unsigned long start, unsigned long size,
+				 int online)
+{
+	struct memory_increment *incr;
+	unsigned long long istart;
+	int rc = 0;
+
+	list_for_each_entry(incr, &sclp_mem_list, list) {
+		istart = rn2addr(incr->rn);
+		if (start + size - 1 < istart)
+			break;
+		if (start > istart + sclp.rzm - 1)
+			continue;
+		if (online)
+			rc |= sclp_assign_storage(incr->rn);
+		else
+			sclp_unassign_storage(incr->rn);
+		if (rc == 0)
+			incr->standby = online ? 0 : 1;
+	}
+	return rc ? -EIO : 0;
+}
+
+static bool contains_standby_increment(unsigned long start, unsigned long end)
+{
+	struct memory_increment *incr;
+	unsigned long istart;
+
+	list_for_each_entry(incr, &sclp_mem_list, list) {
+		istart = rn2addr(incr->rn);
+		if (end - 1 < istart)
+			continue;
+		if (start > istart + sclp.rzm - 1)
+			continue;
+		if (incr->standby)
+			return true;
+	}
+	return false;
+}
+
+static int sclp_mem_notifier(struct notifier_block *nb,
+			     unsigned long action, void *data)
+{
+	unsigned long start, size;
+	struct memory_notify *arg;
+	unsigned char id;
+	int rc = 0;
+
+	arg = data;
+	start = arg->start_pfn << PAGE_SHIFT;
+	size = arg->nr_pages << PAGE_SHIFT;
+	mutex_lock(&sclp_mem_mutex);
+	for_each_clear_bit(id, sclp_storage_ids, sclp_max_storage_id + 1)
+		sclp_attach_storage(id);
+	switch (action) {
+	case MEM_GOING_OFFLINE:
+		/*
+		 * We do not allow memory blocks that contain standby memory
+		 * to be set offline. This is done to simplify the "memory
+		 * online" case.
+		 */
+		if (contains_standby_increment(start, start + size))
+			rc = -EPERM;
+		break;
+	case MEM_ONLINE:
+	case MEM_CANCEL_OFFLINE:
+		break;
+	case MEM_GOING_ONLINE:
+		rc = sclp_mem_change_state(start, size, 1);
+		break;
+	case MEM_CANCEL_ONLINE:
+		sclp_mem_change_state(start, size, 0);
+		break;
+	case MEM_OFFLINE:
+		sclp_mem_change_state(start, size, 0);
+		break;
+	default:
+		rc = -EINVAL;
+		break;
+	}
+	if (!rc)
+		sclp_mem_state_changed = 1;
+	mutex_unlock(&sclp_mem_mutex);
+	return rc ? NOTIFY_BAD : NOTIFY_OK;
+}
+
+static struct notifier_block sclp_mem_nb = {
+	.notifier_call = sclp_mem_notifier,
+};
+
+static void __init align_to_block_size(unsigned long long *start,
+				       unsigned long long *size,
+				       unsigned long long alignment)
+{
+	unsigned long long start_align, size_align;
+
+	start_align = roundup(*start, alignment);
+	size_align = rounddown(*start + *size, alignment) - start_align;
+
+	pr_info("Standby memory at 0x%llx (%lluM of %lluM usable)\n",
+		*start, size_align >> 20, *size >> 20);
+	*start = start_align;
+	*size = size_align;
+}
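+
+/*
+ * Illustrative numbers: with a 256MB block size, a standby area of
+ * 384MB starting at 384MB is trimmed to the aligned range from 512MB
+ * to 768MB, i.e. 256MB remain usable and are reported as such.
+ */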
+
+static void __init add_memory_merged(u16 rn)
+{
+	unsigned long long start, size, addr, block_size;
+	static u16 first_rn, num;
+
+	if (rn && first_rn && (first_rn + num == rn)) {
+		num++;
+		return;
+	}
+	if (!first_rn)
+		goto skip_add;
+	start = rn2addr(first_rn);
+	size = (unsigned long long) num * sclp.rzm;
+	if (start >= VMEM_MAX_PHYS)
+		goto skip_add;
+	if (start + size > VMEM_MAX_PHYS)
+		size = VMEM_MAX_PHYS - start;
+	if (memory_end_set && (start >= memory_end))
+		goto skip_add;
+	if (memory_end_set && (start + size > memory_end))
+		size = memory_end - start;
+	block_size = memory_block_size_bytes();
+	align_to_block_size(&start, &size, block_size);
+	if (!size)
+		goto skip_add;
+	for (addr = start; addr < start + size; addr += block_size)
+		add_memory(numa_pfn_to_nid(PFN_DOWN(addr)), addr, block_size);
+skip_add:
+	first_rn = rn;
+	num = 1;
+}
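+
+/*
+ * Merging example: standby increments with consecutive rns 5, 6 and 7
+ * arrive as three calls; the first starts a run (first_rn = 5,
+ * num = 1) and the next two only bump num. The run is turned into
+ * add_memory() calls when a non-consecutive rn arrives, and the final
+ * add_memory_merged(0) in sclp_add_standby_memory() flushes the last
+ * pending run.
+ */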
+
+static void __init sclp_add_standby_memory(void)
+{
+	struct memory_increment *incr;
+
+	list_for_each_entry(incr, &sclp_mem_list, list)
+		if (incr->standby)
+			add_memory_merged(incr->rn);
+	add_memory_merged(0);
+}
+
+static void __init insert_increment(u16 rn, int standby, int assigned)
+{
+	struct memory_increment *incr, *new_incr;
+	struct list_head *prev;
+	u16 last_rn;
+
+	new_incr = kzalloc(sizeof(*new_incr), GFP_KERNEL);
+	if (!new_incr)
+		return;
+	new_incr->rn = rn;
+	new_incr->standby = standby;
+	last_rn = 0;
+	prev = &sclp_mem_list;
+	list_for_each_entry(incr, &sclp_mem_list, list) {
+		if (assigned && incr->rn > rn)
+			break;
+		if (!assigned && incr->rn - last_rn > 1)
+			break;
+		last_rn = incr->rn;
+		prev = &incr->list;
+	}
+	if (!assigned)
+		new_incr->rn = last_rn + 1;
+	if (new_incr->rn > sclp.rnmax) {
+		kfree(new_incr);
+		return;
+	}
+	list_add(&new_incr->list, prev);
+}
+
+static int sclp_mem_freeze(struct device *dev)
+{
+	if (!sclp_mem_state_changed)
+		return 0;
+	pr_err("Memory hotplug state changed, suspend refused.\n");
+	return -EPERM;
+}
+
+struct read_storage_sccb {
+	struct sccb_header header;
+	u16 max_id;
+	u16 assigned;
+	u16 standby;
+	u16 :16;
+	u32 entries[0];
+} __packed;
+
+static const struct dev_pm_ops sclp_mem_pm_ops = {
+	.freeze		= sclp_mem_freeze,
+};
+
+static struct platform_driver sclp_mem_pdrv = {
+	.driver = {
+		.name	= "sclp_mem",
+		.pm	= &sclp_mem_pm_ops,
+	},
+};
+
+static int __init sclp_detect_standby_memory(void)
+{
+	struct platform_device *sclp_pdev;
+	struct read_storage_sccb *sccb;
+	int i, id, assigned, rc;
+
+	if (OLDMEM_BASE) /* No standby memory in kdump mode */
+		return 0;
+	if ((sclp.facilities & 0xe00000000000ULL) != 0xe00000000000ULL)
+		return 0;
+	rc = -ENOMEM;
+	sccb = (void *) __get_free_page(GFP_KERNEL | GFP_DMA);
+	if (!sccb)
+		goto out;
+	assigned = 0;
+	for (id = 0; id <= sclp_max_storage_id; id++) {
+		memset(sccb, 0, PAGE_SIZE);
+		sccb->header.length = PAGE_SIZE;
+		rc = sclp_sync_request(0x00040001 | id << 8, sccb);
+		if (rc)
+			goto out;
+		switch (sccb->header.response_code) {
+		case 0x0010:
+			set_bit(id, sclp_storage_ids);
+			for (i = 0; i < sccb->assigned; i++) {
+				if (!sccb->entries[i])
+					continue;
+				assigned++;
+				insert_increment(sccb->entries[i] >> 16, 0, 1);
+			}
+			break;
+		case 0x0310:
+			break;
+		case 0x0410:
+			for (i = 0; i < sccb->assigned; i++) {
+				if (!sccb->entries[i])
+					continue;
+				assigned++;
+				insert_increment(sccb->entries[i] >> 16, 1, 1);
+			}
+			break;
+		default:
+			rc = -EIO;
+			break;
+		}
+		if (!rc)
+			sclp_max_storage_id = sccb->max_id;
+	}
+	if (rc || list_empty(&sclp_mem_list))
+		goto out;
+	for (i = 1; i <= sclp.rnmax - assigned; i++)
+		insert_increment(0, 1, 0);
+	rc = register_memory_notifier(&sclp_mem_nb);
+	if (rc)
+		goto out;
+	rc = platform_driver_register(&sclp_mem_pdrv);
+	if (rc)
+		goto out;
+	sclp_pdev = platform_device_register_simple("sclp_mem", -1, NULL, 0);
+	rc = PTR_ERR_OR_ZERO(sclp_pdev);
+	if (rc)
+		goto out_driver;
+	sclp_add_standby_memory();
+	goto out;
+out_driver:
+	platform_driver_unregister(&sclp_mem_pdrv);
+out:
+	free_page((unsigned long) sccb);
+	return rc;
+}
+__initcall(sclp_detect_standby_memory);
+
+#endif /* CONFIG_MEMORY_HOTPLUG */
+
+/*
+ * Channel path configuration related functions.
+ */
+
+#define SCLP_CMDW_CONFIGURE_CHPATH		0x000f0001
+#define SCLP_CMDW_DECONFIGURE_CHPATH		0x000e0001
+#define SCLP_CMDW_READ_CHPATH_INFORMATION	0x00030001
+
+struct chp_cfg_sccb {
+	struct sccb_header header;
+	u8 ccm;
+	u8 reserved[6];
+	u8 cssid;
+} __attribute__((packed));
+
+static int do_chp_configure(sclp_cmdw_t cmd)
+{
+	struct chp_cfg_sccb *sccb;
+	int rc;
+
+	if (!SCLP_HAS_CHP_RECONFIG)
+		return -EOPNOTSUPP;
+	/* Prepare sccb. */
+	sccb = (struct chp_cfg_sccb *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
+	if (!sccb)
+		return -ENOMEM;
+	sccb->header.length = sizeof(*sccb);
+	rc = sclp_sync_request(cmd, sccb);
+	if (rc)
+		goto out;
+	switch (sccb->header.response_code) {
+	case 0x0020:
+	case 0x0120:
+	case 0x0440:
+	case 0x0450:
+		break;
+	default:
+		pr_warn("configure channel-path failed (cmd=0x%08x, response=0x%04x)\n",
+			cmd, sccb->header.response_code);
+		rc = -EIO;
+		break;
+	}
+out:
+	free_page((unsigned long) sccb);
+	return rc;
+}
+
+/**
+ * sclp_chp_configure - perform configure channel-path sclp command
+ * @chpid: channel-path ID
+ *
+ * Perform the configure channel-path SCLP command for the specified
+ * chpid and wait for completion. Return 0 if the command finished
+ * successfully, non-zero otherwise.
+ */
+int sclp_chp_configure(struct chp_id chpid)
+{
+	return do_chp_configure(SCLP_CMDW_CONFIGURE_CHPATH | chpid.id << 8);
+}
+
+/**
+ * sclp_chp_deconfigure - perform deconfigure channel-path sclp command
+ * @chpid: channel-path ID
+ *
+ * Perform the deconfigure channel-path SCLP command for the specified
+ * chpid and wait for completion. On success return 0, non-zero
+ * otherwise.
+ */
+int sclp_chp_deconfigure(struct chp_id chpid)
+{
+	return do_chp_configure(SCLP_CMDW_DECONFIGURE_CHPATH | chpid.id << 8);
+}
+
+struct chp_info_sccb {
+	struct sccb_header header;
+	u8 recognized[SCLP_CHP_INFO_MASK_SIZE];
+	u8 standby[SCLP_CHP_INFO_MASK_SIZE];
+	u8 configured[SCLP_CHP_INFO_MASK_SIZE];
+	u8 ccm;
+	u8 reserved[6];
+	u8 cssid;
+} __attribute__((packed));
+
+/**
+ * sclp_chp_read_info - perform read channel-path information sclp command
+ * @info: resulting channel-path information data
+ *
+ * Perform read channel-path information sclp command and wait for completion.
+ * On success, store channel-path information in @info and return 0. Return
+ * non-zero otherwise.
+ */
+int sclp_chp_read_info(struct sclp_chp_info *info)
+{
+	struct chp_info_sccb *sccb;
+	int rc;
+
+	if (!SCLP_HAS_CHP_INFO)
+		return -EOPNOTSUPP;
+	/* Prepare sccb. */
+	sccb = (struct chp_info_sccb *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
+	if (!sccb)
+		return -ENOMEM;
+	sccb->header.length = sizeof(*sccb);
+	rc = sclp_sync_request(SCLP_CMDW_READ_CHPATH_INFORMATION, sccb);
+	if (rc)
+		goto out;
+	if (sccb->header.response_code != 0x0010) {
+		pr_warn("read channel-path info failed (response=0x%04x)\n",
+			sccb->header.response_code);
+		rc = -EIO;
+		goto out;
+	}
+	memcpy(info->recognized, sccb->recognized, SCLP_CHP_INFO_MASK_SIZE);
+	memcpy(info->standby, sccb->standby, SCLP_CHP_INFO_MASK_SIZE);
+	memcpy(info->configured, sccb->configured, SCLP_CHP_INFO_MASK_SIZE);
+out:
+	free_page((unsigned long) sccb);
+	return rc;
+}
diff --git a/drivers/s390/char/sclp_con.c b/drivers/s390/char/sclp_con.c
new file mode 100644
index 0000000..8966a1c
--- /dev/null
+++ b/drivers/s390/char/sclp_con.c
@@ -0,0 +1,354 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * SCLP line mode console driver
+ *
+ * Copyright IBM Corp. 1999, 2009
+ * Author(s): Martin Peschke <mpeschke@de.ibm.com>
+ *	      Martin Schwidefsky <schwidefsky@de.ibm.com>
+ */
+
+#include <linux/kmod.h>
+#include <linux/console.h>
+#include <linux/init.h>
+#include <linux/timer.h>
+#include <linux/jiffies.h>
+#include <linux/termios.h>
+#include <linux/err.h>
+#include <linux/reboot.h>
+#include <linux/gfp.h>
+
+#include "sclp.h"
+#include "sclp_rw.h"
+#include "sclp_tty.h"
+
+#define sclp_console_major 4		/* TTYAUX_MAJOR */
+#define sclp_console_minor 64
+#define sclp_console_name  "ttyS"
+
+/* Lock protecting changes to the global variables below */
+static spinlock_t sclp_con_lock;
+/* List of free pages that can be used for console output buffering */
+static struct list_head sclp_con_pages;
+/* List of full struct sclp_buffer structures ready for output */
+static struct list_head sclp_con_outqueue;
+/* Pointer to current console buffer */
+static struct sclp_buffer *sclp_conbuf;
+/* Timer for delayed output of console messages */
+static struct timer_list sclp_con_timer;
+/* Suspend mode flag */
+static int sclp_con_suspended;
+/* Flag that output queue is currently running */
+static int sclp_con_queue_running;
+
+/* Output format for console messages */
+static unsigned short sclp_con_columns;
+static unsigned short sclp_con_width_htab;
+
+static void
+sclp_conbuf_callback(struct sclp_buffer *buffer, int rc)
+{
+	unsigned long flags;
+	void *page;
+
+	do {
+		page = sclp_unmake_buffer(buffer);
+		spin_lock_irqsave(&sclp_con_lock, flags);
+
+		/* Remove buffer from outqueue */
+		list_del(&buffer->list);
+		list_add_tail((struct list_head *) page, &sclp_con_pages);
+
+		/* Check if there is a pending buffer on the out queue. */
+		buffer = NULL;
+		if (!list_empty(&sclp_con_outqueue))
+			buffer = list_first_entry(&sclp_con_outqueue,
+						  struct sclp_buffer, list);
+		if (!buffer || sclp_con_suspended) {
+			sclp_con_queue_running = 0;
+			spin_unlock_irqrestore(&sclp_con_lock, flags);
+			break;
+		}
+		spin_unlock_irqrestore(&sclp_con_lock, flags);
+	} while (sclp_emit_buffer(buffer, sclp_conbuf_callback));
+}
+
+/*
+ * Finalize and emit first pending buffer.
+ */
+static void sclp_conbuf_emit(void)
+{
+	struct sclp_buffer* buffer;
+	unsigned long flags;
+	int rc;
+
+	spin_lock_irqsave(&sclp_con_lock, flags);
+	if (sclp_conbuf)
+		list_add_tail(&sclp_conbuf->list, &sclp_con_outqueue);
+	sclp_conbuf = NULL;
+	if (sclp_con_queue_running || sclp_con_suspended)
+		goto out_unlock;
+	if (list_empty(&sclp_con_outqueue))
+		goto out_unlock;
+	buffer = list_first_entry(&sclp_con_outqueue, struct sclp_buffer,
+				  list);
+	sclp_con_queue_running = 1;
+	spin_unlock_irqrestore(&sclp_con_lock, flags);
+
+	rc = sclp_emit_buffer(buffer, sclp_conbuf_callback);
+	if (rc)
+		sclp_conbuf_callback(buffer, rc);
+	return;
+out_unlock:
+	spin_unlock_irqrestore(&sclp_con_lock, flags);
+}
+
+/*
+ * Wait until out queue is empty
+ */
+static void sclp_console_sync_queue(void)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&sclp_con_lock, flags);
+	if (timer_pending(&sclp_con_timer))
+		del_timer(&sclp_con_timer);
+	while (sclp_con_queue_running) {
+		spin_unlock_irqrestore(&sclp_con_lock, flags);
+		sclp_sync_wait();
+		spin_lock_irqsave(&sclp_con_lock, flags);
+	}
+	spin_unlock_irqrestore(&sclp_con_lock, flags);
+}
+
+/*
+ * When this routine is called from the timer, we flush the temporary
+ * write buffer without waiting any further for a final new line.
+ */
+static void
+sclp_console_timeout(struct timer_list *unused)
+{
+	sclp_conbuf_emit();
+}
+
+/*
+ * Drop the oldest console buffer if sclp_console_drop is set
+ */
+static int
+sclp_console_drop_buffer(void)
+{
+	struct list_head *list;
+	struct sclp_buffer *buffer;
+	void *page;
+
+	if (!sclp_console_drop)
+		return 0;
+	list = sclp_con_outqueue.next;
+	if (sclp_con_queue_running)
+		/* The first element is in I/O */
+		list = list->next;
+	if (list == &sclp_con_outqueue)
+		return 0;
+	list_del(list);
+	buffer = list_entry(list, struct sclp_buffer, list);
+	page = sclp_unmake_buffer(buffer);
+	list_add_tail((struct list_head *) page, &sclp_con_pages);
+	return 1;
+}
+
+/*
+ * Write the given message to the S390 system console.
+ */
+static void
+sclp_console_write(struct console *console, const char *message,
+		   unsigned int count)
+{
+	unsigned long flags;
+	void *page;
+	int written;
+
+	if (count == 0)
+		return;
+	spin_lock_irqsave(&sclp_con_lock, flags);
+	/*
+	 * process escape characters, write message into buffer,
+	 * send buffer to SCLP
+	 */
+	do {
+		/* make sure we have a console output buffer */
+		if (sclp_conbuf == NULL) {
+			if (list_empty(&sclp_con_pages))
+				sclp_console_full++;
+			while (list_empty(&sclp_con_pages)) {
+				if (sclp_con_suspended)
+					goto out;
+				if (sclp_console_drop_buffer())
+					break;
+				spin_unlock_irqrestore(&sclp_con_lock, flags);
+				sclp_sync_wait();
+				spin_lock_irqsave(&sclp_con_lock, flags);
+			}
+			page = sclp_con_pages.next;
+			list_del((struct list_head *) page);
+			sclp_conbuf = sclp_make_buffer(page, sclp_con_columns,
+						       sclp_con_width_htab);
+		}
+		/* try to write the string to the current output buffer */
+		written = sclp_write(sclp_conbuf, (const unsigned char *)
+				     message, count);
+		if (written == count)
+			break;
+		/*
+		 * Not all characters could be written to the current
+		 * output buffer. Emit the buffer, create a new buffer
+		 * and then output the rest of the string.
+		 */
+		spin_unlock_irqrestore(&sclp_con_lock, flags);
+		sclp_conbuf_emit();
+		spin_lock_irqsave(&sclp_con_lock, flags);
+		message += written;
+		count -= written;
+	} while (count > 0);
+	/* Setup timer to output current console buffer after 1/10 second */
+	if (sclp_conbuf != NULL && sclp_chars_in_buffer(sclp_conbuf) != 0 &&
+	    !timer_pending(&sclp_con_timer)) {
+		mod_timer(&sclp_con_timer, jiffies + HZ / 10);
+	}
+out:
+	spin_unlock_irqrestore(&sclp_con_lock, flags);
+}
+
+static struct tty_driver *
+sclp_console_device(struct console *c, int *index)
+{
+	*index = c->index;
+	return sclp_tty_driver;
+}
+
+/*
+ * Make sure that all buffers will be flushed to the SCLP.
+ */
+static void
+sclp_console_flush(void)
+{
+	sclp_conbuf_emit();
+	sclp_console_sync_queue();
+}
+
+/*
+ * Resume console: If there are cached messages, emit them.
+ */
+static void sclp_console_resume(void)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&sclp_con_lock, flags);
+	sclp_con_suspended = 0;
+	spin_unlock_irqrestore(&sclp_con_lock, flags);
+	sclp_conbuf_emit();
+}
+
+/*
+ * Suspend console: Set suspend flag and flush console
+ */
+static void sclp_console_suspend(void)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&sclp_con_lock, flags);
+	sclp_con_suspended = 1;
+	spin_unlock_irqrestore(&sclp_con_lock, flags);
+	sclp_console_flush();
+}
+
+static int sclp_console_notify(struct notifier_block *self,
+			       unsigned long event, void *data)
+{
+	sclp_console_flush();
+	return NOTIFY_OK;
+}
+
+static struct notifier_block on_panic_nb = {
+	.notifier_call = sclp_console_notify,
+	.priority = SCLP_PANIC_PRIO_CLIENT,
+};
+
+static struct notifier_block on_reboot_nb = {
+	.notifier_call = sclp_console_notify,
+	.priority = 1,
+};
+
+/*
+ * Used to register the SCLP console with the kernel and to give
+ * printk the necessary information.
+ */
+static struct console sclp_console =
+{
+	.name = sclp_console_name,
+	.write = sclp_console_write,
+	.device = sclp_console_device,
+	.flags = CON_PRINTBUFFER,
+	.index = 0 /* ttyS0 */
+};
+
+/*
+ * This function is called for SCLP suspend and resume events.
+ */
+void sclp_console_pm_event(enum sclp_pm_event sclp_pm_event)
+{
+	switch (sclp_pm_event) {
+	case SCLP_PM_EVENT_FREEZE:
+		sclp_console_suspend();
+		break;
+	case SCLP_PM_EVENT_RESTORE:
+	case SCLP_PM_EVENT_THAW:
+		sclp_console_resume();
+		break;
+	}
+}
+
+/*
+ * Called by console_init() in drivers/char/tty_io.c at boot time.
+ */
+static int __init
+sclp_console_init(void)
+{
+	void *page;
+	int i;
+	int rc;
+
+	/* SCLP consoles are handled together */
+	if (!(CONSOLE_IS_SCLP || CONSOLE_IS_VT220))
+		return 0;
+	rc = sclp_rw_init();
+	if (rc)
+		return rc;
+	/* Allocate pages for output buffering */
+	INIT_LIST_HEAD(&sclp_con_pages);
+	for (i = 0; i < sclp_console_pages; i++) {
+		page = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
+		list_add_tail(page, &sclp_con_pages);
+	}
+	INIT_LIST_HEAD(&sclp_con_outqueue);
+	spin_lock_init(&sclp_con_lock);
+	sclp_conbuf = NULL;
+	timer_setup(&sclp_con_timer, sclp_console_timeout, 0);
+
+	/* Set output format */
+	if (MACHINE_IS_VM)
+		/*
+		 * save 4 characters for the CPU number
+		 * written at start of each line by VM/CP
+		 */
+		sclp_con_columns = 76;
+	else
+		sclp_con_columns = 80;
+	sclp_con_width_htab = 8;
+
+	/* enable printk-access to this driver */
+	atomic_notifier_chain_register(&panic_notifier_list, &on_panic_nb);
+	register_reboot_notifier(&on_reboot_nb);
+	register_console(&sclp_console);
+	return 0;
+}
+
+console_initcall(sclp_console_init);
diff --git a/drivers/s390/char/sclp_config.c b/drivers/s390/char/sclp_config.c
new file mode 100644
index 0000000..194ffd5
--- /dev/null
+++ b/drivers/s390/char/sclp_config.c
@@ -0,0 +1,178 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ *    Copyright IBM Corp. 2007
+ *    Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
+ */
+
+#define KMSG_COMPONENT "sclp_config"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/cpu.h>
+#include <linux/device.h>
+#include <linux/workqueue.h>
+#include <linux/slab.h>
+#include <linux/sysfs.h>
+#include <asm/smp.h>
+
+#include "sclp.h"
+
+struct conf_mgm_data {
+	u8 reserved;
+	u8 ev_qualifier;
+} __attribute__((packed));
+
+#define OFB_DATA_MAX 64
+
+struct sclp_ofb_evbuf {
+	struct evbuf_header header;
+	struct conf_mgm_data cm_data;
+	char ev_data[OFB_DATA_MAX];
+} __packed;
+
+struct sclp_ofb_sccb {
+	struct sccb_header header;
+	struct sclp_ofb_evbuf ofb_evbuf;
+} __packed;
+
+#define EV_QUAL_CPU_CHANGE	1
+#define EV_QUAL_CAP_CHANGE	3
+#define EV_QUAL_OPEN4BUSINESS	5
+
+static struct work_struct sclp_cpu_capability_work;
+static struct work_struct sclp_cpu_change_work;
+
+static void sclp_cpu_capability_notify(struct work_struct *work)
+{
+	int cpu;
+	struct device *dev;
+
+	s390_update_cpu_mhz();
+	pr_info("CPU capability may have changed\n");
+	get_online_cpus();
+	for_each_online_cpu(cpu) {
+		dev = get_cpu_device(cpu);
+		kobject_uevent(&dev->kobj, KOBJ_CHANGE);
+	}
+	put_online_cpus();
+}
+
+static void __ref sclp_cpu_change_notify(struct work_struct *work)
+{
+	smp_rescan_cpus();
+}
+
+static void sclp_conf_receiver_fn(struct evbuf_header *evbuf)
+{
+	struct conf_mgm_data *cdata;
+
+	cdata = (struct conf_mgm_data *)(evbuf + 1);
+	switch (cdata->ev_qualifier) {
+	case EV_QUAL_CPU_CHANGE:
+		schedule_work(&sclp_cpu_change_work);
+		break;
+	case EV_QUAL_CAP_CHANGE:
+		schedule_work(&sclp_cpu_capability_work);
+		break;
+	}
+}
+
+static struct sclp_register sclp_conf_register =
+{
+#ifdef CONFIG_SCLP_OFB
+	.send_mask    = EVTYP_CONFMGMDATA_MASK,
+#endif
+	.receive_mask = EVTYP_CONFMGMDATA_MASK,
+	.receiver_fn  = sclp_conf_receiver_fn,
+};
+
+#ifdef CONFIG_SCLP_OFB
+static int sclp_ofb_send_req(char *ev_data, size_t len)
+{
+	static DEFINE_MUTEX(send_mutex);
+	struct sclp_ofb_sccb *sccb;
+	int rc, response;
+
+	if (len > OFB_DATA_MAX)
+		return -EINVAL;
+	sccb = (struct sclp_ofb_sccb *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
+	if (!sccb)
+		return -ENOMEM;
+	/* Set up the SCCB for the Open for Business event */
+	sccb->header.length = sizeof(struct sclp_ofb_sccb);
+	sccb->ofb_evbuf.header.length = sizeof(struct sclp_ofb_evbuf);
+	sccb->ofb_evbuf.header.type = EVTYP_CONFMGMDATA;
+	sccb->ofb_evbuf.cm_data.ev_qualifier = EV_QUAL_OPEN4BUSINESS;
+	memcpy(sccb->ofb_evbuf.ev_data, ev_data, len);
+
+	if (!(sclp_conf_register.sclp_receive_mask & EVTYP_CONFMGMDATA_MASK))
+		pr_warn("SCLP receiver did not register to receive "
+			"Configuration Management Data Events.\n");
+
+	mutex_lock(&send_mutex);
+	rc = sclp_sync_request(SCLP_CMDW_WRITE_EVENT_DATA, sccb);
+	mutex_unlock(&send_mutex);
+	if (rc)
+		goto out;
+	response = sccb->header.response_code;
+	if (response != 0x0020) {
+		pr_err("Open for Business request failed with response code "
+		       "0x%04x\n", response);
+		rc = -EIO;
+	}
+out:
+	free_page((unsigned long)sccb);
+	return rc;
+}
+
+static ssize_t sysfs_ofb_data_write(struct file *filp, struct kobject *kobj,
+				    struct bin_attribute *bin_attr,
+				    char *buf, loff_t off, size_t count)
+{
+	int rc;
+
+	rc = sclp_ofb_send_req(buf, count);
+	return rc ?: count;
+}
+
+static const struct bin_attribute ofb_bin_attr = {
+	.attr = {
+		.name = "event_data",
+		.mode = S_IWUSR,
+	},
+	.write = sysfs_ofb_data_write,
+};
+#endif
+
+static int __init sclp_ofb_setup(void)
+{
+#ifdef CONFIG_SCLP_OFB
+	struct kset *ofb_kset;
+	int rc;
+
+	ofb_kset = kset_create_and_add("ofb", NULL, firmware_kobj);
+	if (!ofb_kset)
+		return -ENOMEM;
+	rc = sysfs_create_bin_file(&ofb_kset->kobj, &ofb_bin_attr);
+	if (rc) {
+		kset_unregister(ofb_kset);
+		return rc;
+	}
+#endif
+	return 0;
+}
+
+static int __init sclp_conf_init(void)
+{
+	int rc;
+
+	INIT_WORK(&sclp_cpu_capability_work, sclp_cpu_capability_notify);
+	INIT_WORK(&sclp_cpu_change_work, sclp_cpu_change_notify);
+	rc = sclp_register(&sclp_conf_register);
+	if (rc)
+		return rc;
+	return sclp_ofb_setup();
+}
+
+__initcall(sclp_conf_init);
diff --git a/drivers/s390/char/sclp_cpi_sys.c b/drivers/s390/char/sclp_cpi_sys.c
new file mode 100644
index 0000000..f60d7ea
--- /dev/null
+++ b/drivers/s390/char/sclp_cpi_sys.c
@@ -0,0 +1,428 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ *    SCLP control program identification sysfs interface
+ *
+ *    Copyright IBM Corp. 2001, 2007
+ *    Author(s): Martin Peschke <mpeschke@de.ibm.com>
+ *		 Michael Ernst <mernst@de.ibm.com>
+ */
+
+#define KMSG_COMPONENT "sclp_cpi"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/stat.h>
+#include <linux/device.h>
+#include <linux/string.h>
+#include <linux/ctype.h>
+#include <linux/kmod.h>
+#include <linux/timer.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/completion.h>
+#include <linux/export.h>
+#include <asm/ebcdic.h>
+#include <asm/sclp.h>
+
+#include "sclp.h"
+#include "sclp_rw.h"
+#include "sclp_cpi_sys.h"
+
+#define CPI_LENGTH_NAME 8
+#define CPI_LENGTH_LEVEL 16
+
+static DEFINE_MUTEX(sclp_cpi_mutex);
+
+struct cpi_evbuf {
+	struct evbuf_header header;
+	u8	id_format;
+	u8	reserved0;
+	u8	system_type[CPI_LENGTH_NAME];
+	u64	reserved1;
+	u8	system_name[CPI_LENGTH_NAME];
+	u64	reserved2;
+	u64	system_level;
+	u64	reserved3;
+	u8	sysplex_name[CPI_LENGTH_NAME];
+	u8	reserved4[16];
+} __attribute__((packed));
+
+struct cpi_sccb {
+	struct sccb_header header;
+	struct cpi_evbuf cpi_evbuf;
+} __attribute__((packed));
+
+static struct sclp_register sclp_cpi_event = {
+	.send_mask = EVTYP_CTLPROGIDENT_MASK,
+};
+
+static char system_name[CPI_LENGTH_NAME + 1];
+static char sysplex_name[CPI_LENGTH_NAME + 1];
+static char system_type[CPI_LENGTH_NAME + 1];
+static u64 system_level;
+
+static void set_data(char *field, char *data)
+{
+	memset(field, ' ', CPI_LENGTH_NAME);
+	memcpy(field, data, strlen(data));
+	sclp_ascebc_str(field, CPI_LENGTH_NAME);
+}
+
+static void cpi_callback(struct sclp_req *req, void *data)
+{
+	struct completion *completion = data;
+
+	complete(completion);
+}
+
+static struct sclp_req *cpi_prepare_req(void)
+{
+	struct sclp_req *req;
+	struct cpi_sccb *sccb;
+	struct cpi_evbuf *evb;
+
+	req = kzalloc(sizeof(struct sclp_req), GFP_KERNEL);
+	if (!req)
+		return ERR_PTR(-ENOMEM);
+	sccb = (struct cpi_sccb *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
+	if (!sccb) {
+		kfree(req);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	/* setup SCCB for Control-Program Identification */
+	sccb->header.length = sizeof(struct cpi_sccb);
+	sccb->cpi_evbuf.header.length = sizeof(struct cpi_evbuf);
+	sccb->cpi_evbuf.header.type = EVTYP_CTLPROGIDENT;
+	evb = &sccb->cpi_evbuf;
+
+	/* set system type */
+	set_data(evb->system_type, system_type);
+
+	/* set system name */
+	set_data(evb->system_name, system_name);
+
+	/* set system level */
+	evb->system_level = system_level;
+
+	/* set sysplex name */
+	set_data(evb->sysplex_name, sysplex_name);
+
+	/* prepare request data structure presented to SCLP driver */
+	req->command = SCLP_CMDW_WRITE_EVENT_DATA;
+	req->sccb = sccb;
+	req->status = SCLP_REQ_FILLED;
+	req->callback = cpi_callback;
+	return req;
+}
+
+static void cpi_free_req(struct sclp_req *req)
+{
+	free_page((unsigned long) req->sccb);
+	kfree(req);
+}
+
+static int cpi_req(void)
+{
+	struct completion completion;
+	struct sclp_req *req;
+	int rc;
+	int response;
+
+	rc = sclp_register(&sclp_cpi_event);
+	if (rc)
+		goto out;
+	if (!(sclp_cpi_event.sclp_receive_mask & EVTYP_CTLPROGIDENT_MASK)) {
+		rc = -EOPNOTSUPP;
+		goto out_unregister;
+	}
+
+	req = cpi_prepare_req();
+	if (IS_ERR(req)) {
+		rc = PTR_ERR(req);
+		goto out_unregister;
+	}
+
+	init_completion(&completion);
+	req->callback_data = &completion;
+
+	/* Add request to sclp queue */
+	rc = sclp_add_request(req);
+	if (rc)
+		goto out_free_req;
+
+	wait_for_completion(&completion);
+
+	if (req->status != SCLP_REQ_DONE) {
+		pr_warn("request failed (status=0x%02x)\n", req->status);
+		rc = -EIO;
+		goto out_free_req;
+	}
+
+	response = ((struct cpi_sccb *) req->sccb)->header.response_code;
+	if (response != 0x0020) {
+		pr_warn("request failed with response code 0x%x\n", response);
+		rc = -EIO;
+	}
+
+out_free_req:
+	cpi_free_req(req);
+
+out_unregister:
+	sclp_unregister(&sclp_cpi_event);
+
+out:
+	return rc;
+}
+
+static int check_string(const char *attr, const char *str)
+{
+	size_t len;
+	size_t i;
+
+	len = strlen(str);
+
+	if ((len > 0) && (str[len - 1] == '\n'))
+		len--;
+
+	if (len > CPI_LENGTH_NAME)
+		return -EINVAL;
+
+	for (i = 0; i < len ; i++) {
+		if (isalpha(str[i]) || isdigit(str[i]) ||
+		    strchr("$@# ", str[i]))
+			continue;
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static void set_string(char *attr, const char *value)
+{
+	size_t len;
+	size_t i;
+
+	len = strlen(value);
+
+	if ((len > 0) && (value[len - 1] == '\n'))
+		len--;
+
+	for (i = 0; i < CPI_LENGTH_NAME; i++) {
+		if (i < len)
+			attr[i] = toupper(value[i]);
+		else
+			attr[i] = ' ';
+	}
+}
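+
+/*
+ * Example: set_string(system_name, "linux1\n") stores "LINUX1  ",
+ * i.e. the trailing newline is dropped, the value is uppercased and
+ * blank-padded to CPI_LENGTH_NAME bytes (the terminating zero of the
+ * static buffers is never touched).
+ */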
+
+static ssize_t system_name_show(struct kobject *kobj,
+				struct kobj_attribute *attr, char *page)
+{
+	int rc;
+
+	mutex_lock(&sclp_cpi_mutex);
+	rc = snprintf(page, PAGE_SIZE, "%s\n", system_name);
+	mutex_unlock(&sclp_cpi_mutex);
+	return rc;
+}
+
+static ssize_t system_name_store(struct kobject *kobj,
+				 struct kobj_attribute *attr,
+				 const char *buf,
+				 size_t len)
+{
+	int rc;
+
+	rc = check_string("system_name", buf);
+	if (rc)
+		return rc;
+
+	mutex_lock(&sclp_cpi_mutex);
+	set_string(system_name, buf);
+	mutex_unlock(&sclp_cpi_mutex);
+
+	return len;
+}
+
+static struct kobj_attribute system_name_attr =
+	__ATTR(system_name, 0644, system_name_show, system_name_store);
+
+static ssize_t sysplex_name_show(struct kobject *kobj,
+				 struct kobj_attribute *attr, char *page)
+{
+	int rc;
+
+	mutex_lock(&sclp_cpi_mutex);
+	rc = snprintf(page, PAGE_SIZE, "%s\n", sysplex_name);
+	mutex_unlock(&sclp_cpi_mutex);
+	return rc;
+}
+
+static ssize_t sysplex_name_store(struct kobject *kobj,
+				  struct kobj_attribute *attr,
+				  const char *buf,
+				  size_t len)
+{
+	int rc;
+
+	rc = check_string("sysplex_name", buf);
+	if (rc)
+		return rc;
+
+	mutex_lock(&sclp_cpi_mutex);
+	set_string(sysplex_name, buf);
+	mutex_unlock(&sclp_cpi_mutex);
+
+	return len;
+}
+
+static struct kobj_attribute sysplex_name_attr =
+	__ATTR(sysplex_name, 0644, sysplex_name_show, sysplex_name_store);
+
+static ssize_t system_type_show(struct kobject *kobj,
+				struct kobj_attribute *attr, char *page)
+{
+	int rc;
+
+	mutex_lock(&sclp_cpi_mutex);
+	rc = snprintf(page, PAGE_SIZE, "%s\n", system_type);
+	mutex_unlock(&sclp_cpi_mutex);
+	return rc;
+}
+
+static ssize_t system_type_store(struct kobject *kobj,
+				 struct kobj_attribute *attr,
+				 const char *buf,
+				 size_t len)
+{
+	int rc;
+
+	rc = check_string("system_type", buf);
+	if (rc)
+		return rc;
+
+	mutex_lock(&sclp_cpi_mutex);
+	set_string(system_type, buf);
+	mutex_unlock(&sclp_cpi_mutex);
+
+	return len;
+}
+
+static struct kobj_attribute system_type_attr =
+	__ATTR(system_type, 0644, system_type_show, system_type_store);
+
+static ssize_t system_level_show(struct kobject *kobj,
+				 struct kobj_attribute *attr, char *page)
+{
+	unsigned long long level;
+
+	mutex_lock(&sclp_cpi_mutex);
+	level = system_level;
+	mutex_unlock(&sclp_cpi_mutex);
+	return snprintf(page, PAGE_SIZE, "%#018llx\n", level);
+}
+
+static ssize_t system_level_store(struct kobject *kobj,
+				  struct kobj_attribute *attr,
+				  const char *buf,
+				  size_t len)
+{
+	unsigned long long level;
+	char *endp;
+
+	level = simple_strtoull(buf, &endp, 16);
+
+	if (endp == buf)
+		return -EINVAL;
+	if (*endp == '\n')
+		endp++;
+	if (*endp)
+		return -EINVAL;
+
+	mutex_lock(&sclp_cpi_mutex);
+	system_level = level;
+	mutex_unlock(&sclp_cpi_mutex);
+	return len;
+}
+
+static struct kobj_attribute system_level_attr =
+	__ATTR(system_level, 0644, system_level_show, system_level_store);
+
+static ssize_t set_store(struct kobject *kobj,
+			 struct kobj_attribute *attr,
+			 const char *buf, size_t len)
+{
+	int rc;
+
+	mutex_lock(&sclp_cpi_mutex);
+	rc = cpi_req();
+	mutex_unlock(&sclp_cpi_mutex);
+	if (rc)
+		return rc;
+
+	return len;
+}
+
+static struct kobj_attribute set_attr = __ATTR(set, 0200, NULL, set_store);
+
+static struct attribute *cpi_attrs[] = {
+	&system_name_attr.attr,
+	&sysplex_name_attr.attr,
+	&system_type_attr.attr,
+	&system_level_attr.attr,
+	&set_attr.attr,
+	NULL,
+};
+
+static struct attribute_group cpi_attr_group = {
+	.attrs = cpi_attrs,
+};
+
+static struct kset *cpi_kset;
+
+int sclp_cpi_set_data(const char *system, const char *sysplex, const char *type,
+		      const u64 level)
+{
+	int rc;
+
+	rc = check_string("system_name", system);
+	if (rc)
+		return rc;
+	rc = check_string("sysplex_name", sysplex);
+	if (rc)
+		return rc;
+	rc = check_string("system_type", type);
+	if (rc)
+		return rc;
+
+	mutex_lock(&sclp_cpi_mutex);
+	set_string(system_name, system);
+	set_string(sysplex_name, sysplex);
+	set_string(system_type, type);
+	system_level = level;
+
+	rc = cpi_req();
+	mutex_unlock(&sclp_cpi_mutex);
+
+	return rc;
+}
+EXPORT_SYMBOL(sclp_cpi_set_data);
+
+static int __init cpi_init(void)
+{
+	int rc;
+
+	cpi_kset = kset_create_and_add("cpi", NULL, firmware_kobj);
+	if (!cpi_kset)
+		return -ENOMEM;
+
+	rc = sysfs_create_group(&cpi_kset->kobj, &cpi_attr_group);
+	if (rc)
+		kset_unregister(cpi_kset);
+
+	return rc;
+}
+
+__initcall(cpi_init);
diff --git a/drivers/s390/char/sclp_cpi_sys.h b/drivers/s390/char/sclp_cpi_sys.h
new file mode 100644
index 0000000..edf60d1
--- /dev/null
+++ b/drivers/s390/char/sclp_cpi_sys.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ *    SCLP control program identification sysfs interface
+ *
+ *    Copyright IBM Corp. 2007
+ *    Author(s): Michael Ernst <mernst@de.ibm.com>
+ */
+
+#ifndef __SCLP_CPI_SYS_H__
+#define __SCLP_CPI_SYS_H__
+
+int sclp_cpi_set_data(const char *system, const char *sysplex,
+		      const char *type, u64 level);
+
+#endif	 /* __SCLP_CPI_SYS_H__ */
diff --git a/drivers/s390/char/sclp_ctl.c b/drivers/s390/char/sclp_ctl.c
new file mode 100644
index 0000000..248b5db
--- /dev/null
+++ b/drivers/s390/char/sclp_ctl.c
@@ -0,0 +1,129 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * IOCTL interface for SCLP
+ *
+ * Copyright IBM Corp. 2012
+ *
+ * Author: Michael Holzheu <holzheu@linux.vnet.ibm.com>
+ */
+
+#include <linux/compat.h>
+#include <linux/uaccess.h>
+#include <linux/miscdevice.h>
+#include <linux/gfp.h>
+#include <linux/init.h>
+#include <linux/ioctl.h>
+#include <linux/fs.h>
+#include <asm/sclp_ctl.h>
+#include <asm/sclp.h>
+
+#include "sclp.h"
+
+/*
+ * Supported command words
+ */
+static unsigned int sclp_ctl_sccb_wlist[] = {
+	0x00400002,
+	0x00410002,
+};
+
+/*
+ * Check if command word is supported
+ */
+static int sclp_ctl_cmdw_supported(unsigned int cmdw)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(sclp_ctl_sccb_wlist); i++) {
+		if (cmdw == sclp_ctl_sccb_wlist[i])
+			return 1;
+	}
+	return 0;
+}
+
+static void __user *u64_to_uptr(u64 value)
+{
+	if (is_compat_task())
+		return compat_ptr(value);
+	else
+		return (void __user *)(unsigned long)value;
+}
+
+/*
+ * Start SCLP request
+ */
+static int sclp_ctl_ioctl_sccb(void __user *user_area)
+{
+	struct sclp_ctl_sccb ctl_sccb;
+	struct sccb_header *sccb;
+	unsigned long copied;
+	int rc;
+
+	if (copy_from_user(&ctl_sccb, user_area, sizeof(ctl_sccb)))
+		return -EFAULT;
+	if (!sclp_ctl_cmdw_supported(ctl_sccb.cmdw))
+		return -EOPNOTSUPP;
+	sccb = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
+	if (!sccb)
+		return -ENOMEM;
+	copied = PAGE_SIZE -
+		copy_from_user(sccb, u64_to_uptr(ctl_sccb.sccb), PAGE_SIZE);
+	if (offsetof(struct sccb_header, length) +
+	    sizeof(sccb->length) > copied || sccb->length > copied) {
+		rc = -EFAULT;
+		goto out_free;
+	}
+	if (sccb->length < 8) {
+		rc = -EINVAL;
+		goto out_free;
+	}
+	rc = sclp_sync_request(ctl_sccb.cmdw, sccb);
+	if (rc)
+		goto out_free;
+	if (copy_to_user(u64_to_uptr(ctl_sccb.sccb), sccb, sccb->length))
+		rc = -EFAULT;
+out_free:
+	free_page((unsigned long) sccb);
+	return rc;
+}
+
+/*
+ * SCLP SCCB ioctl function
+ */
+static long sclp_ctl_ioctl(struct file *filp, unsigned int cmd,
+			   unsigned long arg)
+{
+	void __user *argp;
+
+	if (is_compat_task())
+		argp = compat_ptr(arg);
+	else
+		argp = (void __user *) arg;
+	switch (cmd) {
+	case SCLP_CTL_SCCB:
+		return sclp_ctl_ioctl_sccb(argp);
+	default: /* unknown ioctl number */
+		return -ENOTTY;
+	}
+}
+
+/*
+ * File operations
+ */
+static const struct file_operations sclp_ctl_fops = {
+	.owner = THIS_MODULE,
+	.open = nonseekable_open,
+	.unlocked_ioctl = sclp_ctl_ioctl,
+	.compat_ioctl = sclp_ctl_ioctl,
+	.llseek = no_llseek,
+};
+
+/*
+ * Misc device definition
+ */
+static struct miscdevice sclp_ctl_device = {
+	.minor = MISC_DYNAMIC_MINOR,
+	.name = "sclp",
+	.fops = &sclp_ctl_fops,
+};
+builtin_misc_device(sclp_ctl_device);
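
A user-space sketch of this ioctl interface follows. It is hypothetical: the device path /dev/sclp is derived from the misc device name above, struct sclp_ctl_sccb and SCLP_CTL_SCCB are assumed to come from the installed uapi header <asm/sclp_ctl.h>, and the 16-bit SCCB length field is assumed to sit at offset 0, as the offsetof() check in sclp_ctl_ioctl_sccb() suggests. The command word is one of the two whitelisted values.

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <asm/sclp_ctl.h>

int main(void)
{
	/* One page for the SCCB; the kernel copies at most PAGE_SIZE bytes. */
	static struct {
		uint16_t length;		/* sccb_header.length */
		uint8_t body[4094];
	} sccb = { .length = sizeof(sccb) };
	struct sclp_ctl_sccb ctl_sccb = {
		.cmdw = 0x00400002,		/* whitelisted command word */
		.sccb = (uint64_t) (unsigned long) &sccb,
	};
	int fd = open("/dev/sclp", O_RDWR);

	if (fd < 0 || ioctl(fd, SCLP_CTL_SCCB, &ctl_sccb) < 0) {
		perror("sclp");
		return 1;
	}
	close(fd);
	return 0;
}
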
diff --git a/drivers/s390/char/sclp_diag.h b/drivers/s390/char/sclp_diag.h
new file mode 100644
index 0000000..796c531
--- /dev/null
+++ b/drivers/s390/char/sclp_diag.h
@@ -0,0 +1,90 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ *    Copyright IBM Corp. 2013
+ *    Author(s): Ralf Hoppe (rhoppe@de.ibm.com)
+ */
+
+#ifndef _SCLP_DIAG_H
+#define _SCLP_DIAG_H
+
+#include <linux/types.h>
+
+/* return codes for Diagnostic Test FTP Service, as indicated in member
+ * sclp_diag_ftp::ldflg
+ */
+#define SCLP_DIAG_FTP_OK	0x80U /* success */
+#define SCLP_DIAG_FTP_LDFAIL	0x01U /* load failed */
+#define SCLP_DIAG_FTP_LDNPERM	0x02U /* not allowed */
+#define SCLP_DIAG_FTP_LDRUNS	0x03U /* LD runs */
+#define SCLP_DIAG_FTP_LDNRUNS	0x04U /* LD does not run */
+
+#define SCLP_DIAG_FTP_XPCX	0x80 /* PCX communication code */
+#define SCLP_DIAG_FTP_ROUTE	4 /* routing code for new FTP service */
+
+/*
+ * length of Diagnostic Test FTP Service event buffer
+ */
+#define SCLP_DIAG_FTP_EVBUF_LEN				\
+	(offsetof(struct sclp_diag_evbuf, mdd) +	\
+	 sizeof(struct sclp_diag_ftp))
+
+/**
+ * struct sclp_diag_ftp - Diagnostic Test FTP Service model-dependent data
+ * @pcx: code for PCX communication (should be 0x80)
+ * @ldflg: load flag (see defines above)
+ * @cmd: FTP command
+ * @pgsize: page size (0 = 4kB, 1 = large page size)
+ * @srcflg: source flag
+ * @spare: reserved (zeroes)
+ * @offset: file offset
+ * @fsize: file size
+ * @length: buffer size or number of bytes transferred, respectively
+ * @failaddr: failing address
+ * @bufaddr: buffer address, virtual
+ * @asce: region or segment table designation
+ * @fident: file name (ASCII, zero-terminated)
+ */
+struct sclp_diag_ftp {
+	u8 pcx;
+	u8 ldflg;
+	u8 cmd;
+	u8 pgsize;
+	u8 srcflg;
+	u8 spare;
+	u64 offset;
+	u64 fsize;
+	u64 length;
+	u64 failaddr;
+	u64 bufaddr;
+	u64 asce;
+
+	u8 fident[256];
+} __packed;
+
+/**
+ * struct sclp_diag_evbuf - Diagnostic Test (ET7) Event Buffer
+ * @hdr: event buffer header
+ * @route: diagnostic route
+ * @mdd: model-dependent data (@route dependent)
+ */
+struct sclp_diag_evbuf {
+	struct evbuf_header hdr;
+	u16 route;
+
+	union {
+		struct sclp_diag_ftp ftp;
+	} mdd;
+} __packed;
+
+/**
+ * struct sclp_diag_sccb - Diagnostic Test (ET7) SCCB
+ * @hdr: SCCB header
+ * @evbuf: event buffer
+ */
+struct sclp_diag_sccb {
+
+	struct sccb_header hdr;
+	struct sclp_diag_evbuf evbuf;
+} __packed;
+
+#endif /* _SCLP_DIAG_H */
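
Because SCLP_DIAG_FTP_EVBUF_LEN depends on the exact layout of the packed structures above, a compile-time sanity check along the following lines can guard the layout. This is a hypothetical sketch, not part of the driver; struct evbuf_header comes from "sclp.h".

#include <linux/bug.h>

/* Hypothetical layout check for the ET7 event buffer length. */
static inline void sclp_diag_check_layout(void)
{
	BUILD_BUG_ON(SCLP_DIAG_FTP_EVBUF_LEN !=
		     sizeof(struct evbuf_header) + sizeof(u16) +
		     sizeof(struct sclp_diag_ftp));
}
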
diff --git a/drivers/s390/char/sclp_early.c b/drivers/s390/char/sclp_early.c
new file mode 100644
index 0000000..9a74abb
--- /dev/null
+++ b/drivers/s390/char/sclp_early.c
@@ -0,0 +1,273 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * SCLP early driver
+ *
+ * Copyright IBM Corp. 2013
+ */
+
+#define KMSG_COMPONENT "sclp_early"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <linux/errno.h>
+#include <asm/ctl_reg.h>
+#include <asm/sclp.h>
+#include <asm/ipl.h>
+#include "sclp_sdias.h"
+#include "sclp.h"
+
+#define SCLP_CMDW_READ_SCP_INFO		0x00020001
+#define SCLP_CMDW_READ_SCP_INFO_FORCED	0x00120001
+
+struct read_info_sccb {
+	struct	sccb_header header;	/* 0-7 */
+	u16	rnmax;			/* 8-9 */
+	u8	rnsize;			/* 10 */
+	u8	_pad_11[16 - 11];	/* 11-15 */
+	u16	ncpurl;			/* 16-17 */
+	u16	cpuoff;			/* 18-19 */
+	u8	_pad_20[24 - 20];	/* 20-23 */
+	u8	loadparm[8];		/* 24-31 */
+	u8	_pad_32[42 - 32];	/* 32-41 */
+	u8	fac42;			/* 42 */
+	u8	fac43;			/* 43 */
+	u8	_pad_44[48 - 44];	/* 44-47 */
+	u64	facilities;		/* 48-55 */
+	u8	_pad_56[66 - 56];	/* 56-65 */
+	u8	fac66;			/* 66 */
+	u8	_pad_67[76 - 67];	/* 67-75 */
+	u32	ibc;			/* 76-79 */
+	u8	_pad80[84 - 80];	/* 80-83 */
+	u8	fac84;			/* 84 */
+	u8	fac85;			/* 85 */
+	u8	_pad_86[91 - 86];	/* 86-90 */
+	u8	fac91;			/* 91 */
+	u8	_pad_92[98 - 92];	/* 92-97 */
+	u8	fac98;			/* 98 */
+	u8	hamaxpow;		/* 99 */
+	u32	rnsize2;		/* 100-103 */
+	u64	rnmax2;			/* 104-111 */
+	u8	_pad_112[116 - 112];	/* 112-115 */
+	u8	fac116;			/* 116 */
+	u8	fac117;			/* 117 */
+	u8	fac118;			/* 118 */
+	u8	fac119;			/* 119 */
+	u16	hcpua;			/* 120-121 */
+	u8	_pad_122[124 - 122];	/* 122-123 */
+	u32	hmfai;			/* 124-127 */
+	u8	_pad_128[4096 - 128];	/* 128-4095 */
+} __packed __aligned(PAGE_SIZE);
+
+static struct sclp_ipl_info sclp_ipl_info;
+
+struct sclp_info sclp;
+EXPORT_SYMBOL(sclp);
+
+static int __init sclp_early_read_info(struct read_info_sccb *sccb)
+{
+	int i;
+	sclp_cmdw_t commands[] = {SCLP_CMDW_READ_SCP_INFO_FORCED,
+				  SCLP_CMDW_READ_SCP_INFO};
+
+	for (i = 0; i < ARRAY_SIZE(commands); i++) {
+		memset(sccb, 0, sizeof(*sccb));
+		sccb->header.length = sizeof(*sccb);
+		sccb->header.function_code = 0x80;
+		sccb->header.control_mask[2] = 0x80;
+		if (sclp_early_cmd(commands[i], sccb))
+			break;
+		if (sccb->header.response_code == 0x10)
+			return 0;
+		if (sccb->header.response_code != 0x1f0)
+			break;
+	}
+	return -EIO;
+}
+
+static void __init sclp_early_facilities_detect(struct read_info_sccb *sccb)
+{
+	struct sclp_core_entry *cpue;
+	u16 boot_cpu_address, cpu;
+
+	if (sclp_early_read_info(sccb))
+		return;
+
+	sclp.facilities = sccb->facilities;
+	sclp.has_sprp = !!(sccb->fac84 & 0x02);
+	sclp.has_core_type = !!(sccb->fac84 & 0x01);
+	sclp.has_gsls = !!(sccb->fac85 & 0x80);
+	sclp.has_64bscao = !!(sccb->fac116 & 0x80);
+	sclp.has_cmma = !!(sccb->fac116 & 0x40);
+	sclp.has_esca = !!(sccb->fac116 & 0x08);
+	sclp.has_pfmfi = !!(sccb->fac117 & 0x40);
+	sclp.has_ibs = !!(sccb->fac117 & 0x20);
+	sclp.has_gisaf = !!(sccb->fac118 & 0x08);
+	sclp.has_hvs = !!(sccb->fac119 & 0x80);
+	sclp.has_kss = !!(sccb->fac98 & 0x01);
+	if (sccb->fac85 & 0x02)
+		S390_lowcore.machine_flags |= MACHINE_FLAG_ESOP;
+	if (sccb->fac91 & 0x40)
+		S390_lowcore.machine_flags |= MACHINE_FLAG_TLB_GUEST;
+	sclp.rnmax = sccb->rnmax ? sccb->rnmax : sccb->rnmax2;
+	sclp.rzm = sccb->rnsize ? sccb->rnsize : sccb->rnsize2;
+	sclp.rzm <<= 20;
+	sclp.ibc = sccb->ibc;
+
+	if (sccb->hamaxpow && sccb->hamaxpow < 64)
+		sclp.hamax = (1UL << sccb->hamaxpow) - 1;
+	else
+		sclp.hamax = U64_MAX;
+
+	if (!sccb->hcpua) {
+		if (MACHINE_IS_VM)
+			sclp.max_cores = 64;
+		else
+			sclp.max_cores = sccb->ncpurl;
+	} else {
+		sclp.max_cores = sccb->hcpua + 1;
+	}
+
+	boot_cpu_address = stap();
+	cpue = (void *)sccb + sccb->cpuoff;
+	for (cpu = 0; cpu < sccb->ncpurl; cpue++, cpu++) {
+		if (boot_cpu_address != cpue->core_id)
+			continue;
+		sclp.has_siif = cpue->siif;
+		sclp.has_sigpif = cpue->sigpif;
+		sclp.has_sief2 = cpue->sief2;
+		sclp.has_gpere = cpue->gpere;
+		sclp.has_ib = cpue->ib;
+		sclp.has_cei = cpue->cei;
+		sclp.has_skey = cpue->skey;
+		break;
+	}
+
+	/* Save IPL information */
+	sclp_ipl_info.is_valid = 1;
+	if (sccb->fac91 & 0x2)
+		sclp_ipl_info.has_dump = 1;
+	memcpy(&sclp_ipl_info.loadparm, &sccb->loadparm, LOADPARM_LEN);
+
+	sclp.mtid = (sccb->fac42 & 0x80) ? (sccb->fac42 & 31) : 0;
+	sclp.mtid_cp = (sccb->fac42 & 0x80) ? (sccb->fac43 & 31) : 0;
+	sclp.mtid_prev = (sccb->fac42 & 0x80) ? (sccb->fac66 & 31) : 0;
+
+	sclp.hmfai = sccb->hmfai;
+}
+
+/*
+ * This function is called after sclp_early_facilities_detect(), which is
+ * invoked from the early.c code and retrieves and saves the IPL
+ * information.
+ */
+void __init sclp_early_get_ipl_info(struct sclp_ipl_info *info)
+{
+	*info = sclp_ipl_info;
+}
+
+static struct sclp_core_info sclp_early_core_info __initdata;
+static int sclp_early_core_info_valid __initdata;
+
+static void __init sclp_early_init_core_info(struct read_cpu_info_sccb *sccb)
+{
+	if (!SCLP_HAS_CPU_INFO)
+		return;
+	memset(sccb, 0, sizeof(*sccb));
+	sccb->header.length = sizeof(*sccb);
+	if (sclp_early_cmd(SCLP_CMDW_READ_CPU_INFO, sccb))
+		return;
+	if (sccb->header.response_code != 0x0010)
+		return;
+	sclp_fill_core_info(&sclp_early_core_info, sccb);
+	sclp_early_core_info_valid = 1;
+}
+
+int __init sclp_early_get_core_info(struct sclp_core_info *info)
+{
+	if (!sclp_early_core_info_valid)
+		return -EIO;
+	*info = sclp_early_core_info;
+	return 0;
+}
+
+static long __init sclp_early_hsa_size_init(struct sdias_sccb *sccb)
+{
+	memset(sccb, 0, sizeof(*sccb));
+	sccb->hdr.length = sizeof(*sccb);
+	sccb->evbuf.hdr.length = sizeof(struct sdias_evbuf);
+	sccb->evbuf.hdr.type = EVTYP_SDIAS;
+	sccb->evbuf.event_qual = SDIAS_EQ_SIZE;
+	sccb->evbuf.data_id = SDIAS_DI_FCP_DUMP;
+	sccb->evbuf.event_id = 4712;
+	sccb->evbuf.dbs = 1;
+	if (sclp_early_cmd(SCLP_CMDW_WRITE_EVENT_DATA, sccb))
+		return -EIO;
+	if (sccb->hdr.response_code != 0x20)
+		return -EIO;
+	if (sccb->evbuf.blk_cnt == 0)
+		return 0;
+	return (sccb->evbuf.blk_cnt - 1) * PAGE_SIZE;
+}
+
+static long __init sclp_early_hsa_copy_wait(struct sdias_sccb *sccb)
+{
+	memset(sccb, 0, PAGE_SIZE);
+	sccb->hdr.length = PAGE_SIZE;
+	if (sclp_early_cmd(SCLP_CMDW_READ_EVENT_DATA, sccb))
+		return -EIO;
+	if ((sccb->hdr.response_code != 0x20) && (sccb->hdr.response_code != 0x220))
+		return -EIO;
+	if (sccb->evbuf.blk_cnt == 0)
+		return 0;
+	return (sccb->evbuf.blk_cnt - 1) * PAGE_SIZE;
+}
+
+static void __init sclp_early_hsa_size_detect(void *sccb)
+{
+	unsigned long flags;
+	long size = -EIO;
+
+	raw_local_irq_save(flags);
+	if (sclp_early_set_event_mask(sccb, EVTYP_SDIAS_MASK, EVTYP_SDIAS_MASK))
+		goto out;
+	size = sclp_early_hsa_size_init(sccb);
+	/* First check for synchronous response (LPAR) */
+	if (size)
+		goto out_mask;
+	if (!(S390_lowcore.ext_params & 1))
+		sclp_early_wait_irq();
+	size = sclp_early_hsa_copy_wait(sccb);
+out_mask:
+	sclp_early_set_event_mask(sccb, 0, 0);
+out:
+	raw_local_irq_restore(flags);
+	if (size > 0)
+		sclp.hsa_size = size;
+}
+
+static void __init sclp_early_console_detect(struct init_sccb *sccb)
+{
+	if (sccb->header.response_code != 0x20)
+		return;
+
+	if (sclp_early_con_check_vt220(sccb))
+		sclp.has_vt220 = 1;
+
+	if (sclp_early_con_check_linemode(sccb))
+		sclp.has_linemode = 1;
+}
+
+void __init sclp_early_detect(void)
+{
+	void *sccb = &sclp_early_sccb;
+
+	sclp_early_facilities_detect(sccb);
+	sclp_early_init_core_info(sccb);
+	sclp_early_hsa_size_detect(sccb);
+
+	/*
+	 * Turn off SCLP event notifications.  Also save remote masks in the
+	 * sccb.  These are sufficient to detect sclp console capabilities.
+	 */
+	sclp_early_set_event_mask(sccb, 0, 0);
+	sclp_early_console_detect(sccb);
+}
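
As a usage sketch (a hypothetical consumer, not part of this file): once sclp_early_detect() has run, the machine's maximum storage can be derived from the detected values, since rnmax counts storage increments and rzm holds the increment size in bytes after the shift above.

/* Hypothetical consumer of the detected values. */
static unsigned long sclp_early_max_storage(void)
{
	/* number of storage increments times increment size in bytes */
	return sclp.rnmax * sclp.rzm;
}
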
diff --git a/drivers/s390/char/sclp_early_core.c b/drivers/s390/char/sclp_early_core.c
new file mode 100644
index 0000000..2f61f55
--- /dev/null
+++ b/drivers/s390/char/sclp_early_core.c
@@ -0,0 +1,236 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ *    Copyright IBM Corp. 2015
+ *    Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
+ */
+
+#include <linux/kernel.h>
+#include <asm/processor.h>
+#include <asm/lowcore.h>
+#include <asm/ebcdic.h>
+#include <asm/irq.h>
+#include "sclp.h"
+#include "sclp_rw.h"
+
+char sclp_early_sccb[PAGE_SIZE] __aligned(PAGE_SIZE) __section(.data);
+int sclp_init_state __section(.data) = sclp_init_state_uninitialized;
+/*
+ * Used to keep track of the size of the event masks. QEMU up to version 2.11
+ * only supports a mask size of 4 bytes and needs a workaround.
+ */
+bool sclp_mask_compat_mode __section(.data);
+
+void sclp_early_wait_irq(void)
+{
+	unsigned long psw_mask, addr;
+	psw_t psw_ext_save, psw_wait;
+	union ctlreg0 cr0, cr0_new;
+
+	__ctl_store(cr0.val, 0, 0);
+	cr0_new.val = cr0.val & ~CR0_IRQ_SUBCLASS_MASK;
+	cr0_new.lap = 0;
+	cr0_new.sssm = 1;
+	__ctl_load(cr0_new.val, 0, 0);
+
+	psw_ext_save = S390_lowcore.external_new_psw;
+	psw_mask = __extract_psw();
+	S390_lowcore.external_new_psw.mask = psw_mask;
+	psw_wait.mask = psw_mask | PSW_MASK_EXT | PSW_MASK_WAIT;
+	S390_lowcore.ext_int_code = 0;
+
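+	/*
+	 * Point both the wait PSW and the external-interrupt new PSW at
+	 * label 0 below, then load the enabled wait PSW. Any external
+	 * interrupt resumes execution at 0; loop until the interrupt was
+	 * a service signal.
+	 */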
+	do {
+		asm volatile(
+			"	larl	%[addr],0f\n"
+			"	stg	%[addr],%[psw_wait_addr]\n"
+			"	stg	%[addr],%[psw_ext_addr]\n"
+			"	lpswe	%[psw_wait]\n"
+			"0:\n"
+			: [addr] "=&d" (addr),
+			  [psw_wait_addr] "=Q" (psw_wait.addr),
+			  [psw_ext_addr] "=Q" (S390_lowcore.external_new_psw.addr)
+			: [psw_wait] "Q" (psw_wait)
+			: "cc", "memory");
+	} while (S390_lowcore.ext_int_code != EXT_IRQ_SERVICE_SIG);
+
+	S390_lowcore.external_new_psw = psw_ext_save;
+	__ctl_load(cr0.val, 0, 0);
+}
+
+int sclp_early_cmd(sclp_cmdw_t cmd, void *sccb)
+{
+	unsigned long flags;
+	int rc;
+
+	raw_local_irq_save(flags);
+	rc = sclp_service_call(cmd, sccb);
+	if (rc)
+		goto out;
+	sclp_early_wait_irq();
+out:
+	raw_local_irq_restore(flags);
+	return rc;
+}
+
+struct write_sccb {
+	struct sccb_header header;
+	struct msg_buf msg;
+} __packed;
+
+/* Output multi-line text using SCLP Message interface. */
+static void sclp_early_print_lm(const char *str, unsigned int len)
+{
+	unsigned char *ptr, *end, ch;
+	unsigned int count, offset;
+	struct write_sccb *sccb;
+	struct msg_buf *msg;
+	struct mdb *mdb;
+	struct mto *mto;
+	struct go *go;
+
+	sccb = (struct write_sccb *) &sclp_early_sccb;
+	end = (unsigned char *) sccb + sizeof(sclp_early_sccb) - 1;
+	memset(sccb, 0, sizeof(*sccb));
+	ptr = (unsigned char *) &sccb->msg.mdb.mto;
+	offset = 0;
+	do {
+		for (count = sizeof(*mto); offset < len; count++) {
+			ch = str[offset++];
+			if ((ch == 0x0a) || (ptr + count > end))
+				break;
+			ptr[count] = _ascebc[ch];
+		}
+		mto = (struct mto *) ptr;
+		memset(mto, 0, sizeof(*mto));
+		mto->length = count;
+		mto->type = 4;
+		mto->line_type_flags = LNTPFLGS_ENDTEXT;
+		ptr += count;
+	} while ((offset < len) && (ptr + sizeof(*mto) <= end));
+	len = ptr - (unsigned char *) sccb;
+	sccb->header.length = len - offsetof(struct write_sccb, header);
+	msg = &sccb->msg;
+	msg->header.type = EVTYP_MSG;
+	msg->header.length = len - offsetof(struct write_sccb, msg.header);
+	mdb = &msg->mdb;
+	mdb->header.type = 1;
+	mdb->header.tag = 0xD4C4C240;
+	mdb->header.revision_code = 1;
+	mdb->header.length = len - offsetof(struct write_sccb, msg.mdb.header);
+	go = &mdb->go;
+	go->length = sizeof(*go);
+	go->type = 1;
+	sclp_early_cmd(SCLP_CMDW_WRITE_EVENT_DATA, sccb);
+}
+
+struct vt220_sccb {
+	struct sccb_header header;
+	struct {
+		struct evbuf_header header;
+		char data[];
+	} msg;
+} __packed;
+
+/* Output multi-line text using SCLP VT220 interface. */
+static void sclp_early_print_vt220(const char *str, unsigned int len)
+{
+	struct vt220_sccb *sccb;
+
+	sccb = (struct vt220_sccb *) &sclp_early_sccb;
+	if (sizeof(*sccb) + len >= sizeof(sclp_early_sccb))
+		len = sizeof(sclp_early_sccb) - sizeof(*sccb);
+	memset(sccb, 0, sizeof(*sccb));
+	memcpy(&sccb->msg.data, str, len);
+	sccb->header.length = sizeof(*sccb) + len;
+	sccb->msg.header.length = sizeof(sccb->msg) + len;
+	sccb->msg.header.type = EVTYP_VT220MSG;
+	sclp_early_cmd(SCLP_CMDW_WRITE_EVENT_DATA, sccb);
+}
+
+int sclp_early_set_event_mask(struct init_sccb *sccb,
+			      sccb_mask_t receive_mask,
+			      sccb_mask_t send_mask)
+{
+retry:
+	memset(sccb, 0, sizeof(*sccb));
+	sccb->header.length = sizeof(*sccb);
+	if (sclp_mask_compat_mode)
+		sccb->mask_length = SCLP_MASK_SIZE_COMPAT;
+	else
+		sccb->mask_length = sizeof(sccb_mask_t);
+	sccb_set_recv_mask(sccb, receive_mask);
+	sccb_set_send_mask(sccb, send_mask);
+	if (sclp_early_cmd(SCLP_CMDW_WRITE_EVENT_MASK, sccb))
+		return -EIO;
+	if ((sccb->header.response_code == 0x74f0) && !sclp_mask_compat_mode) {
+		sclp_mask_compat_mode = true;
+		goto retry;
+	}
+	if (sccb->header.response_code != 0x20)
+		return -EIO;
+	return 0;
+}
+
+unsigned int sclp_early_con_check_linemode(struct init_sccb *sccb)
+{
+	if (!(sccb_get_sclp_send_mask(sccb) & EVTYP_OPCMD_MASK))
+		return 0;
+	if (!(sccb_get_sclp_recv_mask(sccb) & (EVTYP_MSG_MASK | EVTYP_PMSGCMD_MASK)))
+		return 0;
+	return 1;
+}
+
+unsigned int sclp_early_con_check_vt220(struct init_sccb *sccb)
+{
+	if (sccb_get_sclp_send_mask(sccb) & EVTYP_VT220MSG_MASK)
+		return 1;
+	return 0;
+}
+
+static int sclp_early_setup(int disable, int *have_linemode, int *have_vt220)
+{
+	unsigned long receive_mask, send_mask;
+	struct init_sccb *sccb;
+	int rc;
+
+	BUILD_BUG_ON(sizeof(struct init_sccb) > PAGE_SIZE);
+
+	*have_linemode = *have_vt220 = 0;
+	sccb = (struct init_sccb *) &sclp_early_sccb;
+	receive_mask = disable ? 0 : EVTYP_OPCMD_MASK;
+	send_mask = disable ? 0 : EVTYP_VT220MSG_MASK | EVTYP_MSG_MASK;
+	rc = sclp_early_set_event_mask(sccb, receive_mask, send_mask);
+	if (rc)
+		return rc;
+	*have_linemode = sclp_early_con_check_linemode(sccb);
+	*have_vt220 = !!(sccb_get_send_mask(sccb) & EVTYP_VT220MSG_MASK);
+	return rc;
+}
+
+/*
+ * Output one or more lines of text on the SCLP console (VT220 and/or
+ * line mode).
+ */
+void __sclp_early_printk(const char *str, unsigned int len, unsigned int force)
+{
+	int have_linemode, have_vt220;
+
+	if (!force && sclp_init_state != sclp_init_state_uninitialized)
+		return;
+	if (sclp_early_setup(0, &have_linemode, &have_vt220) != 0)
+		return;
+	if (have_linemode)
+		sclp_early_print_lm(str, len);
+	if (have_vt220)
+		sclp_early_print_vt220(str, len);
+	sclp_early_setup(1, &have_linemode, &have_vt220);
+}
+
+void sclp_early_printk(const char *str)
+{
+	__sclp_early_printk(str, strlen(str), 0);
+}
+
+void sclp_early_printk_force(const char *str)
+{
+	__sclp_early_printk(str, strlen(str), 1);
+}
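
For illustration, a hypothetical user of this facility (any code that runs before the regular SCLP driver and console are initialized) only needs a single call:

/* Hypothetical early caller; works before the regular console exists. */
static void __init report_early_progress(void)
{
	sclp_early_printk("early boot: decompressor stage reached\n");
}
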
diff --git a/drivers/s390/char/sclp_ftp.c b/drivers/s390/char/sclp_ftp.c
new file mode 100644
index 0000000..dfdd6c8
--- /dev/null
+++ b/drivers/s390/char/sclp_ftp.c
@@ -0,0 +1,276 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ *    SCLP Event Type (ET) 7 - Diagnostic Test FTP Services, usable on LPAR
+ *
+ *    Copyright IBM Corp. 2013
+ *    Author(s): Ralf Hoppe (rhoppe@de.ibm.com)
+ *
+ */
+
+#define KMSG_COMPONENT "hmcdrv"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/wait.h>
+#include <linux/string.h>
+#include <linux/jiffies.h>
+#include <asm/sysinfo.h>
+#include <asm/ebcdic.h>
+
+#include "sclp.h"
+#include "sclp_diag.h"
+#include "sclp_ftp.h"
+
+static DECLARE_COMPLETION(sclp_ftp_rx_complete);
+static u8 sclp_ftp_ldflg;
+static u64 sclp_ftp_fsize;
+static u64 sclp_ftp_length;
+
+/**
+ * sclp_ftp_txcb() - Diagnostic Test FTP services SCLP command callback
+ */
+static void sclp_ftp_txcb(struct sclp_req *req, void *data)
+{
+	struct completion *completion = data;
+
+#ifdef DEBUG
+	pr_debug("SCLP (ET7) TX-IRQ, SCCB @ 0x%p: %*phN\n",
+		 req->sccb, 24, req->sccb);
+#endif
+	complete(completion);
+}
+
+/**
+ * sclp_ftp_rxcb() - Diagnostic Test FTP services receiver event callback
+ */
+static void sclp_ftp_rxcb(struct evbuf_header *evbuf)
+{
+	struct sclp_diag_evbuf *diag = (struct sclp_diag_evbuf *) evbuf;
+
+	/*
+	 * Check for Diagnostic Test FTP Service
+	 */
+	if (evbuf->type != EVTYP_DIAG_TEST ||
+	    diag->route != SCLP_DIAG_FTP_ROUTE ||
+	    diag->mdd.ftp.pcx != SCLP_DIAG_FTP_XPCX ||
+	    evbuf->length < SCLP_DIAG_FTP_EVBUF_LEN)
+		return;
+
+#ifdef DEBUG
+	pr_debug("SCLP (ET7) RX-IRQ, Event @ 0x%p: %*phN\n",
+		 evbuf, 24, evbuf);
+#endif
+
+	/*
+	 * Because the event buffer is located in a page which is owned
+	 * by the SCLP core, all data of interest must be copied. The
+	 * error indication is in 'sclp_ftp_ldflg'
+	 */
+	sclp_ftp_ldflg = diag->mdd.ftp.ldflg;
+	sclp_ftp_fsize = diag->mdd.ftp.fsize;
+	sclp_ftp_length = diag->mdd.ftp.length;
+
+	complete(&sclp_ftp_rx_complete);
+}
+
+/**
+ * sclp_ftp_et7() - start a Diagnostic Test FTP Service SCLP request
+ * @ftp: pointer to FTP descriptor
+ *
+ * Return: 0 on success, else a (negative) error code
+ */
+static int sclp_ftp_et7(const struct hmcdrv_ftp_cmdspec *ftp)
+{
+	struct completion completion;
+	struct sclp_diag_sccb *sccb;
+	struct sclp_req *req;
+	size_t len;
+	int rc;
+
+	req = kzalloc(sizeof(*req), GFP_KERNEL);
+	sccb = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
+	if (!req || !sccb) {
+		rc = -ENOMEM;
+		goto out_free;
+	}
+
+	sccb->hdr.length = SCLP_DIAG_FTP_EVBUF_LEN +
+		sizeof(struct sccb_header);
+	sccb->evbuf.hdr.type = EVTYP_DIAG_TEST;
+	sccb->evbuf.hdr.length = SCLP_DIAG_FTP_EVBUF_LEN;
+	sccb->evbuf.hdr.flags = 0; /* clear processed-buffer */
+	sccb->evbuf.route = SCLP_DIAG_FTP_ROUTE;
+	sccb->evbuf.mdd.ftp.pcx = SCLP_DIAG_FTP_XPCX;
+	sccb->evbuf.mdd.ftp.srcflg = 0;
+	sccb->evbuf.mdd.ftp.pgsize = 0;
+	sccb->evbuf.mdd.ftp.asce = _ASCE_REAL_SPACE;
+	sccb->evbuf.mdd.ftp.ldflg = SCLP_DIAG_FTP_LDFAIL;
+	sccb->evbuf.mdd.ftp.fsize = 0;
+	sccb->evbuf.mdd.ftp.cmd = ftp->id;
+	sccb->evbuf.mdd.ftp.offset = ftp->ofs;
+	sccb->evbuf.mdd.ftp.length = ftp->len;
+	sccb->evbuf.mdd.ftp.bufaddr = virt_to_phys(ftp->buf);
+
+	len = strlcpy(sccb->evbuf.mdd.ftp.fident, ftp->fname,
+		      HMCDRV_FTP_FIDENT_MAX);
+	if (len >= HMCDRV_FTP_FIDENT_MAX) {
+		rc = -EINVAL;
+		goto out_free;
+	}
+
+	req->command = SCLP_CMDW_WRITE_EVENT_DATA;
+	req->sccb = sccb;
+	req->status = SCLP_REQ_FILLED;
+	req->callback = sclp_ftp_txcb;
+	req->callback_data = &completion;
+
+	init_completion(&completion);
+
+	rc = sclp_add_request(req);
+	if (rc)
+		goto out_free;
+
+	/* Wait for end of ftp sclp command. */
+	wait_for_completion(&completion);
+
+#ifdef DEBUG
+	pr_debug("status of SCLP (ET7) request is 0x%04x (0x%02x)\n",
+		 sccb->hdr.response_code, sccb->evbuf.hdr.flags);
+#endif
+
+	/*
+	 * Check if sclp accepted the request. The data transfer runs
+	 * asynchronously and the completion is indicated with an
+	 * sclp ET7 event.
+	 */
+	if (req->status != SCLP_REQ_DONE ||
+	    (sccb->evbuf.hdr.flags & 0x80) == 0 || /* processed-buffer */
+	    (sccb->hdr.response_code & 0xffU) != 0x20U) {
+		rc = -EIO;
+	}
+
+out_free:
+	free_page((unsigned long) sccb);
+	kfree(req);
+	return rc;
+}
+
+/**
+ * sclp_ftp_cmd() - executes a HMC related SCLP Diagnose (ET7) FTP command
+ * @ftp: pointer to FTP command specification
+ * @fsize: where to return the file size (or NULL if not desired)
+ *
+ * Attention: This function is not reentrant, so the caller must ensure
+ * exclusive use, e.g. by means of locking.
+ *
+ * Return: number of bytes read/written or a (negative) error code
+ */
+ssize_t sclp_ftp_cmd(const struct hmcdrv_ftp_cmdspec *ftp, size_t *fsize)
+{
+	ssize_t len;
+#ifdef DEBUG
+	unsigned long start_jiffies;
+
+	pr_debug("starting SCLP (ET7), cmd %d for '%s' at %lld with %zd bytes\n",
+		 ftp->id, ftp->fname, (long long) ftp->ofs, ftp->len);
+	start_jiffies = jiffies;
+#endif
+
+	init_completion(&sclp_ftp_rx_complete);
+
+	/* Start ftp sclp command. */
+	len = sclp_ftp_et7(ftp);
+	if (len)
+		goto out_unlock;
+
+	/*
+	 * There is no way to cancel the sclp ET7 request, the code
+	 * needs to wait unconditionally until the transfer is complete.
+	 */
+	wait_for_completion(&sclp_ftp_rx_complete);
+
+#ifdef DEBUG
+	pr_debug("completed SCLP (ET7) request after %lu ms (all)\n",
+		 (jiffies - start_jiffies) * 1000 / HZ);
+	pr_debug("return code of SCLP (ET7) FTP Service is 0x%02x, with %lld/%lld bytes\n",
+		 sclp_ftp_ldflg, sclp_ftp_length, sclp_ftp_fsize);
+#endif
+
+	switch (sclp_ftp_ldflg) {
+	case SCLP_DIAG_FTP_OK:
+		len = sclp_ftp_length;
+		if (fsize)
+			*fsize = sclp_ftp_fsize;
+		break;
+	case SCLP_DIAG_FTP_LDNPERM:
+		len = -EPERM;
+		break;
+	case SCLP_DIAG_FTP_LDRUNS:
+		len = -EBUSY;
+		break;
+	case SCLP_DIAG_FTP_LDFAIL:
+		len = -ENOENT;
+		break;
+	default:
+		len = -EIO;
+		break;
+	}
+
+out_unlock:
+	return len;
+}
+
+/*
+ * ET7 event listener
+ */
+static struct sclp_register sclp_ftp_event = {
+	.send_mask = EVTYP_DIAG_TEST_MASK,    /* want tx events */
+	.receive_mask = EVTYP_DIAG_TEST_MASK, /* want rx events */
+	.receiver_fn = sclp_ftp_rxcb,	      /* async callback (rx) */
+	.state_change_fn = NULL,
+	.pm_event_fn = NULL,
+};
+
+/**
+ * sclp_ftp_startup() - startup of FTP services, when running on LPAR
+ */
+int sclp_ftp_startup(void)
+{
+#ifdef DEBUG
+	unsigned long info;
+#endif
+	int rc;
+
+	rc = sclp_register(&sclp_ftp_event);
+	if (rc)
+		return rc;
+
+#ifdef DEBUG
+	info = get_zeroed_page(GFP_KERNEL);
+
+	if (info != 0) {
+		struct sysinfo_2_2_2 *info222 = (struct sysinfo_2_2_2 *)info;
+
+		if (!stsi(info222, 2, 2, 2)) { /* get SYSIB 2.2.2 */
+			info222->name[sizeof(info222->name) - 1] = '\0';
+			EBCASC_500(info222->name, sizeof(info222->name) - 1);
+			pr_debug("SCLP (ET7) FTP Service working on LPAR %u (%s)\n",
+				 info222->lpar_number, info222->name);
+		}
+
+		free_page(info);
+	}
+#endif	/* DEBUG */
+	return 0;
+}
+
+/**
+ * sclp_ftp_shutdown() - shutdown of FTP services, when running on LPAR
+ */
+void sclp_ftp_shutdown(void)
+{
+	sclp_unregister(&sclp_ftp_event);
+}
diff --git a/drivers/s390/char/sclp_ftp.h b/drivers/s390/char/sclp_ftp.h
new file mode 100644
index 0000000..d64da18
--- /dev/null
+++ b/drivers/s390/char/sclp_ftp.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ *    SCLP Event Type (ET) 7 - Diagnostic Test FTP Services, usable on LPAR
+ *
+ *    Note that none of the functions exported here are reentrant.
+ *    Usage must therefore be exclusive, which the caller has to ensure
+ *    (e.g. by holding a mutex).
+ *
+ *    Copyright IBM Corp. 2013
+ *    Author(s): Ralf Hoppe (rhoppe@de.ibm.com)
+ */
+
+#ifndef __SCLP_FTP_H__
+#define __SCLP_FTP_H__
+
+#include "hmcdrv_ftp.h"
+
+int sclp_ftp_startup(void);
+void sclp_ftp_shutdown(void);
+ssize_t sclp_ftp_cmd(const struct hmcdrv_ftp_cmdspec *ftp, size_t *fsize);
+
+#endif	 /* __SCLP_FTP_H__ */
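
Since the header above requires exclusive use, a hypothetical caller could serialize sclp_ftp_cmd() with a mutex of its own, as in this sketch (the wrapper and mutex names are made up):

static DEFINE_MUTEX(my_ftp_mutex);

/* Serialize sclp_ftp_cmd() as required by the comment above. */
static ssize_t my_ftp_cmd(struct hmcdrv_ftp_cmdspec *ftp, size_t *fsize)
{
	ssize_t len;

	mutex_lock(&my_ftp_mutex);
	len = sclp_ftp_cmd(ftp, fsize);
	mutex_unlock(&my_ftp_mutex);
	return len;
}
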
diff --git a/drivers/s390/char/sclp_ocf.c b/drivers/s390/char/sclp_ocf.c
new file mode 100644
index 0000000..d35f10e
--- /dev/null
+++ b/drivers/s390/char/sclp_ocf.c
@@ -0,0 +1,152 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ *    SCLP OCF communication parameters sysfs interface
+ *
+ *    Copyright IBM Corp. 2011
+ *    Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
+ */
+
+#define KMSG_COMPONENT "sclp_ocf"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/stat.h>
+#include <linux/device.h>
+#include <linux/string.h>
+#include <linux/ctype.h>
+#include <linux/kmod.h>
+#include <linux/timer.h>
+#include <linux/err.h>
+#include <asm/ebcdic.h>
+#include <asm/sclp.h>
+
+#include "sclp.h"
+
+#define OCF_LENGTH_HMC_NETWORK 8UL
+#define OCF_LENGTH_CPC_NAME 8UL
+
+static char hmc_network[OCF_LENGTH_HMC_NETWORK + 1];
+static char cpc_name[OCF_LENGTH_CPC_NAME]; /* in EBCDIC */
+
+static DEFINE_SPINLOCK(sclp_ocf_lock);
+static struct work_struct sclp_ocf_change_work;
+
+static struct kset *ocf_kset;
+
+static void sclp_ocf_change_notify(struct work_struct *work)
+{
+	kobject_uevent(&ocf_kset->kobj, KOBJ_CHANGE);
+}
+
+/* Handler for OCF event. Look for the CPC image name. */
+static void sclp_ocf_handler(struct evbuf_header *evbuf)
+{
+	struct gds_vector *v;
+	struct gds_subvector *sv, *netid, *cpc;
+	size_t size;
+
+	/* Find the 0x9f00 block. */
+	v = sclp_find_gds_vector(evbuf + 1, (void *) evbuf + evbuf->length,
+				 0x9f00);
+	if (!v)
+		return;
+	/* Find the 0x9f22 block inside the 0x9f00 block. */
+	v = sclp_find_gds_vector(v + 1, (void *) v + v->length, 0x9f22);
+	if (!v)
+		return;
+	/* Find the 0x81 block inside the 0x9f22 block. */
+	sv = sclp_find_gds_subvector(v + 1, (void *) v + v->length, 0x81);
+	if (!sv)
+		return;
+	/* Find the 0x01 block inside the 0x81 block. */
+	netid = sclp_find_gds_subvector(sv + 1, (void *) sv + sv->length, 1);
+	/* Find the 0x02 block inside the 0x81 block. */
+	cpc = sclp_find_gds_subvector(sv + 1, (void *) sv + sv->length, 2);
+	/* Copy network name and cpc name. */
+	spin_lock(&sclp_ocf_lock);
+	if (netid) {
+		size = min(OCF_LENGTH_HMC_NETWORK, (size_t) netid->length);
+		memcpy(hmc_network, netid + 1, size);
+		EBCASC(hmc_network, size);
+		hmc_network[size] = 0;
+	}
+	if (cpc) {
+		size = min(OCF_LENGTH_CPC_NAME, (size_t) cpc->length);
+		memset(cpc_name, 0, OCF_LENGTH_CPC_NAME);
+		memcpy(cpc_name, cpc + 1, size);
+	}
+	spin_unlock(&sclp_ocf_lock);
+	schedule_work(&sclp_ocf_change_work);
+}
+
+static struct sclp_register sclp_ocf_event = {
+	.receive_mask = EVTYP_OCF_MASK,
+	.receiver_fn = sclp_ocf_handler,
+};
+
+void sclp_ocf_cpc_name_copy(char *dst)
+{
+	spin_lock_irq(&sclp_ocf_lock);
+	memcpy(dst, cpc_name, OCF_LENGTH_CPC_NAME);
+	spin_unlock_irq(&sclp_ocf_lock);
+}
+EXPORT_SYMBOL(sclp_ocf_cpc_name_copy);
+
+static ssize_t cpc_name_show(struct kobject *kobj,
+			     struct kobj_attribute *attr, char *page)
+{
+	char name[OCF_LENGTH_CPC_NAME + 1];
+
+	sclp_ocf_cpc_name_copy(name);
+	name[OCF_LENGTH_CPC_NAME] = 0;
+	EBCASC(name, OCF_LENGTH_CPC_NAME);
+	return snprintf(page, PAGE_SIZE, "%s\n", name);
+}
+
+static struct kobj_attribute cpc_name_attr =
+	__ATTR(cpc_name, 0444, cpc_name_show, NULL);
+
+static ssize_t hmc_network_show(struct kobject *kobj,
+				struct kobj_attribute *attr, char *page)
+{
+	int rc;
+
+	spin_lock_irq(&sclp_ocf_lock);
+	rc = snprintf(page, PAGE_SIZE, "%s\n", hmc_network);
+	spin_unlock_irq(&sclp_ocf_lock);
+	return rc;
+}
+
+static struct kobj_attribute hmc_network_attr =
+	__ATTR(hmc_network, 0444, hmc_network_show, NULL);
+
+static struct attribute *ocf_attrs[] = {
+	&cpc_name_attr.attr,
+	&hmc_network_attr.attr,
+	NULL,
+};
+
+static const struct attribute_group ocf_attr_group = {
+	.attrs = ocf_attrs,
+};
+
+static int __init ocf_init(void)
+{
+	int rc;
+
+	INIT_WORK(&sclp_ocf_change_work, sclp_ocf_change_notify);
+	ocf_kset = kset_create_and_add("ocf", NULL, firmware_kobj);
+	if (!ocf_kset)
+		return -ENOMEM;
+
+	rc = sysfs_create_group(&ocf_kset->kobj, &ocf_attr_group);
+	if (rc) {
+		kset_unregister(ocf_kset);
+		return rc;
+	}
+
+	return sclp_register(&sclp_ocf_event);
+}
+
+device_initcall(ocf_init);
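
Besides the sysfs files under /sys/firmware/ocf/, the exported helper can be consumed from other kernel code. A hypothetical consumer would convert the EBCDIC name the same way cpc_name_show() does; the length 8 mirrors OCF_LENGTH_CPC_NAME, which is local to this file.

/* Hypothetical consumer: fetch the CPC name as an ASCII C string. */
static void my_get_cpc_name(char name[9])
{
	sclp_ocf_cpc_name_copy(name);
	name[8] = 0;
	EBCASC(name, 8);
}
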
diff --git a/drivers/s390/char/sclp_pci.c b/drivers/s390/char/sclp_pci.c
new file mode 100644
index 0000000..e7c84a4
--- /dev/null
+++ b/drivers/s390/char/sclp_pci.c
@@ -0,0 +1,194 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * PCI I/O adapter configuration related functions.
+ *
+ * Copyright IBM Corp. 2016
+ */
+#define KMSG_COMPONENT "sclp_cmd"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <linux/completion.h>
+#include <linux/export.h>
+#include <linux/mutex.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/err.h>
+
+#include <asm/sclp.h>
+
+#include "sclp.h"
+
+#define SCLP_CMDW_CONFIGURE_PCI			0x001a0001
+#define SCLP_CMDW_DECONFIGURE_PCI		0x001b0001
+
+#define SCLP_ATYPE_PCI				2
+
+#define SCLP_ERRNOTIFY_AQ_REPAIR		1
+#define SCLP_ERRNOTIFY_AQ_INFO_LOG		2
+
+static DEFINE_MUTEX(sclp_pci_mutex);
+static struct sclp_register sclp_pci_event = {
+	.send_mask = EVTYP_ERRNOTIFY_MASK,
+};
+
+struct err_notify_evbuf {
+	struct evbuf_header header;
+	u8 action;
+	u8 atype;
+	u32 fh;
+	u32 fid;
+	u8 data[0];
+} __packed;
+
+struct err_notify_sccb {
+	struct sccb_header header;
+	struct err_notify_evbuf evbuf;
+} __packed;
+
+struct pci_cfg_sccb {
+	struct sccb_header header;
+	u8 atype;		/* adapter type */
+	u8 reserved1;
+	u16 reserved2;
+	u32 aid;		/* adapter identifier */
+} __packed;
+
+static int do_pci_configure(sclp_cmdw_t cmd, u32 fid)
+{
+	struct pci_cfg_sccb *sccb;
+	int rc;
+
+	if (!SCLP_HAS_PCI_RECONFIG)
+		return -EOPNOTSUPP;
+
+	sccb = (struct pci_cfg_sccb *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
+	if (!sccb)
+		return -ENOMEM;
+
+	sccb->header.length = PAGE_SIZE;
+	sccb->atype = SCLP_ATYPE_PCI;
+	sccb->aid = fid;
+	rc = sclp_sync_request(cmd, sccb);
+	if (rc)
+		goto out;
+	switch (sccb->header.response_code) {
+	case 0x0020:
+	case 0x0120:
+		break;
+	default:
+		pr_warn("configure PCI I/O adapter failed: cmd=0x%08x  response=0x%04x\n",
+			cmd, sccb->header.response_code);
+		rc = -EIO;
+		break;
+	}
+out:
+	free_page((unsigned long) sccb);
+	return rc;
+}
+
+int sclp_pci_configure(u32 fid)
+{
+	return do_pci_configure(SCLP_CMDW_CONFIGURE_PCI, fid);
+}
+EXPORT_SYMBOL(sclp_pci_configure);
+
+int sclp_pci_deconfigure(u32 fid)
+{
+	return do_pci_configure(SCLP_CMDW_DECONFIGURE_PCI, fid);
+}
+EXPORT_SYMBOL(sclp_pci_deconfigure);
+
+static void sclp_pci_callback(struct sclp_req *req, void *data)
+{
+	struct completion *completion = data;
+
+	complete(completion);
+}
+
+static int sclp_pci_check_report(struct zpci_report_error_header *report)
+{
+	if (report->version != 1)
+		return -EINVAL;
+
+	if (report->action != SCLP_ERRNOTIFY_AQ_REPAIR &&
+	    report->action != SCLP_ERRNOTIFY_AQ_INFO_LOG)
+		return -EINVAL;
+
+	if (report->length > (PAGE_SIZE - sizeof(struct err_notify_sccb)))
+		return -EINVAL;
+
+	return 0;
+}
+
+int sclp_pci_report(struct zpci_report_error_header *report, u32 fh, u32 fid)
+{
+	DECLARE_COMPLETION_ONSTACK(completion);
+	struct err_notify_sccb *sccb;
+	struct sclp_req req;
+	int ret;
+
+	ret = sclp_pci_check_report(report);
+	if (ret)
+		return ret;
+
+	mutex_lock(&sclp_pci_mutex);
+	ret = sclp_register(&sclp_pci_event);
+	if (ret)
+		goto out_unlock;
+
+	if (!(sclp_pci_event.sclp_receive_mask & EVTYP_ERRNOTIFY_MASK)) {
+		ret = -EOPNOTSUPP;
+		goto out_unregister;
+	}
+
+	sccb = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
+	if (!sccb) {
+		ret = -ENOMEM;
+		goto out_unregister;
+	}
+
+	memset(&req, 0, sizeof(req));
+	req.callback_data = &completion;
+	req.callback = sclp_pci_callback;
+	req.command = SCLP_CMDW_WRITE_EVENT_DATA;
+	req.status = SCLP_REQ_FILLED;
+	req.sccb = sccb;
+
+	sccb->evbuf.header.length = sizeof(sccb->evbuf) + report->length;
+	sccb->evbuf.header.type = EVTYP_ERRNOTIFY;
+	sccb->header.length = sizeof(sccb->header) + sccb->evbuf.header.length;
+
+	sccb->evbuf.action = report->action;
+	sccb->evbuf.atype = SCLP_ATYPE_PCI;
+	sccb->evbuf.fh = fh;
+	sccb->evbuf.fid = fid;
+
+	memcpy(sccb->evbuf.data, report->data, report->length);
+
+	ret = sclp_add_request(&req);
+	if (ret)
+		goto out_free_req;
+
+	wait_for_completion(&completion);
+	if (req.status != SCLP_REQ_DONE) {
+		pr_warn("request failed (status=0x%02x)\n",
+			req.status);
+		ret = -EIO;
+		goto out_free_req;
+	}
+
+	if (sccb->header.response_code != 0x0020) {
+		pr_warn("request failed with response code 0x%x\n",
+			sccb->header.response_code);
+		ret = -EIO;
+	}
+
+out_free_req:
+	free_page((unsigned long) sccb);
+out_unregister:
+	sclp_unregister(&sclp_pci_event);
+out_unlock:
+	mutex_unlock(&sclp_pci_mutex);
+	return ret;
+}
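
A hypothetical caller of sclp_pci_report() could build its report as follows. The zpci_report_error_header layout (version, action, length, data[]) is assumed from the checks in sclp_pci_check_report(); note that the SCLP_ERRNOTIFY_AQ_* action values are local to this file and shown here only for illustration.

/* Hypothetical caller: send an informational log for a PCI function. */
static int my_report_pci_error(u32 fh, u32 fid, const void *log, u16 len)
{
	struct zpci_report_error_header *report;
	int rc;

	report = kzalloc(sizeof(*report) + len, GFP_KERNEL);
	if (!report)
		return -ENOMEM;
	report->version = 1;			/* required by the check */
	report->action = SCLP_ERRNOTIFY_AQ_INFO_LOG;
	report->length = len;			/* must fit into the SCCB page */
	memcpy(report->data, log, len);
	rc = sclp_pci_report(report, fh, fid);
	kfree(report);
	return rc;
}
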
diff --git a/drivers/s390/char/sclp_quiesce.c b/drivers/s390/char/sclp_quiesce.c
new file mode 100644
index 0000000..76956c2
--- /dev/null
+++ b/drivers/s390/char/sclp_quiesce.c
@@ -0,0 +1,83 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ *     signal quiesce handler
+ *
+ *  Copyright IBM Corp. 1999, 2004
+ *  Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
+ *             Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
+ */
+
+#include <linux/types.h>
+#include <linux/cpumask.h>
+#include <linux/smp.h>
+#include <linux/init.h>
+#include <linux/reboot.h>
+#include <linux/atomic.h>
+#include <asm/ptrace.h>
+#include <asm/smp.h>
+
+#include "sclp.h"
+
+static void (*old_machine_restart)(char *);
+static void (*old_machine_halt)(void);
+static void (*old_machine_power_off)(void);
+
+/* Shutdown handler. Signal completion of shutdown by loading special PSW. */
+static void do_machine_quiesce(void)
+{
+	psw_t quiesce_psw;
+
+	smp_send_stop();
+	quiesce_psw.mask =
+		PSW_MASK_BASE | PSW_MASK_EA | PSW_MASK_BA | PSW_MASK_WAIT;
+	quiesce_psw.addr = 0xfff;
+	__load_psw(quiesce_psw);
+}
+
+/* Handler for quiesce event. Start shutdown procedure. */
+static void sclp_quiesce_handler(struct evbuf_header *evbuf)
+{
+	if (_machine_restart != (void *) do_machine_quiesce) {
+		old_machine_restart = _machine_restart;
+		old_machine_halt = _machine_halt;
+		old_machine_power_off = _machine_power_off;
+		_machine_restart = (void *) do_machine_quiesce;
+		_machine_halt = do_machine_quiesce;
+		_machine_power_off = do_machine_quiesce;
+	}
+	ctrl_alt_del();
+}
+
+/* Undo machine restart/halt/power_off modification on resume */
+static void sclp_quiesce_pm_event(struct sclp_register *reg,
+				  enum sclp_pm_event sclp_pm_event)
+{
+	switch (sclp_pm_event) {
+	case SCLP_PM_EVENT_RESTORE:
+		if (old_machine_restart) {
+			_machine_restart = old_machine_restart;
+			_machine_halt = old_machine_halt;
+			_machine_power_off = old_machine_power_off;
+			old_machine_restart = NULL;
+			old_machine_halt = NULL;
+			old_machine_power_off = NULL;
+		}
+		break;
+	case SCLP_PM_EVENT_FREEZE:
+	case SCLP_PM_EVENT_THAW:
+		break;
+	}
+}
+
+static struct sclp_register sclp_quiesce_event = {
+	.receive_mask = EVTYP_SIGQUIESCE_MASK,
+	.receiver_fn = sclp_quiesce_handler,
+	.pm_event_fn = sclp_quiesce_pm_event
+};
+
+/* Initialize quiesce driver. */
+static int __init sclp_quiesce_init(void)
+{
+	return sclp_register(&sclp_quiesce_event);
+}
+device_initcall(sclp_quiesce_init);
diff --git a/drivers/s390/char/sclp_rw.c b/drivers/s390/char/sclp_rw.c
new file mode 100644
index 0000000..44594a4
--- /dev/null
+++ b/drivers/s390/char/sclp_rw.c
@@ -0,0 +1,468 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * driver: reading from and writing to system console on S/390 via SCLP
+ *
+ * Copyright IBM Corp. 1999, 2009
+ *
+ * Author(s): Martin Peschke <mpeschke@de.ibm.com>
+ *	      Martin Schwidefsky <schwidefsky@de.ibm.com>
+ */
+
+#include <linux/kmod.h>
+#include <linux/types.h>
+#include <linux/err.h>
+#include <linux/string.h>
+#include <linux/spinlock.h>
+#include <linux/ctype.h>
+#include <linux/uaccess.h>
+
+#include "sclp.h"
+#include "sclp_rw.h"
+
+/*
+ * The room for the SCCB (only for writing) is less than a page's size
+ * (which the SCLP documentation specifies as the maximum size) because
+ * of the additional struct sclp_buffer bookkeeping data placed at the
+ * end of the same page.
+ */
+#define MAX_SCCB_ROOM (PAGE_SIZE - sizeof(struct sclp_buffer))
+
+static void sclp_rw_pm_event(struct sclp_register *reg,
+			     enum sclp_pm_event sclp_pm_event)
+{
+	sclp_console_pm_event(sclp_pm_event);
+}
+
+/* Event type structure for write message and write priority message */
+static struct sclp_register sclp_rw_event = {
+	.send_mask = EVTYP_MSG_MASK,
+	.pm_event_fn = sclp_rw_pm_event,
+};
+
+/*
+ * Set up an SCLP write buffer. Gets a page (4K) as input and returns
+ * a pointer to a struct sclp_buffer structure that is located at the
+ * end of the input page. This reduces the buffer space by a few
+ * bytes but simplifies things.
+ */
+struct sclp_buffer *
+sclp_make_buffer(void *page, unsigned short columns, unsigned short htab)
+{
+	struct sclp_buffer *buffer;
+	struct sccb_header *sccb;
+
+	sccb = (struct sccb_header *) page;
+	/*
+	 * We keep the struct sclp_buffer structure at the end
+	 * of the sccb page.
+	 */
+	buffer = ((struct sclp_buffer *) ((addr_t) sccb + PAGE_SIZE)) - 1;
+	buffer->sccb = sccb;
+	buffer->retry_count = 0;
+	buffer->messages = 0;
+	buffer->char_sum = 0;
+	buffer->current_line = NULL;
+	buffer->current_length = 0;
+	buffer->columns = columns;
+	buffer->htab = htab;
+
+	/* initialize sccb */
+	memset(sccb, 0, sizeof(struct sccb_header));
+	sccb->length = sizeof(struct sccb_header);
+
+	return buffer;
+}
+
+/*
+ * Return a pointer to the original page that has been used to create
+ * the buffer.
+ */
+void *
+sclp_unmake_buffer(struct sclp_buffer *buffer)
+{
+	return buffer->sccb;
+}
+
+/*
+ * Initialize a new message at the end of the provided buffer with
+ * enough room for max_len characters. Return 0 on success.
+ */
+static int
+sclp_initialize_mto(struct sclp_buffer *buffer, int max_len)
+{
+	struct sccb_header *sccb;
+	struct msg_buf *msg;
+	struct mdb *mdb;
+	struct go *go;
+	struct mto *mto;
+	int msg_size;
+
+	/* max size of new message including message text  */
+	msg_size = sizeof(struct msg_buf) + max_len;
+
+	/* check if current buffer sccb can contain the mto */
+	sccb = buffer->sccb;
+	if ((MAX_SCCB_ROOM - sccb->length) < msg_size)
+		return -ENOMEM;
+
+	msg = (struct msg_buf *)((addr_t) sccb + sccb->length);
+	memset(msg, 0, sizeof(struct msg_buf));
+	msg->header.length = sizeof(struct msg_buf);
+	msg->header.type = EVTYP_MSG;
+
+	mdb = &msg->mdb;
+	mdb->header.length = sizeof(struct mdb);
+	mdb->header.type = 1;
+	mdb->header.tag = 0xD4C4C240;	/* ebcdic "MDB " */
+	mdb->header.revision_code = 1;
+
+	go = &mdb->go;
+	go->length = sizeof(struct go);
+	go->type = 1;
+
+	mto = &mdb->mto;
+	mto->length = sizeof(struct mto);
+	mto->type = 4;	/* message text object */
+	mto->line_type_flags = LNTPFLGS_ENDTEXT; /* end text */
+
+	/* set pointer to first byte after struct mto. */
+	buffer->current_msg = msg;
+	buffer->current_line = (char *) (mto + 1);
+	buffer->current_length = 0;
+
+	return 0;
+}
+
+/*
+ * Finalize message initialized by sclp_initialize_mto(),
+ * updating the sizes of MTO, enclosing MDB, event buffer and SCCB.
+ */
+static void
+sclp_finalize_mto(struct sclp_buffer *buffer)
+{
+	struct sccb_header *sccb;
+	struct msg_buf *msg;
+
+	/*
+	 * update values of sizes
+	 * (SCCB, Event(Message) Buffer, Message Data Block)
+	 */
+	sccb = buffer->sccb;
+	msg = buffer->current_msg;
+	msg->header.length += buffer->current_length;
+	msg->mdb.header.length += buffer->current_length;
+	msg->mdb.mto.length += buffer->current_length;
+	sccb->length += msg->header.length;
+
+	/*
+	 * count number of buffered messages (= number of Message Text
+	 * Objects) and number of buffered characters
+	 * for the SCCB currently used for buffering, and in total
+	 */
+	buffer->messages++;
+	buffer->char_sum += buffer->current_length;
+
+	buffer->current_line = NULL;
+	buffer->current_length = 0;
+	buffer->current_msg = NULL;
+}
+
+/*
+ * Process a message including escape characters and return the number of
+ * characters written to the output sccb ("processed" means that it is not
+ * guaranteed that the characters have already been sent to the SCLP, but
+ * that this will be done at least the next time the SCLP is not busy).
+ */
+int
+sclp_write(struct sclp_buffer *buffer, const unsigned char *msg, int count)
+{
+	int spaces, i_msg;
+	int rc;
+
+	/*
+	 * Parse msg for escape sequences (\t, \v, ...) and put the formatted
+	 * msg into an mto (created by sclp_initialize_mto).
+	 *
+	 * We have to do this work ourselves because there is no support for
+	 * these characters on the native machine and only partial support
+	 * under VM (why does VM interpret \n but the native machine doesn't?).
+	 *
+	 * Depending on the i/o-control setting, the message is either written
+	 * immediately or we wait for a final new line that may come with the
+	 * next message. Besides that, we avoid a buffer overrun by writing
+	 * out its content.
+	 *
+	 * RESTRICTIONS:
+	 *
+	 * \r and \b work only within one line because we are not able to
+	 * modify previous output that has already been accepted by the SCLP.
+	 *
+	 * \t combined with a following \r is not correctly represented
+	 * because \t is expanded to some spaces but \r does not know about
+	 * a previous \t and decreases the current position by one column.
+	 * This keeps the implementation slim and quick.
+	 */
+	for (i_msg = 0; i_msg < count; i_msg++) {
+		switch (msg[i_msg]) {
+		case '\n':	/* new line, line feed (ASCII)	*/
+			/* check if new mto needs to be created */
+			if (buffer->current_line == NULL) {
+				rc = sclp_initialize_mto(buffer, 0);
+				if (rc)
+					return i_msg;
+			}
+			sclp_finalize_mto(buffer);
+			break;
+		case '\a':	/* bell, one for several times	*/
+			/* set SCLP sound alarm bit in General Object */
+			if (buffer->current_line == NULL) {
+				rc = sclp_initialize_mto(buffer,
+							 buffer->columns);
+				if (rc)
+					return i_msg;
+			}
+			buffer->current_msg->mdb.go.general_msg_flags |=
+				GNRLMSGFLGS_SNDALRM;
+			break;
+		case '\t':	/* horizontal tabulator	 */
+			/* check if new mto needs to be created */
+			if (buffer->current_line == NULL) {
+				rc = sclp_initialize_mto(buffer,
+							 buffer->columns);
+				if (rc)
+					return i_msg;
+			}
+			/* "go to (next htab-boundary + 1, same line)" */
+			do {
+				if (buffer->current_length >= buffer->columns)
+					break;
+				/* ok, add a blank */
+				*buffer->current_line++ = 0x40;
+				buffer->current_length++;
+			} while (buffer->current_length % buffer->htab);
+			break;
+		case '\f':	/* form feed  */
+		case '\v':	/* vertical tabulator  */
+			/* "go to (actual column, actual line + 1)" */
+			/* = new line, leading spaces */
+			if (buffer->current_line != NULL) {
+				spaces = buffer->current_length;
+				sclp_finalize_mto(buffer);
+				rc = sclp_initialize_mto(buffer,
+							 buffer->columns);
+				if (rc)
+					return i_msg;
+				memset(buffer->current_line, 0x40, spaces);
+				buffer->current_line += spaces;
+				buffer->current_length = spaces;
+			} else {
+				/* on an empty line this is the same as \n */
+				rc = sclp_initialize_mto(buffer,
+							 buffer->columns);
+				if (rc)
+					return i_msg;
+				sclp_finalize_mto(buffer);
+			}
+			break;
+		case '\b':	/* backspace  */
+			/* "go to (actual column - 1, actual line)" */
+			/* decrement counter indicating position, */
+			/* do not remove last character */
+			if (buffer->current_line != NULL &&
+			    buffer->current_length > 0) {
+				buffer->current_length--;
+				buffer->current_line--;
+			}
+			break;
+		case 0x00:	/* end of string  */
+			/* transfer current line to SCCB */
+			if (buffer->current_line != NULL)
+				sclp_finalize_mto(buffer);
+			/* skip the rest of the message including the 0 byte */
+			i_msg = count - 1;
+			break;
+		default:	/* no escape character	*/
+			/* do not output unprintable characters */
+			if (!isprint(msg[i_msg]))
+				break;
+			/* check if new mto needs to be created */
+			if (buffer->current_line == NULL) {
+				rc = sclp_initialize_mto(buffer,
+							 buffer->columns);
+				if (rc)
+					return i_msg;
+			}
+			*buffer->current_line++ = sclp_ascebc(msg[i_msg]);
+			buffer->current_length++;
+			break;
+		}
+		/* check if current mto is full */
+		if (buffer->current_line != NULL &&
+		    buffer->current_length >= buffer->columns)
+			sclp_finalize_mto(buffer);
+	}
+
+	/* return number of processed characters */
+	return i_msg;
+}
+
+/*
+ * Return the number of free bytes in the sccb
+ */
+int
+sclp_buffer_space(struct sclp_buffer *buffer)
+{
+	struct sccb_header *sccb;
+	int count;
+
+	sccb = buffer->sccb;
+	count = MAX_SCCB_ROOM - sccb->length;
+	if (buffer->current_line != NULL)
+		count -= sizeof(struct msg_buf) + buffer->current_length;
+	return count;
+}
+
+/*
+ * Return number of characters in buffer
+ */
+int
+sclp_chars_in_buffer(struct sclp_buffer *buffer)
+{
+	int count;
+
+	count = buffer->char_sum;
+	if (buffer->current_line != NULL)
+		count += buffer->current_length;
+	return count;
+}
+
+/*
+ * Set or provide values that influence the driver's behaviour.
+ */
+void
+sclp_set_columns(struct sclp_buffer *buffer, unsigned short columns)
+{
+	buffer->columns = columns;
+	if (buffer->current_line != NULL &&
+	    buffer->current_length > buffer->columns)
+		sclp_finalize_mto(buffer);
+}
+
+void
+sclp_set_htab(struct sclp_buffer *buffer, unsigned short htab)
+{
+	buffer->htab = htab;
+}
+
+/*
+ * called by sclp_console_init and/or sclp_tty_init
+ */
+int
+sclp_rw_init(void)
+{
+	static int init_done = 0;
+	int rc;
+
+	if (init_done)
+		return 0;
+
+	rc = sclp_register(&sclp_rw_event);
+	if (rc == 0)
+		init_done = 1;
+	return rc;
+}
+
+#define SCLP_BUFFER_MAX_RETRY		1
+
+/*
+ * second half of Write Event Data-function that has to be done after
+ * interruption indicating completion of Service Call.
+ */
+static void
+sclp_writedata_callback(struct sclp_req *request, void *data)
+{
+	int rc;
+	struct sclp_buffer *buffer;
+	struct sccb_header *sccb;
+
+	buffer = (struct sclp_buffer *) data;
+	sccb = buffer->sccb;
+
+	if (request->status == SCLP_REQ_FAILED) {
+		if (buffer->callback != NULL)
+			buffer->callback(buffer, -EIO);
+		return;
+	}
+	/* check SCLP response code and choose suitable action	*/
+	switch (sccb->response_code) {
+	case 0x0020 :
+		/* Normal completion, buffer processed, message(s) sent */
+		rc = 0;
+		break;
+
+	case 0x0340: /* Contained SCLP equipment check */
+		if (++buffer->retry_count > SCLP_BUFFER_MAX_RETRY) {
+			rc = -EIO;
+			break;
+		}
+		/* remove processed buffers and requeue rest */
+		if (sclp_remove_processed((struct sccb_header *) sccb) > 0) {
+			/* not all buffers were processed */
+			sccb->response_code = 0x0000;
+			buffer->request.status = SCLP_REQ_FILLED;
+			rc = sclp_add_request(request);
+			if (rc == 0)
+				return;
+		} else
+			rc = 0;
+		break;
+
+	case 0x0040: /* SCLP equipment check */
+	case 0x05f0: /* Target resource in improper state */
+		if (++buffer->retry_count > SCLP_BUFFER_MAX_RETRY) {
+			rc = -EIO;
+			break;
+		}
+		/* retry request */
+		sccb->response_code = 0x0000;
+		buffer->request.status = SCLP_REQ_FILLED;
+		rc = sclp_add_request(request);
+		if (rc == 0)
+			return;
+		break;
+	default:
+		if (sccb->response_code == 0x71f0)
+			rc = -ENOMEM;
+		else
+			rc = -EINVAL;
+		break;
+	}
+	if (buffer->callback != NULL)
+		buffer->callback(buffer, rc);
+}
+
+/*
+ * Setup the request structure in the struct sclp_buffer to do SCLP Write
+ * Event Data and pass the request to the core SCLP loop. Return zero on
+ * success, non-zero otherwise.
+ */
+int
+sclp_emit_buffer(struct sclp_buffer *buffer,
+		 void (*callback)(struct sclp_buffer *, int))
+{
+	/* add current line if there is one */
+	if (buffer->current_line != NULL)
+		sclp_finalize_mto(buffer);
+
+	/* Are there messages in the output buffer ? */
+	if (buffer->messages == 0)
+		return -EIO;
+
+	buffer->request.command = SCLP_CMDW_WRITE_EVENT_DATA;
+	buffer->request.status = SCLP_REQ_FILLED;
+	buffer->request.callback = sclp_writedata_callback;
+	buffer->request.callback_data = buffer;
+	buffer->request.sccb = buffer->sccb;
+	buffer->callback = callback;
+	return sclp_add_request(&buffer->request);
+}
diff --git a/drivers/s390/char/sclp_rw.h b/drivers/s390/char/sclp_rw.h
new file mode 100644
index 0000000..a2eb22f
--- /dev/null
+++ b/drivers/s390/char/sclp_rw.h
@@ -0,0 +1,99 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * interface to the SCLP-read/write driver
+ *
+ * Copyright IBM Corporation 1999, 2009
+ *
+ * Author(s): Martin Peschke <mpeschke@de.ibm.com>
+ *	      Martin Schwidefsky <schwidefsky@de.ibm.com>
+ */
+
+#ifndef __SCLP_RW_H__
+#define __SCLP_RW_H__
+
+#include <linux/list.h>
+
+struct mto {
+	u16 length;
+	u16 type;
+	u16 line_type_flags;
+	u8  alarm_control;
+	u8  _reserved[3];
+} __attribute__((packed));
+
+struct go {
+	u16 length;
+	u16 type;
+	u32 domid;
+	u8  hhmmss_time[8];
+	u8  th_time[3];
+	u8  reserved_0;
+	u8  dddyyyy_date[7];
+	u8  _reserved_1;
+	u16 general_msg_flags;
+	u8  _reserved_2[10];
+	u8  originating_system_name[8];
+	u8  job_guest_name[8];
+} __attribute__((packed));
+
+struct mdb_header {
+	u16 length;
+	u16 type;
+	u32 tag;
+	u32 revision_code;
+} __attribute__((packed));
+
+struct mdb {
+	struct mdb_header header;
+	struct go go;
+	struct mto mto;
+} __attribute__((packed));
+
+struct msg_buf {
+	struct evbuf_header header;
+	struct mdb mdb;
+} __attribute__((packed));
+
+/* The number of empty mto buffers that can be contained in a single sccb. */
+#define NR_EMPTY_MSG_PER_SCCB ((PAGE_SIZE - sizeof(struct sclp_buffer) - \
+			sizeof(struct sccb_header)) / sizeof(struct msg_buf))
+
+/*
+ * data structure for information about a list of SCCBs (only for writing);
+ * it is located at the end of an SCCB's page
+ */
+struct sclp_buffer {
+	struct list_head list;		/* list_head for sccb_info chain */
+	struct sclp_req request;
+	void *sccb;
+	struct msg_buf *current_msg;
+	char *current_line;
+	int current_length;
+	int retry_count;
+	/* output format settings */
+	unsigned short columns;
+	unsigned short htab;
+	/* statistics about this buffer */
+	unsigned int char_sum;		/* # chars in sccb */
+	unsigned int messages;		/* # messages in sccb */
+	/* Callback that is called after reaching final status. */
+	void (*callback)(struct sclp_buffer *, int);
+};
+
+int sclp_rw_init(void);
+struct sclp_buffer *sclp_make_buffer(void *, unsigned short, unsigned short);
+void *sclp_unmake_buffer(struct sclp_buffer *);
+int sclp_buffer_space(struct sclp_buffer *);
+int sclp_write(struct sclp_buffer *buffer, const unsigned char *, int);
+int sclp_emit_buffer(struct sclp_buffer *, void (*)(struct sclp_buffer *, int));
+void sclp_set_columns(struct sclp_buffer *, unsigned short);
+void sclp_set_htab(struct sclp_buffer *, unsigned short);
+int sclp_chars_in_buffer(struct sclp_buffer *);
+
+#ifdef CONFIG_SCLP_CONSOLE
+void sclp_console_pm_event(enum sclp_pm_event sclp_pm_event);
+#else
+static inline void sclp_console_pm_event(enum sclp_pm_event sclp_pm_event) { }
+#endif
+
+#endif	/* __SCLP_RW_H__ */
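
Putting the pieces together, a hypothetical writer would allocate a DMA-capable page, wrap it, fill it and emit it; sclp_rw_init() must have been called once beforehand. Names prefixed my_ are made up.

/* Hypothetical user of the interface declared above. */
static void my_emit_done(struct sclp_buffer *buffer, int rc)
{
	free_page((unsigned long) sclp_unmake_buffer(buffer));
}

static int my_print(const unsigned char *msg, int count)
{
	struct sclp_buffer *buffer;
	unsigned long page;
	int rc;

	page = __get_free_page(GFP_KERNEL | GFP_DMA);
	if (!page)
		return -ENOMEM;
	/* 80 columns, horizontal tab stops every 8 columns */
	buffer = sclp_make_buffer((void *) page, 80, 8);
	sclp_write(buffer, msg, count);
	rc = sclp_emit_buffer(buffer, my_emit_done);
	if (rc)
		free_page((unsigned long) sclp_unmake_buffer(buffer));
	return rc;
}
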
diff --git a/drivers/s390/char/sclp_sd.c b/drivers/s390/char/sclp_sd.c
new file mode 100644
index 0000000..1e244f7
--- /dev/null
+++ b/drivers/s390/char/sclp_sd.c
@@ -0,0 +1,569 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * SCLP Store Data support and sysfs interface
+ *
+ * Copyright IBM Corp. 2017
+ */
+
+#define KMSG_COMPONENT "sclp_sd"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <linux/completion.h>
+#include <linux/kobject.h>
+#include <linux/list.h>
+#include <linux/printk.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/async.h>
+#include <linux/export.h>
+#include <linux/mutex.h>
+
+#include <asm/pgalloc.h>
+
+#include "sclp.h"
+
+#define SD_EQ_STORE_DATA	0
+#define SD_EQ_HALT		1
+#define SD_EQ_SIZE		2
+
+#define SD_DI_CONFIG		3
+
+struct sclp_sd_evbuf {
+	struct evbuf_header hdr;
+	u8 eq;
+	u8 di;
+	u8 rflags;
+	u64 :56;
+	u32 id;
+	u16 :16;
+	u8 fmt;
+	u8 status;
+	u64 sat;
+	u64 sa;
+	u32 esize;
+	u32 dsize;
+} __packed;
+
+struct sclp_sd_sccb {
+	struct sccb_header hdr;
+	struct sclp_sd_evbuf evbuf;
+} __packed __aligned(PAGE_SIZE);
+
+/**
+ * struct sclp_sd_data - Result of a Store Data request
+ * @esize_bytes: Resulting esize in bytes
+ * @dsize_bytes: Resulting dsize in bytes
+ * @data: Pointer to data - must be released using vfree()
+ */
+struct sclp_sd_data {
+	size_t esize_bytes;
+	size_t dsize_bytes;
+	void *data;
+};
+
+/**
+ * struct sclp_sd_listener - Listener for asynchronous Store Data response
+ * @list: For enqueueing this struct
+ * @id: Event ID of response to listen for
+ * @completion: Can be used to wait for response
+ * @evbuf: Contains the resulting Store Data response after completion
+ */
+struct sclp_sd_listener {
+	struct list_head list;
+	u32 id;
+	struct completion completion;
+	struct sclp_sd_evbuf evbuf;
+};
+
+/**
+ * struct sclp_sd_file - Sysfs representation of a Store Data entity
+ * @kobj: Kobject
+ * @data_attr: Attribute for accessing data contents
+ * @data_mutex: Mutex to serialize access and updates to @data
+ * @data: Data associated with this entity
+ * @di: DI value associated with this entity
+ */
+struct sclp_sd_file {
+	struct kobject kobj;
+	struct bin_attribute data_attr;
+	struct mutex data_mutex;
+	struct sclp_sd_data data;
+	u8 di;
+};
+#define to_sd_file(x) container_of(x, struct sclp_sd_file, kobj)
+
+static struct kset *sclp_sd_kset;
+static struct sclp_sd_file *config_file;
+
+static LIST_HEAD(sclp_sd_queue);
+static DEFINE_SPINLOCK(sclp_sd_queue_lock);
+
+/**
+ * sclp_sd_listener_add() - Add listener for Store Data responses
+ * @listener: Listener to add
+ */
+static void sclp_sd_listener_add(struct sclp_sd_listener *listener)
+{
+	spin_lock_irq(&sclp_sd_queue_lock);
+	list_add_tail(&listener->list, &sclp_sd_queue);
+	spin_unlock_irq(&sclp_sd_queue_lock);
+}
+
+/**
+ * sclp_sd_listener_remove() - Remove listener for Store Data responses
+ * @listener: Listener to remove
+ */
+static void sclp_sd_listener_remove(struct sclp_sd_listener *listener)
+{
+	spin_lock_irq(&sclp_sd_queue_lock);
+	list_del(&listener->list);
+	spin_unlock_irq(&sclp_sd_queue_lock);
+}
+
+/**
+ * sclp_sd_listener_init() - Initialize a Store Data response listener
+ * @id: Event ID to listen for
+ *
+ * Initialize a listener for asynchronous Store Data responses. This listener
+ * can afterwards be used to wait for a specific response and to retrieve
+ * the associated response data.
+ */
+static void sclp_sd_listener_init(struct sclp_sd_listener *listener, u32 id)
+{
+	memset(listener, 0, sizeof(*listener));
+	listener->id = id;
+	init_completion(&listener->completion);
+}
+
+/**
+ * sclp_sd_receiver() - Receiver for Store Data events
+ * @evbuf_hdr: Header of received events
+ *
+ * Process Store Data events and complete listeners with matching event IDs.
+ */
+static void sclp_sd_receiver(struct evbuf_header *evbuf_hdr)
+{
+	struct sclp_sd_evbuf *evbuf = (struct sclp_sd_evbuf *) evbuf_hdr;
+	struct sclp_sd_listener *listener;
+	int found = 0;
+
+	pr_debug("received event (id=0x%08x)\n", evbuf->id);
+	spin_lock(&sclp_sd_queue_lock);
+	list_for_each_entry(listener, &sclp_sd_queue, list) {
+		if (listener->id != evbuf->id)
+			continue;
+
+		listener->evbuf = *evbuf;
+		complete(&listener->completion);
+		found = 1;
+		break;
+	}
+	spin_unlock(&sclp_sd_queue_lock);
+
+	if (!found)
+		pr_debug("unsolicited event (id=0x%08x)\n", evbuf->id);
+}
+
+static struct sclp_register sclp_sd_register = {
+	.send_mask = EVTYP_STORE_DATA_MASK,
+	.receive_mask = EVTYP_STORE_DATA_MASK,
+	.receiver_fn = sclp_sd_receiver,
+};
+
+/**
+ * sclp_sd_sync() - Perform Store Data request synchronously
+ * @page: Address of work page - must be below 2GB
+ * @eq: Input EQ value
+ * @di: Input DI value
+ * @sat: Input SAT value
+ * @sa: Input SA value used to specify the address of the target buffer
+ * @dsize_ptr: Optional pointer to input and output DSIZE value
+ * @esize_ptr: Optional pointer to output ESIZE value
+ *
+ * Perform Store Data request with specified parameters and wait for completion.
+ *
+ * Return %0 on success and store resulting DSIZE and ESIZE values in
+ * @dsize_ptr and @esize_ptr (if provided). Return non-zero on error.
+ */
+static int sclp_sd_sync(unsigned long page, u8 eq, u8 di, u64 sat, u64 sa,
+			u32 *dsize_ptr, u32 *esize_ptr)
+{
+	struct sclp_sd_sccb *sccb = (void *) page;
+	struct sclp_sd_listener listener;
+	struct sclp_sd_evbuf *evbuf;
+	int rc;
+
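+	/*
+	 * The 31-bit SCCB address doubles as the event ID: it is unique per
+	 * in-flight request and lets sclp_sd_receiver() match the
+	 * asynchronous response to this listener.
+	 */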
+	sclp_sd_listener_init(&listener, (u32) (addr_t) sccb);
+	sclp_sd_listener_add(&listener);
+
+	/* Prepare SCCB */
+	memset(sccb, 0, PAGE_SIZE);
+	sccb->hdr.length = sizeof(sccb->hdr) + sizeof(sccb->evbuf);
+	evbuf = &sccb->evbuf;
+	evbuf->hdr.length = sizeof(*evbuf);
+	evbuf->hdr.type = EVTYP_STORE_DATA;
+	evbuf->eq = eq;
+	evbuf->di = di;
+	evbuf->id = listener.id;
+	evbuf->fmt = 1;
+	evbuf->sat = sat;
+	evbuf->sa = sa;
+	if (dsize_ptr)
+		evbuf->dsize = *dsize_ptr;
+
+	/* Perform command */
+	pr_debug("request (eq=%d, di=%d, id=0x%08x)\n", eq, di, listener.id);
+	rc = sclp_sync_request(SCLP_CMDW_WRITE_EVENT_DATA, sccb);
+	pr_debug("request done (rc=%d)\n", rc);
+	if (rc)
+		goto out;
+
+	/* Evaluate response */
+	if (sccb->hdr.response_code == 0x73f0) {
+		pr_debug("event not supported\n");
+		rc = -EIO;
+		goto out_remove;
+	}
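+	/*
+	 * Response code 0x0020 indicates normal completion; bit 0x80 in the
+	 * event buffer flags confirms that the event was processed. If the
+	 * result flag (bit 0x80 in rflags) is not set, the final status is
+	 * delivered asynchronously - wait for the matching event below.
+	 */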
+	if (sccb->hdr.response_code != 0x0020 || !(evbuf->hdr.flags & 0x80)) {
+		rc = -EIO;
+		goto out;
+	}
+	if (!(evbuf->rflags & 0x80)) {
+		rc = wait_for_completion_interruptible(&listener.completion);
+		if (rc)
+			goto out;
+		evbuf = &listener.evbuf;
+	}
+	switch (evbuf->status) {
+	case 0:
+		if (dsize_ptr)
+			*dsize_ptr = evbuf->dsize;
+		if (esize_ptr)
+			*esize_ptr = evbuf->esize;
+		pr_debug("success (dsize=%u, esize=%u)\n", evbuf->dsize,
+			 evbuf->esize);
+		break;
+	case 3:
+		rc = -ENOENT;
+		break;
+	default:
+		rc = -EIO;
+		break;
+	}
+
+out:
+	if (rc && rc != -ENOENT) {
+		/* Provide some information about what went wrong */
+		pr_warn("Store Data request failed (eq=%d, di=%d, "
+			"response=0x%04x, flags=0x%02x, status=%d, rc=%d)\n",
+			eq, di, sccb->hdr.response_code, evbuf->hdr.flags,
+			evbuf->status, rc);
+	}
+
+out_remove:
+	sclp_sd_listener_remove(&listener);
+
+	return rc;
+}
+
+/**
+ * sclp_sd_store_data() - Obtain data for specified Store Data entity
+ * @result: Resulting data
+ * @di: DI value associated with this entity
+ *
+ * Perform a series of Store Data requests to obtain the size and contents of
+ * the specified Store Data entity.
+ *
+ * Return:
+ *   %0:       Success - result is stored in @result. @result->data must be
+ *	       released using vfree() after use.
+ *   %-ENOENT: No data available for this entity
+ *   %<0:      Other error
+ */
+static int sclp_sd_store_data(struct sclp_sd_data *result, u8 di)
+{
+	u32 dsize = 0, esize = 0;
+	unsigned long page, asce = 0;
+	void *data = NULL;
+	int rc;
+
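+	/* GFP_DMA keeps the work page below 2GB as required by the SCLP */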
+	page = __get_free_page(GFP_KERNEL | GFP_DMA);
+	if (!page)
+		return -ENOMEM;
+
+	/* Get size */
+	rc = sclp_sd_sync(page, SD_EQ_SIZE, di, 0, 0, &dsize, &esize);
+	if (rc)
+		goto out;
+	if (dsize == 0)
+		goto out_result;
+
+	/* Allocate memory */
+	data = vzalloc(array_size((size_t)dsize, PAGE_SIZE));
+	if (!data) {
+		rc = -ENOMEM;
+		goto out;
+	}
+
+	/* Get translation table for buffer */
+	asce = base_asce_alloc((unsigned long) data, dsize);
+	if (!asce) {
+		vfree(data);
+		rc = -ENOMEM;
+		goto out;
+	}
+
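+	/*
+	 * SD_EQ_STORE_DATA writes the entity contents directly into the
+	 * vmalloc buffer: the base ASCE is passed as the SAT value and the
+	 * buffer address as the SA value.
+	 */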
+	/* Get data */
+	rc = sclp_sd_sync(page, SD_EQ_STORE_DATA, di, asce, (u64) data, &dsize,
+			  &esize);
+	if (rc) {
+		/* Cancel running request if interrupted */
+		if (rc == -ERESTARTSYS)
+			sclp_sd_sync(page, SD_EQ_HALT, di, 0, 0, NULL, NULL);
+		vfree(data);
+		goto out;
+	}
+
+out_result:
+	result->esize_bytes = (size_t) esize * PAGE_SIZE;
+	result->dsize_bytes = (size_t) dsize * PAGE_SIZE;
+	result->data = data;
+
+out:
+	base_asce_free(asce);
+	free_page(page);
+
+	return rc;
+}
+
+/**
+ * sclp_sd_data_reset() - Reset Store Data result buffer
+ * @data: Data buffer to reset
+ *
+ * Reset @data to initial state and release associated memory.
+ */
+static void sclp_sd_data_reset(struct sclp_sd_data *data)
+{
+	vfree(data->data);
+	data->data = NULL;
+	data->dsize_bytes = 0;
+	data->esize_bytes = 0;
+}
+
+/**
+ * sclp_sd_file_release() - Release function for sclp_sd_file object
+ * @kobj: Kobject embedded in sclp_sd_file object
+ */
+static void sclp_sd_file_release(struct kobject *kobj)
+{
+	struct sclp_sd_file *sd_file = to_sd_file(kobj);
+
+	sclp_sd_data_reset(&sd_file->data);
+	kfree(sd_file);
+}
+
+/**
+ * sclp_sd_file_update() - Update contents of sclp_sd_file object
+ * @sd_file: Object to update
+ *
+ * Obtain the current version of data associated with the Store Data entity
+ * @sd_file.
+ *
+ * On success, return %0 and generate a KOBJ_CHANGE event to indicate that the
+ * data may have changed. Return non-zero otherwise.
+ */
+static int sclp_sd_file_update(struct sclp_sd_file *sd_file)
+{
+	const char *name = kobject_name(&sd_file->kobj);
+	struct sclp_sd_data data;
+	int rc;
+
+	rc = sclp_sd_store_data(&data, sd_file->di);
+	if (rc) {
+		if (rc == -ENOENT) {
+			pr_info("No data is available for the %s data entity\n",
+				 name);
+		}
+		return rc;
+	}
+
+	mutex_lock(&sd_file->data_mutex);
+	sclp_sd_data_reset(&sd_file->data);
+	sd_file->data = data;
+	mutex_unlock(&sd_file->data_mutex);
+
+	pr_info("A %zu-byte %s data entity was retrieved\n", data.dsize_bytes,
+		name);
+	kobject_uevent(&sd_file->kobj, KOBJ_CHANGE);
+
+	return 0;
+}
+
+/**
+ * sclp_sd_file_update_async() - Wrapper for asynchronous update call
+ * @data: Object to update
+ */
+static void sclp_sd_file_update_async(void *data, async_cookie_t cookie)
+{
+	struct sclp_sd_file *sd_file = data;
+
+	sclp_sd_file_update(sd_file);
+}
+
+/**
+ * reload_store() - Store function for "reload" sysfs attribute
+ * @kobj: Kobject of sclp_sd_file object
+ * @attr: Attribute structure of the "reload" attribute
+ * @buf: Input buffer (ignored)
+ * @count: Number of input bytes
+ *
+ * Initiate a reload of the data associated with an sclp_sd_file object.
+ */
+static ssize_t reload_store(struct kobject *kobj, struct kobj_attribute *attr,
+			    const char *buf, size_t count)
+{
+	struct sclp_sd_file *sd_file = to_sd_file(kobj);
+
+	sclp_sd_file_update(sd_file);
+
+	return count;
+}
+
+static struct kobj_attribute reload_attr = __ATTR_WO(reload);
+
+static struct attribute *sclp_sd_file_default_attrs[] = {
+	&reload_attr.attr,
+	NULL,
+};
+
+static struct kobj_type sclp_sd_file_ktype = {
+	.sysfs_ops = &kobj_sysfs_ops,
+	.release = sclp_sd_file_release,
+	.default_attrs = sclp_sd_file_default_attrs,
+};
+
+/**
+ * data_read() - Read function for "data" sysfs attribute
+ * @file: Open file (unused)
+ * @kobj: Kobject of sclp_sd_file object
+ * @attr: Binary attribute being read
+ * @buffer: Target buffer
+ * @off: Requested file offset
+ * @size: Requested number of bytes
+ *
+ * Store the requested portion of the Store Data entity contents into the
+ * specified buffer. Return the number of bytes stored on success, or %0
+ * on EOF.
+ */
+static ssize_t data_read(struct file *file, struct kobject *kobj,
+			 struct bin_attribute *attr, char *buffer,
+			 loff_t off, size_t size)
+{
+	struct sclp_sd_file *sd_file = to_sd_file(kobj);
+	size_t data_size;
+	char *data;
+
+	mutex_lock(&sd_file->data_mutex);
+
+	data = sd_file->data.data;
+	data_size = sd_file->data.dsize_bytes;
+	if (!data || off >= data_size) {
+		size = 0;
+	} else {
+		if (off + size > data_size)
+			size = data_size - off;
+		memcpy(buffer, data + off, size);
+	}
+
+	mutex_unlock(&sd_file->data_mutex);
+
+	return size;
+}
+
+/**
+ * sclp_sd_file_create() - Add a sysfs file representing a Store Data entity
+ * @name: Name of file
+ * @di: DI value associated with this entity
+ *
+ * Create a sysfs directory with the given @name located under
+ *
+ *   /sys/firmware/sclp_sd/
+ *
+ * The files in this directory can be used to access the contents of the Store
+ * Data entity associated with @di.
+ *
+ * Return pointer to resulting sclp_sd_file object on success, %NULL otherwise.
+ * The object must be freed by calling kobject_put() on the embedded kobject
+ * pointer after use.
+ */
+static __init struct sclp_sd_file *sclp_sd_file_create(const char *name, u8 di)
+{
+	struct sclp_sd_file *sd_file;
+	int rc;
+
+	sd_file = kzalloc(sizeof(*sd_file), GFP_KERNEL);
+	if (!sd_file)
+		return NULL;
+	sd_file->di = di;
+	mutex_init(&sd_file->data_mutex);
+
+	/* Create kobject located under /sys/firmware/sclp_sd/ */
+	sd_file->kobj.kset = sclp_sd_kset;
+	rc = kobject_init_and_add(&sd_file->kobj, &sclp_sd_file_ktype, NULL,
+				  "%s", name);
+	if (rc) {
+		kobject_put(&sd_file->kobj);
+		return NULL;
+	}
+
+	sysfs_bin_attr_init(&sd_file->data_attr);
+	sd_file->data_attr.attr.name = "data";
+	sd_file->data_attr.attr.mode = 0444;
+	sd_file->data_attr.read = data_read;
+
+	rc = sysfs_create_bin_file(&sd_file->kobj, &sd_file->data_attr);
+	if (rc) {
+		kobject_put(&sd_file->kobj);
+		return NULL;
+	}
+
+	/*
+	 * For completeness only - users interested in entity data should listen
+	 * for KOBJ_CHANGE instead.
+	 */
+	kobject_uevent(&sd_file->kobj, KOBJ_ADD);
+
+	/* Don't let a slow Store Data request delay further initialization */
+	async_schedule(sclp_sd_file_update_async, sd_file);
+
+	return sd_file;
+}
+
+/**
+ * sclp_sd_init() - Initialize sclp_sd support and register sysfs files
+ */
+static __init int sclp_sd_init(void)
+{
+	int rc;
+
+	rc = sclp_register(&sclp_sd_register);
+	if (rc)
+		return rc;
+
+	/* Create kset named "sclp_sd" located under /sys/firmware/ */
+	rc = -ENOMEM;
+	sclp_sd_kset = kset_create_and_add("sclp_sd", NULL, firmware_kobj);
+	if (!sclp_sd_kset)
+		goto err_kset;
+
+	rc = -EINVAL;
+	config_file = sclp_sd_file_create("config", SD_DI_CONFIG);
+	if (!config_file)
+		goto err_config;
+
+	return 0;
+
+err_config:
+	kset_unregister(sclp_sd_kset);
+err_kset:
+	sclp_unregister(&sclp_sd_register);
+
+	return rc;
+}
+device_initcall(sclp_sd_init);
diff --git a/drivers/s390/char/sclp_sdias.c b/drivers/s390/char/sclp_sdias.c
new file mode 100644
index 0000000..8e0b69a
--- /dev/null
+++ b/drivers/s390/char/sclp_sdias.c
@@ -0,0 +1,277 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * SCLP "store data in absolute storage"
+ *
+ * Copyright IBM Corp. 2003, 2013
+ * Author(s): Michael Holzheu
+ */
+
+#define KMSG_COMPONENT "sclp_sdias"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <linux/completion.h>
+#include <linux/sched.h>
+#include <asm/sclp.h>
+#include <asm/debug.h>
+#include <asm/ipl.h>
+
+#include "sclp_sdias.h"
+#include "sclp.h"
+#include "sclp_rw.h"
+
+#define TRACE(x...) debug_sprintf_event(sdias_dbf, 1, x)
+
+#define SDIAS_RETRIES 300
+
+static struct debug_info *sdias_dbf;
+
+static struct sclp_register sclp_sdias_register = {
+	.send_mask = EVTYP_SDIAS_MASK,
+};
+
+static struct sdias_sccb sccb __attribute__((aligned(4096)));
+static struct sdias_evbuf sdias_evbuf;
+
+static DECLARE_COMPLETION(evbuf_accepted);
+static DECLARE_COMPLETION(evbuf_done);
+static DEFINE_MUTEX(sdias_mutex);
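+
+/*
+ * A single statically allocated SCCB is shared by all requests and
+ * serialized by sdias_mutex - hence this interface is not reentrant.
+ */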
+
+/*
+ * Called by SCLP base when read event data has been completed (async mode only)
+ */
+static void sclp_sdias_receiver_fn(struct evbuf_header *evbuf)
+{
+	memcpy(&sdias_evbuf, evbuf,
+	       min_t(unsigned long, sizeof(sdias_evbuf), evbuf->length));
+	complete(&evbuf_done);
+	TRACE("sclp_sdias_receiver_fn done\n");
+}
+
+/*
+ * Called by SCLP base when sdias event has been accepted
+ */
+static void sdias_callback(struct sclp_req *request, void *data)
+{
+	complete(&evbuf_accepted);
+	TRACE("callback done\n");
+}
+
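+/*
+ * Issue an SDIAS request and wait for completion: evbuf_accepted signals
+ * that the SCLP accepted the write event data command, evbuf_done (async
+ * mode only) that the response event buffer has arrived. Retry up to
+ * SDIAS_RETRIES times, pausing 500ms whenever the request could not be
+ * initiated.
+ */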
+static int sdias_sclp_send(struct sclp_req *req)
+{
+	int retries;
+	int rc;
+
+	for (retries = SDIAS_RETRIES; retries; retries--) {
+		TRACE("add request\n");
+		rc = sclp_add_request(req);
+		if (rc) {
+			/* not initiated, wait some time and retry */
+			set_current_state(TASK_INTERRUPTIBLE);
+			TRACE("add request failed: rc = %i\n", rc);
+			schedule_timeout(msecs_to_jiffies(500));
+			continue;
+		}
+		/* initiated, wait for completion of service call */
+		wait_for_completion(&evbuf_accepted);
+		if (req->status == SCLP_REQ_FAILED) {
+			TRACE("sclp request failed\n");
+			continue;
+		}
+		/* if not accepted, retry */
+		if (!(sccb.evbuf.hdr.flags & 0x80)) {
+			TRACE("sclp request failed: flags=%x\n",
+			      sccb.evbuf.hdr.flags);
+			continue;
+		}
+		/*
+		 * for the sync interface the response is in the initial sccb
+		 */
+		if (!sclp_sdias_register.receiver_fn) {
+			memcpy(&sdias_evbuf, &sccb.evbuf, sizeof(sdias_evbuf));
+			TRACE("sync request done\n");
+			return 0;
+		}
+		/* otherwise we wait for completion */
+		wait_for_completion(&evbuf_done);
+		TRACE("request done\n");
+		return 0;
+	}
+	return -EIO;
+}
+
+/*
+ * Get number of blocks (4K) available in the HSA
+ */
+int sclp_sdias_blk_count(void)
+{
+	struct sclp_req request;
+	int rc;
+
+	mutex_lock(&sdias_mutex);
+
+	memset(&sccb, 0, sizeof(sccb));
+	memset(&request, 0, sizeof(request));
+
+	sccb.hdr.length = sizeof(sccb);
+	sccb.evbuf.hdr.length = sizeof(struct sdias_evbuf);
+	sccb.evbuf.hdr.type = EVTYP_SDIAS;
+	sccb.evbuf.event_qual = SDIAS_EQ_SIZE;
+	sccb.evbuf.data_id = SDIAS_DI_FCP_DUMP;
+	sccb.evbuf.event_id = 4712;
+	sccb.evbuf.dbs = 1;
+
+	request.sccb = &sccb;
+	request.command = SCLP_CMDW_WRITE_EVENT_DATA;
+	request.status = SCLP_REQ_FILLED;
+	request.callback = sdias_callback;
+
+	rc = sdias_sclp_send(&request);
+	if (rc) {
+		pr_err("sclp_send failed for get_nr_blocks\n");
+		goto out;
+	}
+	if (sccb.hdr.response_code != 0x0020) {
+		TRACE("send failed: %x\n", sccb.hdr.response_code);
+		rc = -EIO;
+		goto out;
+	}
+
+	switch (sdias_evbuf.event_status) {
+	case 0:
+		rc = sdias_evbuf.blk_cnt;
+		break;
+	default:
+		pr_err("SCLP error: %x\n", sdias_evbuf.event_status);
+		rc = -EIO;
+		goto out;
+	}
+	TRACE("%i blocks\n", rc);
+out:
+	mutex_unlock(&sdias_mutex);
+	return rc;
+}
+
+/*
+ * Copy from HSA to absolute storage (not reentrant):
+ *
+ * @dest     : Address of buffer where data should be copied
+ * @start_blk: Start Block (beginning with 1)
+ * @nr_blks  : Number of 4K blocks to copy
+ *
+ * Return Value: 0 : requested number of blocks were copied
+ *		 <0: error
+ */
+int sclp_sdias_copy(void *dest, int start_blk, int nr_blks)
+{
+	struct sclp_req request;
+	int rc;
+
+	mutex_lock(&sdias_mutex);
+
+	memset(&sccb, 0, sizeof(sccb));
+	memset(&request, 0, sizeof(request));
+
+	sccb.hdr.length = sizeof(sccb);
+	sccb.evbuf.hdr.length = sizeof(struct sdias_evbuf);
+	sccb.evbuf.hdr.type = EVTYP_SDIAS;
+	sccb.evbuf.hdr.flags = 0;
+	sccb.evbuf.event_qual = SDIAS_EQ_STORE_DATA;
+	sccb.evbuf.data_id = SDIAS_DI_FCP_DUMP;
+	sccb.evbuf.event_id = 4712;
+	sccb.evbuf.asa_size = SDIAS_ASA_SIZE_64;
+	sccb.evbuf.event_status = 0;
+	sccb.evbuf.blk_cnt = nr_blks;
+	sccb.evbuf.asa = (unsigned long)dest;
+	sccb.evbuf.fbn = start_blk;
+	sccb.evbuf.lbn = 0;
+	sccb.evbuf.dbs = 1;
+
+	request.sccb	 = &sccb;
+	request.command  = SCLP_CMDW_WRITE_EVENT_DATA;
+	request.status	 = SCLP_REQ_FILLED;
+	request.callback = sdias_callback;
+
+	rc = sdias_sclp_send(&request);
+	if (rc) {
+		pr_err("sclp_send failed: %x\n", rc);
+		goto out;
+	}
+	if (sccb.hdr.response_code != 0x0020) {
+		TRACE("copy failed: %x\n", sccb.hdr.response_code);
+		rc = -EIO;
+		goto out;
+	}
+
+	switch (sdias_evbuf.event_status) {
+	case SDIAS_EVSTATE_ALL_STORED:
+		TRACE("all stored\n");
+		break;
+	case SDIAS_EVSTATE_PART_STORED:
+		TRACE("part stored: %i\n", sdias_evbuf.blk_cnt);
+		break;
+	case SDIAS_EVSTATE_NO_DATA:
+		TRACE("no data\n");
+		/* fall through */
+	default:
+		pr_err("Error from SCLP while copying the HSA. Event status = %x\n",
+		       sdias_evbuf.event_status);
+		rc = -EIO;
+	}
+out:
+	mutex_unlock(&sdias_mutex);
+	return rc;
+}
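+
+/*
+ * Typical use (a sketch; the zfcpdump HSA reader is the expected caller):
+ * determine the HSA size once via sclp_sdias_blk_count(), then fetch 4K
+ * blocks with sclp_sdias_copy(buf, blk, 1), where block numbering starts
+ * at 1.
+ */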
+
+static int __init sclp_sdias_register_check(void)
+{
+	int rc;
+
+	rc = sclp_register(&sclp_sdias_register);
+	if (rc)
+		return rc;
+	if (sclp_sdias_blk_count() == 0) {
+		sclp_unregister(&sclp_sdias_register);
+		return -ENODEV;
+	}
+	return 0;
+}
+
+static int __init sclp_sdias_init_sync(void)
+{
+	TRACE("Try synchronous mode\n");
+	sclp_sdias_register.receive_mask = 0;
+	sclp_sdias_register.receiver_fn = NULL;
+	return sclp_sdias_register_check();
+}
+
+static int __init sclp_sdias_init_async(void)
+{
+	TRACE("Try asynchronous mode\n");
+	sclp_sdias_register.receive_mask = EVTYP_SDIAS_MASK;
+	sclp_sdias_register.receiver_fn = sclp_sdias_receiver_fn;
+	return sclp_sdias_register_check();
+}
+
+int __init sclp_sdias_init(void)
+{
+	if (ipl_info.type != IPL_TYPE_FCP_DUMP)
+		return 0;
+	sdias_dbf = debug_register("dump_sdias", 4, 1, 4 * sizeof(long));
+	debug_register_view(sdias_dbf, &debug_sprintf_view);
+	debug_set_level(sdias_dbf, 6);
+	if (sclp_sdias_init_sync() == 0)
+		goto out;
+	if (sclp_sdias_init_async() == 0)
+		goto out;
+	TRACE("init failed\n");
+	return -ENODEV;
+out:
+	TRACE("init done\n");
+	return 0;
+}
+
+void __exit sclp_sdias_exit(void)
+{
+	debug_unregister(sdias_dbf);
+	sclp_unregister(&sclp_sdias_register);
+}
diff --git a/drivers/s390/char/sclp_sdias.h b/drivers/s390/char/sclp_sdias.h
new file mode 100644
index 0000000..bc36cf8
--- /dev/null
+++ b/drivers/s390/char/sclp_sdias.h
@@ -0,0 +1,47 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * SCLP "store data in absolute storage"
+ *
+ * Copyright IBM Corp. 2003, 2013
+ */
+
+#ifndef SCLP_SDIAS_H
+#define SCLP_SDIAS_H
+
+#include "sclp.h"
+
+#define SDIAS_EQ_STORE_DATA		0x0
+#define SDIAS_EQ_SIZE			0x1
+#define SDIAS_DI_FCP_DUMP		0x0
+#define SDIAS_ASA_SIZE_32		0x0
+#define SDIAS_ASA_SIZE_64		0x1
+#define SDIAS_EVSTATE_ALL_STORED	0x0
+#define SDIAS_EVSTATE_NO_DATA		0x3
+#define SDIAS_EVSTATE_PART_STORED	0x10
+
+struct sdias_evbuf {
+	struct	evbuf_header hdr;
+	u8	event_qual;
+	u8	data_id;
+	u64	reserved2;
+	u32	event_id;
+	u16	reserved3;
+	u8	asa_size;
+	u8	event_status;
+	u32	reserved4;
+	u32	blk_cnt;
+	u64	asa;
+	u32	reserved5;
+	u32	fbn;
+	u32	reserved6;
+	u32	lbn;
+	u16	reserved7;
+	u16	dbs;
+} __packed;
+
+struct sdias_sccb {
+	struct sccb_header	hdr;
+	struct sdias_evbuf	evbuf;
+} __packed;
+
+#endif /* SCLP_SDIAS_H */
diff --git a/drivers/s390/char/sclp_tty.c b/drivers/s390/char/sclp_tty.c
new file mode 100644
index 0000000..5aff8b6
--- /dev/null
+++ b/drivers/s390/char/sclp_tty.c
@@ -0,0 +1,575 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ *    SCLP line mode terminal driver.
+ *
+ *  S390 version
+ *    Copyright IBM Corp. 1999
+ *    Author(s): Martin Peschke <mpeschke@de.ibm.com>
+ *		 Martin Schwidefsky <schwidefsky@de.ibm.com>
+ */
+
+#include <linux/kmod.h>
+#include <linux/tty.h>
+#include <linux/tty_driver.h>
+#include <linux/tty_flip.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/gfp.h>
+#include <linux/uaccess.h>
+
+#include "ctrlchar.h"
+#include "sclp.h"
+#include "sclp_rw.h"
+#include "sclp_tty.h"
+
+/*
+ * size of a buffer that collects single characters coming in
+ * via sclp_tty_put_char()
+ */
+#define SCLP_TTY_BUF_SIZE 512
+
+/*
+ * There is exactly one SCLP terminal, so we can keep things simple
+ * and allocate all variables statically.
+ */
+
+/* Lock to guard over changes to global variables. */
+static spinlock_t sclp_tty_lock;
+/* List of free pages that can be used for console output buffering. */
+static struct list_head sclp_tty_pages;
+/* List of full struct sclp_buffer structures ready for output. */
+static struct list_head sclp_tty_outqueue;
+/* Number of buffers on the output queue. */
+static int sclp_tty_buffer_count;
+/* Pointer to current console buffer. */
+static struct sclp_buffer *sclp_ttybuf;
+/* Timer for delayed output of console messages. */
+static struct timer_list sclp_tty_timer;
+
+static struct tty_port sclp_port;
+static unsigned char sclp_tty_chars[SCLP_TTY_BUF_SIZE];
+static unsigned short int sclp_tty_chars_count;
+
+struct tty_driver *sclp_tty_driver;
+
+static int sclp_tty_tolower;
+static int sclp_tty_columns = 80;
+
+#define SPACES_PER_TAB 8
+#define CASE_DELIMITER 0x6c /* to separate upper and lower case (% in EBCDIC) */
+
+/* This routine is called whenever we try to open a SCLP terminal. */
+static int
+sclp_tty_open(struct tty_struct *tty, struct file *filp)
+{
+	tty_port_tty_set(&sclp_port, tty);
+	tty->driver_data = NULL;
+	sclp_port.low_latency = 0;
+	return 0;
+}
+
+/* This routine is called when the SCLP terminal is closed. */
+static void
+sclp_tty_close(struct tty_struct *tty, struct file *filp)
+{
+	if (tty->count > 1)
+		return;
+	tty_port_tty_set(&sclp_port, NULL);
+}
+
+/*
+ * This routine returns the number of characters the tty driver
+ * will accept for queuing to be written.  This number is subject
+ * to change as output buffers get emptied, or if output flow
+ * control is exercised. This is not an exact number because not every
+ * character needs the same space in the sccb. The worst case is
+ * a string of newlines. Every newline creates a new message which
+ * needs 82 bytes.
+ */
+static int
+sclp_tty_write_room (struct tty_struct *tty)
+{
+	unsigned long flags;
+	struct list_head *l;
+	int count;
+
+	spin_lock_irqsave(&sclp_tty_lock, flags);
+	count = 0;
+	if (sclp_ttybuf != NULL)
+		count = sclp_buffer_space(sclp_ttybuf) / sizeof(struct msg_buf);
+	list_for_each(l, &sclp_tty_pages)
+		count += NR_EMPTY_MSG_PER_SCCB;
+	spin_unlock_irqrestore(&sclp_tty_lock, flags);
+	return count;
+}
+
+static void
+sclp_ttybuf_callback(struct sclp_buffer *buffer, int rc)
+{
+	unsigned long flags;
+	void *page;
+
+	do {
+		page = sclp_unmake_buffer(buffer);
+		spin_lock_irqsave(&sclp_tty_lock, flags);
+		/* Remove buffer from outqueue */
+		list_del(&buffer->list);
+		sclp_tty_buffer_count--;
+		list_add_tail((struct list_head *) page, &sclp_tty_pages);
+		/* Check if there is a pending buffer on the out queue. */
+		buffer = NULL;
+		if (!list_empty(&sclp_tty_outqueue))
+			buffer = list_entry(sclp_tty_outqueue.next,
+					    struct sclp_buffer, list);
+		spin_unlock_irqrestore(&sclp_tty_lock, flags);
+	} while (buffer && sclp_emit_buffer(buffer, sclp_ttybuf_callback));
+
+	tty_port_tty_wakeup(&sclp_port);
+}
+
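+/*
+ * Queue a buffer for output. Only the caller that finds the outqueue empty
+ * starts I/O; buffers queued while a request is in flight are emitted by
+ * sclp_ttybuf_callback() once the preceding request completes.
+ */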
+static inline void
+__sclp_ttybuf_emit(struct sclp_buffer *buffer)
+{
+	unsigned long flags;
+	int count;
+	int rc;
+
+	spin_lock_irqsave(&sclp_tty_lock, flags);
+	list_add_tail(&buffer->list, &sclp_tty_outqueue);
+	count = sclp_tty_buffer_count++;
+	spin_unlock_irqrestore(&sclp_tty_lock, flags);
+	if (count)
+		return;
+	rc = sclp_emit_buffer(buffer, sclp_ttybuf_callback);
+	if (rc)
+		sclp_ttybuf_callback(buffer, rc);
+}
+
+/*
+ * When this routine is called from the timer then we flush the
+ * temporary write buffer.
+ */
+static void
+sclp_tty_timeout(struct timer_list *unused)
+{
+	unsigned long flags;
+	struct sclp_buffer *buf;
+
+	spin_lock_irqsave(&sclp_tty_lock, flags);
+	buf = sclp_ttybuf;
+	sclp_ttybuf = NULL;
+	spin_unlock_irqrestore(&sclp_tty_lock, flags);
+
+	if (buf != NULL)
+		__sclp_ttybuf_emit(buf);
+}
+
+/*
+ * Write a string to the sclp tty.
+ */
+static int sclp_tty_write_string(const unsigned char *str, int count, int may_fail)
+{
+	unsigned long flags;
+	void *page;
+	int written;
+	int overall_written;
+	struct sclp_buffer *buf;
+
+	if (count <= 0)
+		return 0;
+	overall_written = 0;
+	spin_lock_irqsave(&sclp_tty_lock, flags);
+	do {
+		/* Create a sclp output buffer if none exists yet */
+		if (sclp_ttybuf == NULL) {
+			while (list_empty(&sclp_tty_pages)) {
+				spin_unlock_irqrestore(&sclp_tty_lock, flags);
+				if (may_fail)
+					goto out;
+				else
+					sclp_sync_wait();
+				spin_lock_irqsave(&sclp_tty_lock, flags);
+			}
+			page = sclp_tty_pages.next;
+			list_del((struct list_head *) page);
+			sclp_ttybuf = sclp_make_buffer(page, sclp_tty_columns,
+						       SPACES_PER_TAB);
+		}
+		/* try to write the string to the current output buffer */
+		written = sclp_write(sclp_ttybuf, str, count);
+		overall_written += written;
+		if (written == count)
+			break;
+		/*
+		 * Not all characters could be written to the current
+		 * output buffer. Emit the buffer, create a new buffer
+		 * and then output the rest of the string.
+		 */
+		buf = sclp_ttybuf;
+		sclp_ttybuf = NULL;
+		spin_unlock_irqrestore(&sclp_tty_lock, flags);
+		__sclp_ttybuf_emit(buf);
+		spin_lock_irqsave(&sclp_tty_lock, flags);
+		str += written;
+		count -= written;
+	} while (count > 0);
+	/* Setup timer to output current console buffer after 1/10 second */
+	if (sclp_ttybuf && sclp_chars_in_buffer(sclp_ttybuf) &&
+	    !timer_pending(&sclp_tty_timer)) {
+		mod_timer(&sclp_tty_timer, jiffies + HZ / 10);
+	}
+	spin_unlock_irqrestore(&sclp_tty_lock, flags);
+out:
+	return overall_written;
+}
+
+/*
+ * This routine is called by the kernel to write a series of characters to the
+ * tty device. The characters may come from user space or kernel space. This
+ * routine will return the number of characters actually accepted for writing.
+ */
+static int
+sclp_tty_write(struct tty_struct *tty, const unsigned char *buf, int count)
+{
+	if (sclp_tty_chars_count > 0) {
+		sclp_tty_write_string(sclp_tty_chars, sclp_tty_chars_count, 0);
+		sclp_tty_chars_count = 0;
+	}
+	return sclp_tty_write_string(buf, count, 1);
+}
+
+/*
+ * This routine is called by the kernel to write a single character to the tty
+ * device. If the kernel uses this routine, it must call the flush_chars()
+ * routine (if defined) when it is done stuffing characters into the driver.
+ *
+ * Characters provided to sclp_tty_put_char() are buffered by the SCLP driver.
+ * If the given character is a '\n' the contents of the SCLP write buffer
+ * - including previous characters from sclp_tty_put_char() and strings from
+ * sclp_write() without final '\n' - will be written.
+ */
+static int
+sclp_tty_put_char(struct tty_struct *tty, unsigned char ch)
+{
+	sclp_tty_chars[sclp_tty_chars_count++] = ch;
+	if (ch == '\n' || sclp_tty_chars_count >= SCLP_TTY_BUF_SIZE) {
+		sclp_tty_write_string(sclp_tty_chars, sclp_tty_chars_count, 0);
+		sclp_tty_chars_count = 0;
+	}
+	return 1;
+}
+
+/*
+ * This routine is called by the kernel after it has written a series of
+ * characters to the tty device using put_char().
+ */
+static void
+sclp_tty_flush_chars(struct tty_struct *tty)
+{
+	if (sclp_tty_chars_count > 0) {
+		sclp_tty_write_string(sclp_tty_chars, sclp_tty_chars_count, 0);
+		sclp_tty_chars_count = 0;
+	}
+}
+
+/*
+ * This routine returns the number of characters in the write buffer of the
+ * SCLP driver. The provided number includes all characters that are stored
+ * in the SCCB (will be written next time the SCLP is not busy) as well as
+ * characters in the write buffer (will not be written as long as there is a
+ * final line feed missing).
+ */
+static int
+sclp_tty_chars_in_buffer(struct tty_struct *tty)
+{
+	unsigned long flags;
+	struct list_head *l;
+	struct sclp_buffer *t;
+	int count;
+
+	spin_lock_irqsave(&sclp_tty_lock, flags);
+	count = 0;
+	if (sclp_ttybuf != NULL)
+		count = sclp_chars_in_buffer(sclp_ttybuf);
+	list_for_each(l, &sclp_tty_outqueue) {
+		t = list_entry(l, struct sclp_buffer, list);
+		count += sclp_chars_in_buffer(t);
+	}
+	spin_unlock_irqrestore(&sclp_tty_lock, flags);
+	return count;
+}
+
+/*
+ * Write out any characters still buffered by put_char().
+ */
+static void
+sclp_tty_flush_buffer(struct tty_struct *tty)
+{
+	if (sclp_tty_chars_count > 0) {
+		sclp_tty_write_string(sclp_tty_chars, sclp_tty_chars_count, 0);
+		sclp_tty_chars_count = 0;
+	}
+}
+
+/*
+ * push input to tty
+ */
+static void
+sclp_tty_input(unsigned char* buf, unsigned int count)
+{
+	struct tty_struct *tty = tty_port_tty_get(&sclp_port);
+	unsigned int cchar;
+
+	/*
+	 * If this tty driver is currently closed
+	 * then throw the received input away.
+	 */
+	if (tty == NULL)
+		return;
+	cchar = ctrlchar_handle(buf, count, tty);
+	switch (cchar & CTRLCHAR_MASK) {
+	case CTRLCHAR_SYSRQ:
+		break;
+	case CTRLCHAR_CTRL:
+		tty_insert_flip_char(&sclp_port, cchar, TTY_NORMAL);
+		tty_flip_buffer_push(&sclp_port);
+		break;
+	case CTRLCHAR_NONE:
+		/* send (normal) input to line discipline */
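+		/*
+		 * Input ending in "^n" (or the alternate encoding "\252n")
+		 * suppresses the automatically appended newline.
+		 */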
+		if (count < 2 ||
+		    (strncmp((const char *) buf + count - 2, "^n", 2) &&
+		     strncmp((const char *) buf + count - 2, "\252n", 2))) {
+			/* add the auto \n */
+			tty_insert_flip_string(&sclp_port, buf, count);
+			tty_insert_flip_char(&sclp_port, '\n', TTY_NORMAL);
+		} else
+			tty_insert_flip_string(&sclp_port, buf, count - 2);
+		tty_flip_buffer_push(&sclp_port);
+		break;
+	}
+	tty_kref_put(tty);
+}
+
+/*
+ * Take an EBCDIC string in upper/lower case,
+ * find characters in lower/upper case separated by a special character,
+ * modify the original string,
+ * and return the length of the resulting string.
+ */
+static int sclp_switch_cases(unsigned char *buf, int count)
+{
+	unsigned char *ip, *op;
+	int toggle;
+
+	/* initially changing case is off */
+	toggle = 0;
+	ip = op = buf;
+	while (count-- > 0) {
+		/* compare with special character */
+		if (*ip == CASE_DELIMITER) {
+			/* followed by another special character? */
+			if (count && ip[1] == CASE_DELIMITER) {
+				/*
+				 * ... then put a single copy of the special
+				 * character to the output string
+				 */
+				*op++ = *ip++;
+				count--;
+			} else
+				/*
+				 * ... special character followed by a normal
+				 * character toggles the case change behaviour
+				 */
+				toggle = ~toggle;
+			/* skip special character */
+			ip++;
+		} else
+			/* not the special character */
+			if (toggle)
+				/* but case switching is on */
+				if (sclp_tty_tolower)
+					/* switch to uppercase */
+					*op++ = _ebc_toupper[(int) *ip++];
+				else
+					/* switch to lowercase */
+					*op++ = _ebc_tolower[(int) *ip++];
+			else
+				/* no case switching, copy the character */
+				*op++ = *ip++;
+	}
+	/* return length of reformatted string. */
+	return op - buf;
+}
+
+static void sclp_get_input(struct gds_subvector *sv)
+{
+	unsigned char *str;
+	int count;
+
+	str = (unsigned char *) (sv + 1);
+	count = sv->length - sizeof(*sv);
+	if (sclp_tty_tolower)
+		EBC_TOLOWER(str, count);
+	count = sclp_switch_cases(str, count);
+	/* convert EBCDIC to ASCII (modify original input in SCCB) */
+	sclp_ebcasc_str(str, count);
+
+	/* transfer input to high level driver */
+	sclp_tty_input(str, count);
+}
+
+static inline void sclp_eval_selfdeftextmsg(struct gds_subvector *sv)
+{
+	void *end;
+
+	end = (void *) sv + sv->length;
+	for (sv = sv + 1; (void *) sv < end; sv = (void *) sv + sv->length)
+		if (sv->key == 0x30)
+			sclp_get_input(sv);
+}
+
+static inline void sclp_eval_textcmd(struct gds_vector *v)
+{
+	struct gds_subvector *sv;
+	void *end;
+
+	end = (void *) v + v->length;
+	for (sv = (struct gds_subvector *) (v + 1);
+	     (void *) sv < end; sv = (void *) sv + sv->length)
+		if (sv->key == GDS_KEY_SELFDEFTEXTMSG)
+			sclp_eval_selfdeftextmsg(sv);
+}
+
+static inline void sclp_eval_cpmsu(struct gds_vector *v)
+{
+	void *end;
+
+	end = (void *) v + v->length;
+	for (v = v + 1; (void *) v < end; v = (void *) v + v->length)
+		if (v->gds_id == GDS_ID_TEXTCMD)
+			sclp_eval_textcmd(v);
+}
+
+static inline void sclp_eval_mdsmu(struct gds_vector *v)
+{
+	v = sclp_find_gds_vector(v + 1, (void *) v + v->length, GDS_ID_CPMSU);
+	if (v)
+		sclp_eval_cpmsu(v);
+}
+
+static void sclp_tty_receiver(struct evbuf_header *evbuf)
+{
+	struct gds_vector *v;
+
+	v = sclp_find_gds_vector(evbuf + 1, (void *) evbuf + evbuf->length,
+				 GDS_ID_MDSMU);
+	if (v)
+		sclp_eval_mdsmu(v);
+}
+
+static void
+sclp_tty_state_change(struct sclp_register *reg)
+{
+}
+
+static struct sclp_register sclp_input_event =
+{
+	.receive_mask = EVTYP_OPCMD_MASK | EVTYP_PMSGCMD_MASK,
+	.state_change_fn = sclp_tty_state_change,
+	.receiver_fn = sclp_tty_receiver
+};
+
+static const struct tty_operations sclp_ops = {
+	.open = sclp_tty_open,
+	.close = sclp_tty_close,
+	.write = sclp_tty_write,
+	.put_char = sclp_tty_put_char,
+	.flush_chars = sclp_tty_flush_chars,
+	.write_room = sclp_tty_write_room,
+	.chars_in_buffer = sclp_tty_chars_in_buffer,
+	.flush_buffer = sclp_tty_flush_buffer,
+};
+
+static int __init
+sclp_tty_init(void)
+{
+	struct tty_driver *driver;
+	void *page;
+	int i;
+	int rc;
+
+	/* z/VM multiplexes the line mode output on the 32xx screen */
+	if (MACHINE_IS_VM && !CONSOLE_IS_SCLP)
+		return 0;
+	if (!sclp.has_linemode)
+		return 0;
+	driver = alloc_tty_driver(1);
+	if (!driver)
+		return -ENOMEM;
+
+	rc = sclp_rw_init();
+	if (rc) {
+		put_tty_driver(driver);
+		return rc;
+	}
+	/* Allocate pages for output buffering */
+	INIT_LIST_HEAD(&sclp_tty_pages);
+	for (i = 0; i < MAX_KMEM_PAGES; i++) {
+		page = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
+		if (page == NULL) {
+			put_tty_driver(driver);
+			return -ENOMEM;
+		}
+		list_add_tail((struct list_head *) page, &sclp_tty_pages);
+	}
+	INIT_LIST_HEAD(&sclp_tty_outqueue);
+	spin_lock_init(&sclp_tty_lock);
+	timer_setup(&sclp_tty_timer, sclp_tty_timeout, 0);
+	sclp_ttybuf = NULL;
+	sclp_tty_buffer_count = 0;
+	if (MACHINE_IS_VM) {
+		/*
+		 * save 4 characters for the CPU number
+		 * written at start of each line by VM/CP
+		 */
+		sclp_tty_columns = 76;
+		/* fold input lines to lowercase */
+		sclp_tty_tolower = 1;
+	}
+	sclp_tty_chars_count = 0;
+
+	rc = sclp_register(&sclp_input_event);
+	if (rc) {
+		put_tty_driver(driver);
+		return rc;
+	}
+
+	tty_port_init(&sclp_port);
+
+	driver->driver_name = "sclp_line";
+	driver->name = "sclp_line";
+	driver->major = TTY_MAJOR;
+	driver->minor_start = 64;
+	driver->type = TTY_DRIVER_TYPE_SYSTEM;
+	driver->subtype = SYSTEM_TYPE_TTY;
+	driver->init_termios = tty_std_termios;
+	driver->init_termios.c_iflag = IGNBRK | IGNPAR;
+	driver->init_termios.c_oflag = ONLCR;
+	driver->init_termios.c_lflag = ISIG | ECHO;
+	driver->flags = TTY_DRIVER_REAL_RAW;
+	tty_set_operations(driver, &sclp_ops);
+	tty_port_link_device(&sclp_port, driver, 0);
+	rc = tty_register_driver(driver);
+	if (rc) {
+		put_tty_driver(driver);
+		tty_port_destroy(&sclp_port);
+		return rc;
+	}
+	sclp_tty_driver = driver;
+	return 0;
+}
+device_initcall(sclp_tty_init);
diff --git a/drivers/s390/char/sclp_tty.h b/drivers/s390/char/sclp_tty.h
new file mode 100644
index 0000000..0fa2d59
--- /dev/null
+++ b/drivers/s390/char/sclp_tty.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ *    interface to the SCLP-read/write driver
+ *
+ *  S390 version
+ *    Copyright IBM Corp. 1999
+ *    Author(s): Martin Peschke <mpeschke@de.ibm.com>
+ *		 Martin Schwidefsky <schwidefsky@de.ibm.com>
+ */
+
+#ifndef __SCLP_TTY_H__
+#define __SCLP_TTY_H__
+
+#include <linux/tty_driver.h>
+
+extern struct tty_driver *sclp_tty_driver;
+
+#endif	/* __SCLP_TTY_H__ */
diff --git a/drivers/s390/char/sclp_vt220.c b/drivers/s390/char/sclp_vt220.c
new file mode 100644
index 0000000..3f9a6ef
--- /dev/null
+++ b/drivers/s390/char/sclp_vt220.c
@@ -0,0 +1,898 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * SCLP VT220 terminal driver.
+ *
+ * Copyright IBM Corp. 2003, 2009
+ *
+ * Author(s): Peter Oberparleiter <Peter.Oberparleiter@de.ibm.com>
+ */
+
+#include <linux/module.h>
+#include <linux/spinlock.h>
+#include <linux/list.h>
+#include <linux/wait.h>
+#include <linux/timer.h>
+#include <linux/kernel.h>
+#include <linux/sysrq.h>
+#include <linux/tty.h>
+#include <linux/tty_driver.h>
+#include <linux/tty_flip.h>
+#include <linux/errno.h>
+#include <linux/mm.h>
+#include <linux/major.h>
+#include <linux/console.h>
+#include <linux/kdev_t.h>
+#include <linux/interrupt.h>
+#include <linux/init.h>
+#include <linux/reboot.h>
+#include <linux/slab.h>
+
+#include <linux/uaccess.h>
+#include "sclp.h"
+#include "ctrlchar.h"
+
+#define SCLP_VT220_MAJOR		TTY_MAJOR
+#define SCLP_VT220_MINOR		65
+#define SCLP_VT220_DRIVER_NAME		"sclp_vt220"
+#define SCLP_VT220_DEVICE_NAME		"ttysclp"
+#define SCLP_VT220_CONSOLE_NAME		"ttyS"
+#define SCLP_VT220_CONSOLE_INDEX	1	/* console=ttyS1 */
+
+/* Representation of a single write request */
+struct sclp_vt220_request {
+	struct list_head list;
+	struct sclp_req sclp_req;
+	int retry_count;
+};
+
+/* VT220 SCCB */
+struct sclp_vt220_sccb {
+	struct sccb_header header;
+	struct evbuf_header evbuf;
+};
+
+#define SCLP_VT220_MAX_CHARS_PER_BUFFER	(PAGE_SIZE - \
+					 sizeof(struct sclp_vt220_request) - \
+					 sizeof(struct sclp_vt220_sccb))
+
+/* Structures and data needed to register tty driver */
+static struct tty_driver *sclp_vt220_driver;
+
+static struct tty_port sclp_vt220_port;
+
+/* Lock to protect internal data from concurrent access */
+static spinlock_t sclp_vt220_lock;
+
+/* List of empty pages to be used as write request buffers */
+static struct list_head sclp_vt220_empty;
+
+/* List of pending requests */
+static struct list_head sclp_vt220_outqueue;
+
+/* Suspend mode flag */
+static int sclp_vt220_suspended;
+
+/* Flag that output queue is currently running */
+static int sclp_vt220_queue_running;
+
+/* Timer used for delaying write requests to merge subsequent messages into
+ * a single buffer */
+static struct timer_list sclp_vt220_timer;
+
+/* Pointer to current request buffer which has been partially filled but not
+ * yet sent */
+static struct sclp_vt220_request *sclp_vt220_current_request;
+
+/* Number of characters in current request buffer */
+static int sclp_vt220_buffered_chars;
+
+/* Counter controlling core driver initialization. */
+static int __initdata sclp_vt220_init_count;
+
+/* Flag indicating that sclp_vt220_current_request should already have been
+ * queued but was not because the SCLP was processing another buffer */
+static int sclp_vt220_flush_later;
+
+static void sclp_vt220_receiver_fn(struct evbuf_header *evbuf);
+static void sclp_vt220_pm_event_fn(struct sclp_register *reg,
+				   enum sclp_pm_event sclp_pm_event);
+static int __sclp_vt220_emit(struct sclp_vt220_request *request);
+static void sclp_vt220_emit_current(void);
+
+/* Registration structure for SCLP output event buffers */
+static struct sclp_register sclp_vt220_register = {
+	.send_mask		= EVTYP_VT220MSG_MASK,
+	.pm_event_fn		= sclp_vt220_pm_event_fn,
+};
+
+/* Registration structure for SCLP input event buffers */
+static struct sclp_register sclp_vt220_register_input = {
+	.receive_mask		= EVTYP_VT220MSG_MASK,
+	.receiver_fn		= sclp_vt220_receiver_fn,
+};
+
+
+/*
+ * Put the provided request buffer back into the empty queue and emit
+ * pending buffers if necessary.
+ */
+static void
+sclp_vt220_process_queue(struct sclp_vt220_request *request)
+{
+	unsigned long flags;
+	void *page;
+
+	do {
+		/* Put buffer back to list of empty buffers */
+		page = request->sclp_req.sccb;
+		spin_lock_irqsave(&sclp_vt220_lock, flags);
+		/* Move request from outqueue to empty queue */
+		list_del(&request->list);
+		list_add_tail((struct list_head *) page, &sclp_vt220_empty);
+		/* Check if there is a pending buffer on the out queue. */
+		request = NULL;
+		if (!list_empty(&sclp_vt220_outqueue))
+			request = list_entry(sclp_vt220_outqueue.next,
+					     struct sclp_vt220_request, list);
+		if (!request || sclp_vt220_suspended) {
+			sclp_vt220_queue_running = 0;
+			spin_unlock_irqrestore(&sclp_vt220_lock, flags);
+			break;
+		}
+		spin_unlock_irqrestore(&sclp_vt220_lock, flags);
+	} while (__sclp_vt220_emit(request));
+	if (request == NULL && sclp_vt220_flush_later)
+		sclp_vt220_emit_current();
+	tty_port_tty_wakeup(&sclp_vt220_port);
+}
+
+#define SCLP_BUFFER_MAX_RETRY		1
+
+/*
+ * Callback through which the result of a write request is reported by the
+ * SCLP.
+ */
+static void
+sclp_vt220_callback(struct sclp_req *request, void *data)
+{
+	struct sclp_vt220_request *vt220_request;
+	struct sclp_vt220_sccb *sccb;
+
+	vt220_request = (struct sclp_vt220_request *) data;
+	if (request->status == SCLP_REQ_FAILED) {
+		sclp_vt220_process_queue(vt220_request);
+		return;
+	}
+	sccb = (struct sclp_vt220_sccb *) vt220_request->sclp_req.sccb;
+
+	/* Check SCLP response code and choose suitable action	*/
+	switch (sccb->header.response_code) {
+	case 0x0020:
+		break;
+
+	case 0x05f0: /* Target resource in improper state */
+		break;
+
+	case 0x0340: /* Contained SCLP equipment check */
+		if (++vt220_request->retry_count > SCLP_BUFFER_MAX_RETRY)
+			break;
+		/* Remove processed buffers and requeue rest */
+		if (sclp_remove_processed((struct sccb_header *) sccb) > 0) {
+			/* Not all buffers were processed */
+			sccb->header.response_code = 0x0000;
+			vt220_request->sclp_req.status = SCLP_REQ_FILLED;
+			if (sclp_add_request(request) == 0)
+				return;
+		}
+		break;
+
+	case 0x0040: /* SCLP equipment check */
+		if (++vt220_request->retry_count > SCLP_BUFFER_MAX_RETRY)
+			break;
+		sccb->header.response_code = 0x0000;
+		vt220_request->sclp_req.status = SCLP_REQ_FILLED;
+		if (sclp_add_request(request) == 0)
+			return;
+		break;
+
+	default:
+		break;
+	}
+	sclp_vt220_process_queue(vt220_request);
+}
+
+/*
+ * Emit vt220 request buffer to SCLP. Return zero on success, non-zero
+ * otherwise.
+ */
+static int
+__sclp_vt220_emit(struct sclp_vt220_request *request)
+{
+	request->sclp_req.command = SCLP_CMDW_WRITE_EVENT_DATA;
+	request->sclp_req.status = SCLP_REQ_FILLED;
+	request->sclp_req.callback = sclp_vt220_callback;
+	request->sclp_req.callback_data = (void *) request;
+
+	return sclp_add_request(&request->sclp_req);
+}
+
+/*
+ * Queue and emit current request.
+ */
+static void
+sclp_vt220_emit_current(void)
+{
+	unsigned long flags;
+	struct sclp_vt220_request *request;
+	struct sclp_vt220_sccb *sccb;
+
+	spin_lock_irqsave(&sclp_vt220_lock, flags);
+	if (sclp_vt220_current_request) {
+		sccb = (struct sclp_vt220_sccb *)
+				sclp_vt220_current_request->sclp_req.sccb;
+		/* Only emit buffers with content */
+		if (sccb->header.length != sizeof(struct sclp_vt220_sccb)) {
+			list_add_tail(&sclp_vt220_current_request->list,
+				      &sclp_vt220_outqueue);
+			sclp_vt220_current_request = NULL;
+			if (timer_pending(&sclp_vt220_timer))
+				del_timer(&sclp_vt220_timer);
+		}
+		sclp_vt220_flush_later = 0;
+	}
+	if (sclp_vt220_queue_running || sclp_vt220_suspended)
+		goto out_unlock;
+	if (list_empty(&sclp_vt220_outqueue))
+		goto out_unlock;
+	request = list_first_entry(&sclp_vt220_outqueue,
+				   struct sclp_vt220_request, list);
+	sclp_vt220_queue_running = 1;
+	spin_unlock_irqrestore(&sclp_vt220_lock, flags);
+
+	if (__sclp_vt220_emit(request))
+		sclp_vt220_process_queue(request);
+	return;
+out_unlock:
+	spin_unlock_irqrestore(&sclp_vt220_lock, flags);
+}
+
+#define SCLP_NORMAL_WRITE	0x00
+
+/*
+ * Helper function to initialize a page with the sclp request structure.
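+ * The SCCB (header plus event buffer) is placed at the start of the page and
+ * grows upward as message text is added; the request structure sits at the
+ * very end of the page.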
+ */
+static struct sclp_vt220_request *
+sclp_vt220_initialize_page(void *page)
+{
+	struct sclp_vt220_request *request;
+	struct sclp_vt220_sccb *sccb;
+
+	/* Place request structure at end of page */
+	request = ((struct sclp_vt220_request *)
+			((addr_t) page + PAGE_SIZE)) - 1;
+	request->retry_count = 0;
+	request->sclp_req.sccb = page;
+	/* SCCB goes at start of page */
+	sccb = (struct sclp_vt220_sccb *) page;
+	memset((void *) sccb, 0, sizeof(struct sclp_vt220_sccb));
+	sccb->header.length = sizeof(struct sclp_vt220_sccb);
+	sccb->header.function_code = SCLP_NORMAL_WRITE;
+	sccb->header.response_code = 0x0000;
+	sccb->evbuf.type = EVTYP_VT220MSG;
+	sccb->evbuf.length = sizeof(struct evbuf_header);
+
+	return request;
+}
+
+static inline unsigned int
+sclp_vt220_space_left(struct sclp_vt220_request *request)
+{
+	struct sclp_vt220_sccb *sccb;
+	sccb = (struct sclp_vt220_sccb *) request->sclp_req.sccb;
+	return PAGE_SIZE - sizeof(struct sclp_vt220_request) -
+	       sccb->header.length;
+}
+
+static inline unsigned int
+sclp_vt220_chars_stored(struct sclp_vt220_request *request)
+{
+	struct sclp_vt220_sccb *sccb;
+	sccb = (struct sclp_vt220_sccb *) request->sclp_req.sccb;
+	return sccb->evbuf.length - sizeof(struct evbuf_header);
+}
+
+/*
+ * Add msg to buffer associated with request. Return the number of characters
+ * added.
+ */
+static int
+sclp_vt220_add_msg(struct sclp_vt220_request *request,
+		   const unsigned char *msg, int count, int convertlf)
+{
+	struct sclp_vt220_sccb *sccb;
+	void *buffer;
+	unsigned char c;
+	int from;
+	int to;
+
+	if (count > sclp_vt220_space_left(request))
+		count = sclp_vt220_space_left(request);
+	if (count <= 0)
+		return 0;
+
+	sccb = (struct sclp_vt220_sccb *) request->sclp_req.sccb;
+	buffer = (void *) ((addr_t) sccb + sccb->header.length);
+
+	if (convertlf) {
+		/* Perform Linefeed conversion (0x0a -> 0x0a 0x0d)*/
+		for (from=0, to=0;
+		     (from < count) && (to < sclp_vt220_space_left(request));
+		     from++) {
+			/* Retrieve character */
+			c = msg[from];
+			/* Perform conversion */
+			if (c == 0x0a) {
+				if (to + 1 < sclp_vt220_space_left(request)) {
+					((unsigned char *) buffer)[to++] = c;
+					((unsigned char *) buffer)[to++] = 0x0d;
+				} else
+					break;
+
+			} else
+				((unsigned char *) buffer)[to++] = c;
+		}
+		sccb->header.length += to;
+		sccb->evbuf.length += to;
+		return from;
+	} else {
+		memcpy(buffer, (const void *) msg, count);
+		sccb->header.length += count;
+		sccb->evbuf.length += count;
+		return count;
+	}
+}
+
+/*
+ * Emit buffer after having waited long enough for more data to arrive.
+ */
+static void
+sclp_vt220_timeout(struct timer_list *unused)
+{
+	sclp_vt220_emit_current();
+}
+
+#define BUFFER_MAX_DELAY	HZ/20
+
+/*
+ * Drop oldest console buffer if sclp_con_drop is set
+ */
+static int
+sclp_vt220_drop_buffer(void)
+{
+	struct list_head *list;
+	struct sclp_vt220_request *request;
+	void *page;
+
+	if (!sclp_console_drop)
+		return 0;
+	list = sclp_vt220_outqueue.next;
+	if (sclp_vt220_queue_running)
+		/* The first element is in I/O */
+		list = list->next;
+	if (list == &sclp_vt220_outqueue)
+		return 0;
+	list_del(list);
+	request = list_entry(list, struct sclp_vt220_request, list);
+	page = request->sclp_req.sccb;
+	list_add_tail((struct list_head *) page, &sclp_vt220_empty);
+	return 1;
+}
+
+/*
+ * Internal implementation of the write function. Write COUNT bytes of data
+ * from memory at BUF to the SCLP interface. If the data does not fit into
+ * the current
+ * write buffer, emit the current one and allocate a new one. If there are no
+ * more empty buffers available, wait until one gets emptied. If DO_SCHEDULE
+ * is non-zero, the buffer will be scheduled for emitting after a timeout -
+ * otherwise the user has to explicitly call the flush function.
+ * A non-zero CONVERTLF parameter indicates that 0x0a characters in the message
+ * buffer should be converted to 0x0a 0x0d. After completion, return the number
+ * of bytes written.
+ */
+static int
+__sclp_vt220_write(const unsigned char *buf, int count, int do_schedule,
+		   int convertlf, int may_fail)
+{
+	unsigned long flags;
+	void *page;
+	int written;
+	int overall_written;
+
+	if (count <= 0)
+		return 0;
+	overall_written = 0;
+	spin_lock_irqsave(&sclp_vt220_lock, flags);
+	do {
+		/* Create an sclp output buffer if none exists yet */
+		if (sclp_vt220_current_request == NULL) {
+			if (list_empty(&sclp_vt220_empty))
+				sclp_console_full++;
+			while (list_empty(&sclp_vt220_empty)) {
+				if (may_fail || sclp_vt220_suspended)
+					goto out;
+				if (sclp_vt220_drop_buffer())
+					break;
+				spin_unlock_irqrestore(&sclp_vt220_lock, flags);
+
+				sclp_sync_wait();
+				spin_lock_irqsave(&sclp_vt220_lock, flags);
+			}
+			page = (void *) sclp_vt220_empty.next;
+			list_del((struct list_head *) page);
+			sclp_vt220_current_request =
+				sclp_vt220_initialize_page(page);
+		}
+		/* Try to write the string to the current request buffer */
+		written = sclp_vt220_add_msg(sclp_vt220_current_request,
+					     buf, count, convertlf);
+		overall_written += written;
+		if (written == count)
+			break;
+		/*
+		 * Not all characters could be written to the current
+		 * output buffer. Emit the buffer, create a new buffer
+		 * and then output the rest of the string.
+		 */
+		spin_unlock_irqrestore(&sclp_vt220_lock, flags);
+		sclp_vt220_emit_current();
+		spin_lock_irqsave(&sclp_vt220_lock, flags);
+		buf += written;
+		count -= written;
+	} while (count > 0);
+	/* Setup timer to output current console buffer after some time */
+	if (sclp_vt220_current_request != NULL &&
+	    !timer_pending(&sclp_vt220_timer) && do_schedule) {
+		sclp_vt220_timer.expires = jiffies + BUFFER_MAX_DELAY;
+		add_timer(&sclp_vt220_timer);
+	}
+out:
+	spin_unlock_irqrestore(&sclp_vt220_lock, flags);
+	return overall_written;
+}
+
+/*
+ * This routine is called by the kernel to write a series of
+ * characters to the tty device.  The characters may come from
+ * user space or kernel space.  This routine will return the
+ * number of characters actually accepted for writing.
+ */
+static int
+sclp_vt220_write(struct tty_struct *tty, const unsigned char *buf, int count)
+{
+	return __sclp_vt220_write(buf, count, 1, 0, 1);
+}
+
+#define SCLP_VT220_SESSION_ENDED	0x01
+#define SCLP_VT220_SESSION_STARTED	0x80
+#define SCLP_VT220_SESSION_DATA		0x00
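+
+/*
+ * The first byte of each incoming VT220 event buffer carries one of the
+ * session indicators above; for SESSION_DATA the terminal input follows.
+ */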
+
+#ifdef CONFIG_MAGIC_SYSRQ
+
+static int sysrq_pressed;
+static struct sysrq_work sysrq;
+
+static void sclp_vt220_reset_session(void)
+{
+	sysrq_pressed = 0;
+}
+
+static void sclp_vt220_handle_input(const char *buffer, unsigned int count)
+{
+	int i;
+
+	for (i = 0; i < count; i++) {
+		/* Handle magic sys request */
+		if (buffer[i] == ('O' ^ 0100)) { /* CTRL-O */
+			/*
+			 * If pressed again, reset sysrq_pressed
+			 * and flip CTRL-O character
+			 */
+			sysrq_pressed = !sysrq_pressed;
+			if (sysrq_pressed)
+				continue;
+		} else if (sysrq_pressed) {
+			sysrq.key = buffer[i];
+			schedule_sysrq_work(&sysrq);
+			sysrq_pressed = 0;
+			continue;
+		}
+		tty_insert_flip_char(&sclp_vt220_port, buffer[i], 0);
+	}
+}
+
+#else
+
+static void sclp_vt220_reset_session(void)
+{
+}
+
+static void sclp_vt220_handle_input(const char *buffer, unsigned int count)
+{
+	tty_insert_flip_string(&sclp_vt220_port, buffer, count);
+}
+
+#endif
+
+/*
+ * Called by the SCLP to report incoming event buffers.
+ */
+static void
+sclp_vt220_receiver_fn(struct evbuf_header *evbuf)
+{
+	char *buffer;
+	unsigned int count;
+
+	buffer = (char *) ((addr_t) evbuf + sizeof(struct evbuf_header));
+	count = evbuf->length - sizeof(struct evbuf_header);
+
+	switch (*buffer) {
+	case SCLP_VT220_SESSION_ENDED:
+	case SCLP_VT220_SESSION_STARTED:
+		sclp_vt220_reset_session();
+		break;
+	case SCLP_VT220_SESSION_DATA:
+		/* Send input to line discipline */
+		buffer++;
+		count--;
+		sclp_vt220_handle_input(buffer, count);
+		tty_flip_buffer_push(&sclp_vt220_port);
+		break;
+	}
+}
+
+/*
+ * This routine is called when a particular tty device is opened.
+ */
+static int
+sclp_vt220_open(struct tty_struct *tty, struct file *filp)
+{
+	if (tty->count == 1) {
+		tty_port_tty_set(&sclp_vt220_port, tty);
+		sclp_vt220_port.low_latency = 0;
+		if (!tty->winsize.ws_row && !tty->winsize.ws_col) {
+			tty->winsize.ws_row = 24;
+			tty->winsize.ws_col = 80;
+		}
+	}
+	return 0;
+}
+
+/*
+ * This routine is called when a particular tty device is closed.
+ */
+static void
+sclp_vt220_close(struct tty_struct *tty, struct file *filp)
+{
+	if (tty->count == 1)
+		tty_port_tty_set(&sclp_vt220_port, NULL);
+}
+
+/*
+ * This routine is called by the kernel to write a single
+ * character to the tty device.  If the kernel uses this routine,
+ * it must call the flush_chars() routine (if defined) when it is
+ * done stuffing characters into the driver.
+ */
+static int
+sclp_vt220_put_char(struct tty_struct *tty, unsigned char ch)
+{
+	return __sclp_vt220_write(&ch, 1, 0, 0, 1);
+}
+
+/*
+ * This routine is called by the kernel after it has written a
+ * series of characters to the tty device using put_char().
+ */
+static void
+sclp_vt220_flush_chars(struct tty_struct *tty)
+{
+	if (!sclp_vt220_queue_running)
+		sclp_vt220_emit_current();
+	else
+		sclp_vt220_flush_later = 1;
+}
+
+/*
+ * This routine returns the number of characters the tty driver
+ * will accept for queuing to be written.  This number is subject
+ * to change as output buffers get emptied, or if output flow
+ * control is exercised.
+ */
+static int
+sclp_vt220_write_room(struct tty_struct *tty)
+{
+	unsigned long flags;
+	struct list_head *l;
+	int count;
+
+	spin_lock_irqsave(&sclp_vt220_lock, flags);
+	count = 0;
+	if (sclp_vt220_current_request != NULL)
+		count = sclp_vt220_space_left(sclp_vt220_current_request);
+	list_for_each(l, &sclp_vt220_empty)
+		count += SCLP_VT220_MAX_CHARS_PER_BUFFER;
+	spin_unlock_irqrestore(&sclp_vt220_lock, flags);
+	return count;
+}
+
+/*
+ * Return number of buffered chars.
+ */
+static int
+sclp_vt220_chars_in_buffer(struct tty_struct *tty)
+{
+	unsigned long flags;
+	struct list_head *l;
+	struct sclp_vt220_request *r;
+	int count;
+
+	spin_lock_irqsave(&sclp_vt220_lock, flags);
+	count = 0;
+	if (sclp_vt220_current_request != NULL)
+		count = sclp_vt220_chars_stored(sclp_vt220_current_request);
+	list_for_each(l, &sclp_vt220_outqueue) {
+		r = list_entry(l, struct sclp_vt220_request, list);
+		count += sclp_vt220_chars_stored(r);
+	}
+	spin_unlock_irqrestore(&sclp_vt220_lock, flags);
+	return count;
+}
+
+/*
+ * Pass all pending buffers on to the hardware; this does not wait for
+ * completion.
+ */
+static void
+sclp_vt220_flush_buffer(struct tty_struct *tty)
+{
+	sclp_vt220_emit_current();
+}
+
+/* Release allocated pages. */
+static void __init __sclp_vt220_free_pages(void)
+{
+	struct list_head *page, *p;
+
+	list_for_each_safe(page, p, &sclp_vt220_empty) {
+		list_del(page);
+		free_page((unsigned long) page);
+	}
+}
+
+/* Release memory and unregister from sclp core. Controlled by init counting -
+ * only the last invoker will actually perform these actions. */
+static void __init __sclp_vt220_cleanup(void)
+{
+	sclp_vt220_init_count--;
+	if (sclp_vt220_init_count != 0)
+		return;
+	sclp_unregister(&sclp_vt220_register);
+	__sclp_vt220_free_pages();
+	tty_port_destroy(&sclp_vt220_port);
+}
+
+/* Allocate buffer pages and register with sclp core. Controlled by init
+ * counting - only the first invoker will actually perform these actions. */
+static int __init __sclp_vt220_init(int num_pages)
+{
+	void *page;
+	int i;
+	int rc;
+
+	sclp_vt220_init_count++;
+	if (sclp_vt220_init_count != 1)
+		return 0;
+	spin_lock_init(&sclp_vt220_lock);
+	INIT_LIST_HEAD(&sclp_vt220_empty);
+	INIT_LIST_HEAD(&sclp_vt220_outqueue);
+	timer_setup(&sclp_vt220_timer, sclp_vt220_timeout, 0);
+	tty_port_init(&sclp_vt220_port);
+	sclp_vt220_current_request = NULL;
+	sclp_vt220_buffered_chars = 0;
+	sclp_vt220_flush_later = 0;
+
+	/* Allocate pages for output buffering */
+	rc = -ENOMEM;
+	for (i = 0; i < num_pages; i++) {
+		page = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
+		if (!page)
+			goto out;
+		list_add_tail(page, &sclp_vt220_empty);
+	}
+	rc = sclp_register(&sclp_vt220_register);
+out:
+	if (rc) {
+		__sclp_vt220_free_pages();
+		sclp_vt220_init_count--;
+		tty_port_destroy(&sclp_vt220_port);
+	}
+	return rc;
+}
+
+static const struct tty_operations sclp_vt220_ops = {
+	.open = sclp_vt220_open,
+	.close = sclp_vt220_close,
+	.write = sclp_vt220_write,
+	.put_char = sclp_vt220_put_char,
+	.flush_chars = sclp_vt220_flush_chars,
+	.write_room = sclp_vt220_write_room,
+	.chars_in_buffer = sclp_vt220_chars_in_buffer,
+	.flush_buffer = sclp_vt220_flush_buffer,
+};
+
+/*
+ * Register driver with SCLP and Linux and initialize internal tty structures.
+ */
+static int __init sclp_vt220_tty_init(void)
+{
+	struct tty_driver *driver;
+	int rc;
+
+	/* Note: we're not testing for CONSOLE_IS_SCLP here to preserve
+	 * symmetry between VM and LPAR systems regarding ttyS1. */
+	driver = alloc_tty_driver(1);
+	if (!driver)
+		return -ENOMEM;
+	rc = __sclp_vt220_init(MAX_KMEM_PAGES);
+	if (rc)
+		goto out_driver;
+
+	driver->driver_name = SCLP_VT220_DRIVER_NAME;
+	driver->name = SCLP_VT220_DEVICE_NAME;
+	driver->major = SCLP_VT220_MAJOR;
+	driver->minor_start = SCLP_VT220_MINOR;
+	driver->type = TTY_DRIVER_TYPE_SYSTEM;
+	driver->subtype = SYSTEM_TYPE_TTY;
+	driver->init_termios = tty_std_termios;
+	driver->flags = TTY_DRIVER_REAL_RAW;
+	tty_set_operations(driver, &sclp_vt220_ops);
+	tty_port_link_device(&sclp_vt220_port, driver, 0);
+
+	rc = tty_register_driver(driver);
+	if (rc)
+		goto out_init;
+	rc = sclp_register(&sclp_vt220_register_input);
+	if (rc)
+		goto out_reg;
+	sclp_vt220_driver = driver;
+	return 0;
+
+out_reg:
+	tty_unregister_driver(driver);
+out_init:
+	__sclp_vt220_cleanup();
+out_driver:
+	put_tty_driver(driver);
+	return rc;
+}
+__initcall(sclp_vt220_tty_init);
+
+static void __sclp_vt220_flush_buffer(void)
+{
+	unsigned long flags;
+
+	sclp_vt220_emit_current();
+	spin_lock_irqsave(&sclp_vt220_lock, flags);
+	if (timer_pending(&sclp_vt220_timer))
+		del_timer(&sclp_vt220_timer);
+	while (sclp_vt220_queue_running) {
+		spin_unlock_irqrestore(&sclp_vt220_lock, flags);
+		sclp_sync_wait();
+		spin_lock_irqsave(&sclp_vt220_lock, flags);
+	}
+	spin_unlock_irqrestore(&sclp_vt220_lock, flags);
+}
+
+/*
+ * Resume console: If there are cached messages, emit them.
+ */
+static void sclp_vt220_resume(void)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&sclp_vt220_lock, flags);
+	sclp_vt220_suspended = 0;
+	spin_unlock_irqrestore(&sclp_vt220_lock, flags);
+	sclp_vt220_emit_current();
+}
+
+/*
+ * Suspend console: Set suspend flag and flush console
+ */
+static void sclp_vt220_suspend(void)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&sclp_vt220_lock, flags);
+	sclp_vt220_suspended = 1;
+	spin_unlock_irqrestore(&sclp_vt220_lock, flags);
+	__sclp_vt220_flush_buffer();
+}
+
+static void sclp_vt220_pm_event_fn(struct sclp_register *reg,
+				   enum sclp_pm_event sclp_pm_event)
+{
+	switch (sclp_pm_event) {
+	case SCLP_PM_EVENT_FREEZE:
+		sclp_vt220_suspend();
+		break;
+	case SCLP_PM_EVENT_RESTORE:
+	case SCLP_PM_EVENT_THAW:
+		sclp_vt220_resume();
+		break;
+	}
+}
+
+#ifdef CONFIG_SCLP_VT220_CONSOLE
+
+static void
+sclp_vt220_con_write(struct console *con, const char *buf, unsigned int count)
+{
+	__sclp_vt220_write((const unsigned char *) buf, count, 1, 1, 0);
+}
+
+static struct tty_driver *
+sclp_vt220_con_device(struct console *c, int *index)
+{
+	*index = 0;
+	return sclp_vt220_driver;
+}
+
+static int
+sclp_vt220_notify(struct notifier_block *self,
+			  unsigned long event, void *data)
+{
+	__sclp_vt220_flush_buffer();
+	return NOTIFY_OK;
+}
+
+static struct notifier_block on_panic_nb = {
+	.notifier_call = sclp_vt220_notify,
+	.priority = 1,
+};
+
+static struct notifier_block on_reboot_nb = {
+	.notifier_call = sclp_vt220_notify,
+	.priority = 1,
+};
+
+/* Structure needed to register with printk */
+static struct console sclp_vt220_console =
+{
+	.name = SCLP_VT220_CONSOLE_NAME,
+	.write = sclp_vt220_con_write,
+	.device = sclp_vt220_con_device,
+	.flags = CON_PRINTBUFFER,
+	.index = SCLP_VT220_CONSOLE_INDEX
+};
+
+static int __init
+sclp_vt220_con_init(void)
+{
+	int rc;
+
+	rc = __sclp_vt220_init(sclp_console_pages);
+	if (rc)
+		return rc;
+	/* Attach linux console */
+	atomic_notifier_chain_register(&panic_notifier_list, &on_panic_nb);
+	register_reboot_notifier(&on_reboot_nb);
+	register_console(&sclp_vt220_console);
+	return 0;
+}
+
+console_initcall(sclp_vt220_con_init);
+#endif /* CONFIG_SCLP_VT220_CONSOLE */
+
diff --git a/drivers/s390/char/tape.h b/drivers/s390/char/tape.h
new file mode 100644
index 0000000..8bec5f9
--- /dev/null
+++ b/drivers/s390/char/tape.h
@@ -0,0 +1,371 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ *    tape device driver for 3480/3490E/3590 tapes.
+ *
+ *  S390 and zSeries version
+ *    Copyright IBM Corp. 2001, 2009
+ *    Author(s): Carsten Otte <cotte@de.ibm.com>
+ *		 Tuan Ngo-Anh <ngoanh@de.ibm.com>
+ *		 Martin Schwidefsky <schwidefsky@de.ibm.com>
+ *		 Stefan Bader <shbader@de.ibm.com>
+ */
+
+#ifndef _TAPE_H
+#define _TAPE_H
+
+#include <asm/ccwdev.h>
+#include <asm/debug.h>
+#include <asm/idals.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mtio.h>
+#include <linux/interrupt.h>
+#include <linux/workqueue.h>
+
+struct gendisk;
+
+/*
+ * Define DBF_LIKE_HELL for lots of messages in the debug feature.
+ */
+#define DBF_LIKE_HELL
+#ifdef  DBF_LIKE_HELL
+#define DBF_LH(level, str, ...) \
+do { \
+	debug_sprintf_event(TAPE_DBF_AREA, level, str, ## __VA_ARGS__); \
+} while (0)
+#else
+#define DBF_LH(level, str, ...) do {} while (0)
+#endif
+
+/*
+ * macros s390 debug feature (dbf)
+ */
+#define DBF_EVENT(d_level, d_str...) \
+do { \
+	debug_sprintf_event(TAPE_DBF_AREA, d_level, d_str); \
+} while (0)
+
+#define DBF_EXCEPTION(d_level, d_str...) \
+do { \
+	debug_sprintf_exception(TAPE_DBF_AREA, d_level, d_str); \
+} while (0)
+
+#define TAPE_VERSION_MAJOR 2
+#define TAPE_VERSION_MINOR 0
+#define TAPE_MAGIC "tape"
+
+#define TAPE_MINORS_PER_DEV 2	    /* two minors per device */
+#define TAPEBLOCK_HSEC_SIZE	2048
+#define TAPEBLOCK_HSEC_S2B	2
+#define TAPEBLOCK_RETRIES	5
+
+enum tape_medium_state {
+	MS_UNKNOWN,
+	MS_LOADED,
+	MS_UNLOADED,
+	MS_SIZE
+};
+
+enum tape_state {
+	TS_UNUSED=0,
+	TS_IN_USE,
+	TS_BLKUSE,
+	TS_INIT,
+	TS_NOT_OPER,
+	TS_SIZE
+};
+
+enum tape_op {
+	TO_BLOCK,	/* Block read */
+	TO_BSB,		/* Backward space block */
+	TO_BSF,		/* Backward space filemark */
+	TO_DSE,		/* Data security erase */
+	TO_FSB,		/* Forward space block */
+	TO_FSF,		/* Forward space filemark */
+	TO_LBL,		/* Locate block label */
+	TO_NOP,		/* No operation */
+	TO_RBA,		/* Read backward */
+	TO_RBI,		/* Read block information */
+	TO_RFO,		/* Read forward */
+	TO_REW,		/* Rewind tape */
+	TO_RUN,		/* Rewind and unload tape */
+	TO_WRI,		/* Write block */
+	TO_WTM,		/* Write tape mark */
+	TO_MSEN,	/* Medium sense */
+	TO_LOAD,	/* Load tape */
+	TO_READ_CONFIG, /* Read configuration data */
+	TO_READ_ATTMSG, /* Read attention message */
+	TO_DIS,		/* Tape display */
+	TO_ASSIGN,	/* Assign tape to channel path */
+	TO_UNASSIGN,	/* Unassign tape from channel path */
+	TO_CRYPT_ON,	/* Enable encryption */
+	TO_CRYPT_OFF,	/* Disable encryption */
+	TO_KEKL_SET,	/* Set KEK label */
+	TO_KEKL_QUERY,	/* Query KEK label */
+	TO_RDC,		/* Read device characteristics */
+	TO_SIZE,	/* #entries in tape_op_t */
+};
+
+/* Forward declaration */
+struct tape_device;
+
+/* tape_request->status can be: */
+enum tape_request_status {
+	TAPE_REQUEST_INIT,	/* request is ready to be processed */
+	TAPE_REQUEST_QUEUED,	/* request is queued to be processed */
+	TAPE_REQUEST_IN_IO,	/* request is currently in IO */
+	TAPE_REQUEST_DONE,	/* request is completed. */
+	TAPE_REQUEST_CANCEL,	/* request should be canceled. */
+	TAPE_REQUEST_LONG_BUSY, /* request has to be restarted after long busy */
+};
+
+/* Tape CCW request */
+struct tape_request {
+	struct list_head list;		/* list head for request queueing. */
+	struct tape_device *device;	/* tape device of this request */
+	struct ccw1 *cpaddr;		/* address of the channel program. */
+	void *cpdata;			/* pointer to ccw data. */
+	enum tape_request_status status;/* status of this request */
+	int options;			/* options for execution. */
+	int retries;			/* retry counter for error recovery. */
+	int rescnt;			/* residual count from devstat. */
+	struct timer_list timer;	/* timer for std_assign_timeout(). */
+
+	/* Callback for delivering final status. */
+	void (*callback)(struct tape_request *, void *);
+	void *callback_data;
+
+	enum tape_op op;
+	int rc;
+};
+
+/* Function type for magnetic tape commands */
+typedef int (*tape_mtop_fn)(struct tape_device *, int);
+
+/* Size of the array containing the mtops for a discipline */
+#define TAPE_NR_MTOPS (MTMKPART+1)
+
+/* Tape Discipline */
+struct tape_discipline {
+	struct module *owner;
+	int  (*setup_device)(struct tape_device *);
+	void (*cleanup_device)(struct tape_device *);
+	int (*irq)(struct tape_device *, struct tape_request *, struct irb *);
+	struct tape_request *(*read_block)(struct tape_device *, size_t);
+	struct tape_request *(*write_block)(struct tape_device *, size_t);
+	void (*process_eov)(struct tape_device*);
+	/* ioctl function for additional ioctls. */
+	int (*ioctl_fn)(struct tape_device *, unsigned int, unsigned long);
+	/* Array of tape commands with TAPE_NR_MTOPS entries */
+	tape_mtop_fn *mtop_array;
+};
+
+/*
+ * The discipline irq function either returns an error code (<0) which
+ * means that the request has failed with an error or one of the following:
+ */
+#define TAPE_IO_SUCCESS		0	/* request successful */
+#define TAPE_IO_PENDING		1	/* request still running */
+#define TAPE_IO_RETRY		2	/* retry to current request */
+#define TAPE_IO_STOP		3	/* stop the running request */
+#define TAPE_IO_LONG_BUSY	4	/* delay the running request */
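+
+/*
+ * For example, the 34xx discipline returns TAPE_IO_RETRY from its error
+ * recovery path to make the core reissue the current channel program.
+ */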
+
+/* Char Frontend Data */
+struct tape_char_data {
+	struct idal_buffer *idal_buf;	/* idal buffer for user char data */
+	int block_size;			/*   of size block_size. */
+};
+
+/* Tape Info */
+struct tape_device {
+	/* entry in tape_device_list */
+	struct list_head		node;
+
+	int				cdev_id;
+	struct ccw_device *		cdev;
+	struct tape_class_device *	nt;
+	struct tape_class_device *	rt;
+
+	/* Device mutex to serialize tape commands. */
+	struct mutex			mutex;
+
+	/* Device discipline information. */
+	struct tape_discipline *	discipline;
+	void *				discdata;
+
+	/* Generic status flags */
+	long				tape_generic_status;
+
+	/* Device state information. */
+	wait_queue_head_t		state_change_wq;
+	enum tape_state			tape_state;
+	enum tape_medium_state		medium_state;
+	unsigned char *			modeset_byte;
+
+	/* Reference count. */
+	atomic_t			ref_count;
+
+	/* Request queue. */
+	struct list_head		req_queue;
+
+	/* Request wait queue. */
+	wait_queue_head_t		wait_queue;
+
+	/* Each tape device has (currently) two minor numbers. */
+	int				first_minor;
+
+	/* Number of tapemarks required for correct termination. */
+	int				required_tapemarks;
+
+	/* Block ID of the BOF */
+	unsigned int			bof;
+
+	/* Character device frontend data */
+	struct tape_char_data		char_data;
+
+	/* Function to start or stop the next request later. */
+	struct delayed_work		tape_dnr;
+
+	/* Timer for long busy */
+	struct timer_list		lb_timeout;
+
+};
+
+/* Externals from tape_core.c */
+extern struct tape_request *tape_alloc_request(int cplength, int datasize);
+extern void tape_free_request(struct tape_request *);
+extern int tape_do_io(struct tape_device *, struct tape_request *);
+extern int tape_do_io_async(struct tape_device *, struct tape_request *);
+extern int tape_do_io_interruptible(struct tape_device *, struct tape_request *);
+extern int tape_cancel_io(struct tape_device *, struct tape_request *);
+void tape_hotplug_event(struct tape_device *, int major, int action);
+
+static inline int
+tape_do_io_free(struct tape_device *device, struct tape_request *request)
+{
+	int rc;
+
+	rc = tape_do_io(device, request);
+	tape_free_request(request);
+	return rc;
+}
+
+static inline void
+tape_do_io_async_free(struct tape_device *device, struct tape_request *request)
+{
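+	/*
+	 * The callback prototype takes (request, data) while
+	 * tape_free_request() takes only the request; the cast below lets
+	 * it serve directly as a fire-and-forget completion callback that
+	 * frees the request once the I/O is done.
+	 */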
+	request->callback = (void *) tape_free_request;
+	request->callback_data = NULL;
+	tape_do_io_async(device, request);
+}
+
+extern int tape_oper_handler(int irq, int status);
+extern void tape_noper_handler(int irq, int status);
+extern int tape_open(struct tape_device *);
+extern int tape_release(struct tape_device *);
+extern int tape_mtop(struct tape_device *, int, int);
+extern void tape_state_set(struct tape_device *, enum tape_state);
+
+extern int tape_generic_online(struct tape_device *, struct tape_discipline *);
+extern int tape_generic_offline(struct ccw_device *);
+extern int tape_generic_pm_suspend(struct ccw_device *);
+
+/* Externals from tape_devmap.c */
+extern int tape_generic_probe(struct ccw_device *);
+extern void tape_generic_remove(struct ccw_device *);
+
+extern struct tape_device *tape_find_device(int devindex);
+extern struct tape_device *tape_get_device(struct tape_device *);
+extern void tape_put_device(struct tape_device *);
+
+/* Externals from tape_char.c */
+extern int tapechar_init(void);
+extern void tapechar_exit(void);
+extern int  tapechar_setup_device(struct tape_device *);
+extern void tapechar_cleanup_device(struct tape_device *);
+
+/* tape initialisation functions */
+#ifdef CONFIG_PROC_FS
+extern void tape_proc_init (void);
+extern void tape_proc_cleanup (void);
+#else
+static inline void tape_proc_init (void) {;}
+static inline void tape_proc_cleanup (void) {;}
+#endif
+
+/* a function for dumping device sense info */
+extern void tape_dump_sense_dbf(struct tape_device *, struct tape_request *,
+				struct irb *);
+
+/* functions for handling the status of a device */
+extern void tape_med_state_set(struct tape_device *, enum tape_medium_state);
+
+/* The debug area */
+extern debug_info_t *TAPE_DBF_AREA;
+
+/* functions for building ccws */
+static inline struct ccw1 *
+tape_ccw_cc(struct ccw1 *ccw, __u8 cmd_code, __u16 memsize, void *cda)
+{
+	ccw->cmd_code = cmd_code;
+	ccw->flags = CCW_FLAG_CC;
+	ccw->count = memsize;
+	ccw->cda = (__u32)(addr_t) cda;
+	return ccw + 1;
+}
+
+static inline struct ccw1 *
+tape_ccw_end(struct ccw1 *ccw, __u8 cmd_code, __u16 memsize, void *cda)
+{
+	ccw->cmd_code = cmd_code;
+	ccw->flags = 0;
+	ccw->count = memsize;
+	ccw->cda = (__u32)(addr_t) cda;
+	return ccw + 1;
+}
+
+static inline struct ccw1 *
+tape_ccw_cmd(struct ccw1 *ccw, __u8 cmd_code)
+{
+	ccw->cmd_code = cmd_code;
+	ccw->flags = 0;
+	ccw->count = 0;
+	ccw->cda = (__u32)(addr_t) &ccw->cmd_code;
+	return ccw + 1;
+}
+
+static inline struct ccw1 *
+tape_ccw_repeat(struct ccw1 *ccw, __u8 cmd_code, int count)
+{
+	while (count-- > 0) {
+		ccw->cmd_code = cmd_code;
+		ccw->flags = CCW_FLAG_CC;
+		ccw->count = 0;
+		ccw->cda = (__u32)(addr_t) &ccw->cmd_code;
+		ccw++;
+	}
+	return ccw;
+}
+
+static inline struct ccw1 *
+tape_ccw_cc_idal(struct ccw1 *ccw, __u8 cmd_code, struct idal_buffer *idal)
+{
+	ccw->cmd_code = cmd_code;
+	ccw->flags    = CCW_FLAG_CC;
+	idal_buffer_set_cda(idal, ccw);
+	return ccw + 1;
+}
+
+static inline struct ccw1 *
+tape_ccw_end_idal(struct ccw1 *ccw, __u8 cmd_code, struct idal_buffer *idal)
+{
+	ccw->cmd_code = cmd_code;
+	ccw->flags    = 0;
+	idal_buffer_set_cda(idal, ccw);
+	return ccw + 1;
+}
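+
+/*
+ * Usage sketch (illustrative; command codes such as MODE_SET_DB come
+ * from tape_std.h): each helper fills one ccw1 and returns a pointer to
+ * the next free slot, so channel programs are built by chaining, e.g.
+ *
+ *	struct ccw1 *ccw = request->cpaddr;
+ *
+ *	ccw = tape_ccw_cc(ccw, MODE_SET_DB, 1, device->modeset_byte);
+ *	ccw = tape_ccw_cc(ccw, LOCATE, 4, request->cpdata);
+ *	tape_ccw_end(ccw, NOP, 0, NULL);
+ *
+ * All but the last CCW carry CCW_FLAG_CC so the channel command-chains
+ * them into one program.
+ */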
+
+/* Global vars */
+extern const char *tape_state_verbose[];
+extern const char *tape_op_verbose[];
+
+#endif /* _TAPE_H */
diff --git a/drivers/s390/char/tape_34xx.c b/drivers/s390/char/tape_34xx.c
new file mode 100644
index 0000000..6d73ee3
--- /dev/null
+++ b/drivers/s390/char/tape_34xx.c
@@ -0,0 +1,1233 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ *    tape device discipline for 3480/3490 tapes.
+ *
+ *    Copyright IBM Corp. 2001, 2009
+ *    Author(s): Carsten Otte <cotte@de.ibm.com>
+ *		 Tuan Ngo-Anh <ngoanh@de.ibm.com>
+ *		 Martin Schwidefsky <schwidefsky@de.ibm.com>
+ */
+
+#define KMSG_COMPONENT "tape_34xx"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/bio.h>
+#include <linux/workqueue.h>
+#include <linux/slab.h>
+
+#define TAPE_DBF_AREA	tape_34xx_dbf
+
+#include "tape.h"
+#include "tape_std.h"
+
+/*
+ * Pointer to debug area.
+ */
+debug_info_t *TAPE_DBF_AREA = NULL;
+EXPORT_SYMBOL(TAPE_DBF_AREA);
+
+#define TAPE34XX_FMT_3480	0
+#define TAPE34XX_FMT_3480_2_XF	1
+#define TAPE34XX_FMT_3480_XF	2
+
+struct tape_34xx_block_id {
+	unsigned int	wrap		: 1;
+	unsigned int	segment		: 7;
+	unsigned int	format		: 2;
+	unsigned int	block		: 22;
+};
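+
+/*
+ * The bit fields above pack into the 32-bit hardware block ID: 1 bit
+ * wrap, 7 bits segment, 2 bits format and a 22-bit logical block number
+ * (hence the 0x3fffff limit checked in tape_34xx_mtseek()).
+ */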
+
+/*
+ * A list of block IDs is used to speed up seeking for blocks.
+ */
+struct tape_34xx_sbid {
+	struct list_head		list;
+	struct tape_34xx_block_id	bid;
+};
+
+static void tape_34xx_delete_sbid_from(struct tape_device *, int);
+
+/*
+ * Medium sense for 34xx tapes. There is no 'real' medium sense call.
+ * So we just do a normal sense.
+ */
+static void __tape_34xx_medium_sense(struct tape_request *request)
+{
+	struct tape_device *device = request->device;
+	unsigned char *sense;
+
+	if (request->rc == 0) {
+		sense = request->cpdata;
+
+		/*
+		 * This isn't quite correct. But since INTERVENTION_REQUIRED
+		 * means that the drive is 'neither ready nor on-line' it is
+		 * only slightly inaccurate to say there is no tape loaded if
+		 * the drive isn't online...
+		 */
+		if (sense[0] & SENSE_INTERVENTION_REQUIRED)
+			tape_med_state_set(device, MS_UNLOADED);
+		else
+			tape_med_state_set(device, MS_LOADED);
+
+		if (sense[1] & SENSE_WRITE_PROTECT)
+			device->tape_generic_status |= GMT_WR_PROT(~0);
+		else
+			device->tape_generic_status &= ~GMT_WR_PROT(~0);
+	} else
+		DBF_EVENT(4, "tape_34xx: medium sense failed with rc=%d\n",
+			request->rc);
+	tape_free_request(request);
+}
+
+static int tape_34xx_medium_sense(struct tape_device *device)
+{
+	struct tape_request *request;
+	int rc;
+
+	request = tape_alloc_request(1, 32);
+	if (IS_ERR(request)) {
+		DBF_EXCEPTION(6, "MSEN fail\n");
+		return PTR_ERR(request);
+	}
+
+	request->op = TO_MSEN;
+	tape_ccw_end(request->cpaddr, SENSE, 32, request->cpdata);
+	rc = tape_do_io_interruptible(device, request);
+	__tape_34xx_medium_sense(request);
+	return rc;
+}
+
+static void tape_34xx_medium_sense_async(struct tape_device *device)
+{
+	struct tape_request *request;
+
+	request = tape_alloc_request(1, 32);
+	if (IS_ERR(request)) {
+		DBF_EXCEPTION(6, "MSEN fail\n");
+		return;
+	}
+
+	request->op = TO_MSEN;
+	tape_ccw_end(request->cpaddr, SENSE, 32, request->cpdata);
+	request->callback = (void *) __tape_34xx_medium_sense;
+	request->callback_data = NULL;
+	tape_do_io_async(device, request);
+}
+
+struct tape_34xx_work {
+	struct tape_device	*device;
+	enum tape_op		 op;
+	struct work_struct	 work;
+};
+
+/*
+ * These functions are currently used only to schedule a medium_sense for
+ * later execution. This is because we get an interrupt whenever a medium
+ * is inserted but cannot call tape_do_io* from an interrupt context.
+ * Maybe that's useful for other actions we want to start from the
+ * interrupt handler.
+ * Note: the work handler is called by the system work queue. The tape
+ * commands started by the handler need to be asynchronous, otherwise
+ * a deadlock can occur e.g. in case of a deferred cc=1 (see __tape_do_irq).
+ */
+static void
+tape_34xx_work_handler(struct work_struct *work)
+{
+	struct tape_34xx_work *p =
+		container_of(work, struct tape_34xx_work, work);
+	struct tape_device *device = p->device;
+
+	switch (p->op) {
+	case TO_MSEN:
+		tape_34xx_medium_sense_async(device);
+		break;
+	default:
+		DBF_EVENT(3, "T34XX: internal error: unknown work\n");
+	}
+	tape_put_device(device);
+	kfree(p);
+}
+
+static int
+tape_34xx_schedule_work(struct tape_device *device, enum tape_op op)
+{
+	struct tape_34xx_work *p;
+
+	if ((p = kzalloc(sizeof(*p), GFP_ATOMIC)) == NULL)
+		return -ENOMEM;
+
+	INIT_WORK(&p->work, tape_34xx_work_handler);
+
+	p->device = tape_get_device(device);
+	p->op     = op;
+
+	schedule_work(&p->work);
+	return 0;
+}
+
+/*
+ * Done Handler is called when dev stat = DEVICE-END (successful operation)
+ */
+static inline int
+tape_34xx_done(struct tape_request *request)
+{
+	DBF_EVENT(6, "%s done\n", tape_op_verbose[request->op]);
+
+	switch (request->op) {
+	case TO_DSE:
+	case TO_RUN:
+	case TO_WRI:
+	case TO_WTM:
+	case TO_ASSIGN:
+	case TO_UNASSIGN:
+		tape_34xx_delete_sbid_from(request->device, 0);
+		break;
+	default:
+		break;
+	}
+	return TAPE_IO_SUCCESS;
+}
+
+static inline int
+tape_34xx_erp_failed(struct tape_request *request, int rc)
+{
+	DBF_EVENT(3, "Error recovery failed for %s (RC=%d)\n",
+		  tape_op_verbose[request->op], rc);
+	return rc;
+}
+
+static inline int
+tape_34xx_erp_succeeded(struct tape_request *request)
+{
+	DBF_EVENT(3, "Error Recovery successful for %s\n",
+		  tape_op_verbose[request->op]);
+	return tape_34xx_done(request);
+}
+
+static inline int
+tape_34xx_erp_retry(struct tape_request *request)
+{
+	DBF_EVENT(3, "xerp retr %s\n", tape_op_verbose[request->op]);
+	return TAPE_IO_RETRY;
+}
+
+/*
+ * This function is called, when no request is outstanding and we get an
+ * interrupt
+ */
+static int
+tape_34xx_unsolicited_irq(struct tape_device *device, struct irb *irb)
+{
+	if (irb->scsw.cmd.dstat == 0x85) { /* READY */
+		/* A medium was inserted in the drive. */
+		DBF_EVENT(6, "xuud med\n");
+		tape_34xx_delete_sbid_from(device, 0);
+		tape_34xx_schedule_work(device, TO_MSEN);
+	} else {
+		DBF_EVENT(3, "unsol.irq! dev end: %08x\n", device->cdev_id);
+		tape_dump_sense_dbf(device, NULL, irb);
+	}
+	return TAPE_IO_SUCCESS;
+}
+
+/*
+ * Read Opposite Error Recovery Function:
+ * Used when Read Forward does not work.
+ */
+static int
+tape_34xx_erp_read_opposite(struct tape_device *device,
+			    struct tape_request *request)
+{
+	if (request->op == TO_RFO) {
+		/*
+		 * We did read forward, but the data could not be read
+		 * *correctly*. We transform the request to a read backward
+		 * and try again.
+		 */
+		tape_std_read_backward(device, request);
+		return tape_34xx_erp_retry(request);
+	}
+
+	/*
+	 * We tried to read forward and backward, but had no
+	 * success -> failed.
+	 */
+	return tape_34xx_erp_failed(request, -EIO);
+}
+
+static int
+tape_34xx_erp_bug(struct tape_device *device, struct tape_request *request,
+		  struct irb *irb, int no)
+{
+	if (request->op != TO_ASSIGN) {
+		dev_err(&device->cdev->dev, "An unexpected condition %d "
+			"occurred in tape error recovery\n", no);
+		tape_dump_sense_dbf(device, request, irb);
+	}
+	return tape_34xx_erp_failed(request, -EIO);
+}
+
+/*
+ * Handle data overrun between cu and drive. The channel speed might
+ * be too slow.
+ */
+static int
+tape_34xx_erp_overrun(struct tape_device *device, struct tape_request *request,
+		      struct irb *irb)
+{
+	if (irb->ecw[3] == 0x40) {
+		dev_warn (&device->cdev->dev, "A data overrun occurred between"
+			" the control unit and tape unit\n");
+		return tape_34xx_erp_failed(request, -EIO);
+	}
+	return tape_34xx_erp_bug(device, request, irb, -1);
+}
+
+/*
+ * Handle record sequence error.
+ */
+static int
+tape_34xx_erp_sequence(struct tape_device *device,
+		       struct tape_request *request, struct irb *irb)
+{
+	if (irb->ecw[3] == 0x41) {
+		/*
+		 * cu detected incorrect block-id sequence on tape.
+		 */
+		dev_warn (&device->cdev->dev, "The block ID sequence on the "
+			"tape is incorrect\n");
+		return tape_34xx_erp_failed(request, -EIO);
+	}
+	/*
+	 * Record sequence error bit is set, but erpa does not
+	 * show record sequence error.
+	 */
+	return tape_34xx_erp_bug(device, request, irb, -2);
+}
+
+/*
+ * This function analyses the tape's sense-data in case of a unit-check.
+ * If possible, it tries to recover from the error. Otherwise the user is
+ * informed about the problem.
+ */
+static int
+tape_34xx_unit_check(struct tape_device *device, struct tape_request *request,
+		     struct irb *irb)
+{
+	int inhibit_cu_recovery;
+	__u8* sense;
+
+	inhibit_cu_recovery = (*device->modeset_byte & 0x80) ? 1 : 0;
+	sense = irb->ecw;
+
+	if (
+		sense[0] & SENSE_COMMAND_REJECT &&
+		sense[1] & SENSE_WRITE_PROTECT
+	) {
+		if (
+			request->op == TO_DSE ||
+			request->op == TO_WRI ||
+			request->op == TO_WTM
+		) {
+			/* medium is write protected */
+			return tape_34xx_erp_failed(request, -EACCES);
+		} else {
+			return tape_34xx_erp_bug(device, request, irb, -3);
+		}
+	}
+
+	/*
+	 * Special cases for various tape-states when reaching
+	 * end of recorded area
+	 *
+	 * FIXME: Maybe a special case of the special case:
+	 *        sense[0] == SENSE_EQUIPMENT_CHECK &&
+	 *        sense[1] == SENSE_DRIVE_ONLINE    &&
+	 *        sense[3] == 0x47 (Volume Fenced)
+	 *
+	 *        This was caused by continued FSF or FSR after an
+	 *        'End Of Data'.
+	 */
+	if ((
+		sense[0] == SENSE_DATA_CHECK      ||
+		sense[0] == SENSE_EQUIPMENT_CHECK ||
+		sense[0] == SENSE_EQUIPMENT_CHECK + SENSE_DEFERRED_UNIT_CHECK
+	) && (
+		sense[1] == SENSE_DRIVE_ONLINE ||
+		sense[1] == SENSE_BEGINNING_OF_TAPE + SENSE_WRITE_MODE
+	)) {
+		switch (request->op) {
+		/*
+		 * sense[0] == SENSE_DATA_CHECK   &&
+		 * sense[1] == SENSE_DRIVE_ONLINE
+		 * sense[3] == 0x36 (End Of Data)
+		 *
+		 * Further seeks might return a 'Volume Fenced'.
+		 */
+		case TO_FSF:
+		case TO_FSB:
+			/* Trying to seek beyond end of recorded area */
+			return tape_34xx_erp_failed(request, -ENOSPC);
+		case TO_BSB:
+			return tape_34xx_erp_retry(request);
+
+		/*
+		 * sense[0] == SENSE_DATA_CHECK   &&
+		 * sense[1] == SENSE_DRIVE_ONLINE &&
+		 * sense[3] == 0x36 (End Of Data)
+		 */
+		case TO_LBL:
+			/* Block could not be located. */
+			tape_34xx_delete_sbid_from(device, 0);
+			return tape_34xx_erp_failed(request, -EIO);
+
+		case TO_RFO:
+			/* Read beyond end of recorded area -> 0 bytes read */
+			return tape_34xx_erp_failed(request, 0);
+
+		/*
+		 * sense[0] == SENSE_EQUIPMENT_CHECK &&
+		 * sense[1] == SENSE_DRIVE_ONLINE    &&
+		 * sense[3] == 0x38 (Physical End Of Volume)
+		 */
+		case TO_WRI:
+			/* Writing at physical end of volume */
+			return tape_34xx_erp_failed(request, -ENOSPC);
+		default:
+			return tape_34xx_erp_failed(request, 0);
+		}
+	}
+
+	/* Sensing special bits */
+	if (sense[0] & SENSE_BUS_OUT_CHECK)
+		return tape_34xx_erp_retry(request);
+
+	if (sense[0] & SENSE_DATA_CHECK) {
+		/*
+		 * hardware failure, damaged tape or improper
+		 * operating conditions
+		 */
+		switch (sense[3]) {
+		case 0x23:
+			/* a read data check occurred */
+			if ((sense[2] & SENSE_TAPE_SYNC_MODE) ||
+			    inhibit_cu_recovery)
+				// data check is not permanent, may be
+				// recovered. We always use async-mode with
+				// cu-recovery, so this should *never* happen.
+				return tape_34xx_erp_bug(device, request,
+							 irb, -4);
+
+			/* data check is permanent, CU recovery has failed */
+			dev_warn (&device->cdev->dev, "A read error occurred "
+				"that cannot be recovered\n");
+			return tape_34xx_erp_failed(request, -EIO);
+		case 0x25:
+			// a write data check occurred
+			if ((sense[2] & SENSE_TAPE_SYNC_MODE) ||
+			    inhibit_cu_recovery)
+				// data check is not permanent, may be
+				// recovered. We always use async-mode with
+				// cu-recovery, so this should *never* happen.
+				return tape_34xx_erp_bug(device, request,
+							 irb, -5);
+
+			// data check is permanent, cu-recovery has failed
+			dev_warn (&device->cdev->dev, "A write error on the "
+				"tape cannot be recovered\n");
+			return tape_34xx_erp_failed(request, -EIO);
+		case 0x26:
+			/* Data Check (read opposite) occurred. */
+			return tape_34xx_erp_read_opposite(device, request);
+		case 0x28:
+			/* ID-Mark at tape start couldn't be written */
+			dev_warn (&device->cdev->dev, "Writing the ID-mark "
+				"failed\n");
+			return tape_34xx_erp_failed(request, -EIO);
+		case 0x31:
+			/* Tape void. Tried to read beyond end of device. */
+			dev_warn (&device->cdev->dev, "Reading the tape beyond"
+				" the end of the recorded area failed\n");
+			return tape_34xx_erp_failed(request, -ENOSPC);
+		case 0x41:
+			/* Record sequence error. */
+			dev_warn (&device->cdev->dev, "The tape contains an "
+				"incorrect block ID sequence\n");
+			return tape_34xx_erp_failed(request, -EIO);
+		default:
+			/* all data checks for 3480 should result in one of
+			 * the above erpa-codes. For 3490, other data-check
+			 * conditions do exist. */
+			if (device->cdev->id.driver_info == tape_3480)
+				return tape_34xx_erp_bug(device, request,
+							 irb, -6);
+		}
+	}
+
+	if (sense[0] & SENSE_OVERRUN)
+		return tape_34xx_erp_overrun(device, request, irb);
+
+	if (sense[1] & SENSE_RECORD_SEQUENCE_ERR)
+		return tape_34xx_erp_sequence(device, request, irb);
+
+	/* Sensing erpa codes */
+	switch (sense[3]) {
+	case 0x00:
+		/* Unit check with erpa code 0. Report and ignore. */
+		return TAPE_IO_SUCCESS;
+	case 0x21:
+		/*
+		 * Data streaming not operational. CU will switch to
+		 * interlock mode. Reissue the command.
+		 */
+		return tape_34xx_erp_retry(request);
+	case 0x22:
+		/*
+		 * Path equipment check. Might be drive adapter error, buffer
+		 * error on the lower interface, internal path not usable,
+		 * or error during cartridge load.
+		 */
+		dev_warn (&device->cdev->dev, "A path equipment check occurred"
+			" for the tape device\n");
+		return tape_34xx_erp_failed(request, -EIO);
+	case 0x24:
+		/*
+		 * Load display check. A load display command was issued,
+		 * but the drive is displaying a drive check message. Can
+		 * be treated as "device end".
+		 */
+		return tape_34xx_erp_succeeded(request);
+	case 0x27:
+		/*
+		 * Command reject. May indicate illegal channel program or
+		 * buffer over/underrun. Since all channel programs are
+		 * issued by this driver and ought to be correct, we assume
+		 * an over/underrun situation and retry the channel program.
+		 */
+		return tape_34xx_erp_retry(request);
+	case 0x29:
+		/*
+		 * Function incompatible. Either the tape is IDRC compressed
+		 * but the hardware is not capable of IDRC, or a perform
+		 * subsystem function was issued while the CU is not online.
+		 */
+		return tape_34xx_erp_failed(request, -EIO);
+	case 0x2a:
+		/*
+		 * Unsolicited environmental data. An internal counter
+		 * overflowed; we can ignore this and reissue the command.
+		 */
+		return tape_34xx_erp_retry(request);
+	case 0x2b:
+		/*
+		 * Environmental data present. Indicates either unload
+		 * completed ok or read buffered log command completed ok.
+		 */
+		if (request->op == TO_RUN) {
+			/* Rewind unload completed ok. */
+			tape_med_state_set(device, MS_UNLOADED);
+			return tape_34xx_erp_succeeded(request);
+		}
+		/* tape_34xx doesn't use read buffered log commands. */
+		return tape_34xx_erp_bug(device, request, irb, sense[3]);
+	case 0x2c:
+		/*
+		 * Permanent equipment check. CU has tried recovery, but
+		 * did not succeed.
+		 */
+		return tape_34xx_erp_failed(request, -EIO);
+	case 0x2d:
+		/* Data security erase failure. */
+		if (request->op == TO_DSE)
+			return tape_34xx_erp_failed(request, -EIO);
+		/* Data security erase failure, but no such command issued. */
+		return tape_34xx_erp_bug(device, request, irb, sense[3]);
+	case 0x2e:
+		/*
+		 * Not capable. This indicates either that the drive fails
+		 * to read the format ID mark or that the format specified
+		 * is not supported by the drive.
+		 */
+		dev_warn (&device->cdev->dev, "The tape unit cannot process "
+			"the tape format\n");
+		return tape_34xx_erp_failed(request, -EMEDIUMTYPE);
+	case 0x30:
+		/* The medium is write protected. */
+		dev_warn (&device->cdev->dev, "The tape medium is write-"
+			"protected\n");
+		return tape_34xx_erp_failed(request, -EACCES);
+	case 0x32:
+		// Tension loss. We cannot recover this, it's an I/O error.
+		dev_warn (&device->cdev->dev, "The tape does not have the "
+			"required tape tension\n");
+		return tape_34xx_erp_failed(request, -EIO);
+	case 0x33:
+		/*
+		 * Load Failure. The cartridge was not inserted correctly or
+		 * the tape is not threaded correctly.
+		 */
+		dev_warn (&device->cdev->dev, "The tape unit failed to load"
+			" the cartridge\n");
+		tape_34xx_delete_sbid_from(device, 0);
+		return tape_34xx_erp_failed(request, -EIO);
+	case 0x34:
+		/*
+		 * Unload failure. The drive cannot maintain tape tension
+		 * and control tape movement during an unload operation.
+		 */
+		dev_warn (&device->cdev->dev, "Automatic unloading of the tape"
+			" cartridge failed\n");
+		if (request->op == TO_RUN)
+			return tape_34xx_erp_failed(request, -EIO);
+		return tape_34xx_erp_bug(device, request, irb, sense[3]);
+	case 0x35:
+		/*
+		 * Drive equipment check. One of the following:
+		 * - cu cannot recover from a drive detected error
+		 * - a check code message is shown on drive display
+		 * - the cartridge loader does not respond correctly
+		 * - a failure occurs during an index, load, or unload cycle
+		 */
+		dev_warn (&device->cdev->dev, "An equipment check has occurred"
+			" on the tape unit\n");
+		return tape_34xx_erp_failed(request, -EIO);
+	case 0x36:
+		if (device->cdev->id.driver_info == tape_3490)
+			/* End of data. */
+			return tape_34xx_erp_failed(request, -EIO);
+		/* This erpa is reserved for 3480 */
+		return tape_34xx_erp_bug(device, request, irb, sense[3]);
+	case 0x37:
+		/*
+		 * Tape length error. The tape is shorter than reported in
+		 * the beginning-of-tape data.
+		 */
+		dev_warn (&device->cdev->dev, "The tape information states an"
+			" incorrect length\n");
+		return tape_34xx_erp_failed(request, -EIO);
+	case 0x38:
+		/*
+		 * Physical end of tape. A read/write operation reached
+		 * the physical end of tape.
+		 */
+		if (request->op == TO_WRI ||
+		    request->op == TO_DSE ||
+		    request->op == TO_WTM)
+			return tape_34xx_erp_failed(request, -ENOSPC);
+		return tape_34xx_erp_failed(request, -EIO);
+	case 0x39:
+		/* Backward at Beginning of tape. */
+		return tape_34xx_erp_failed(request, -EIO);
+	case 0x3a:
+		/* Drive switched to not ready. */
+		dev_warn (&device->cdev->dev, "The tape unit is not ready\n");
+		return tape_34xx_erp_failed(request, -EIO);
+	case 0x3b:
+		/* Manual rewind or unload. This causes an I/O error. */
+		dev_warn (&device->cdev->dev, "The tape medium has been "
+			"rewound or unloaded manually\n");
+		tape_34xx_delete_sbid_from(device, 0);
+		return tape_34xx_erp_failed(request, -EIO);
+	case 0x42:
+		/*
+		 * Degraded mode. A condition that can cause degraded
+		 * performance is detected.
+		 */
+		dev_warn (&device->cdev->dev, "The tape subsystem is running "
+			"in degraded mode\n");
+		return tape_34xx_erp_retry(request);
+	case 0x43:
+		/* Drive not ready. */
+		tape_34xx_delete_sbid_from(device, 0);
+		tape_med_state_set(device, MS_UNLOADED);
+		/* Some commands are successful even in this case */
+		if (sense[1] & SENSE_DRIVE_ONLINE) {
+			switch (request->op) {
+			case TO_ASSIGN:
+			case TO_UNASSIGN:
+			case TO_DIS:
+			case TO_NOP:
+				return tape_34xx_done(request);
+			default:
+				break;
+			}
+		}
+		return tape_34xx_erp_failed(request, -ENOMEDIUM);
+	case 0x44:
+		/* Locate Block unsuccessful. */
+		if (request->op != TO_BLOCK && request->op != TO_LBL)
+			/* No locate block was issued. */
+			return tape_34xx_erp_bug(device, request,
+						 irb, sense[3]);
+		return tape_34xx_erp_failed(request, -EIO);
+	case 0x45:
+		/* The drive is assigned to a different channel path. */
+		dev_warn (&device->cdev->dev, "The tape unit is already "
+			"assigned\n");
+		return tape_34xx_erp_failed(request, -EIO);
+	case 0x46:
+		/*
+		 * Drive not on-line. Drive may be switched offline,
+		 * the power supply may be switched off or
+		 * the drive address may not be set correctly.
+		 */
+		dev_warn (&device->cdev->dev, "The tape unit is not online\n");
+		return tape_34xx_erp_failed(request, -EIO);
+	case 0x47:
+		/* Volume fenced. CU reports volume integrity is lost. */
+		dev_warn (&device->cdev->dev, "The control unit has fenced "
+			"access to the tape volume\n");
+		tape_34xx_delete_sbid_from(device, 0);
+		return tape_34xx_erp_failed(request, -EIO);
+	case 0x48:
+		/* Log sense data and retry request. */
+		return tape_34xx_erp_retry(request);
+	case 0x49:
+		/* Bus out check. A parity check error on the bus was found. */
+		dev_warn (&device->cdev->dev, "A parity error occurred on the "
+			"tape bus\n");
+		return tape_34xx_erp_failed(request, -EIO);
+	case 0x4a:
+		/* Control unit erp failed. */
+		dev_warn (&device->cdev->dev, "I/O error recovery failed on "
+			"the tape control unit\n");
+		return tape_34xx_erp_failed(request, -EIO);
+	case 0x4b:
+		/*
+		 * CU and drive incompatible. The drive requests micro-program
+		 * patches, which are not available on the CU.
+		 */
+		dev_warn (&device->cdev->dev, "The tape unit requires a "
+			"firmware update\n");
+		return tape_34xx_erp_failed(request, -EIO);
+	case 0x4c:
+		/*
+		 * Recovered Check-One failure. The CU developed a hardware
+		 * error but was able to recover.
+		 */
+		return tape_34xx_erp_retry(request);
+	case 0x4d:
+		if (device->cdev->id.driver_info == tape_3490)
+			/*
+			 * Resetting event received. Since the driver does
+			 * not support resetting event recovery (which has to
+			 * be handled by the I/O Layer), retry our command.
+			 */
+			return tape_34xx_erp_retry(request);
+		/* This erpa is reserved for 3480. */
+		return tape_34xx_erp_bug(device, request, irb, sense[3]);
+	case 0x4e:
+		if (device->cdev->id.driver_info == tape_3490) {
+			/*
+			 * Maximum block size exceeded. This indicates that
+			 * the block to be written is larger than allowed for
+			 * buffered mode.
+			 */
+			dev_warn (&device->cdev->dev, "The maximum block size"
+				" for buffered mode is exceeded\n");
+			return tape_34xx_erp_failed(request, -ENOBUFS);
+		}
+		/* This erpa is reserved for 3480. */
+		return tape_34xx_erp_bug(device, request, irb, sense[3]);
+	case 0x50:
+		/*
+		 * Read buffered log (Overflow). CU is running in extended
+		 * buffered log mode, and a counter overflows. This should
+		 * never happen, since we're never running in extended
+		 * buffered log mode.
+		 */
+		return tape_34xx_erp_retry(request);
+	case 0x51:
+		/*
+		 * Read buffered log (EOV). EOF processing occurs while the
+		 * CU is in extended buffered log mode. This should never
+		 * happen, since we're never running in extended buffered
+		 * log mode.
+		 */
+		return tape_34xx_erp_retry(request);
+	case 0x52:
+		/* End of Volume complete. Rewind unload completed ok. */
+		if (request->op == TO_RUN) {
+			tape_med_state_set(device, MS_UNLOADED);
+			tape_34xx_delete_sbid_from(device, 0);
+			return tape_34xx_erp_succeeded(request);
+		}
+		return tape_34xx_erp_bug(device, request, irb, sense[3]);
+	case 0x53:
+		/* Global command intercept. */
+		return tape_34xx_erp_retry(request);
+	case 0x54:
+		/* Channel interface recovery (temporary). */
+		return tape_34xx_erp_retry(request);
+	case 0x55:
+		/* Channel interface recovery (permanent). */
+		dev_warn (&device->cdev->dev, "A channel interface error cannot be"
+			" recovered\n");
+		return tape_34xx_erp_failed(request, -EIO);
+	case 0x56:
+		/* Channel protocol error. */
+		dev_warn (&device->cdev->dev, "A channel protocol error "
+			"occurred\n");
+		return tape_34xx_erp_failed(request, -EIO);
+	case 0x57:
+		/*
+		 * 3480: Attention intercept.
+		 * 3490: Global status intercept.
+		 */
+		return tape_34xx_erp_retry(request);
+	case 0x5a:
+		/*
+		 * Tape length incompatible. The tape inserted is too long,
+		 * which could cause damage to the tape or the drive.
+		 */
+		dev_warn (&device->cdev->dev, "The tape unit does not support "
+			"the tape length\n");
+		return tape_34xx_erp_failed(request, -EIO);
+	case 0x5b:
+		/* Format 3480 XF incompatible */
+		if (sense[1] & SENSE_BEGINNING_OF_TAPE)
+			/* The tape will get overwritten. */
+			return tape_34xx_erp_retry(request);
+		dev_warn (&device->cdev->dev, "The tape unit does not support"
+			" format 3480 XF\n");
+		return tape_34xx_erp_failed(request, -EIO);
+	case 0x5c:
+		/* Format 3480-2 XF incompatible */
+		dev_warn (&device->cdev->dev, "The tape unit does not support tape "
+			"format 3480-2 XF\n");
+		return tape_34xx_erp_failed(request, -EIO);
+	case 0x5d:
+		/* Tape length violation. */
+		dev_warn (&device->cdev->dev, "The tape unit does not support"
+			" the current tape length\n");
+		return tape_34xx_erp_failed(request, -EMEDIUMTYPE);
+	case 0x5e:
+		/* Compaction algorithm incompatible. */
+		dev_warn (&device->cdev->dev, "The tape unit does not support"
+			" the compaction algorithm\n");
+		return tape_34xx_erp_failed(request, -EMEDIUMTYPE);
+
+		/* The following erpas should have been covered earlier. */
+	case 0x23: /* Read data check. */
+	case 0x25: /* Write data check. */
+	case 0x26: /* Data check (read opposite). */
+	case 0x28: /* Write id mark check. */
+	case 0x31: /* Tape void. */
+	case 0x40: /* Overrun error. */
+	case 0x41: /* Record sequence error. */
+		/* All other erpas are reserved for future use. */
+	default:
+		return tape_34xx_erp_bug(device, request, irb, sense[3]);
+	}
+}
+
+/*
+ * 3480/3490 interrupt handler
+ */
+static int
+tape_34xx_irq(struct tape_device *device, struct tape_request *request,
+	      struct irb *irb)
+{
+	if (request == NULL)
+		return tape_34xx_unsolicited_irq(device, irb);
+
+	if ((irb->scsw.cmd.dstat & DEV_STAT_UNIT_EXCEP) &&
+	    (irb->scsw.cmd.dstat & DEV_STAT_DEV_END) &&
+	    (request->op == TO_WRI)) {
+		/* Write at end of volume */
+		return tape_34xx_erp_failed(request, -ENOSPC);
+	}
+
+	if (irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK)
+		return tape_34xx_unit_check(device, request, irb);
+
+	if (irb->scsw.cmd.dstat & DEV_STAT_DEV_END) {
+		/*
+		 * A unit exception occurs on skipping over a tapemark block.
+		 */
+		if (irb->scsw.cmd.dstat & DEV_STAT_UNIT_EXCEP) {
+			if (request->op == TO_BSB || request->op == TO_FSB)
+				request->rescnt++;
+			else
+				DBF_EVENT(5, "Unit Exception!\n");
+		}
+		return tape_34xx_done(request);
+	}
+
+	DBF_EVENT(6, "xunknownirq\n");
+	tape_dump_sense_dbf(device, request, irb);
+	return TAPE_IO_STOP;
+}
+
+/*
+ * ioctl_overload: handle the driver-specific TAPE390_DISPLAY ioctl.
+ */
+static int
+tape_34xx_ioctl(struct tape_device *device, unsigned int cmd, unsigned long arg)
+{
+	if (cmd == TAPE390_DISPLAY) {
+		struct display_struct disp;
+
+		if (copy_from_user(&disp, (char __user *) arg, sizeof(disp)) != 0)
+			return -EFAULT;
+
+		return tape_std_display(device, &disp);
+	} else
+		return -EINVAL;
+}
+
+static inline void
+tape_34xx_append_new_sbid(struct tape_34xx_block_id bid, struct list_head *l)
+{
+	struct tape_34xx_sbid *	new_sbid;
+
+	new_sbid = kmalloc(sizeof(*new_sbid), GFP_ATOMIC);
+	if (!new_sbid)
+		return;
+
+	new_sbid->bid = bid;
+	list_add(&new_sbid->list, l);
+}
+
+/*
+ * Build up the search block ID list. The block ID consists of a logical
+ * block number and a hardware specific part. The hardware specific part
+ * helps the tape drive to speed up searching for a specific block.
+ */
+static void
+tape_34xx_add_sbid(struct tape_device *device, struct tape_34xx_block_id bid)
+{
+	struct list_head *	sbid_list;
+	struct tape_34xx_sbid *	sbid;
+	struct list_head *	l;
+
+	/*
+	 * immediately return if there is no list at all or the block to add
+	 * is located in segment 1 of wrap 0 because this position is used
+	 * if no hardware position data is supplied.
+	 */
+	sbid_list = (struct list_head *) device->discdata;
+	if (!sbid_list || (bid.segment < 2 && bid.wrap == 0))
+		return;
+
+	/*
+	 * Search the position where to insert the new entry. Hardware
+	 * acceleration uses only the segment and wrap number. So we
+	 * need only one entry for a specific wrap/segment combination.
+	 * If there is a block with a lower number but the same hard-
+	 * ware position data we just update the block number in the
+	 * existing entry.
+	 */
+	list_for_each(l, sbid_list) {
+		sbid = list_entry(l, struct tape_34xx_sbid, list);
+
+		if (
+			(sbid->bid.segment == bid.segment) &&
+			(sbid->bid.wrap    == bid.wrap)
+		) {
+			if (bid.block < sbid->bid.block)
+				sbid->bid = bid;
+			else
+				return;
+			break;
+		}
+
+		/* Sort in according to logical block number. */
+		if (bid.block < sbid->bid.block) {
+			tape_34xx_append_new_sbid(bid, l->prev);
+			break;
+		}
+	}
+	/* List empty or new block bigger than last entry. */
+	if (l == sbid_list)
+		tape_34xx_append_new_sbid(bid, l->prev);
+
+	DBF_LH(4, "Current list is:\n");
+	list_for_each(l, sbid_list) {
+		sbid = list_entry(l, struct tape_34xx_sbid, list);
+		DBF_LH(4, "%d:%03d@%05d\n",
+			sbid->bid.wrap,
+			sbid->bid.segment,
+			sbid->bid.block
+		);
+	}
+}
+
+/*
+ * Delete all entries from the search block ID list that belong to tape blocks
+ * equal or higher than the given number.
+ */
+static void
+tape_34xx_delete_sbid_from(struct tape_device *device, int from)
+{
+	struct list_head *	sbid_list;
+	struct tape_34xx_sbid *	sbid;
+	struct list_head *	l;
+	struct list_head *	n;
+
+	sbid_list = (struct list_head *) device->discdata;
+	if (!sbid_list)
+		return;
+
+	list_for_each_safe(l, n, sbid_list) {
+		sbid = list_entry(l, struct tape_34xx_sbid, list);
+		if (sbid->bid.block >= from) {
+			DBF_LH(4, "Delete sbid %d:%03d@%05d\n",
+				sbid->bid.wrap,
+				sbid->bid.segment,
+				sbid->bid.block
+			);
+			list_del(l);
+			kfree(sbid);
+		}
+	}
+}
+
+/*
+ * Merge hardware position data into a block id.
+ */
+static void
+tape_34xx_merge_sbid(
+	struct tape_device *		device,
+	struct tape_34xx_block_id *	bid
+) {
+	struct tape_34xx_sbid *	sbid;
+	struct tape_34xx_sbid *	sbid_to_use;
+	struct list_head *	sbid_list;
+	struct list_head *	l;
+
+	sbid_list = (struct list_head *) device->discdata;
+	bid->wrap    = 0;
+	bid->segment = 1;
+
+	if (!sbid_list || list_empty(sbid_list))
+		return;
+
+	sbid_to_use = NULL;
+	list_for_each(l, sbid_list) {
+		sbid = list_entry(l, struct tape_34xx_sbid, list);
+
+		if (sbid->bid.block >= bid->block)
+			break;
+		sbid_to_use = sbid;
+	}
+	if (sbid_to_use) {
+		bid->wrap    = sbid_to_use->bid.wrap;
+		bid->segment = sbid_to_use->bid.segment;
+		DBF_LH(4, "Use %d:%03d@%05d for %05d\n",
+			sbid_to_use->bid.wrap,
+			sbid_to_use->bid.segment,
+			sbid_to_use->bid.block,
+			bid->block
+		);
+	}
+}
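+
+/*
+ * Worked example (hypothetical list): with entries 0:001@00100 and
+ * 0:003@02000, merging a block ID for block 1500 picks 0:001 (the last
+ * entry below the target), so the drive starts its accelerated search
+ * from wrap 0, segment 1.
+ */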
+
+static int
+tape_34xx_setup_device(struct tape_device * device)
+{
+	int			rc;
+	struct list_head *	discdata;
+
+	DBF_EVENT(6, "34xx device setup\n");
+	if ((rc = tape_std_assign(device)) == 0) {
+		if ((rc = tape_34xx_medium_sense(device)) != 0) {
+			DBF_LH(3, "34xx medium sense returned %d\n", rc);
+		}
+	}
+	discdata = kmalloc(sizeof(struct list_head), GFP_KERNEL);
+	if (discdata) {
+		INIT_LIST_HEAD(discdata);
+		device->discdata = discdata;
+	}
+
+	return rc;
+}
+
+static void
+tape_34xx_cleanup_device(struct tape_device *device)
+{
+	tape_std_unassign(device);
+
+	if (device->discdata) {
+		tape_34xx_delete_sbid_from(device, 0);
+		kfree(device->discdata);
+		device->discdata = NULL;
+	}
+}
+
+
+/*
+ * MTTELL: Tell block. Return the number of the block relative to the current file.
+ */
+static int
+tape_34xx_mttell(struct tape_device *device, int mt_count)
+{
+	struct {
+		struct tape_34xx_block_id	cbid;
+		struct tape_34xx_block_id	dbid;
+	} __attribute__ ((packed)) block_id;
+	int rc;
+
+	rc = tape_std_read_block_id(device, (__u64 *) &block_id);
+	if (rc)
+		return rc;
+
+	tape_34xx_add_sbid(device, block_id.cbid);
+	return block_id.cbid.block;
+}
+
+/*
+ * MTSEEK: seek to the specified block.
+ */
+static int
+tape_34xx_mtseek(struct tape_device *device, int mt_count)
+{
+	struct tape_request *request;
+	struct tape_34xx_block_id *	bid;
+
+	if (mt_count > 0x3fffff) {
+		DBF_EXCEPTION(6, "xsee parm\n");
+		return -EINVAL;
+	}
+	request = tape_alloc_request(3, 4);
+	if (IS_ERR(request))
+		return PTR_ERR(request);
+
+	/* setup ccws */
+	request->op = TO_LBL;
+	bid         = (struct tape_34xx_block_id *) request->cpdata;
+	bid->format = (*device->modeset_byte & 0x08) ?
+			TAPE34XX_FMT_3480_XF : TAPE34XX_FMT_3480;
+	bid->block  = mt_count;
+	tape_34xx_merge_sbid(device, bid);
+
+	tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1, device->modeset_byte);
+	tape_ccw_cc(request->cpaddr + 1, LOCATE, 4, request->cpdata);
+	tape_ccw_end(request->cpaddr + 2, NOP, 0, NULL);
+
+	/* execute it */
+	return tape_do_io_free(device, request);
+}
+
+/*
+ * List of 3480/3490 magnetic tape commands.
+ */
+static tape_mtop_fn tape_34xx_mtop[TAPE_NR_MTOPS] = {
+	[MTRESET]	 = tape_std_mtreset,
+	[MTFSF]		 = tape_std_mtfsf,
+	[MTBSF]		 = tape_std_mtbsf,
+	[MTFSR]		 = tape_std_mtfsr,
+	[MTBSR]		 = tape_std_mtbsr,
+	[MTWEOF]	 = tape_std_mtweof,
+	[MTREW]		 = tape_std_mtrew,
+	[MTOFFL]	 = tape_std_mtoffl,
+	[MTNOP]		 = tape_std_mtnop,
+	[MTRETEN]	 = tape_std_mtreten,
+	[MTBSFM]	 = tape_std_mtbsfm,
+	[MTFSFM]	 = tape_std_mtfsfm,
+	[MTEOM]		 = tape_std_mteom,
+	[MTERASE]	 = tape_std_mterase,
+	[MTRAS1]	 = NULL,
+	[MTRAS2]	 = NULL,
+	[MTRAS3]	 = NULL,
+	[MTSETBLK]	 = tape_std_mtsetblk,
+	[MTSETDENSITY]	 = NULL,
+	[MTSEEK]	 = tape_34xx_mtseek,
+	[MTTELL]	 = tape_34xx_mttell,
+	[MTSETDRVBUFFER] = NULL,
+	[MTFSS]		 = NULL,
+	[MTBSS]		 = NULL,
+	[MTWSM]		 = NULL,
+	[MTLOCK]	 = NULL,
+	[MTUNLOCK]	 = NULL,
+	[MTLOAD]	 = tape_std_mtload,
+	[MTUNLOAD]	 = tape_std_mtunload,
+	[MTCOMPRESSION]	 = tape_std_mtcompression,
+	[MTSETPART]	 = NULL,
+	[MTMKPART]	 = NULL
+};
+
+/*
+ * Tape discipline structure for 3480 and 3490.
+ */
+static struct tape_discipline tape_discipline_34xx = {
+	.owner = THIS_MODULE,
+	.setup_device = tape_34xx_setup_device,
+	.cleanup_device = tape_34xx_cleanup_device,
+	.process_eov = tape_std_process_eov,
+	.irq = tape_34xx_irq,
+	.read_block = tape_std_read_block,
+	.write_block = tape_std_write_block,
+	.ioctl_fn = tape_34xx_ioctl,
+	.mtop_array = tape_34xx_mtop
+};
+
+static struct ccw_device_id tape_34xx_ids[] = {
+	{ CCW_DEVICE_DEVTYPE(0x3480, 0, 0x3480, 0), .driver_info = tape_3480},
+	{ CCW_DEVICE_DEVTYPE(0x3490, 0, 0x3490, 0), .driver_info = tape_3490},
+	{ /* end of list */ },
+};
+
+static int
+tape_34xx_online(struct ccw_device *cdev)
+{
+	return tape_generic_online(
+		dev_get_drvdata(&cdev->dev),
+		&tape_discipline_34xx
+	);
+}
+
+static struct ccw_driver tape_34xx_driver = {
+	.driver = {
+		.name = "tape_34xx",
+		.owner = THIS_MODULE,
+	},
+	.ids = tape_34xx_ids,
+	.probe = tape_generic_probe,
+	.remove = tape_generic_remove,
+	.set_online = tape_34xx_online,
+	.set_offline = tape_generic_offline,
+	.freeze = tape_generic_pm_suspend,
+	.int_class = IRQIO_TAP,
+};
+
+static int
+tape_34xx_init (void)
+{
+	int rc;
+
+	TAPE_DBF_AREA = debug_register ( "tape_34xx", 2, 2, 4*sizeof(long));
+	debug_register_view(TAPE_DBF_AREA, &debug_sprintf_view);
+#ifdef DBF_LIKE_HELL
+	debug_set_level(TAPE_DBF_AREA, 6);
+#endif
+
+	DBF_EVENT(3, "34xx init\n");
+	/* Register driver for 3480/3490 tapes. */
+	rc = ccw_driver_register(&tape_34xx_driver);
+	if (rc)
+		DBF_EVENT(3, "34xx init failed\n");
+	else
+		DBF_EVENT(3, "34xx registered\n");
+	return rc;
+}
+
+static void
+tape_34xx_exit(void)
+{
+	ccw_driver_unregister(&tape_34xx_driver);
+
+	debug_unregister(TAPE_DBF_AREA);
+}
+
+MODULE_DEVICE_TABLE(ccw, tape_34xx_ids);
+MODULE_AUTHOR("(C) 2001-2002 IBM Deutschland Entwicklung GmbH");
+MODULE_DESCRIPTION("Linux on zSeries channel attached 3480 tape device driver");
+MODULE_LICENSE("GPL");
+
+module_init(tape_34xx_init);
+module_exit(tape_34xx_exit);
diff --git a/drivers/s390/char/tape_3590.c b/drivers/s390/char/tape_3590.c
new file mode 100644
index 0000000..cdcde18
--- /dev/null
+++ b/drivers/s390/char/tape_3590.c
@@ -0,0 +1,1702 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ *    tape device discipline for 3590 tapes.
+ *
+ *    Copyright IBM Corp. 2001, 2009
+ *    Author(s): Stefan Bader <shbader@de.ibm.com>
+ *		 Michael Holzheu <holzheu@de.ibm.com>
+ *		 Martin Schwidefsky <schwidefsky@de.ibm.com>
+ */
+
+#define KMSG_COMPONENT "tape_3590"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/bio.h>
+#include <asm/ebcdic.h>
+
+#define TAPE_DBF_AREA	tape_3590_dbf
+#define BUFSIZE 512	/* size of buffers for dynamically generated messages */
+
+#include "tape.h"
+#include "tape_std.h"
+#include "tape_3590.h"
+
+static struct workqueue_struct *tape_3590_wq;
+
+/*
+ * Pointer to debug area.
+ */
+debug_info_t *TAPE_DBF_AREA = NULL;
+EXPORT_SYMBOL(TAPE_DBF_AREA);
+
+/*******************************************************************
+ * Error Recovery functions:
+ * - Read Opposite:		 implemented
+ * - Read Device (buffered) log: BRA
+ * - Read Library log:		 BRA
+ * - Swap Devices:		 BRA
+ * - Long Busy:			 implemented
+ * - Special Intercept:		 BRA
+ * - Read Alternate:		 implemented
+ *******************************************************************/
+
+static const char *tape_3590_msg[TAPE_3590_MAX_MSG] = {
+	[0x00] = "",
+	[0x10] = "Lost Sense",
+	[0x11] = "Assigned Elsewhere",
+	[0x12] = "Allegiance Reset",
+	[0x13] = "Shared Access Violation",
+	[0x20] = "Command Reject",
+	[0x21] = "Configuration Error",
+	[0x22] = "Protection Exception",
+	[0x23] = "Write Protect",
+	[0x24] = "Write Length",
+	[0x25] = "Read-Only Format",
+	[0x31] = "Beginning of Partition",
+	[0x33] = "End of Partition",
+	[0x34] = "End of Data",
+	[0x35] = "Block not found",
+	[0x40] = "Device Intervention",
+	[0x41] = "Loader Intervention",
+	[0x42] = "Library Intervention",
+	[0x50] = "Write Error",
+	[0x51] = "Erase Error",
+	[0x52] = "Formatting Error",
+	[0x53] = "Read Error",
+	[0x54] = "Unsupported Format",
+	[0x55] = "No Formatting",
+	[0x56] = "Positioning lost",
+	[0x57] = "Read Length",
+	[0x60] = "Unsupported Medium",
+	[0x61] = "Medium Length Error",
+	[0x62] = "Medium removed",
+	[0x64] = "Load Check",
+	[0x65] = "Unload Check",
+	[0x70] = "Equipment Check",
+	[0x71] = "Bus out Check",
+	[0x72] = "Protocol Error",
+	[0x73] = "Interface Error",
+	[0x74] = "Overrun",
+	[0x75] = "Halt Signal",
+	[0x90] = "Device fenced",
+	[0x91] = "Device Path fenced",
+	[0xa0] = "Volume misplaced",
+	[0xa1] = "Volume inaccessible",
+	[0xa2] = "Volume in input",
+	[0xa3] = "Volume ejected",
+	[0xa4] = "All categories reserved",
+	[0xa5] = "Duplicate Volume",
+	[0xa6] = "Library Manager Offline",
+	[0xa7] = "Library Output Station full",
+	[0xa8] = "Vision System non-operational",
+	[0xa9] = "Library Manager Equipment Check",
+	[0xaa] = "Library Equipment Check",
+	[0xab] = "All Library Cells full",
+	[0xac] = "No Cleaner Volumes in Library",
+	[0xad] = "I/O Station door open",
+	[0xae] = "Subsystem environmental alert",
+};
+
+static int crypt_supported(struct tape_device *device)
+{
+	return TAPE390_CRYPT_SUPPORTED(TAPE_3590_CRYPT_INFO(device));
+}
+
+static int crypt_enabled(struct tape_device *device)
+{
+	return TAPE390_CRYPT_ON(TAPE_3590_CRYPT_INFO(device));
+}
+
+static void ext_to_int_kekl(struct tape390_kekl *in,
+			    struct tape3592_kekl *out)
+{
+	int len;
+
+	memset(out, 0, sizeof(*out));
+	if (in->type == TAPE390_KEKL_TYPE_HASH)
+		out->flags |= 0x40;
+	if (in->type_on_tape == TAPE390_KEKL_TYPE_HASH)
+		out->flags |= 0x80;
+	len = min(sizeof(out->label), strlen(in->label));
+	memcpy(out->label, in->label, len);
+	memset(out->label + len, ' ', sizeof(out->label) - len);
+	ASCEBC(out->label, sizeof(out->label));
+}
+
+static void int_to_ext_kekl(struct tape3592_kekl *in,
+			    struct tape390_kekl *out)
+{
+	memset(out, 0, sizeof(*out));
+	if (in->flags & 0x40)
+		out->type = TAPE390_KEKL_TYPE_HASH;
+	else
+		out->type = TAPE390_KEKL_TYPE_LABEL;
+	if (in->flags & 0x80)
+		out->type_on_tape = TAPE390_KEKL_TYPE_HASH;
+	else
+		out->type_on_tape = TAPE390_KEKL_TYPE_LABEL;
+	memcpy(out->label, in->label, sizeof(in->label));
+	EBCASC(out->label, sizeof(in->label));
+	strim(out->label);
+}
+
+static void int_to_ext_kekl_pair(struct tape3592_kekl_pair *in,
+				 struct tape390_kekl_pair *out)
+{
+	if (in->count == 0) {
+		out->kekl[0].type = TAPE390_KEKL_TYPE_NONE;
+		out->kekl[0].type_on_tape = TAPE390_KEKL_TYPE_NONE;
+		out->kekl[1].type = TAPE390_KEKL_TYPE_NONE;
+		out->kekl[1].type_on_tape = TAPE390_KEKL_TYPE_NONE;
+	} else if (in->count == 1) {
+		int_to_ext_kekl(&in->kekl[0], &out->kekl[0]);
+		out->kekl[1].type = TAPE390_KEKL_TYPE_NONE;
+		out->kekl[1].type_on_tape = TAPE390_KEKL_TYPE_NONE;
+	} else if (in->count == 2) {
+		int_to_ext_kekl(&in->kekl[0], &out->kekl[0]);
+		int_to_ext_kekl(&in->kekl[1], &out->kekl[1]);
+	} else {
+		printk(KERN_ERR "Invalid KEKL number: %d\n", in->count);
+		BUG();
+	}
+}
+
+static int check_ext_kekl(struct tape390_kekl *kekl)
+{
+	if (kekl->type == TAPE390_KEKL_TYPE_NONE)
+		goto invalid;
+	if (kekl->type > TAPE390_KEKL_TYPE_HASH)
+		goto invalid;
+	if (kekl->type_on_tape == TAPE390_KEKL_TYPE_NONE)
+		goto invalid;
+	if (kekl->type_on_tape > TAPE390_KEKL_TYPE_HASH)
+		goto invalid;
+	if ((kekl->type == TAPE390_KEKL_TYPE_HASH) &&
+	    (kekl->type_on_tape == TAPE390_KEKL_TYPE_LABEL))
+		goto invalid;
+
+	return 0;
+invalid:
+	return -EINVAL;
+}
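+
+/*
+ * For illustration: type=LABEL with type_on_tape=HASH is accepted (a
+ * label may be stored as its hash), while type=HASH with
+ * type_on_tape=LABEL is rejected, since a hash cannot be expanded back
+ * into a label.
+ */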
+
+static int check_ext_kekl_pair(struct tape390_kekl_pair *kekls)
+{
+	if (check_ext_kekl(&kekls->kekl[0]))
+		goto invalid;
+	if (check_ext_kekl(&kekls->kekl[1]))
+		goto invalid;
+
+	return 0;
+invalid:
+	return -EINVAL;
+}
+
+/*
+ * Query KEKLs
+ */
+static int tape_3592_kekl_query(struct tape_device *device,
+				struct tape390_kekl_pair *ext_kekls)
+{
+	struct tape_request *request;
+	struct tape3592_kekl_query_order *order;
+	struct tape3592_kekl_query_data *int_kekls;
+	int rc;
+
+	DBF_EVENT(6, "tape3592_kekl_query\n");
+	int_kekls = kmalloc(sizeof(*int_kekls), GFP_KERNEL|GFP_DMA);
+	if (!int_kekls)
+		return -ENOMEM;
+	request = tape_alloc_request(2, sizeof(*order));
+	if (IS_ERR(request)) {
+		rc = PTR_ERR(request);
+		goto fail_malloc;
+	}
+	order = request->cpdata;
+	memset(order, 0, sizeof(*order));
+	order->code = 0xe2;
+	order->max_count = 2;
+	request->op = TO_KEKL_QUERY;
+	tape_ccw_cc(request->cpaddr, PERF_SUBSYS_FUNC, sizeof(*order), order);
+	tape_ccw_end(request->cpaddr + 1, READ_SS_DATA, sizeof(*int_kekls),
+		     int_kekls);
+	rc = tape_do_io(device, request);
+	if (rc)
+		goto fail_request;
+	int_to_ext_kekl_pair(&int_kekls->kekls, ext_kekls);
+
+	rc = 0;
+fail_request:
+	tape_free_request(request);
+fail_malloc:
+	kfree(int_kekls);
+	return rc;
+}
+
+/*
+ * IOCTL: Query KEKLs
+ */
+static int tape_3592_ioctl_kekl_query(struct tape_device *device,
+				      unsigned long arg)
+{
+	int rc;
+	struct tape390_kekl_pair *ext_kekls;
+
+	DBF_EVENT(6, "tape_3592_ioctl_kekl_query\n");
+	if (!crypt_supported(device))
+		return -ENOSYS;
+	if (!crypt_enabled(device))
+		return -EUNATCH;
+	ext_kekls = kmalloc(sizeof(*ext_kekls), GFP_KERNEL);
+	if (!ext_kekls)
+		return -ENOMEM;
+	rc = tape_3592_kekl_query(device, ext_kekls);
+	if (rc != 0)
+		goto fail;
+	if (copy_to_user((char __user *) arg, ext_kekls, sizeof(*ext_kekls))) {
+		rc = -EFAULT;
+		goto fail;
+	}
+	rc = 0;
+fail:
+	kfree(ext_kekls);
+	return rc;
+}
+
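+/*
+ * A minimal user-space sketch of the query path above ("/dev/ntibm0" is
+ * the non-rewinding node created by the character frontend;
+ * TAPE390_KEKL_QUERY and struct tape390_kekl_pair come from tape390.h):
+ *
+ *	struct tape390_kekl_pair kekls;
+ *	int fd = open("/dev/ntibm0", O_RDONLY);
+ *
+ *	if (ioctl(fd, TAPE390_KEKL_QUERY, &kekls) == 0)
+ *		printf("kekl 0 type: %d\n", kekls.kekl[0].type);
+ */
+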
+static int tape_3590_mttell(struct tape_device *device, int mt_count);
+
+/*
+ * Set KEKLs
+ */
+static int tape_3592_kekl_set(struct tape_device *device,
+			      struct tape390_kekl_pair *ext_kekls)
+{
+	struct tape_request *request;
+	struct tape3592_kekl_set_order *order;
+
+	DBF_EVENT(6, "tape3592_kekl_set\n");
+	if (check_ext_kekl_pair(ext_kekls)) {
+		DBF_EVENT(6, "invalid kekls\n");
+		return -EINVAL;
+	}
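+	/* KEKLs may only be set while positioned at block 0. */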
+	if (tape_3590_mttell(device, 0) != 0)
+		return -EBADSLT;
+	request = tape_alloc_request(1, sizeof(*order));
+	if (IS_ERR(request))
+		return PTR_ERR(request);
+	order = request->cpdata;
+	memset(order, 0, sizeof(*order));
+	order->code = 0xe3;
+	order->kekls.count = 2;
+	ext_to_int_kekl(&ext_kekls->kekl[0], &order->kekls.kekl[0]);
+	ext_to_int_kekl(&ext_kekls->kekl[1], &order->kekls.kekl[1]);
+	request->op = TO_KEKL_SET;
+	tape_ccw_end(request->cpaddr, PERF_SUBSYS_FUNC, sizeof(*order), order);
+
+	return tape_do_io_free(device, request);
+}
+
+/*
+ * IOCTL: Set KEKLs
+ */
+static int tape_3592_ioctl_kekl_set(struct tape_device *device,
+				    unsigned long arg)
+{
+	int rc;
+	struct tape390_kekl_pair *ext_kekls;
+
+	DBF_EVENT(6, "tape_3592_ioctl_kekl_set\n");
+	if (!crypt_supported(device))
+		return -ENOSYS;
+	if (!crypt_enabled(device))
+		return -EUNATCH;
+	ext_kekls = memdup_user((char __user *)arg, sizeof(*ext_kekls));
+	if (IS_ERR(ext_kekls))
+		return PTR_ERR(ext_kekls);
+	rc = tape_3592_kekl_set(device, ext_kekls);
+	kfree(ext_kekls);
+	return rc;
+}
+
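+/*
+ * From user space, setting works symmetrically (sketch; note that
+ * check_ext_kekl_pair() above requires both entries to be valid):
+ *
+ *	struct tape390_kekl_pair kekls = {
+ *		.kekl[0] = { .type = TAPE390_KEKL_TYPE_LABEL,
+ *			     .type_on_tape = TAPE390_KEKL_TYPE_LABEL,
+ *			     .label = "KEY1" },
+ *		.kekl[1] = { ... },
+ *	};
+ *
+ *	ioctl(fd, TAPE390_KEKL_SET, &kekls);
+ */
+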
+/*
+ * Enable encryption
+ */
+static struct tape_request *__tape_3592_enable_crypt(struct tape_device *device)
+{
+	struct tape_request *request;
+	char *data;
+
+	DBF_EVENT(6, "tape_3592_enable_crypt\n");
+	if (!crypt_supported(device))
+		return ERR_PTR(-ENOSYS);
+	request = tape_alloc_request(2, 72);
+	if (IS_ERR(request))
+		return request;
+	data = request->cpdata;
+	memset(data, 0, 72);
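+	/* 72 bytes = two 36-byte MODE SET control blocks, chained below. */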
+
+	data[0]       = 0x05;
+	data[36 + 0]  = 0x03;
+	data[36 + 1]  = 0x03;
+	data[36 + 4]  = 0x40;
+	data[36 + 6]  = 0x01;
+	data[36 + 14] = 0x2f;
+	data[36 + 18] = 0xc3;
+	data[36 + 35] = 0x72;
+	request->op = TO_CRYPT_ON;
+	tape_ccw_cc(request->cpaddr, MODE_SET_CB, 36, data);
+	tape_ccw_end(request->cpaddr + 1, MODE_SET_CB, 36, data + 36);
+	return request;
+}
+
+static int tape_3592_enable_crypt(struct tape_device *device)
+{
+	struct tape_request *request;
+
+	request = __tape_3592_enable_crypt(device);
+	if (IS_ERR(request))
+		return PTR_ERR(request);
+	return tape_do_io_free(device, request);
+}
+
+static void tape_3592_enable_crypt_async(struct tape_device *device)
+{
+	struct tape_request *request;
+
+	request = __tape_3592_enable_crypt(device);
+	if (!IS_ERR(request))
+		tape_do_io_async_free(device, request);
+}
+
+/*
+ * Disable encryption
+ */
+static struct tape_request *__tape_3592_disable_crypt(struct tape_device *device)
+{
+	struct tape_request *request;
+	char *data;
+
+	DBF_EVENT(6, "tape_3592_disable_crypt\n");
+	if (!crypt_supported(device))
+		return ERR_PTR(-ENOSYS);
+	request = tape_alloc_request(2, 72);
+	if (IS_ERR(request))
+		return request;
+	data = request->cpdata;
+	memset(data, 0, 72);
+
+	data[0]       = 0x05;
+	data[36 + 0]  = 0x03;
+	data[36 + 1]  = 0x03;
+	data[36 + 35] = 0x32;
+
+	request->op = TO_CRYPT_OFF;
+	tape_ccw_cc(request->cpaddr, MODE_SET_CB, 36, data);
+	tape_ccw_end(request->cpaddr + 1, MODE_SET_CB, 36, data + 36);
+
+	return request;
+}
+
+static int tape_3592_disable_crypt(struct tape_device *device)
+{
+	struct tape_request *request;
+
+	request = __tape_3592_disable_crypt(device);
+	if (IS_ERR(request))
+		return PTR_ERR(request);
+	return tape_do_io_free(device, request);
+}
+
+static void tape_3592_disable_crypt_async(struct tape_device *device)
+{
+	struct tape_request *request;
+
+	request = __tape_3592_disable_crypt(device);
+	if (!IS_ERR(request))
+		tape_do_io_async_free(device, request);
+}
+
+/*
+ * IOCTL: Set encryption status
+ */
+static int tape_3592_ioctl_crypt_set(struct tape_device *device,
+				     unsigned long arg)
+{
+	struct tape390_crypt_info info;
+
+	DBF_EVENT(6, "tape_3592_ioctl_crypt_set\n");
+	if (!crypt_supported(device))
+		return -ENOSYS;
+	if (copy_from_user(&info, (char __user *)arg, sizeof(info)))
+		return -EFAULT;
+	if (info.status & ~TAPE390_CRYPT_ON_MASK)
+		return -EINVAL;
+	if (info.status & TAPE390_CRYPT_ON_MASK)
+		return tape_3592_enable_crypt(device);
+	else
+		return tape_3592_disable_crypt(device);
+}
+
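+/*
+ * From user space, switching encryption on then boils down to (sketch):
+ *
+ *	struct tape390_crypt_info info = { .status = TAPE390_CRYPT_ON_MASK };
+ *
+ *	ioctl(fd, TAPE390_CRYPT_SET, &info);
+ */
+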
+static int tape_3590_sense_medium(struct tape_device *device);
+
+/*
+ * IOCTL: Query encryption status
+ */
+static int tape_3592_ioctl_crypt_query(struct tape_device *device,
+				       unsigned long arg)
+{
+	DBF_EVENT(6, "tape_3592_ioctl_crypt_query\n");
+	if (!crypt_supported(device))
+		return -ENOSYS;
+	tape_3590_sense_medium(device);
+	if (copy_to_user((char __user *) arg, &TAPE_3590_CRYPT_INFO(device),
+		sizeof(TAPE_3590_CRYPT_INFO(device))))
+		return -EFAULT;
+	else
+		return 0;
+}
+
+/*
+ * 3590 IOCTL Overload
+ */
+static int
+tape_3590_ioctl(struct tape_device *device, unsigned int cmd, unsigned long arg)
+{
+	switch (cmd) {
+	case TAPE390_DISPLAY: {
+		struct display_struct disp;
+
+		if (copy_from_user(&disp, (char __user *) arg, sizeof(disp)))
+			return -EFAULT;
+
+		return tape_std_display(device, &disp);
+	}
+	case TAPE390_KEKL_SET:
+		return tape_3592_ioctl_kekl_set(device, arg);
+	case TAPE390_KEKL_QUERY:
+		return tape_3592_ioctl_kekl_query(device, arg);
+	case TAPE390_CRYPT_SET:
+		return tape_3592_ioctl_crypt_set(device, arg);
+	case TAPE390_CRYPT_QUERY:
+		return tape_3592_ioctl_crypt_query(device, arg);
+	default:
+		return -EINVAL;	/* no additional ioctls */
+	}
+}
+
+/*
+ * SENSE Medium: Get Sense data about medium state
+ */
+static int tape_3590_sense_medium(struct tape_device *device)
+{
+	struct tape_request *request;
+
+	request = tape_alloc_request(1, 128);
+	if (IS_ERR(request))
+		return PTR_ERR(request);
+	request->op = TO_MSEN;
+	tape_ccw_end(request->cpaddr, MEDIUM_SENSE, 128, request->cpdata);
+	return tape_do_io_free(device, request);
+}
+
+static void tape_3590_sense_medium_async(struct tape_device *device)
+{
+	struct tape_request *request;
+
+	request = tape_alloc_request(1, 128);
+	if (IS_ERR(request))
+		return;
+	request->op = TO_MSEN;
+	tape_ccw_end(request->cpaddr, MEDIUM_SENSE, 128, request->cpdata);
+	tape_do_io_async_free(device, request);
+}
+
+/*
+ * MTTELL: Tell block. Return the number of the current block.
+ */
+static int
+tape_3590_mttell(struct tape_device *device, int mt_count)
+{
+	__u64 block_id;
+	int rc;
+
+	rc = tape_std_read_block_id(device, &block_id);
+	if (rc)
+		return rc;
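+	/*
+	 * The 8-byte ID contains two words; only the upper one (the
+	 * channel block ID) is reported, the lower word is discarded.
+	 */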
+	return block_id >> 32;
+}
+
+/*
+ * MTSEEK: seek to the specified block.
+ */
+static int
+tape_3590_mtseek(struct tape_device *device, int count)
+{
+	struct tape_request *request;
+
+	DBF_EVENT(6, "xsee id: %x\n", count);
+	request = tape_alloc_request(3, 4);
+	if (IS_ERR(request))
+		return PTR_ERR(request);
+	request->op = TO_LBL;
+	tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1, device->modeset_byte);
+	*(__u32 *) request->cpdata = count;
+	tape_ccw_cc(request->cpaddr + 1, LOCATE, 4, request->cpdata);
+	tape_ccw_end(request->cpaddr + 2, NOP, 0, NULL);
+	return tape_do_io_free(device, request);
+}
+
+/*
+ * Read Opposite Error Recovery Function:
+ * Used, when Read Forward does not work
+ */
+static void
+tape_3590_read_opposite(struct tape_device *device,
+			struct tape_request *request)
+{
+	struct tape_3590_disc_data *data;
+
+	/*
+	 * We have allocated 4 ccws in tape_std_read, so we can now
+	 * transform the request to a read backward, followed by a
+	 * forward space block.
+	 */
+	request->op = TO_RBA;
+	tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1, device->modeset_byte);
+	data = device->discdata;
+	tape_ccw_cc_idal(request->cpaddr + 1, data->read_back_op,
+			 device->char_data.idal_buf);
+	tape_ccw_cc(request->cpaddr + 2, FORSPACEBLOCK, 0, NULL);
+	tape_ccw_end(request->cpaddr + 3, NOP, 0, NULL);
+	DBF_EVENT(6, "xrop ccwg\n");
+}
+
+/*
+ * Read Attention Msg
+ * This should be done after an interrupt with attention bit (0x80)
+ * in device state.
+ *
+ * After a "read attention message" request there are two possible
+ * results:
+ *
+ * 1. A unit check is presented when attention sense is present (e.g. when
+ * a medium has been unloaded). The attention sense then accompanies
+ * the unit check. The recovery action is either "retry"
+ * (in case there is an attention message pending) or "permanent error".
+ *
+ * 2. The attention msg is written to the "read subsystem data" buffer.
+ * In this case we probably should print it to the console.
+ */
+static void tape_3590_read_attmsg_async(struct tape_device *device)
+{
+	struct tape_request *request;
+	char *buf;
+
+	request = tape_alloc_request(3, 4096);
+	if (IS_ERR(request))
+		return;
+	request->op = TO_READ_ATTMSG;
+	buf = request->cpdata;
+	buf[0] = PREP_RD_SS_DATA;
+	buf[6] = RD_ATTMSG;	/* read att msg */
+	tape_ccw_cc(request->cpaddr, PERFORM_SS_FUNC, 12, buf);
+	tape_ccw_cc(request->cpaddr + 1, READ_SS_DATA, 4096 - 12, buf + 12);
+	tape_ccw_end(request->cpaddr + 2, NOP, 0, NULL);
+	tape_do_io_async_free(device, request);
+}
+
+/*
+ * These functions are used to schedule follow-up actions from within an
+ * interrupt context (like unsolicited interrupts).
+ * Note: the work handler is called by the system work queue. The tape
+ * commands started by the handler need to be asynchronous, otherwise
+ * a deadlock can occur e.g. in case of a deferred cc=1 (see __tape_do_irq).
+ */
+struct work_handler_data {
+	struct tape_device *device;
+	enum tape_op        op;
+	struct work_struct  work;
+};
+
+static void
+tape_3590_work_handler(struct work_struct *work)
+{
+	struct work_handler_data *p =
+		container_of(work, struct work_handler_data, work);
+
+	switch (p->op) {
+	case TO_MSEN:
+		tape_3590_sense_medium_async(p->device);
+		break;
+	case TO_READ_ATTMSG:
+		tape_3590_read_attmsg_async(p->device);
+		break;
+	case TO_CRYPT_ON:
+		tape_3592_enable_crypt_async(p->device);
+		break;
+	case TO_CRYPT_OFF:
+		tape_3592_disable_crypt_async(p->device);
+		break;
+	default:
+		DBF_EVENT(3, "T3590: work handler undefined for "
+			  "operation 0x%02x\n", p->op);
+	}
+	tape_put_device(p->device);
+	kfree(p);
+}
+
+static int
+tape_3590_schedule_work(struct tape_device *device, enum tape_op op)
+{
+	struct work_handler_data *p;
+
+	p = kzalloc(sizeof(*p), GFP_ATOMIC);
+	if (!p)
+		return -ENOMEM;
+
+	INIT_WORK(&p->work, tape_3590_work_handler);
+
+	p->device = tape_get_device(device);
+	p->op = op;
+
+	queue_work(tape_3590_wq, &p->work);
+	return 0;
+}
+
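+/*
+ * Typical use, as in tape_3590_unsolicited_irq() below: defer a medium
+ * sense instead of starting I/O from interrupt context directly:
+ *
+ *	tape_3590_schedule_work(device, TO_MSEN);
+ *
+ * GFP_ATOMIC is used in the allocation above for the same reason.
+ */
+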
+static void tape_3590_med_state_set(struct tape_device *device,
+				    struct tape_3590_med_sense *sense)
+{
+	struct tape390_crypt_info *c_info;
+
+	c_info = &TAPE_3590_CRYPT_INFO(device);
+
+	DBF_EVENT(6, "medium state: %x:%x\n", sense->macst, sense->masst);
+	switch (sense->macst) {
+	case 0x04:
+	case 0x05:
+	case 0x06:
+		tape_med_state_set(device, MS_UNLOADED);
+		TAPE_3590_CRYPT_INFO(device).medium_status = 0;
+		return;
+	case 0x08:
+	case 0x09:
+		tape_med_state_set(device, MS_LOADED);
+		break;
+	default:
+		tape_med_state_set(device, MS_UNKNOWN);
+		return;
+	}
+	c_info->medium_status |= TAPE390_MEDIUM_LOADED_MASK;
+	if (sense->flags & MSENSE_CRYPT_MASK) {
+		DBF_EVENT(6, "Medium is encrypted (%04x)\n", sense->flags);
+		c_info->medium_status |= TAPE390_MEDIUM_ENCRYPTED_MASK;
+	} else	{
+		DBF_EVENT(6, "Medium is not encrypted %04x\n", sense->flags);
+		c_info->medium_status &= ~TAPE390_MEDIUM_ENCRYPTED_MASK;
+	}
+}
+
+/*
+ * The done handler is called at device/channel end and wakes up the sleeping
+ * process.
+ */
+static int
+tape_3590_done(struct tape_device *device, struct tape_request *request)
+{
+	DBF_EVENT(6, "%s done\n", tape_op_verbose[request->op]);
+
+	switch (request->op) {
+	case TO_BSB:
+	case TO_BSF:
+	case TO_DSE:
+	case TO_FSB:
+	case TO_FSF:
+	case TO_LBL:
+	case TO_RFO:
+	case TO_RBA:
+	case TO_REW:
+	case TO_WRI:
+	case TO_WTM:
+	case TO_BLOCK:
+	case TO_LOAD:
+		tape_med_state_set(device, MS_LOADED);
+		break;
+	case TO_RUN:
+		tape_med_state_set(device, MS_UNLOADED);
+		tape_3590_schedule_work(device, TO_CRYPT_OFF);
+		break;
+	case TO_MSEN:
+		tape_3590_med_state_set(device, request->cpdata);
+		break;
+	case TO_CRYPT_ON:
+		TAPE_3590_CRYPT_INFO(device).status
+			|= TAPE390_CRYPT_ON_MASK;
+		*(device->modeset_byte) |= 0x03;
+		break;
+	case TO_CRYPT_OFF:
+		TAPE_3590_CRYPT_INFO(device).status
+			&= ~TAPE390_CRYPT_ON_MASK;
+		*(device->modeset_byte) &= ~0x03;
+		break;
+	case TO_RBI:	/* RBI seems to succeed even without medium loaded. */
+	case TO_NOP:	/* The same applies to NOP. */
+	case TO_READ_CONFIG:
+	case TO_READ_ATTMSG:
+	case TO_DIS:
+	case TO_ASSIGN:
+	case TO_UNASSIGN:
+	case TO_SIZE:
+	case TO_KEKL_SET:
+	case TO_KEKL_QUERY:
+	case TO_RDC:
+		break;
+	}
+	return TAPE_IO_SUCCESS;
+}
+
+/*
+ * This function is called when error recovery was successful
+ */
+static inline int
+tape_3590_erp_succeded(struct tape_device *device, struct tape_request *request)
+{
+	DBF_EVENT(3, "Error Recovery successful for %s\n",
+		  tape_op_verbose[request->op]);
+	return tape_3590_done(device, request);
+}
+
+/*
+ * This function is called when error recovery was not successful
+ */
+static inline int
+tape_3590_erp_failed(struct tape_device *device, struct tape_request *request,
+		     struct irb *irb, int rc)
+{
+	DBF_EVENT(3, "Error Recovery failed for %s\n",
+		  tape_op_verbose[request->op]);
+	tape_dump_sense_dbf(device, request, irb);
+	return rc;
+}
+
+/*
+ * Error Recovery do retry
+ */
+static inline int
+tape_3590_erp_retry(struct tape_device *device, struct tape_request *request,
+		    struct irb *irb)
+{
+	DBF_EVENT(2, "Retry: %s\n", tape_op_verbose[request->op]);
+	tape_dump_sense_dbf(device, request, irb);
+	return TAPE_IO_RETRY;
+}
+
+/*
+ * Handle unsolicited interrupts
+ */
+static int
+tape_3590_unsolicited_irq(struct tape_device *device, struct irb *irb)
+{
+	if (irb->scsw.cmd.dstat == DEV_STAT_CHN_END)
+		/* Probably result of halt ssch */
+		return TAPE_IO_PENDING;
+	else if (irb->scsw.cmd.dstat == 0x85)
+		/* Device Ready */
+		DBF_EVENT(3, "unsol.irq! tape ready: %08x\n", device->cdev_id);
+	else if (irb->scsw.cmd.dstat & DEV_STAT_ATTENTION) {
+		tape_3590_schedule_work(device, TO_READ_ATTMSG);
+	} else {
+		DBF_EVENT(3, "unsol.irq! dev end: %08x\n", device->cdev_id);
+		tape_dump_sense_dbf(device, NULL, irb);
+	}
+	/* check medium state */
+	tape_3590_schedule_work(device, TO_MSEN);
+	return TAPE_IO_SUCCESS;
+}
+
+/*
+ * Basic Recovery routine
+ */
+static int
+tape_3590_erp_basic(struct tape_device *device, struct tape_request *request,
+		    struct irb *irb, int rc)
+{
+	struct tape_3590_sense *sense;
+
+	sense = (struct tape_3590_sense *) irb->ecw;
+
+	switch (sense->bra) {
+	case SENSE_BRA_PER:
+		return tape_3590_erp_failed(device, request, irb, rc);
+	case SENSE_BRA_CONT:
+		return tape_3590_erp_succeded(device, request);
+	case SENSE_BRA_RE:
+		return tape_3590_erp_retry(device, request, irb);
+	case SENSE_BRA_DRE:
+		return tape_3590_erp_failed(device, request, irb, rc);
+	default:
+		BUG();
+		return TAPE_IO_STOP;
+	}
+}
+
+/*
+ *  RDL: Read Device (buffered) log
+ */
+static int
+tape_3590_erp_read_buf_log(struct tape_device *device,
+			   struct tape_request *request, struct irb *irb)
+{
+	/*
+	 * We just do the basic error recovery at the moment (retry).
+	 * Perhaps in the future, we read the log and dump it somewhere...
+	 */
+	return tape_3590_erp_basic(device, request, irb, -EIO);
+}
+
+/*
+ *  SWAP: Swap Devices
+ */
+static int
+tape_3590_erp_swap(struct tape_device *device, struct tape_request *request,
+		   struct irb *irb)
+{
+	/*
+	 * This error recovery should swap the tapes
+	 * if the original has a problem. The operation
+	 * should proceed with the new tape... this
+	 * should probably be done in user space!
+	 */
+	dev_warn (&device->cdev->dev, "The tape medium must be loaded into a "
+		"different tape unit\n");
+	return tape_3590_erp_basic(device, request, irb, -EIO);
+}
+
+/*
+ *  LBY: Long Busy
+ */
+static int
+tape_3590_erp_long_busy(struct tape_device *device,
+			struct tape_request *request, struct irb *irb)
+{
+	DBF_EVENT(6, "Device is busy\n");
+	return TAPE_IO_LONG_BUSY;
+}
+
+/*
+ *  SPI: Special Intercept
+ */
+static int
+tape_3590_erp_special_interrupt(struct tape_device *device,
+				struct tape_request *request, struct irb *irb)
+{
+	return tape_3590_erp_basic(device, request, irb, -EIO);
+}
+
+/*
+ *  RDA: Read Alternate
+ */
+static int
+tape_3590_erp_read_alternate(struct tape_device *device,
+			     struct tape_request *request, struct irb *irb)
+{
+	struct tape_3590_disc_data *data;
+
+	/*
+	 * The issued Read Backward or Read Previous command is not
+	 * supported by the device.
+	 * The recovery action should be to issue the other command:
+	 * Read Previous: if Read Backward is not supported
+	 * Read Backward: if Read Previous is not supported
+	 */
+	data = device->discdata;
+	if (data->read_back_op == READ_PREVIOUS) {
+		DBF_EVENT(2, "(%08x): No support for READ_PREVIOUS command\n",
+			  device->cdev_id);
+		data->read_back_op = READ_BACKWARD;
+	} else {
+		DBF_EVENT(2, "(%08x): No support for READ_BACKWARD command\n",
+			  device->cdev_id);
+		data->read_back_op = READ_PREVIOUS;
+	}
+	tape_3590_read_opposite(device, request);
+	return tape_3590_erp_retry(device, request, irb);
+}
+
+/*
+ * Error Recovery read opposite
+ */
+static int
+tape_3590_erp_read_opposite(struct tape_device *device,
+			    struct tape_request *request, struct irb *irb)
+{
+	switch (request->op) {
+	case TO_RFO:
+		/*
+		 * We did read forward, but the data could not be read.
+		 * We will read backward and then skip forward again.
+		 */
+		tape_3590_read_opposite(device, request);
+		return tape_3590_erp_retry(device, request, irb);
+	case TO_RBA:
+		/* We tried to read forward and backward, but had no success */
+		return tape_3590_erp_failed(device, request, irb, -EIO);
+	default:
+		return tape_3590_erp_failed(device, request, irb, -EIO);
+	}
+}
+
+/*
+ * Print an MIM (Media Information Message) (message code f0)
+ */
+static void
+tape_3590_print_mim_msg_f0(struct tape_device *device, struct irb *irb)
+{
+	struct tape_3590_sense *sense;
+	char *exception, *service;
+
+	exception = kmalloc(BUFSIZE, GFP_ATOMIC);
+	service = kmalloc(BUFSIZE, GFP_ATOMIC);
+
+	if (!exception || !service)
+		goto out_nomem;
+
+	sense = (struct tape_3590_sense *) irb->ecw;
+	/* Exception Message */
+	switch (sense->fmt.f70.emc) {
+	case 0x02:
+		snprintf(exception, BUFSIZE, "Data degraded");
+		break;
+	case 0x03:
+		snprintf(exception, BUFSIZE, "Data degraded in partion %i",
+			sense->fmt.f70.mp);
+		break;
+	case 0x04:
+		snprintf(exception, BUFSIZE, "Medium degraded");
+		break;
+	case 0x05:
+		snprintf(exception, BUFSIZE, "Medium degraded in partition %i",
+			sense->fmt.f70.mp);
+		break;
+	case 0x06:
+		snprintf(exception, BUFSIZE, "Block 0 Error");
+		break;
+	case 0x07:
+		snprintf(exception, BUFSIZE, "Medium Exception 0x%02x",
+			sense->fmt.f70.md);
+		break;
+	default:
+		snprintf(exception, BUFSIZE, "0x%02x",
+			sense->fmt.f70.emc);
+		break;
+	}
+	/* Service Message */
+	switch (sense->fmt.f70.smc) {
+	case 0x02:
+		snprintf(service, BUFSIZE, "Reference Media maintenance "
+			"procedure %i", sense->fmt.f70.md);
+		break;
+	default:
+		snprintf(service, BUFSIZE, "0x%02x",
+			sense->fmt.f70.smc);
+		break;
+	}
+
+	dev_warn (&device->cdev->dev, "Tape media information: exception %s, "
+		"service %s\n", exception, service);
+
+out_nomem:
+	kfree(exception);
+	kfree(service);
+}
+
+/*
+ * Print an I/O Subsystem Service Information Message (message code f1)
+ */
+static void
+tape_3590_print_io_sim_msg_f1(struct tape_device *device, struct irb *irb)
+{
+	struct tape_3590_sense *sense;
+	char *exception, *service;
+
+	exception = kmalloc(BUFSIZE, GFP_ATOMIC);
+	service = kmalloc(BUFSIZE, GFP_ATOMIC);
+
+	if (!exception || !service)
+		goto out_nomem;
+
+	sense = (struct tape_3590_sense *) irb->ecw;
+	/* Exception Message */
+	switch (sense->fmt.f71.emc) {
+	case 0x01:
+		snprintf(exception, BUFSIZE, "Effect of failure is unknown");
+		break;
+	case 0x02:
+		snprintf(exception, BUFSIZE, "CU Exception - no performance "
+			"impact");
+		break;
+	case 0x03:
+		snprintf(exception, BUFSIZE, "CU Exception on channel "
+			"interface 0x%02x", sense->fmt.f71.md[0]);
+		break;
+	case 0x04:
+		snprintf(exception, BUFSIZE, "CU Exception on device path "
+			"0x%02x", sense->fmt.f71.md[0]);
+		break;
+	case 0x05:
+		snprintf(exception, BUFSIZE, "CU Exception on library path "
+			"0x%02x", sense->fmt.f71.md[0]);
+		break;
+	case 0x06:
+		snprintf(exception, BUFSIZE, "CU Exception on node 0x%02x",
+			sense->fmt.f71.md[0]);
+		break;
+	case 0x07:
+		snprintf(exception, BUFSIZE, "CU Exception on partition "
+			"0x%02x", sense->fmt.f71.md[0]);
+		break;
+	default:
+		snprintf(exception, BUFSIZE, "0x%02x",
+			sense->fmt.f71.emc);
+	}
+	/* Service Message */
+	switch (sense->fmt.f71.smc) {
+	case 0x01:
+		snprintf(service, BUFSIZE, "Repair impact is unknown");
+		break;
+	case 0x02:
+		snprintf(service, BUFSIZE, "Repair will not impact cu "
+			"performance");
+		break;
+	case 0x03:
+		if (sense->fmt.f71.mdf == 0)
+			snprintf(service, BUFSIZE, "Repair will disable node "
+				"0x%x on CU", sense->fmt.f71.md[1]);
+		else
+			snprintf(service, BUFSIZE, "Repair will disable "
+				"nodes (0x%x-0x%x) on CU", sense->fmt.f71.md[1],
+				sense->fmt.f71.md[2]);
+		break;
+	case 0x04:
+		if (sense->fmt.f71.mdf == 0)
+			snprintf(service, BUFSIZE, "Repair will disable "
+				"channel path 0x%x on CU",
+				sense->fmt.f71.md[1]);
+		else
+			snprintf(service, BUFSIZE, "Repair will disable channel"
+				" paths (0x%x-0x%x) on CU",
+				sense->fmt.f71.md[1], sense->fmt.f71.md[2]);
+		break;
+	case 0x05:
+		if (sense->fmt.f71.mdf == 0)
+			snprintf(service, BUFSIZE, "Repair will disable device"
+				" path 0x%x on CU", sense->fmt.f71.md[1]);
+		else
+			snprintf(service, BUFSIZE, "Repair will disable device"
+				" paths (0x%x-0x%x) on CU",
+				sense->fmt.f71.md[1], sense->fmt.f71.md[2]);
+		break;
+	case 0x06:
+		if (sense->fmt.f71.mdf == 0)
+			snprintf(service, BUFSIZE, "Repair will disable "
+				"library path 0x%x on CU",
+				sense->fmt.f71.md[1]);
+		else
+			snprintf(service, BUFSIZE, "Repair will disable "
+				"library paths (0x%x-0x%x) on CU",
+				sense->fmt.f71.md[1], sense->fmt.f71.md[2]);
+		break;
+	case 0x07:
+		snprintf(service, BUFSIZE, "Repair will disable access to CU");
+		break;
+	default:
+		snprintf(service, BUFSIZE, "0x%02x",
+			sense->fmt.f71.smc);
+	}
+
+	dev_warn (&device->cdev->dev, "I/O subsystem information: exception"
+		" %s, service %s\n", exception, service);
+out_nomem:
+	kfree(exception);
+	kfree(service);
+}
+
+/*
+ * Print a Device Subsystem Service Information Message (message code f2)
+ */
+static void
+tape_3590_print_dev_sim_msg_f2(struct tape_device *device, struct irb *irb)
+{
+	struct tape_3590_sense *sense;
+	char *exception, *service;
+
+	exception = kmalloc(BUFSIZE, GFP_ATOMIC);
+	service = kmalloc(BUFSIZE, GFP_ATOMIC);
+
+	if (!exception || !service)
+		goto out_nomem;
+
+	sense = (struct tape_3590_sense *) irb->ecw;
+	/* Exception Message */
+	switch (sense->fmt.f71.emc) {
+	case 0x01:
+		snprintf(exception, BUFSIZE, "Effect of failure is unknown");
+		break;
+	case 0x02:
+		snprintf(exception, BUFSIZE, "DV Exception - no performance"
+			" impact");
+		break;
+	case 0x03:
+		snprintf(exception, BUFSIZE, "DV Exception on channel "
+			"interface 0x%02x", sense->fmt.f71.md[0]);
+		break;
+	case 0x04:
+		snprintf(exception, BUFSIZE, "DV Exception on loader 0x%02x",
+			sense->fmt.f71.md[0]);
+		break;
+	case 0x05:
+		snprintf(exception, BUFSIZE, "DV Exception on message display"
+			" 0x%02x", sense->fmt.f71.md[0]);
+		break;
+	case 0x06:
+		snprintf(exception, BUFSIZE, "DV Exception in tape path");
+		break;
+	case 0x07:
+		snprintf(exception, BUFSIZE, "DV Exception in drive");
+		break;
+	default:
+		snprintf(exception, BUFSIZE, "0x%02x",
+			sense->fmt.f71.emc);
+	}
+	/* Service Message */
+	switch (sense->fmt.f71.smc) {
+	case 0x01:
+		snprintf(service, BUFSIZE, "Repair impact is unknown");
+		break;
+	case 0x02:
+		snprintf(service, BUFSIZE, "Repair will not impact device "
+			"performance");
+		break;
+	case 0x03:
+		if (sense->fmt.f71.mdf == 0)
+			snprintf(service, BUFSIZE, "Repair will disable "
+				"channel path 0x%x on DV",
+				sense->fmt.f71.md[1]);
+		else
+			snprintf(service, BUFSIZE, "Repair will disable "
+				"channel path (0x%x-0x%x) on DV",
+				sense->fmt.f71.md[1], sense->fmt.f71.md[2]);
+		break;
+	case 0x04:
+		if (sense->fmt.f71.mdf == 0)
+			snprintf(service, BUFSIZE, "Repair will disable "
+				"interface 0x%x on DV", sense->fmt.f71.md[1]);
+		else
+			snprintf(service, BUFSIZE, "Repair will disable "
+				"interfaces (0x%x-0x%x) on DV",
+				sense->fmt.f71.md[1], sense->fmt.f71.md[2]);
+		break;
+	case 0x05:
+		if (sense->fmt.f71.mdf == 0)
+			snprintf(service, BUFSIZE, "Repair will disable loader"
+				" 0x%x on DV", sense->fmt.f71.md[1]);
+		else
+			snprintf(service, BUFSIZE, "Repair will disable loader"
+				" (0x%x-0x%x) on DV",
+				sense->fmt.f71.md[1], sense->fmt.f71.md[2]);
+		break;
+	case 0x07:
+		snprintf(service, BUFSIZE, "Repair will disable access to DV");
+		break;
+	case 0x08:
+		if (sense->fmt.f71.mdf == 0)
+			snprintf(service, BUFSIZE, "Repair will disable "
+				"message display 0x%x on DV",
+				sense->fmt.f71.md[1]);
+		else
+			snprintf(service, BUFSIZE, "Repair will disable "
+				"message displays (0x%x-0x%x) on DV",
+				 sense->fmt.f71.md[1], sense->fmt.f71.md[2]);
+		break;
+	case 0x09:
+		snprintf(service, BUFSIZE, "Clean DV");
+		break;
+	default:
+		snprintf(service, BUFSIZE, "0x%02x",
+			sense->fmt.f71.smc);
+	}
+
+	dev_warn (&device->cdev->dev, "Device subsystem information: exception"
+		" %s, service %s\n", exception, service);
+out_nomem:
+	kfree(exception);
+	kfree(service);
+}
+
+/*
+ * Print standard ERA Message
+ */
+static void
+tape_3590_print_era_msg(struct tape_device *device, struct irb *irb)
+{
+	struct tape_3590_sense *sense;
+
+	sense = (struct tape_3590_sense *) irb->ecw;
+	if (sense->mc == 0)
+		return;
+	if ((sense->mc > 0) && (sense->mc < TAPE_3590_MAX_MSG)) {
+		if (tape_3590_msg[sense->mc] != NULL)
+			dev_warn (&device->cdev->dev, "The tape unit has "
+				"issued sense message %s\n",
+				tape_3590_msg[sense->mc]);
+		else
+			dev_warn (&device->cdev->dev, "The tape unit has "
+				"issued an unknown sense message code 0x%x\n",
+				sense->mc);
+		return;
+	}
+	if (sense->mc == 0xf0) {
+		/* Standard Media Information Message */
+		dev_warn (&device->cdev->dev, "MIM SEV=%i, MC=%02x, ES=%x/%x, "
+			"RC=%02x-%04x-%02x\n", sense->fmt.f70.sev, sense->mc,
+			sense->fmt.f70.emc, sense->fmt.f70.smc,
+			sense->fmt.f70.refcode, sense->fmt.f70.mid,
+			sense->fmt.f70.fid);
+		tape_3590_print_mim_msg_f0(device, irb);
+		return;
+	}
+	if (sense->mc == 0xf1) {
+		/* Standard I/O Subsystem Service Information Message */
+		dev_warn (&device->cdev->dev, "IOSIM SEV=%i, DEVTYPE=3590/%02x,"
+			" MC=%02x, ES=%x/%x, REF=0x%04x-0x%04x-0x%04x\n",
+			sense->fmt.f71.sev, device->cdev->id.dev_model,
+			sense->mc, sense->fmt.f71.emc, sense->fmt.f71.smc,
+			sense->fmt.f71.refcode1, sense->fmt.f71.refcode2,
+			sense->fmt.f71.refcode3);
+		tape_3590_print_io_sim_msg_f1(device, irb);
+		return;
+	}
+	if (sense->mc == 0xf2) {
+		/* Standard Device Service Information Message */
+		dev_warn (&device->cdev->dev, "DEVSIM SEV=%i, DEVTYPE=3590/%02x"
+			", MC=%02x, ES=%x/%x, REF=0x%04x-0x%04x-0x%04x\n",
+			sense->fmt.f71.sev, device->cdev->id.dev_model,
+			sense->mc, sense->fmt.f71.emc, sense->fmt.f71.smc,
+			sense->fmt.f71.refcode1, sense->fmt.f71.refcode2,
+			sense->fmt.f71.refcode3);
+		tape_3590_print_dev_sim_msg_f2(device, irb);
+		return;
+	}
+	if (sense->mc == 0xf3) {
+		/* Standard Library Service Information Message */
+		return;
+	}
+	dev_warn (&device->cdev->dev, "The tape unit has issued an unknown "
+		"sense message code %x\n", sense->mc);
+}
+
+static int tape_3590_crypt_error(struct tape_device *device,
+				 struct tape_request *request, struct irb *irb)
+{
+	u8 cu_rc;
+	u16 ekm_rc2;
+	char *sense;
+
+	sense = ((struct tape_3590_sense *) irb->ecw)->fmt.data;
+	cu_rc = sense[0];
+	ekm_rc2 = *((u16*) &sense[10]);
+	if ((cu_rc == 0) && (ekm_rc2 == 0xee31))
+		/* key not defined on EKM */
+		return tape_3590_erp_basic(device, request, irb, -EKEYREJECTED);
+	if ((cu_rc == 1) || (cu_rc == 2))
+		/* No connection to EKM */
+		return tape_3590_erp_basic(device, request, irb, -ENOTCONN);
+
+	dev_err (&device->cdev->dev, "The tape unit failed to obtain the "
+		"encryption key from EKM\n");
+
+	return tape_3590_erp_basic(device, request, irb, -ENOKEY);
+}
+
+/*
+ *  3590 error recovery routine:
+ *  If possible, it tries to recover from the error. If this is not possible,
+ *  it informs the user about the problem.
+ */
+static int
+tape_3590_unit_check(struct tape_device *device, struct tape_request *request,
+		     struct irb *irb)
+{
+	struct tape_3590_sense *sense;
+
+	sense = (struct tape_3590_sense *) irb->ecw;
+
+	DBF_EVENT(6, "Unit Check: RQC = %x\n", sense->rc_rqc);
+
+	/*
+	 * First check all RC-QRCs where we want to do something special.
+	 * Each case below either performs a specific recovery action or
+	 * falls back to basic error recovery; unknown RC-QRCs end up in
+	 * the default case with -EIO.
+	 */
+	switch (sense->rc_rqc) {
+
+	case 0x1110:
+		tape_3590_print_era_msg(device, irb);
+		return tape_3590_erp_read_buf_log(device, request, irb);
+
+	case 0x2011:
+		tape_3590_print_era_msg(device, irb);
+		return tape_3590_erp_read_alternate(device, request, irb);
+
+	case 0x2230:
+	case 0x2231:
+		tape_3590_print_era_msg(device, irb);
+		return tape_3590_erp_special_interrupt(device, request, irb);
+	case 0x2240:
+		return tape_3590_crypt_error(device, request, irb);
+
+	case 0x3010:
+		DBF_EVENT(2, "(%08x): Backward at Beginning of Partition\n",
+			  device->cdev_id);
+		return tape_3590_erp_basic(device, request, irb, -ENOSPC);
+	case 0x3012:
+		DBF_EVENT(2, "(%08x): Forward at End of Partition\n",
+			  device->cdev_id);
+		return tape_3590_erp_basic(device, request, irb, -ENOSPC);
+	case 0x3020:
+		DBF_EVENT(2, "(%08x): End of Data Mark\n", device->cdev_id);
+		return tape_3590_erp_basic(device, request, irb, -ENOSPC);
+
+	case 0x3122:
+		DBF_EVENT(2, "(%08x): Rewind Unload initiated\n",
+			  device->cdev_id);
+		return tape_3590_erp_basic(device, request, irb, -EIO);
+	case 0x3123:
+		DBF_EVENT(2, "(%08x): Rewind Unload complete\n",
+			  device->cdev_id);
+		tape_med_state_set(device, MS_UNLOADED);
+		tape_3590_schedule_work(device, TO_CRYPT_OFF);
+		return tape_3590_erp_basic(device, request, irb, 0);
+
+	case 0x4010:
+		/*
+		 * Print an additional message, since the default message
+		 * "device intervention" is not very meaningful.
+		 */
+		tape_med_state_set(device, MS_UNLOADED);
+		tape_3590_schedule_work(device, TO_CRYPT_OFF);
+		return tape_3590_erp_basic(device, request, irb, -ENOMEDIUM);
+	case 0x4012:		/* Device Long Busy */
+		/* XXX: Also use long busy handling here? */
+		DBF_EVENT(6, "(%08x): LONG BUSY\n", device->cdev_id);
+		tape_3590_print_era_msg(device, irb);
+		return tape_3590_erp_basic(device, request, irb, -EBUSY);
+	case 0x4014:
+		DBF_EVENT(6, "(%08x): Crypto LONG BUSY\n", device->cdev_id);
+		return tape_3590_erp_long_busy(device, request, irb);
+
+	case 0x5010:
+		if (sense->rac == 0xd0) {
+			/* Swap */
+			tape_3590_print_era_msg(device, irb);
+			return tape_3590_erp_swap(device, request, irb);
+		}
+		if (sense->rac == 0x26) {
+			/* Read Opposite */
+			tape_3590_print_era_msg(device, irb);
+			return tape_3590_erp_read_opposite(device, request,
+							   irb);
+		}
+		return tape_3590_erp_basic(device, request, irb, -EIO);
+	case 0x5020:
+	case 0x5021:
+	case 0x5022:
+	case 0x5040:
+	case 0x5041:
+	case 0x5042:
+		tape_3590_print_era_msg(device, irb);
+		return tape_3590_erp_swap(device, request, irb);
+
+	case 0x5110:
+	case 0x5111:
+		return tape_3590_erp_basic(device, request, irb, -EMEDIUMTYPE);
+
+	case 0x5120:
+	case 0x1120:
+		tape_med_state_set(device, MS_UNLOADED);
+		tape_3590_schedule_work(device, TO_CRYPT_OFF);
+		return tape_3590_erp_basic(device, request, irb, -ENOMEDIUM);
+
+	case 0x6020:
+		return tape_3590_erp_basic(device, request, irb, -EMEDIUMTYPE);
+
+	case 0x8011:
+		return tape_3590_erp_basic(device, request, irb, -EPERM);
+	case 0x8013:
+		dev_warn (&device->cdev->dev, "A different host has privileged"
+			" access to the tape unit\n");
+		return tape_3590_erp_basic(device, request, irb, -EPERM);
+	default:
+		return tape_3590_erp_basic(device, request, irb, -EIO);
+	}
+}
+
+/*
+ * 3590 interrupt handler:
+ */
+static int
+tape_3590_irq(struct tape_device *device, struct tape_request *request,
+	      struct irb *irb)
+{
+	if (request == NULL)
+		return tape_3590_unsolicited_irq(device, irb);
+
+	if ((irb->scsw.cmd.dstat & DEV_STAT_UNIT_EXCEP) &&
+	    (irb->scsw.cmd.dstat & DEV_STAT_DEV_END) &&
+	    (request->op == TO_WRI)) {
+		/* Write at end of volume */
+		DBF_EVENT(2, "End of volume\n");
+		return tape_3590_erp_failed(device, request, irb, -ENOSPC);
+	}
+
+	if (irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK)
+		return tape_3590_unit_check(device, request, irb);
+
+	if (irb->scsw.cmd.dstat & DEV_STAT_DEV_END) {
+		if (irb->scsw.cmd.dstat == DEV_STAT_UNIT_EXCEP) {
+			if (request->op == TO_FSB || request->op == TO_BSB)
+				request->rescnt++;
+			else
+				DBF_EVENT(5, "Unit Exception!\n");
+		}
+
+		return tape_3590_done(device, request);
+	}
+
+	if (irb->scsw.cmd.dstat & DEV_STAT_CHN_END) {
+		DBF_EVENT(2, "channel end\n");
+		return TAPE_IO_PENDING;
+	}
+
+	if (irb->scsw.cmd.dstat & DEV_STAT_ATTENTION) {
+		DBF_EVENT(2, "Unit Attention when busy..\n");
+		return TAPE_IO_PENDING;
+	}
+
+	DBF_EVENT(6, "xunknownirq\n");
+	tape_dump_sense_dbf(device, request, irb);
+	return TAPE_IO_STOP;
+}
+
+static int tape_3590_read_dev_chars(struct tape_device *device,
+				    struct tape_3590_rdc_data *rdc_data)
+{
+	int rc;
+	struct tape_request *request;
+
+	request = tape_alloc_request(1, sizeof(*rdc_data));
+	if (IS_ERR(request))
+		return PTR_ERR(request);
+	request->op = TO_RDC;
+	tape_ccw_end(request->cpaddr, CCW_CMD_RDC, sizeof(*rdc_data),
+		     request->cpdata);
+	rc = tape_do_io(device, request);
+	if (rc == 0)
+		memcpy(rdc_data, request->cpdata, sizeof(*rdc_data));
+	tape_free_request(request);
+	return rc;
+}
+
+/*
+ * Setup device function
+ */
+static int
+tape_3590_setup_device(struct tape_device *device)
+{
+	int rc;
+	struct tape_3590_disc_data *data;
+	struct tape_3590_rdc_data *rdc_data;
+
+	DBF_EVENT(6, "3590 device setup\n");
+	data = kzalloc(sizeof(struct tape_3590_disc_data), GFP_KERNEL | GFP_DMA);
+	if (data == NULL)
+		return -ENOMEM;
+	data->read_back_op = READ_PREVIOUS;
+	device->discdata = data;
+
+	rdc_data = kmalloc(sizeof(*rdc_data), GFP_KERNEL | GFP_DMA);
+	if (!rdc_data) {
+		rc = -ENOMEM;
+		goto fail_kmalloc;
+	}
+	rc = tape_3590_read_dev_chars(device, rdc_data);
+	if (rc) {
+		DBF_LH(3, "Read device characteristics failed!\n");
+		goto fail_rdc_data;
+	}
+	rc = tape_std_assign(device);
+	if (rc)
+		goto fail_rdc_data;
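+	/* Byte 31 of the device characteristics data indicates crypto support. */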
+	if (rdc_data->data[31] == 0x13) {
+		data->crypt_info.capability |= TAPE390_CRYPT_SUPPORTED_MASK;
+		tape_3592_disable_crypt(device);
+	} else {
+		DBF_EVENT(6, "Device has NO crypto support\n");
+	}
+	/* Try to find out if medium is loaded */
+	rc = tape_3590_sense_medium(device);
+	if (rc) {
+		DBF_LH(3, "3590 medium sense returned %d\n", rc);
+		goto fail_rdc_data;
+	}
+	return 0;
+
+fail_rdc_data:
+	kfree(rdc_data);
+fail_kmalloc:
+	kfree(data);
+	return rc;
+}
+
+/*
+ * Cleanup device function
+ */
+static void
+tape_3590_cleanup_device(struct tape_device *device)
+{
+	flush_workqueue(tape_3590_wq);
+	tape_std_unassign(device);
+
+	kfree(device->discdata);
+	device->discdata = NULL;
+}
+
+/*
+ * List of 3590 magnetic tape commands.
+ */
+static tape_mtop_fn tape_3590_mtop[TAPE_NR_MTOPS] = {
+	[MTRESET]	 = tape_std_mtreset,
+	[MTFSF]		 = tape_std_mtfsf,
+	[MTBSF]		 = tape_std_mtbsf,
+	[MTFSR]		 = tape_std_mtfsr,
+	[MTBSR]		 = tape_std_mtbsr,
+	[MTWEOF]	 = tape_std_mtweof,
+	[MTREW]		 = tape_std_mtrew,
+	[MTOFFL]	 = tape_std_mtoffl,
+	[MTNOP]		 = tape_std_mtnop,
+	[MTRETEN]	 = tape_std_mtreten,
+	[MTBSFM]	 = tape_std_mtbsfm,
+	[MTFSFM]	 = tape_std_mtfsfm,
+	[MTEOM]		 = tape_std_mteom,
+	[MTERASE]	 = tape_std_mterase,
+	[MTRAS1]	 = NULL,
+	[MTRAS2]	 = NULL,
+	[MTRAS3]	 = NULL,
+	[MTSETBLK]	 = tape_std_mtsetblk,
+	[MTSETDENSITY]	 = NULL,
+	[MTSEEK]	 = tape_3590_mtseek,
+	[MTTELL]	 = tape_3590_mttell,
+	[MTSETDRVBUFFER] = NULL,
+	[MTFSS]		 = NULL,
+	[MTBSS]		 = NULL,
+	[MTWSM]		 = NULL,
+	[MTLOCK]	 = NULL,
+	[MTUNLOCK]	 = NULL,
+	[MTLOAD]	 = tape_std_mtload,
+	[MTUNLOAD]	 = tape_std_mtunload,
+	[MTCOMPRESSION]	 = tape_std_mtcompression,
+	[MTSETPART]	 = NULL,
+	[MTMKPART]	 = NULL
+};
+
+/*
+ * Tape discipline structure for 3590.
+ */
+static struct tape_discipline tape_discipline_3590 = {
+	.owner = THIS_MODULE,
+	.setup_device = tape_3590_setup_device,
+	.cleanup_device = tape_3590_cleanup_device,
+	.process_eov = tape_std_process_eov,
+	.irq = tape_3590_irq,
+	.read_block = tape_std_read_block,
+	.write_block = tape_std_write_block,
+	.ioctl_fn = tape_3590_ioctl,
+	.mtop_array = tape_3590_mtop
+};
+
+static struct ccw_device_id tape_3590_ids[] = {
+	{CCW_DEVICE_DEVTYPE(0x3590, 0, 0x3590, 0), .driver_info = tape_3590},
+	{CCW_DEVICE_DEVTYPE(0x3592, 0, 0x3592, 0), .driver_info = tape_3592},
+	{ /* end of list */ }
+};
+
+static int
+tape_3590_online(struct ccw_device *cdev)
+{
+	return tape_generic_online(dev_get_drvdata(&cdev->dev),
+				   &tape_discipline_3590);
+}
+
+static struct ccw_driver tape_3590_driver = {
+	.driver = {
+		.name = "tape_3590",
+		.owner = THIS_MODULE,
+	},
+	.ids = tape_3590_ids,
+	.probe = tape_generic_probe,
+	.remove = tape_generic_remove,
+	.set_offline = tape_generic_offline,
+	.set_online = tape_3590_online,
+	.freeze = tape_generic_pm_suspend,
+	.int_class = IRQIO_TAP,
+};
+
+/*
+ * Setup discipline structure.
+ */
+static int
+tape_3590_init(void)
+{
+	int rc;
+
+	TAPE_DBF_AREA = debug_register("tape_3590", 2, 2, 4 * sizeof(long));
+	debug_register_view(TAPE_DBF_AREA, &debug_sprintf_view);
+#ifdef DBF_LIKE_HELL
+	debug_set_level(TAPE_DBF_AREA, 6);
+#endif
+
+	DBF_EVENT(3, "3590 init\n");
+
+	tape_3590_wq = alloc_workqueue("tape_3590", 0, 0);
+	if (!tape_3590_wq)
+		return -ENOMEM;
+
+	/* Register driver for 3590 tapes. */
+	rc = ccw_driver_register(&tape_3590_driver);
+	if (rc) {
+		destroy_workqueue(tape_3590_wq);
+		DBF_EVENT(3, "3590 init failed\n");
+	} else
+		DBF_EVENT(3, "3590 registered\n");
+	return rc;
+}
+
+static void
+tape_3590_exit(void)
+{
+	ccw_driver_unregister(&tape_3590_driver);
+	destroy_workqueue(tape_3590_wq);
+	debug_unregister(TAPE_DBF_AREA);
+}
+
+MODULE_DEVICE_TABLE(ccw, tape_3590_ids);
+MODULE_AUTHOR("(C) 2001,2006 IBM Corporation");
+MODULE_DESCRIPTION("Linux on zSeries channel attached 3590 tape device driver");
+MODULE_LICENSE("GPL");
+
+module_init(tape_3590_init);
+module_exit(tape_3590_exit);
diff --git a/drivers/s390/char/tape_3590.h b/drivers/s390/char/tape_3590.h
new file mode 100644
index 0000000..b398d8a
--- /dev/null
+++ b/drivers/s390/char/tape_3590.h
@@ -0,0 +1,175 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ *    tape device discipline for 3590 tapes.
+ *
+ *    Copyright IBM Corp. 2001, 2006
+ *    Author(s): Stefan Bader <shbader@de.ibm.com>
+ *		 Michael Holzheu <holzheu@de.ibm.com>
+ *		 Martin Schwidefsky <schwidefsky@de.ibm.com>
+ */
+
+#ifndef _TAPE_3590_H
+#define _TAPE_3590_H
+
+#define MEDIUM_SENSE	0xc2
+#define READ_PREVIOUS	0x0a
+#define MODE_SENSE	0xcf
+#define PERFORM_SS_FUNC 0x77
+#define READ_SS_DATA	0x3e
+
+#define PREP_RD_SS_DATA 0x18
+#define RD_ATTMSG	0x3
+
+#define SENSE_BRA_PER  0
+#define SENSE_BRA_CONT 1
+#define SENSE_BRA_RE   2
+#define SENSE_BRA_DRE  3
+
+#define SENSE_FMT_LIBRARY	0x23
+#define SENSE_FMT_UNSOLICITED	0x40
+#define SENSE_FMT_COMMAND_REJ	0x41
+#define SENSE_FMT_COMMAND_EXEC0 0x50
+#define SENSE_FMT_COMMAND_EXEC1 0x51
+#define SENSE_FMT_EVENT0	0x60
+#define SENSE_FMT_EVENT1	0x61
+#define SENSE_FMT_MIM		0x70
+#define SENSE_FMT_SIM		0x71
+
+#define MSENSE_UNASSOCIATED	 0x00
+#define MSENSE_ASSOCIATED_MOUNT	 0x01
+#define MSENSE_ASSOCIATED_UMOUNT 0x02
+#define MSENSE_CRYPT_MASK	 0x00000010
+
+#define TAPE_3590_MAX_MSG	 0xb0
+
+/* Datatypes */
+
+struct tape_3590_disc_data {
+	struct tape390_crypt_info crypt_info;
+	int read_back_op;
+};
+
+#define TAPE_3590_CRYPT_INFO(device) \
+	((struct tape_3590_disc_data*)(device->discdata))->crypt_info
+#define TAPE_3590_READ_BACK_OP(device) \
+	((struct tape_3590_disc_data*)(device->discdata))->read_back_op
+
+struct tape_3590_sense {
+
+	unsigned int command_rej:1;
+	unsigned int interv_req:1;
+	unsigned int bus_out_check:1;
+	unsigned int eq_check:1;
+	unsigned int data_check:1;
+	unsigned int overrun:1;
+	unsigned int def_unit_check:1;
+	unsigned int assgnd_elsew:1;
+
+	unsigned int locate_fail:1;
+	unsigned int inst_online:1;
+	unsigned int reserved:1;
+	unsigned int blk_seq_err:1;
+	unsigned int begin_part:1;
+	unsigned int wr_mode:1;
+	unsigned int wr_prot:1;
+	unsigned int not_cap:1;
+
+	unsigned int bra:2;
+	unsigned int lc:3;
+	unsigned int vlf_active:1;
+	unsigned int stm:1;
+	unsigned int med_pos:1;
+
+	unsigned int rac:8;
+
+	unsigned int rc_rqc:16;
+
+	unsigned int mc:8;
+
+	unsigned int sense_fmt:8;
+
+	union {
+		struct {
+			unsigned int emc:4;
+			unsigned int smc:4;
+			unsigned int sev:2;
+			unsigned int reserved:6;
+			unsigned int md:8;
+			unsigned int refcode:8;
+			unsigned int mid:16;
+			unsigned int mp:16;
+			unsigned char volid[6];
+			unsigned int fid:8;
+		} f70;
+		struct {
+			unsigned int emc:4;
+			unsigned int smc:4;
+			unsigned int sev:2;
+			unsigned int reserved1:5;
+			unsigned int mdf:1;
+			unsigned char md[3];
+			unsigned int simid:8;
+			unsigned int uid:16;
+			unsigned int refcode1:16;
+			unsigned int refcode2:16;
+			unsigned int refcode3:16;
+			unsigned int reserved2:8;
+		} f71;
+		unsigned char data[14];
+	} fmt;
+	unsigned char pad[10];
+
+} __attribute__ ((packed));
+
+struct tape_3590_med_sense {
+	unsigned int macst:4;
+	unsigned int masst:4;
+	char pad1[7];
+	unsigned int flags;
+	char pad2[116];
+} __attribute__ ((packed));
+
+struct tape_3590_rdc_data {
+	char data[64];
+} __attribute__ ((packed));
+
+/* Datastructures for 3592 encryption support */
+
+struct tape3592_kekl {
+	__u8 flags;
+	char label[64];
+} __attribute__ ((packed));
+
+struct tape3592_kekl_pair {
+	__u8 count;
+	struct tape3592_kekl kekl[2];
+} __attribute__ ((packed));
+
+struct tape3592_kekl_query_data {
+	__u16 len;
+	__u8  fmt;
+	__u8  mc;
+	__u32 id;
+	__u8  flags;
+	struct tape3592_kekl_pair kekls;
+	char reserved[116];
+} __attribute__ ((packed));
+
+struct tape3592_kekl_query_order {
+	__u8 code;
+	__u8 flags;
+	char reserved1[2];
+	__u8 max_count;
+	char reserved2[35];
+} __attribute__ ((packed));
+
+struct tape3592_kekl_set_order {
+	__u8 code;
+	__u8 flags;
+	char reserved1[2];
+	__u8 op;
+	struct tape3592_kekl_pair kekls;
+	char reserved2[120];
+} __attribute__ ((packed));
+
+#endif /* _TAPE_3590_H */
diff --git a/drivers/s390/char/tape_char.c b/drivers/s390/char/tape_char.c
new file mode 100644
index 0000000..fc206c9
--- /dev/null
+++ b/drivers/s390/char/tape_char.c
@@ -0,0 +1,501 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ *    character device frontend for tape device driver
+ *
+ *  S390 and zSeries version
+ *    Copyright IBM Corp. 2001, 2006
+ *    Author(s): Carsten Otte <cotte@de.ibm.com>
+ *		 Michael Holzheu <holzheu@de.ibm.com>
+ *		 Tuan Ngo-Anh <ngoanh@de.ibm.com>
+ *		 Martin Schwidefsky <schwidefsky@de.ibm.com>
+ */
+
+#define KMSG_COMPONENT "tape"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/proc_fs.h>
+#include <linux/mtio.h>
+#include <linux/compat.h>
+
+#include <linux/uaccess.h>
+
+#define TAPE_DBF_AREA	tape_core_dbf
+
+#include "tape.h"
+#include "tape_std.h"
+#include "tape_class.h"
+
+#define TAPECHAR_MAJOR		0	/* get dynamic major */
+
+/*
+ * file operation structure for tape character frontend
+ */
+static ssize_t tapechar_read(struct file *, char __user *, size_t, loff_t *);
+static ssize_t tapechar_write(struct file *, const char __user *, size_t, loff_t *);
+static int tapechar_open(struct inode *,struct file *);
+static int tapechar_release(struct inode *,struct file *);
+static long tapechar_ioctl(struct file *, unsigned int, unsigned long);
+#ifdef CONFIG_COMPAT
+static long tapechar_compat_ioctl(struct file *, unsigned int, unsigned long);
+#endif
+
+static const struct file_operations tape_fops =
+{
+	.owner = THIS_MODULE,
+	.read = tapechar_read,
+	.write = tapechar_write,
+	.unlocked_ioctl = tapechar_ioctl,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl = tapechar_compat_ioctl,
+#endif
+	.open = tapechar_open,
+	.release = tapechar_release,
+	.llseek = no_llseek,
+};
+
+static int tapechar_major = TAPECHAR_MAJOR;
+
+/*
+ * This function is called for every new tapedevice
+ */
+int
+tapechar_setup_device(struct tape_device * device)
+{
+	char	device_name[20];
+
+	sprintf(device_name, "ntibm%i", device->first_minor / 2);
+	device->nt = register_tape_dev(
+		&device->cdev->dev,
+		MKDEV(tapechar_major, device->first_minor),
+		&tape_fops,
+		device_name,
+		"non-rewinding"
+	);
+	device_name[0] = 'r';
+	device->rt = register_tape_dev(
+		&device->cdev->dev,
+		MKDEV(tapechar_major, device->first_minor + 1),
+		&tape_fops,
+		device_name,
+		"rewinding"
+	);
+
+	return 0;
+}
+
+void
+tapechar_cleanup_device(struct tape_device *device)
+{
+	unregister_tape_dev(&device->cdev->dev, device->rt);
+	device->rt = NULL;
+	unregister_tape_dev(&device->cdev->dev, device->nt);
+	device->nt = NULL;
+}
+
+static int
+tapechar_check_idalbuffer(struct tape_device *device, size_t block_size)
+{
+	struct idal_buffer *new;
+
+	if (device->char_data.idal_buf != NULL &&
+	    device->char_data.idal_buf->size == block_size)
+		return 0;
+
+	if (block_size > MAX_BLOCKSIZE) {
+		DBF_EVENT(3, "Invalid blocksize (%zd > %d)\n",
+			block_size, MAX_BLOCKSIZE);
+		return -EINVAL;
+	}
+
+	/* The current idal buffer is not correct. Allocate a new one. */
+	new = idal_buffer_alloc(block_size, 0);
+	if (IS_ERR(new))
+		return -ENOMEM;
+
+	if (device->char_data.idal_buf != NULL)
+		idal_buffer_free(device->char_data.idal_buf);
+
+	device->char_data.idal_buf = new;
+
+	return 0;
+}
+
+/*
+ * Tape device read function
+ */
+static ssize_t
+tapechar_read(struct file *filp, char __user *data, size_t count, loff_t *ppos)
+{
+	struct tape_device *device;
+	struct tape_request *request;
+	size_t block_size;
+	int rc;
+
+	DBF_EVENT(6, "TCHAR:read\n");
+	device = (struct tape_device *) filp->private_data;
+
+	/*
+	 * If the tape isn't terminated yet, do it now. And since we are
+	 * then at the end of the tape, there wouldn't be anything to read
+	 * anyway. So we return immediately.
+	 */
+	if (device->required_tapemarks)
+		return tape_std_terminate_write(device);
+
+	/* Find out block size to use */
+	if (device->char_data.block_size != 0) {
+		if (count < device->char_data.block_size) {
+			DBF_EVENT(3, "TCHAR:read smaller than block "
+				  "size was requested\n");
+			return -EINVAL;
+		}
+		block_size = device->char_data.block_size;
+	} else {
+		block_size = count;
+	}
+
+	rc = tapechar_check_idalbuffer(device, block_size);
+	if (rc)
+		return rc;
+
+	DBF_EVENT(6, "TCHAR:nbytes: %lx\n", block_size);
+	/* Let the discipline build the ccw chain. */
+	request = device->discipline->read_block(device, block_size);
+	if (IS_ERR(request))
+		return PTR_ERR(request);
+	/* Execute it. */
+	rc = tape_do_io(device, request);
+	if (rc == 0) {
+		rc = block_size - request->rescnt;
+		DBF_EVENT(6, "TCHAR:rbytes:  %x\n", rc);
+		/* Copy data from idal buffer to user space. */
+		if (idal_buffer_to_user(device->char_data.idal_buf,
+					data, rc) != 0)
+			rc = -EFAULT;
+	}
+	tape_free_request(request);
+	return rc;
+}
+
+/*
+ * Tape device write function
+ */
+static ssize_t
+tapechar_write(struct file *filp, const char __user *data, size_t count, loff_t *ppos)
+{
+	struct tape_device *device;
+	struct tape_request *request;
+	size_t block_size;
+	size_t written;
+	int nblocks;
+	int i, rc;
+
+	DBF_EVENT(6, "TCHAR:write\n");
+	device = (struct tape_device *) filp->private_data;
+	/* Find out block size and number of blocks */
+	if (device->char_data.block_size != 0) {
+		if (count < device->char_data.block_size) {
+			DBF_EVENT(3, "TCHAR:write smaller than block "
+				  "size was requested\n");
+			return -EINVAL;
+		}
+		block_size = device->char_data.block_size;
+		nblocks = count / block_size;
+	} else {
+		block_size = count;
+		nblocks = 1;
+	}
+
+	rc = tapechar_check_idalbuffer(device, block_size);
+	if (rc)
+		return rc;
+
+	DBF_EVENT(6,"TCHAR:nbytes: %lx\n", block_size);
+	DBF_EVENT(6, "TCHAR:nblocks: %x\n", nblocks);
+	/* Let the discipline build the ccw chain. */
+	request = device->discipline->write_block(device, block_size);
+	if (IS_ERR(request))
+		return PTR_ERR(request);
+	rc = 0;
+	written = 0;
+	for (i = 0; i < nblocks; i++) {
+		/* Copy data from user space to idal buffer. */
+		if (idal_buffer_from_user(device->char_data.idal_buf,
+					  data, block_size)) {
+			rc = -EFAULT;
+			break;
+		}
+		rc = tape_do_io(device, request);
+		if (rc)
+			break;
+		DBF_EVENT(6, "TCHAR:wbytes: %lx\n",
+			  block_size - request->rescnt);
+		written += block_size - request->rescnt;
+		if (request->rescnt != 0)
+			break;
+		data += block_size;
+	}
+	tape_free_request(request);
+	if (rc == -ENOSPC) {
+		/*
+		 * Ok, the device has no more space. It has NOT written
+		 * the block.
+		 */
+		if (device->discipline->process_eov)
+			device->discipline->process_eov(device);
+		if (written > 0)
+			rc = 0;
+
+	}
+
+	/*
+	 * After doing a write we always need two tapemarks to correctly
+	 * terminate the tape (one to terminate the file, the second to
+	 * flag the end of recorded data).
+	 * Since process_eov positions the tape in front of the written
+	 * tapemark it doesn't hurt to write two marks again.
+	 */
+	if (!rc)
+		device->required_tapemarks = 2;
+
+	return rc ? rc : written;
+}
+
+/*
+ * Character frontend tape device open function.
+ */
+static int
+tapechar_open (struct inode *inode, struct file *filp)
+{
+	struct tape_device *device;
+	int minor, rc;
+
+	DBF_EVENT(6, "TCHAR:open: %i:%i\n",
+		imajor(file_inode(filp)),
+		iminor(file_inode(filp)));
+
+	if (imajor(file_inode(filp)) != tapechar_major)
+		return -ENODEV;
+
+	minor = iminor(file_inode(filp));
+	device = tape_find_device(minor / TAPE_MINORS_PER_DEV);
+	if (IS_ERR(device)) {
+		DBF_EVENT(3, "TCHAR:open: tape_find_device() failed\n");
+		return PTR_ERR(device);
+	}
+
+	rc = tape_open(device);
+	if (rc == 0) {
+		filp->private_data = device;
+		nonseekable_open(inode, filp);
+	} else
+		tape_put_device(device);
+
+	return rc;
+}
+
+/*
+ * Character frontend tape device release function.
+ */
+
+static int
+tapechar_release(struct inode *inode, struct file *filp)
+{
+	struct tape_device *device;
+
+	DBF_EVENT(6, "TCHAR:release: %x\n", iminor(inode));
+	device = (struct tape_device *) filp->private_data;
+
+	/*
+	 * If this is the rewinding tape minor then rewind. In that case we
+	 * write all required tapemarks. Otherwise only one to terminate the
+	 * file.
+	 */
+	if ((iminor(inode) & 1) != 0) {
+		if (device->required_tapemarks)
+			tape_std_terminate_write(device);
+		tape_mtop(device, MTREW, 1);
+	} else {
+		if (device->required_tapemarks > 1) {
+			if (tape_mtop(device, MTWEOF, 1) == 0)
+				device->required_tapemarks--;
+		}
+	}
+
+	if (device->char_data.idal_buf != NULL) {
+		idal_buffer_free(device->char_data.idal_buf);
+		device->char_data.idal_buf = NULL;
+	}
+	tape_release(device);
+	filp->private_data = NULL;
+	tape_put_device(device);
+
+	return 0;
+}
+
+/*
+ * Tape device io controls.
+ */
+static int
+__tapechar_ioctl(struct tape_device *device,
+		 unsigned int no, unsigned long data)
+{
+	int rc;
+
+	if (no == MTIOCTOP) {
+		struct mtop op;
+
+		if (copy_from_user(&op, (char __user *) data, sizeof(op)) != 0)
+			return -EFAULT;
+		if (op.mt_count < 0)
+			return -EINVAL;
+
+		/*
+		 * Operations that change tape position should write final
+		 * tapemarks.
+		 */
+		switch (op.mt_op) {
+			case MTFSF:
+			case MTBSF:
+			case MTFSR:
+			case MTBSR:
+			case MTREW:
+			case MTOFFL:
+			case MTEOM:
+			case MTRETEN:
+			case MTBSFM:
+			case MTFSFM:
+			case MTSEEK:
+				if (device->required_tapemarks)
+					tape_std_terminate_write(device);
+			default:
+				;
+		}
+		rc = tape_mtop(device, op.mt_op, op.mt_count);
+
+		if (op.mt_op == MTWEOF && rc == 0) {
+			if (op.mt_count > device->required_tapemarks)
+				device->required_tapemarks = 0;
+			else
+				device->required_tapemarks -= op.mt_count;
+		}
+		return rc;
+	}
+	if (no == MTIOCPOS) {
+		/* MTIOCPOS: query the tape position. */
+		struct mtpos pos;
+
+		rc = tape_mtop(device, MTTELL, 1);
+		if (rc < 0)
+			return rc;
+		pos.mt_blkno = rc;
+		if (copy_to_user((char __user *) data, &pos, sizeof(pos)) != 0)
+			return -EFAULT;
+		return 0;
+	}
+	if (no == MTIOCGET) {
+		/* MTIOCGET: query the tape drive status. */
+		struct mtget get;
+
+		memset(&get, 0, sizeof(get));
+		get.mt_type = MT_ISUNKNOWN;
+		get.mt_resid = 0 /* device->devstat.rescnt */;
+		get.mt_dsreg =
+			((device->char_data.block_size << MT_ST_BLKSIZE_SHIFT)
+			 & MT_ST_BLKSIZE_MASK);
+		/* FIXME: mt_erreg, mt_fileno */
+		get.mt_erreg = 0;
+		get.mt_fileno = 0;
+		get.mt_gstat  = device->tape_generic_status;
+
+		if (device->medium_state == MS_LOADED) {
+			rc = tape_mtop(device, MTTELL, 1);
+
+			if (rc < 0)
+				return rc;
+
+			if (rc == 0)
+				get.mt_gstat |= GMT_BOT(~0);
+
+			get.mt_blkno = rc;
+		}
+
+		if (copy_to_user((char __user *) data, &get, sizeof(get)) != 0)
+			return -EFAULT;
+
+		return 0;
+	}
+	/* Try the discipline ioctl function. */
+	if (device->discipline->ioctl_fn == NULL)
+		return -EINVAL;
+	return device->discipline->ioctl_fn(device, no, data);
+}
+
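+/*
+ * The MTIOCTOP branch above implements the standard mtio interface, so
+ * e.g. writing two tapemarks from user space is simply (sketch):
+ *
+ *	struct mtop op = { .mt_op = MTWEOF, .mt_count = 2 };
+ *
+ *	ioctl(fd, MTIOCTOP, &op);
+ */
+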
+static long
+tapechar_ioctl(struct file *filp, unsigned int no, unsigned long data)
+{
+	struct tape_device *device;
+	long rc;
+
+	DBF_EVENT(6, "TCHAR:ioct\n");
+
+	device = (struct tape_device *) filp->private_data;
+	mutex_lock(&device->mutex);
+	rc = __tapechar_ioctl(device, no, data);
+	mutex_unlock(&device->mutex);
+	return rc;
+}
+
+#ifdef CONFIG_COMPAT
+static long
+tapechar_compat_ioctl(struct file *filp, unsigned int no, unsigned long data)
+{
+	struct tape_device *device = filp->private_data;
+	int rval = -ENOIOCTLCMD;
+	unsigned long argp;
+
+	/* The 'arg' argument of any ioctl function may only be used for
+	 * pointers because of the compat pointer conversion.
+	 * Consider this when adding new ioctls.
+	 */
+	argp = (unsigned long) compat_ptr(data);
+	if (device->discipline->ioctl_fn) {
+		mutex_lock(&device->mutex);
+		rval = device->discipline->ioctl_fn(device, no, argp);
+		mutex_unlock(&device->mutex);
+		if (rval == -EINVAL)
+			rval = -ENOIOCTLCMD;
+	}
+
+	return rval;
+}
+#endif /* CONFIG_COMPAT */
+
+/*
+ * Initialize character device frontend.
+ */
+int
+tapechar_init (void)
+{
+	dev_t	dev;
+
+	if (alloc_chrdev_region(&dev, 0, 256, "tape") != 0)
+		return -1;
+
+	tapechar_major = MAJOR(dev);
+
+	return 0;
+}
+
+/*
+ * cleanup
+ */
+void
+tapechar_exit(void)
+{
+	unregister_chrdev_region(MKDEV(tapechar_major, 0), 256);
+}
diff --git a/drivers/s390/char/tape_class.c b/drivers/s390/char/tape_class.c
new file mode 100644
index 0000000..b58df0d
--- /dev/null
+++ b/drivers/s390/char/tape_class.c
@@ -0,0 +1,132 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright IBM Corp. 2004
+ *
+ * Tape class device support
+ *
+ * Author: Stefan Bader <shbader@de.ibm.com>
+ * Based on simple class device code by Greg K-H
+ */
+
+#define KMSG_COMPONENT "tape"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <linux/slab.h>
+
+#include "tape_class.h"
+
+MODULE_AUTHOR("Stefan Bader <shbader@de.ibm.com>");
+MODULE_DESCRIPTION(
+	"Copyright IBM Corp. 2004   All Rights Reserved.\n"
+	"tape_class.c"
+);
+MODULE_LICENSE("GPL");
+
+static struct class *tape_class;
+
+/*
+ * Register a tape device and return a pointer to the tape class device
+ * structure created by the call.
+ *
+ * device
+ *	The pointer to the struct device of the physical (base) device.
+ * dev
+ *	The intended major/minor number. The major number may be 0 to
+ *	get a dynamic major number.
+ * fops
+ *	The pointer to the driver's file operations for the tape device.
+ * device_name
+ *	The pointer to the name of the character device.
+ * mode_name
+ *	The pointer to the name of the tape mode; used as the name of the
+ *	sysfs link from the physical device to the class device.
+ */
+struct tape_class_device *register_tape_dev(
+	struct device *		device,
+	dev_t			dev,
+	const struct file_operations *fops,
+	char *			device_name,
+	char *			mode_name)
+{
+	struct tape_class_device *	tcd;
+	int		rc;
+	char *		s;
+
+	tcd = kzalloc(sizeof(struct tape_class_device), GFP_KERNEL);
+	if (!tcd)
+		return ERR_PTR(-ENOMEM);
+
+	strlcpy(tcd->device_name, device_name, TAPECLASS_NAME_LEN);
+	for (s = strchr(tcd->device_name, '/'); s; s = strchr(s, '/'))
+		*s = '!';
+	strlcpy(tcd->mode_name, mode_name, TAPECLASS_NAME_LEN);
+	for (s = strchr(tcd->mode_name, '/'); s; s = strchr(s, '/'))
+		*s = '!';
+
+	tcd->char_device = cdev_alloc();
+	if (!tcd->char_device) {
+		rc = -ENOMEM;
+		goto fail_with_tcd;
+	}
+
+	tcd->char_device->owner = fops->owner;
+	tcd->char_device->ops   = fops;
+
+	rc = cdev_add(tcd->char_device, dev, 1);
+	if (rc)
+		goto fail_with_cdev;
+
+	tcd->class_device = device_create(tape_class, device,
+					  tcd->char_device->dev, NULL,
+					  "%s", tcd->device_name);
+	rc = PTR_ERR_OR_ZERO(tcd->class_device);
+	if (rc)
+		goto fail_with_cdev;
+	rc = sysfs_create_link(
+		&device->kobj,
+		&tcd->class_device->kobj,
+		tcd->mode_name
+	);
+	if (rc)
+		goto fail_with_class_device;
+
+	return tcd;
+
+fail_with_class_device:
+	device_destroy(tape_class, tcd->char_device->dev);
+
+fail_with_cdev:
+	cdev_del(tcd->char_device);
+
+fail_with_tcd:
+	kfree(tcd);
+
+	return ERR_PTR(rc);
+}
+EXPORT_SYMBOL(register_tape_dev);
+
+void unregister_tape_dev(struct device *device, struct tape_class_device *tcd)
+{
+	if (tcd != NULL && !IS_ERR(tcd)) {
+		sysfs_remove_link(&device->kobj, tcd->mode_name);
+		device_destroy(tape_class, tcd->char_device->dev);
+		cdev_del(tcd->char_device);
+		kfree(tcd);
+	}
+}
+EXPORT_SYMBOL(unregister_tape_dev);
+
+
+static int __init tape_init(void)
+{
+	tape_class = class_create(THIS_MODULE, "tape390");
+
+	return 0;
+}
+
+static void __exit tape_exit(void)
+{
+	class_destroy(tape_class);
+	tape_class = NULL;
+}
+
+postcore_initcall(tape_init);
+module_exit(tape_exit);
diff --git a/drivers/s390/char/tape_class.h b/drivers/s390/char/tape_class.h
new file mode 100644
index 0000000..d25ac07
--- /dev/null
+++ b/drivers/s390/char/tape_class.h
@@ -0,0 +1,59 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright IBM Corp. 2004   All Rights Reserved.
+ *
+ * Tape class device support
+ *
+ * Author: Stefan Bader <shbader@de.ibm.com>
+ * Based on simple class device code by Greg K-H
+ */
+#ifndef __TAPE_CLASS_H__
+#define __TAPE_CLASS_H__
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/major.h>
+#include <linux/cdev.h>
+
+#include <linux/device.h>
+#include <linux/kdev_t.h>
+
+#define TAPECLASS_NAME_LEN	32
+
+struct tape_class_device {
+	struct cdev		*char_device;
+	struct device		*class_device;
+	char			device_name[TAPECLASS_NAME_LEN];
+	char			mode_name[TAPECLASS_NAME_LEN];
+};
+
+/*
+ * Register a tape device and return a pointer to the tape class device
+ * created by the call.
+ *
+ * device
+ *	The pointer to the struct device of the physical (base) device.
+ * dev
+ *	The intended major/minor number. The major number may be 0 to
+ *	get a dynamic major number.
+ * fops
+ *	The pointer to the drivers file operations for the tape device.
+ * device_name
+ *	Pointer to the logical device name (will also be used as kobject name
+ *	of the cdev). This can also be called the name of the tape class
+ *	device.
+ * mode_name
+ *	Points to the name of the tape mode. This creates a link with that
+ *	name from the physical device to the logical device (class).
+ */
+struct tape_class_device *register_tape_dev(
+	struct device *		device,
+	dev_t			dev,
+	const struct file_operations *fops,
+	char *			device_name,
+	char *			node_name
+);
+void unregister_tape_dev(struct device *device, struct tape_class_device *tcd);
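+
+/*
+ * A minimal usage sketch (modeled on the character device frontend; names
+ * and error handling abbreviated, so treat it as illustrative only):
+ *
+ *	struct tape_class_device *tcd;
+ *
+ *	tcd = register_tape_dev(&device->cdev->dev,
+ *				MKDEV(tapechar_major, device->first_minor),
+ *				&tape_fops, "ntibm0", "non-rewinding");
+ *	if (IS_ERR(tcd))
+ *		return PTR_ERR(tcd);
+ *	...
+ *	unregister_tape_dev(&device->cdev->dev, tcd);
+ */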
+
+#endif /* __TAPE_CLASS_H__ */
diff --git a/drivers/s390/char/tape_core.c b/drivers/s390/char/tape_core.c
new file mode 100644
index 0000000..8d3370d
--- /dev/null
+++ b/drivers/s390/char/tape_core.c
@@ -0,0 +1,1374 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ *    basic function of the tape device driver
+ *
+ *  S390 and zSeries version
+ *    Copyright IBM Corp. 2001, 2009
+ *    Author(s): Carsten Otte <cotte@de.ibm.com>
+ *		 Michael Holzheu <holzheu@de.ibm.com>
+ *		 Tuan Ngo-Anh <ngoanh@de.ibm.com>
+ *		 Martin Schwidefsky <schwidefsky@de.ibm.com>
+ *		 Stefan Bader <shbader@de.ibm.com>
+ */
+
+#define KMSG_COMPONENT "tape"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <linux/module.h>
+#include <linux/init.h>	     // for kernel parameters
+#include <linux/kmod.h>	     // for requesting modules
+#include <linux/spinlock.h>  // for locks
+#include <linux/vmalloc.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+
+#include <asm/types.h>	     // for variable types
+
+#define TAPE_DBF_AREA	tape_core_dbf
+
+#include "tape.h"
+#include "tape_std.h"
+
+#define LONG_BUSY_TIMEOUT 180 /* seconds */
+
+static void __tape_do_irq (struct ccw_device *, unsigned long, struct irb *);
+static void tape_delayed_next_request(struct work_struct *);
+static void tape_long_busy_timeout(struct timer_list *t);
+
+/*
+ * One list to contain all tape devices of all disciplines, so
+ * we can assign the devices to minor numbers of the same major.
+ * The list is protected by the rwlock tape_device_lock.
+ */
+static LIST_HEAD(tape_device_list);
+static DEFINE_RWLOCK(tape_device_lock);
+
+/*
+ * Pointer to debug area.
+ */
+debug_info_t *TAPE_DBF_AREA = NULL;
+EXPORT_SYMBOL(TAPE_DBF_AREA);
+
+/*
+ * Printable strings for tape enumerations.
+ */
+const char *tape_state_verbose[TS_SIZE] =
+{
+	[TS_UNUSED]   = "UNUSED",
+	[TS_IN_USE]   = "IN_USE",
+	[TS_BLKUSE]   = "BLKUSE",
+	[TS_INIT]     = "INIT  ",
+	[TS_NOT_OPER] = "NOT_OP"
+};
+
+const char *tape_op_verbose[TO_SIZE] =
+{
+	[TO_BLOCK] = "BLK",	[TO_BSB] = "BSB",
+	[TO_BSF] = "BSF",	[TO_DSE] = "DSE",
+	[TO_FSB] = "FSB",	[TO_FSF] = "FSF",
+	[TO_LBL] = "LBL",	[TO_NOP] = "NOP",
+	[TO_RBA] = "RBA",	[TO_RBI] = "RBI",
+	[TO_RFO] = "RFO",	[TO_REW] = "REW",
+	[TO_RUN] = "RUN",	[TO_WRI] = "WRI",
+	[TO_WTM] = "WTM",	[TO_MSEN] = "MSN",
+	[TO_LOAD] = "LOA",	[TO_READ_CONFIG] = "RCF",
+	[TO_READ_ATTMSG] = "RAT",
+	[TO_DIS] = "DIS",	[TO_ASSIGN] = "ASS",
+	[TO_UNASSIGN] = "UAS",  [TO_CRYPT_ON] = "CON",
+	[TO_CRYPT_OFF] = "COF",	[TO_KEKL_SET] = "KLS",
+	[TO_KEKL_QUERY] = "KLQ",[TO_RDC] = "RDC",
+};
+
+static int devid_to_int(struct ccw_dev_id *dev_id)
+{
+	return dev_id->devno + (dev_id->ssid << 16);
+}
+
+/*
+ * Some channel attached tape specific attributes.
+ *
+ * FIXME: In the future the first_minor and blocksize attributes should
+ *        be replaced by a link to the cdev tree.
+ */
+static ssize_t
+tape_medium_state_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	struct tape_device *tdev;
+
+	tdev = dev_get_drvdata(dev);
+	return scnprintf(buf, PAGE_SIZE, "%i\n", tdev->medium_state);
+}
+
+static
+DEVICE_ATTR(medium_state, 0444, tape_medium_state_show, NULL);
+
+static ssize_t
+tape_first_minor_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	struct tape_device *tdev;
+
+	tdev = dev_get_drvdata(dev);
+	return scnprintf(buf, PAGE_SIZE, "%i\n", tdev->first_minor);
+}
+
+static
+DEVICE_ATTR(first_minor, 0444, tape_first_minor_show, NULL);
+
+static ssize_t
+tape_state_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	struct tape_device *tdev;
+
+	tdev = dev_get_drvdata(dev);
+	return scnprintf(buf, PAGE_SIZE, "%s\n", (tdev->first_minor < 0) ?
+		"OFFLINE" : tape_state_verbose[tdev->tape_state]);
+}
+
+static
+DEVICE_ATTR(state, 0444, tape_state_show, NULL);
+
+static ssize_t
+tape_operation_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	struct tape_device *tdev;
+	ssize_t rc;
+
+	tdev = dev_get_drvdata(dev);
+	if (tdev->first_minor < 0)
+		return scnprintf(buf, PAGE_SIZE, "N/A\n");
+
+	spin_lock_irq(get_ccwdev_lock(tdev->cdev));
+	if (list_empty(&tdev->req_queue))
+		rc = scnprintf(buf, PAGE_SIZE, "---\n");
+	else {
+		struct tape_request *req;
+
+		req = list_entry(tdev->req_queue.next, struct tape_request,
+			list);
+		rc = scnprintf(buf, PAGE_SIZE, "%s\n", tape_op_verbose[req->op]);
+	}
+	spin_unlock_irq(get_ccwdev_lock(tdev->cdev));
+	return rc;
+}
+
+static
+DEVICE_ATTR(operation, 0444, tape_operation_show, NULL);
+
+static ssize_t
+tape_blocksize_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	struct tape_device *tdev;
+
+	tdev = dev_get_drvdata(dev);
+
+	return scnprintf(buf, PAGE_SIZE, "%i\n", tdev->char_data.block_size);
+}
+
+static
+DEVICE_ATTR(blocksize, 0444, tape_blocksize_show, NULL);
+
+static struct attribute *tape_attrs[] = {
+	&dev_attr_medium_state.attr,
+	&dev_attr_first_minor.attr,
+	&dev_attr_state.attr,
+	&dev_attr_operation.attr,
+	&dev_attr_blocksize.attr,
+	NULL
+};
+
+static const struct attribute_group tape_attr_group = {
+	.attrs = tape_attrs,
+};
+
+/*
+ * Tape state functions
+ */
+void
+tape_state_set(struct tape_device *device, enum tape_state newstate)
+{
+	const char *str;
+
+	if (device->tape_state == TS_NOT_OPER) {
+		DBF_EVENT(3, "ts_set err: not oper\n");
+		return;
+	}
+	DBF_EVENT(4, "ts. dev:	%x\n", device->first_minor);
+	DBF_EVENT(4, "old ts:\t\n");
+	if (device->tape_state < TS_SIZE && device->tape_state >= 0)
+		str = tape_state_verbose[device->tape_state];
+	else
+		str = "UNKNOWN TS";
+	DBF_EVENT(4, "%s\n", str);
+	DBF_EVENT(4, "new ts:\t\n");
+	if (newstate < TS_SIZE && newstate >= 0)
+		str = tape_state_verbose[newstate];
+	else
+		str = "UNKNOWN TS";
+	DBF_EVENT(4, "%s\n", str);
+	device->tape_state = newstate;
+	wake_up(&device->state_change_wq);
+}
+
+struct tape_med_state_work_data {
+	struct tape_device *device;
+	enum tape_medium_state state;
+	struct work_struct  work;
+};
+
+static void
+tape_med_state_work_handler(struct work_struct *work)
+{
+	static char env_state_loaded[] = "MEDIUM_STATE=LOADED";
+	static char env_state_unloaded[] = "MEDIUM_STATE=UNLOADED";
+	struct tape_med_state_work_data *p =
+		container_of(work, struct tape_med_state_work_data, work);
+	struct tape_device *device = p->device;
+	char *envp[] = { NULL, NULL };
+
+	switch (p->state) {
+	case MS_UNLOADED:
+		pr_info("%s: The tape cartridge has been successfully "
+			"unloaded\n", dev_name(&device->cdev->dev));
+		envp[0] = env_state_unloaded;
+		kobject_uevent_env(&device->cdev->dev.kobj, KOBJ_CHANGE, envp);
+		break;
+	case MS_LOADED:
+		pr_info("%s: A tape cartridge has been mounted\n",
+			dev_name(&device->cdev->dev));
+		envp[0] = env_state_loaded;
+		kobject_uevent_env(&device->cdev->dev.kobj, KOBJ_CHANGE, envp);
+		break;
+	default:
+		break;
+	}
+	tape_put_device(device);
+	kfree(p);
+}
+
+static void
+tape_med_state_work(struct tape_device *device, enum tape_medium_state state)
+{
+	struct tape_med_state_work_data *p;
+
+	p = kzalloc(sizeof(*p), GFP_ATOMIC);
+	if (p) {
+		INIT_WORK(&p->work, tape_med_state_work_handler);
+		p->device = tape_get_device(device);
+		p->state = state;
+		schedule_work(&p->work);
+	}
+}
+
+void
+tape_med_state_set(struct tape_device *device, enum tape_medium_state newstate)
+{
+	enum tape_medium_state oldstate;
+
+	oldstate = device->medium_state;
+	if (oldstate == newstate)
+		return;
+	device->medium_state = newstate;
+	switch (newstate) {
+	case MS_UNLOADED:
+		device->tape_generic_status |= GMT_DR_OPEN(~0);
+		if (oldstate == MS_LOADED)
+			tape_med_state_work(device, MS_UNLOADED);
+		break;
+	case MS_LOADED:
+		device->tape_generic_status &= ~GMT_DR_OPEN(~0);
+		if (oldstate == MS_UNLOADED)
+			tape_med_state_work(device, MS_LOADED);
+		break;
+	default:
+		break;
+	}
+	wake_up(&device->state_change_wq);
+}
+
+/*
+ * Stop running ccw. Has to be called with the device lock held.
+ */
+static int
+__tape_cancel_io(struct tape_device *device, struct tape_request *request)
+{
+	int retries;
+	int rc;
+
+	/* Check if interrupt has already been processed */
+	if (request->callback == NULL)
+		return 0;
+
+	rc = 0;
+	for (retries = 0; retries < 5; retries++) {
+		rc = ccw_device_clear(device->cdev, (long) request);
+
+		switch (rc) {
+			case 0:
+				request->status	= TAPE_REQUEST_DONE;
+				return 0;
+			case -EBUSY:
+				request->status	= TAPE_REQUEST_CANCEL;
+				schedule_delayed_work(&device->tape_dnr, 0);
+				return 0;
+			case -ENODEV:
+				DBF_EXCEPTION(2, "device gone, retry\n");
+				break;
+			case -EIO:
+				DBF_EXCEPTION(2, "I/O error, retry\n");
+				break;
+			default:
+				BUG();
+		}
+	}
+
+	return rc;
+}
+
+/*
+ * Add device into the sorted list, giving it the first
+ * available minor number.
+ */
+static int
+tape_assign_minor(struct tape_device *device)
+{
+	struct tape_device *tmp;
+	int minor;
+
+	minor = 0;
+	write_lock(&tape_device_lock);
+	list_for_each_entry(tmp, &tape_device_list, node) {
+		if (minor < tmp->first_minor)
+			break;
+		minor += TAPE_MINORS_PER_DEV;
+	}
+	if (minor >= 256) {
+		write_unlock(&tape_device_lock);
+		return -ENODEV;
+	}
+	device->first_minor = minor;
+	list_add_tail(&device->node, &tmp->node);
+	write_unlock(&tape_device_lock);
+	return 0;
+}
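+
+/*
+ * Example of the assignment above (TAPE_MINORS_PER_DEV is 2): if devices
+ * currently hold first_minor 0 and 4, the loop advances to minor 2, stops
+ * at the gap before first_minor 4 and inserts the new device there. Minor
+ * numbers thus stay sorted and gaps left by removed devices are reused.
+ */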
+
+/* remove device from the list */
+static void
+tape_remove_minor(struct tape_device *device)
+{
+	write_lock(&tape_device_lock);
+	list_del_init(&device->node);
+	device->first_minor = -1;
+	write_unlock(&tape_device_lock);
+}
+
+/*
+ * Set a device online.
+ *
+ * This function is called by the common I/O layer to move a device from the
+ * detected but offline state into the online state.
+ * If we return an error (RC < 0) the device remains in the offline state. This
+ * can happen if the device is assigned somewhere else, for example.
+ */
+int
+tape_generic_online(struct tape_device *device,
+		   struct tape_discipline *discipline)
+{
+	int rc;
+
+	DBF_LH(6, "tape_enable_device(%p, %p)\n", device, discipline);
+
+	if (device->tape_state != TS_INIT) {
+		DBF_LH(3, "Tapestate not INIT (%d)\n", device->tape_state);
+		return -EINVAL;
+	}
+
+	timer_setup(&device->lb_timeout, tape_long_busy_timeout, 0);
+
+	/* Let the discipline have a go at the device. */
+	device->discipline = discipline;
+	if (!try_module_get(discipline->owner)) {
+		return -EINVAL;
+	}
+
+	rc = discipline->setup_device(device);
+	if (rc)
+		goto out;
+	rc = tape_assign_minor(device);
+	if (rc)
+		goto out_discipline;
+
+	rc = tapechar_setup_device(device);
+	if (rc)
+		goto out_minor;
+
+	tape_state_set(device, TS_UNUSED);
+
+	DBF_LH(3, "(%08x): Drive set online\n", device->cdev_id);
+
+	return 0;
+
+out_minor:
+	tape_remove_minor(device);
+out_discipline:
+	device->discipline->cleanup_device(device);
+	device->discipline = NULL;
+out:
+	module_put(discipline->owner);
+	return rc;
+}
+
+static void
+tape_cleanup_device(struct tape_device *device)
+{
+	tapechar_cleanup_device(device);
+	device->discipline->cleanup_device(device);
+	module_put(device->discipline->owner);
+	tape_remove_minor(device);
+	tape_med_state_set(device, MS_UNKNOWN);
+}
+
+/*
+ * Suspend device.
+ *
+ * Called by the common I/O layer if the drive should be suspended on user
+ * request. We refuse to suspend if the device is loaded or in use for the
+ * following reason:
+ * While the Linux guest is suspended, it might be logged off which causes
+ * devices to be detached. Tape devices are automatically rewound and unloaded
+ * during DETACH processing (unless the tape device was attached with the
+ * NOASSIGN or MULTIUSER option). After rewind/unload, there is no way to
+ * resume the original state of the tape device, since we would need to
+ * manually re-load the cartridge which was active at suspend time.
+ */
+int tape_generic_pm_suspend(struct ccw_device *cdev)
+{
+	struct tape_device *device;
+
+	device = dev_get_drvdata(&cdev->dev);
+	if (!device) {
+		return -ENODEV;
+	}
+
+	DBF_LH(3, "(%08x): tape_generic_pm_suspend(%p)\n",
+		device->cdev_id, device);
+
+	if (device->medium_state != MS_UNLOADED) {
+		pr_err("A cartridge is loaded in tape device %s, "
+		       "refusing to suspend\n", dev_name(&cdev->dev));
+		return -EBUSY;
+	}
+
+	spin_lock_irq(get_ccwdev_lock(device->cdev));
+	switch (device->tape_state) {
+		case TS_INIT:
+		case TS_NOT_OPER:
+		case TS_UNUSED:
+			spin_unlock_irq(get_ccwdev_lock(device->cdev));
+			break;
+		default:
+			pr_err("Tape device %s is busy, refusing to "
+			       "suspend\n", dev_name(&cdev->dev));
+			spin_unlock_irq(get_ccwdev_lock(device->cdev));
+			return -EBUSY;
+	}
+
+	DBF_LH(3, "(%08x): Drive suspended.\n", device->cdev_id);
+	return 0;
+}
+
+/*
+ * Set device offline.
+ *
+ * Called by the common I/O layer if the drive should be set offline on user
+ * request. We may prevent this by returning an error.
+ * Manual offline is only allowed while the drive is not in use.
+ */
+int
+tape_generic_offline(struct ccw_device *cdev)
+{
+	struct tape_device *device;
+
+	device = dev_get_drvdata(&cdev->dev);
+	if (!device) {
+		return -ENODEV;
+	}
+
+	DBF_LH(3, "(%08x): tape_generic_offline(%p)\n",
+		device->cdev_id, device);
+
+	spin_lock_irq(get_ccwdev_lock(device->cdev));
+	switch (device->tape_state) {
+		case TS_INIT:
+		case TS_NOT_OPER:
+			spin_unlock_irq(get_ccwdev_lock(device->cdev));
+			break;
+		case TS_UNUSED:
+			tape_state_set(device, TS_INIT);
+			spin_unlock_irq(get_ccwdev_lock(device->cdev));
+			tape_cleanup_device(device);
+			break;
+		default:
+			DBF_EVENT(3, "(%08x): Set offline failed "
+				"- drive in use.\n",
+				device->cdev_id);
+			spin_unlock_irq(get_ccwdev_lock(device->cdev));
+			return -EBUSY;
+	}
+
+	DBF_LH(3, "(%08x): Drive set offline.\n", device->cdev_id);
+	return 0;
+}
+
+/*
+ * Allocate memory for a new device structure.
+ */
+static struct tape_device *
+tape_alloc_device(void)
+{
+	struct tape_device *device;
+
+	device = kzalloc(sizeof(struct tape_device), GFP_KERNEL);
+	if (device == NULL) {
+		DBF_EXCEPTION(2, "ti:no mem\n");
+		return ERR_PTR(-ENOMEM);
+	}
+	device->modeset_byte = kmalloc(1, GFP_KERNEL | GFP_DMA);
+	if (device->modeset_byte == NULL) {
+		DBF_EXCEPTION(2, "ti:no mem\n");
+		kfree(device);
+		return ERR_PTR(-ENOMEM);
+	}
+	mutex_init(&device->mutex);
+	INIT_LIST_HEAD(&device->req_queue);
+	INIT_LIST_HEAD(&device->node);
+	init_waitqueue_head(&device->state_change_wq);
+	init_waitqueue_head(&device->wait_queue);
+	device->tape_state = TS_INIT;
+	device->medium_state = MS_UNKNOWN;
+	*device->modeset_byte = 0;
+	device->first_minor = -1;
+	atomic_set(&device->ref_count, 1);
+	INIT_DELAYED_WORK(&device->tape_dnr, tape_delayed_next_request);
+
+	return device;
+}
+
+/*
+ * Get a reference to an existing device structure. This will automatically
+ * increment the reference count.
+ */
+struct tape_device *
+tape_get_device(struct tape_device *device)
+{
+	int count;
+
+	count = atomic_inc_return(&device->ref_count);
+	DBF_EVENT(4, "tape_get_device(%p) = %i\n", device, count);
+	return device;
+}
+
+/*
+ * Decrease the reference counter of a device structure. If the
+ * reference counter reaches zero, free the device structure.
+ */
+void
+tape_put_device(struct tape_device *device)
+{
+	int count;
+
+	count = atomic_dec_return(&device->ref_count);
+	DBF_EVENT(4, "tape_put_device(%p) -> %i\n", device, count);
+	BUG_ON(count < 0);
+	if (count == 0) {
+		kfree(device->modeset_byte);
+		kfree(device);
+	}
+}
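+
+/*
+ * Typical reference pattern (sketch): every user that stores a device
+ * pointer takes its own reference and drops it again when done:
+ *
+ *	device = tape_get_device(device);
+ *	...
+ *	tape_put_device(device);
+ *
+ * The initial reference taken in tape_alloc_device() is dropped at the end
+ * of tape_generic_remove(), which frees the structure once the last user
+ * is gone.
+ */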
+
+/*
+ * Find tape device by a device index.
+ */
+struct tape_device *
+tape_find_device(int devindex)
+{
+	struct tape_device *device, *tmp;
+
+	device = ERR_PTR(-ENODEV);
+	read_lock(&tape_device_lock);
+	list_for_each_entry(tmp, &tape_device_list, node) {
+		if (tmp->first_minor / TAPE_MINORS_PER_DEV == devindex) {
+			device = tape_get_device(tmp);
+			break;
+		}
+	}
+	read_unlock(&tape_device_lock);
+	return device;
+}
+
+/*
+ * Driverfs tape probe function.
+ */
+int
+tape_generic_probe(struct ccw_device *cdev)
+{
+	struct tape_device *device;
+	int ret;
+	struct ccw_dev_id dev_id;
+
+	device = tape_alloc_device();
+	if (IS_ERR(device))
+		return -ENODEV;
+	ccw_device_set_options(cdev, CCWDEV_DO_PATHGROUP |
+				     CCWDEV_DO_MULTIPATH);
+	ret = sysfs_create_group(&cdev->dev.kobj, &tape_attr_group);
+	if (ret) {
+		tape_put_device(device);
+		return ret;
+	}
+	dev_set_drvdata(&cdev->dev, device);
+	cdev->handler = __tape_do_irq;
+	device->cdev = cdev;
+	ccw_device_get_id(cdev, &dev_id);
+	device->cdev_id = devid_to_int(&dev_id);
+	return ret;
+}
+
+static void
+__tape_discard_requests(struct tape_device *device)
+{
+	struct tape_request *	request;
+	struct list_head *	l, *n;
+
+	list_for_each_safe(l, n, &device->req_queue) {
+		request = list_entry(l, struct tape_request, list);
+		if (request->status == TAPE_REQUEST_IN_IO)
+			request->status = TAPE_REQUEST_DONE;
+		list_del(&request->list);
+
+		/* Decrease ref_count for removed request. */
+		request->device = NULL;
+		tape_put_device(device);
+		request->rc = -EIO;
+		if (request->callback != NULL)
+			request->callback(request, request->callback_data);
+	}
+}
+
+/*
+ * Driverfs tape remove function.
+ *
+ * This function is called whenever the common I/O layer detects that the
+ * device is gone. This can happen at any time and we cannot refuse.
+ */
+void
+tape_generic_remove(struct ccw_device *cdev)
+{
+	struct tape_device *	device;
+
+	device = dev_get_drvdata(&cdev->dev);
+	if (!device) {
+		return;
+	}
+	DBF_LH(3, "(%08x): tape_generic_remove(%p)\n", device->cdev_id, cdev);
+
+	spin_lock_irq(get_ccwdev_lock(device->cdev));
+	switch (device->tape_state) {
+		case TS_INIT:
+			tape_state_set(device, TS_NOT_OPER);
+		case TS_NOT_OPER:
+			/*
+			 * Nothing to do.
+			 */
+			spin_unlock_irq(get_ccwdev_lock(device->cdev));
+			break;
+		case TS_UNUSED:
+			/*
+			 * Need only to release the device.
+			 */
+			tape_state_set(device, TS_NOT_OPER);
+			spin_unlock_irq(get_ccwdev_lock(device->cdev));
+			tape_cleanup_device(device);
+			break;
+		default:
+			/*
+			 * There may be requests on the queue. We will not get
+			 * an interrupt for a request that was running. So we
+			 * just post them all as I/O errors.
+			 */
+			DBF_EVENT(3, "(%08x): Drive in use vanished!\n",
+				device->cdev_id);
+			pr_warn("%s: A tape unit was detached while in use\n",
+				dev_name(&device->cdev->dev));
+			tape_state_set(device, TS_NOT_OPER);
+			__tape_discard_requests(device);
+			spin_unlock_irq(get_ccwdev_lock(device->cdev));
+			tape_cleanup_device(device);
+	}
+
+	device = dev_get_drvdata(&cdev->dev);
+	if (device) {
+		sysfs_remove_group(&cdev->dev.kobj, &tape_attr_group);
+		dev_set_drvdata(&cdev->dev, NULL);
+		tape_put_device(device);
+	}
+}
+
+/*
+ * Allocate a new tape ccw request
+ */
+struct tape_request *
+tape_alloc_request(int cplength, int datasize)
+{
+	struct tape_request *request;
+
+	BUG_ON(datasize > PAGE_SIZE || (cplength*sizeof(struct ccw1)) > PAGE_SIZE);
+
+	DBF_LH(6, "tape_alloc_request(%d, %d)\n", cplength, datasize);
+
+	request = kzalloc(sizeof(struct tape_request), GFP_KERNEL);
+	if (request == NULL) {
+		DBF_EXCEPTION(1, "cqra nomem\n");
+		return ERR_PTR(-ENOMEM);
+	}
+	/* allocate channel program */
+	if (cplength > 0) {
+		request->cpaddr = kcalloc(cplength, sizeof(struct ccw1),
+					  GFP_ATOMIC | GFP_DMA);
+		if (request->cpaddr == NULL) {
+			DBF_EXCEPTION(1, "cqra nomem\n");
+			kfree(request);
+			return ERR_PTR(-ENOMEM);
+		}
+	}
+	/* alloc small kernel buffer */
+	if (datasize > 0) {
+		request->cpdata = kzalloc(datasize, GFP_KERNEL | GFP_DMA);
+		if (request->cpdata == NULL) {
+			DBF_EXCEPTION(1, "cqra nomem\n");
+			kfree(request->cpaddr);
+			kfree(request);
+			return ERR_PTR(-ENOMEM);
+		}
+	}
+	DBF_LH(6, "New request %p(%p/%p)\n", request, request->cpaddr,
+		request->cpdata);
+
+	return request;
+}
+
+/*
+ * Free tape ccw request
+ */
+void
+tape_free_request (struct tape_request * request)
+{
+	DBF_LH(6, "Free request %p\n", request);
+
+	if (request->device)
+		tape_put_device(request->device);
+	kfree(request->cpdata);
+	kfree(request->cpaddr);
+	kfree(request);
+}
+
+static int
+__tape_start_io(struct tape_device *device, struct tape_request *request)
+{
+	int rc;
+
+	rc = ccw_device_start(
+		device->cdev,
+		request->cpaddr,
+		(unsigned long) request,
+		0x00,
+		request->options
+	);
+	if (rc == 0) {
+		request->status = TAPE_REQUEST_IN_IO;
+	} else if (rc == -EBUSY) {
+		/* The common I/O subsystem is currently busy. Retry later. */
+		request->status = TAPE_REQUEST_QUEUED;
+		schedule_delayed_work(&device->tape_dnr, 0);
+		rc = 0;
+	} else {
+		/* Start failed. Remove request and indicate failure. */
+		DBF_EVENT(1, "tape: start request failed with RC = %i\n", rc);
+	}
+	return rc;
+}
+
+static void
+__tape_start_next_request(struct tape_device *device)
+{
+	struct list_head *l, *n;
+	struct tape_request *request;
+	int rc;
+
+	DBF_LH(6, "__tape_start_next_request(%p)\n", device);
+	/*
+	 * Try to start each request on the request queue until one is
+	 * started successfully.
+	 */
+	list_for_each_safe(l, n, &device->req_queue) {
+		request = list_entry(l, struct tape_request, list);
+
+		/*
+		 * Avoid race condition if bottom-half was triggered more than
+		 * once.
+		 */
+		if (request->status == TAPE_REQUEST_IN_IO)
+			return;
+		/*
+		 * Request has already been stopped. We have to wait until
+		 * the request is removed from the queue in the interrupt
+		 * handling.
+		 */
+		if (request->status == TAPE_REQUEST_DONE)
+			return;
+
+		/*
+		 * We wanted to cancel the request but the common I/O layer
+		 * was busy at that time. This can only happen if this
+		 * function is called by delayed_next_request.
+		 * Otherwise we start the next request on the queue.
+		 */
+		if (request->status == TAPE_REQUEST_CANCEL) {
+			rc = __tape_cancel_io(device, request);
+		} else {
+			rc = __tape_start_io(device, request);
+		}
+		if (rc == 0)
+			return;
+
+		/* Set ending status. */
+		request->rc = rc;
+		request->status = TAPE_REQUEST_DONE;
+
+		/* Remove from request queue. */
+		list_del(&request->list);
+
+		/* Do callback. */
+		if (request->callback != NULL)
+			request->callback(request, request->callback_data);
+	}
+}
+
+static void
+tape_delayed_next_request(struct work_struct *work)
+{
+	struct tape_device *device =
+		container_of(work, struct tape_device, tape_dnr.work);
+
+	DBF_LH(6, "tape_delayed_next_request(%p)\n", device);
+	spin_lock_irq(get_ccwdev_lock(device->cdev));
+	__tape_start_next_request(device);
+	spin_unlock_irq(get_ccwdev_lock(device->cdev));
+}
+
+static void tape_long_busy_timeout(struct timer_list *t)
+{
+	struct tape_device *device = from_timer(device, t, lb_timeout);
+	struct tape_request *request;
+
+	spin_lock_irq(get_ccwdev_lock(device->cdev));
+	request = list_entry(device->req_queue.next, struct tape_request, list);
+	BUG_ON(request->status != TAPE_REQUEST_LONG_BUSY);
+	DBF_LH(6, "%08x: Long busy timeout.\n", device->cdev_id);
+	__tape_start_next_request(device);
+	tape_put_device(device);
+	spin_unlock_irq(get_ccwdev_lock(device->cdev));
+}
+
+static void
+__tape_end_request(
+	struct tape_device *	device,
+	struct tape_request *	request,
+	int			rc)
+{
+	DBF_LH(6, "__tape_end_request(%p, %p, %i)\n", device, request, rc);
+	if (request) {
+		request->rc = rc;
+		request->status = TAPE_REQUEST_DONE;
+
+		/* Remove from request queue. */
+		list_del(&request->list);
+
+		/* Do callback. */
+		if (request->callback != NULL)
+			request->callback(request, request->callback_data);
+	}
+
+	/* Start next request. */
+	if (!list_empty(&device->req_queue))
+		__tape_start_next_request(device);
+}
+
+/*
+ * Write sense data to dbf
+ */
+void
+tape_dump_sense_dbf(struct tape_device *device, struct tape_request *request,
+		    struct irb *irb)
+{
+	unsigned int *sptr;
+	const char* op;
+
+	if (request != NULL)
+		op = tape_op_verbose[request->op];
+	else
+		op = "---";
+	DBF_EVENT(3, "DSTAT : %02x   CSTAT: %02x\n",
+		  irb->scsw.cmd.dstat, irb->scsw.cmd.cstat);
+	DBF_EVENT(3, "DEVICE: %08x OP\t: %s\n", device->cdev_id, op);
+	sptr = (unsigned int *) irb->ecw;
+	DBF_EVENT(3, "%08x %08x\n", sptr[0], sptr[1]);
+	DBF_EVENT(3, "%08x %08x\n", sptr[2], sptr[3]);
+	DBF_EVENT(3, "%08x %08x\n", sptr[4], sptr[5]);
+	DBF_EVENT(3, "%08x %08x\n", sptr[6], sptr[7]);
+}
+
+/*
+ * I/O helper function. Adds the request to the request queue
+ * and starts it if the tape is idle. Has to be called with
+ * the device lock held.
+ */
+static int
+__tape_start_request(struct tape_device *device, struct tape_request *request)
+{
+	int rc;
+
+	switch (request->op) {
+		case TO_MSEN:
+		case TO_ASSIGN:
+		case TO_UNASSIGN:
+		case TO_READ_ATTMSG:
+		case TO_RDC:
+			if (device->tape_state == TS_INIT)
+				break;
+			if (device->tape_state == TS_UNUSED)
+				break;
+		default:
+			if (device->tape_state == TS_BLKUSE)
+				break;
+			if (device->tape_state != TS_IN_USE)
+				return -ENODEV;
+	}
+
+	/* Increase use count of device for the added request. */
+	request->device = tape_get_device(device);
+
+	if (list_empty(&device->req_queue)) {
+		/* No other requests are on the queue. Start this one. */
+		rc = __tape_start_io(device, request);
+		if (rc)
+			return rc;
+
+		DBF_LH(5, "Request %p added for execution.\n", request);
+		list_add(&request->list, &device->req_queue);
+	} else {
+		DBF_LH(5, "Request %p add to queue.\n", request);
+		request->status = TAPE_REQUEST_QUEUED;
+		list_add_tail(&request->list, &device->req_queue);
+	}
+	return 0;
+}
+
+/*
+ * Add the request to the request queue, try to start it if the
+ * tape is idle. Return without waiting for end of i/o.
+ */
+int
+tape_do_io_async(struct tape_device *device, struct tape_request *request)
+{
+	int rc;
+
+	DBF_LH(6, "tape_do_io_async(%p, %p)\n", device, request);
+
+	spin_lock_irq(get_ccwdev_lock(device->cdev));
+	/* Add request to request queue and try to start it. */
+	rc = __tape_start_request(device, request);
+	spin_unlock_irq(get_ccwdev_lock(device->cdev));
+	return rc;
+}
+
+/*
+ * tape_do_io/__tape_wake_up
+ * Add the request to the request queue, try to start it if the
+ * tape is idle and wait uninterruptibly for its completion.
+ */
+static void
+__tape_wake_up(struct tape_request *request, void *data)
+{
+	request->callback = NULL;
+	wake_up((wait_queue_head_t *) data);
+}
+
+int
+tape_do_io(struct tape_device *device, struct tape_request *request)
+{
+	int rc;
+
+	spin_lock_irq(get_ccwdev_lock(device->cdev));
+	/* Setup callback */
+	request->callback = __tape_wake_up;
+	request->callback_data = &device->wait_queue;
+	/* Add request to request queue and try to start it. */
+	rc = __tape_start_request(device, request);
+	spin_unlock_irq(get_ccwdev_lock(device->cdev));
+	if (rc)
+		return rc;
+	/* Request added to the queue. Wait for its completion. */
+	wait_event(device->wait_queue, (request->callback == NULL));
+	/* Get rc from request */
+	return request->rc;
+}
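+
+/*
+ * A minimal synchronous request as a discipline would issue it (sketch;
+ * tape_std_mtnop() in tape_std.c is a real instance of this pattern):
+ *
+ *	request = tape_alloc_request(2, 0);
+ *	if (IS_ERR(request))
+ *		return PTR_ERR(request);
+ *	request->op = TO_NOP;
+ *	tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1, device->modeset_byte);
+ *	tape_ccw_end(request->cpaddr + 1, NOP, 0, NULL);
+ *	rc = tape_do_io(device, request);
+ *	tape_free_request(request);
+ */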
+
+/*
+ * tape_do_io_interruptible/__tape_wake_up_interruptible
+ * Add the request to the request queue, try to start it if the
+ * tape is idle and wait interruptibly for its completion.
+ */
+static void
+__tape_wake_up_interruptible(struct tape_request *request, void *data)
+{
+	request->callback = NULL;
+	wake_up_interruptible((wait_queue_head_t *) data);
+}
+
+int
+tape_do_io_interruptible(struct tape_device *device,
+			 struct tape_request *request)
+{
+	int rc;
+
+	spin_lock_irq(get_ccwdev_lock(device->cdev));
+	/* Setup callback */
+	request->callback = __tape_wake_up_interruptible;
+	request->callback_data = &device->wait_queue;
+	rc = __tape_start_request(device, request);
+	spin_unlock_irq(get_ccwdev_lock(device->cdev));
+	if (rc)
+		return rc;
+	/* Request added to the queue. Wait for its completion. */
+	rc = wait_event_interruptible(device->wait_queue,
+				      (request->callback == NULL));
+	if (rc != -ERESTARTSYS)
+		/* Request finished normally. */
+		return request->rc;
+
+	/* Interrupted by a signal. We have to stop the current request. */
+	spin_lock_irq(get_ccwdev_lock(device->cdev));
+	rc = __tape_cancel_io(device, request);
+	spin_unlock_irq(get_ccwdev_lock(device->cdev));
+	if (rc == 0) {
+		/* Wait for the interrupt that acknowledges the halt. */
+		do {
+			rc = wait_event_interruptible(
+				device->wait_queue,
+				(request->callback == NULL)
+			);
+		} while (rc == -ERESTARTSYS);
+
+		DBF_EVENT(3, "IO stopped on %08x\n", device->cdev_id);
+		rc = -ERESTARTSYS;
+	}
+	return rc;
+}
+
+/*
+ * Stop running ccw.
+ */
+int
+tape_cancel_io(struct tape_device *device, struct tape_request *request)
+{
+	int rc;
+
+	spin_lock_irq(get_ccwdev_lock(device->cdev));
+	rc = __tape_cancel_io(device, request);
+	spin_unlock_irq(get_ccwdev_lock(device->cdev));
+	return rc;
+}
+
+/*
+ * Tape interrupt routine, called from the ccw_device layer
+ */
+static void
+__tape_do_irq (struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
+{
+	struct tape_device *device;
+	struct tape_request *request;
+	int rc;
+
+	device = dev_get_drvdata(&cdev->dev);
+	if (device == NULL) {
+		return;
+	}
+	request = (struct tape_request *) intparm;
+
+	DBF_LH(6, "__tape_do_irq(device=%p, request=%p)\n", device, request);
+
+	/* On special conditions irb is an error pointer */
+	if (IS_ERR(irb)) {
+		/* FIXME: What to do with the request? */
+		switch (PTR_ERR(irb)) {
+			case -ETIMEDOUT:
+				DBF_LH(1, "(%08x): Request timed out\n",
+				       device->cdev_id);
+			case -EIO:
+				__tape_end_request(device, request, -EIO);
+				break;
+			default:
+				DBF_LH(1, "(%08x): Unexpected i/o error %li\n",
+				       device->cdev_id,	PTR_ERR(irb));
+		}
+		return;
+	}
+
+	/*
+	 * If the condition code is not zero and the start function bit is
+ * still set, this is a deferred error and the last start I/O did
+	 * not succeed. At this point the condition that caused the deferred
+	 * error might still apply. So we just schedule the request to be
+	 * started later.
+	 */
+	if (irb->scsw.cmd.cc != 0 &&
+	    (irb->scsw.cmd.fctl & SCSW_FCTL_START_FUNC) &&
+	    (request->status == TAPE_REQUEST_IN_IO)) {
+		DBF_EVENT(3,"(%08x): deferred cc=%i, fctl=%i. restarting\n",
+			device->cdev_id, irb->scsw.cmd.cc, irb->scsw.cmd.fctl);
+		request->status = TAPE_REQUEST_QUEUED;
+		schedule_delayed_work(&device->tape_dnr, HZ);
+		return;
+	}
+
+	/* May be an unsolicited irq */
+	if (request != NULL)
+		request->rescnt = irb->scsw.cmd.count;
+	else if ((irb->scsw.cmd.dstat == 0x85 || irb->scsw.cmd.dstat == 0x80) &&
+		 !list_empty(&device->req_queue)) {
+		/* Not Ready to Ready after long busy ? */
+		struct tape_request *req;
+		req = list_entry(device->req_queue.next,
+				 struct tape_request, list);
+		if (req->status == TAPE_REQUEST_LONG_BUSY) {
+			DBF_EVENT(3, "(%08x): del timer\n", device->cdev_id);
+			if (del_timer(&device->lb_timeout)) {
+				tape_put_device(device);
+				__tape_start_next_request(device);
+			}
+			return;
+		}
+	}
+	if (irb->scsw.cmd.dstat != 0x0c) {
+		/* Set the 'ONLINE' flag depending on sense byte 1 */
+		if (*(((__u8 *) irb->ecw) + 1) & SENSE_DRIVE_ONLINE)
+			device->tape_generic_status |= GMT_ONLINE(~0);
+		else
+			device->tape_generic_status &= ~GMT_ONLINE(~0);
+
+		/*
+		 * Any request that does not come back with channel end
+		 * and device end is unusual. Log the sense data.
+		 */
+		DBF_EVENT(3,"-- Tape Interrupthandler --\n");
+		tape_dump_sense_dbf(device, request, irb);
+	} else {
+		/* Upon normal completion the device _is_ online */
+		device->tape_generic_status |= GMT_ONLINE(~0);
+	}
+	if (device->tape_state == TS_NOT_OPER) {
+		DBF_EVENT(6, "tape:device is not operational\n");
+		return;
+	}
+
+	/*
+	 * Requests that were canceled still come back with an interrupt.
+	 * To detect these requests the state is set to TAPE_REQUEST_DONE.
+	 */
+	if (request != NULL && request->status == TAPE_REQUEST_DONE) {
+		__tape_end_request(device, request, -EIO);
+		return;
+	}
+
+	rc = device->discipline->irq(device, request, irb);
+	/*
+	 * rc < 0 : request finished unsuccessfully.
+	 * rc == TAPE_IO_SUCCESS: request finished successfully.
+	 * rc == TAPE_IO_PENDING: request is still running. Ignore rc.
+	 * rc == TAPE_IO_RETRY: request finished but needs another go.
+	 * rc == TAPE_IO_STOP: request needs to get terminated.
+	 */
+	switch (rc) {
+		case TAPE_IO_SUCCESS:
+			/* Upon normal completion the device _is_ online */
+			device->tape_generic_status |= GMT_ONLINE(~0);
+			__tape_end_request(device, request, rc);
+			break;
+		case TAPE_IO_PENDING:
+			break;
+		case TAPE_IO_LONG_BUSY:
+			device->lb_timeout.expires = jiffies +
+				LONG_BUSY_TIMEOUT * HZ;
+			DBF_EVENT(3, "(%08x): add timer\n", device->cdev_id);
+			add_timer(&device->lb_timeout);
+			request->status = TAPE_REQUEST_LONG_BUSY;
+			break;
+		case TAPE_IO_RETRY:
+			rc = __tape_start_io(device, request);
+			if (rc)
+				__tape_end_request(device, request, rc);
+			break;
+		case TAPE_IO_STOP:
+			rc = __tape_cancel_io(device, request);
+			if (rc)
+				__tape_end_request(device, request, rc);
+			break;
+		default:
+			if (rc > 0) {
+				DBF_EVENT(6, "xunknownrc\n");
+				__tape_end_request(device, request, -EIO);
+			} else {
+				__tape_end_request(device, request, rc);
+			}
+			break;
+	}
+}
+
+/*
+ * Tape device open function used by tape_char frontend.
+ */
+int
+tape_open(struct tape_device *device)
+{
+	int rc;
+
+	spin_lock_irq(get_ccwdev_lock(device->cdev));
+	if (device->tape_state == TS_NOT_OPER) {
+		DBF_EVENT(6, "TAPE:nodev\n");
+		rc = -ENODEV;
+	} else if (device->tape_state == TS_IN_USE) {
+		DBF_EVENT(6, "TAPE:dbusy\n");
+		rc = -EBUSY;
+	} else if (device->tape_state == TS_BLKUSE) {
+		DBF_EVENT(6, "TAPE:dbusy\n");
+		rc = -EBUSY;
+	} else if (device->discipline != NULL &&
+		   !try_module_get(device->discipline->owner)) {
+		DBF_EVENT(6, "TAPE:nodisc\n");
+		rc = -ENODEV;
+	} else {
+		tape_state_set(device, TS_IN_USE);
+		rc = 0;
+	}
+	spin_unlock_irq(get_ccwdev_lock(device->cdev));
+	return rc;
+}
+
+/*
+ * Tape device release function used by tape_char frontend.
+ */
+int
+tape_release(struct tape_device *device)
+{
+	spin_lock_irq(get_ccwdev_lock(device->cdev));
+	if (device->tape_state == TS_IN_USE)
+		tape_state_set(device, TS_UNUSED);
+	module_put(device->discipline->owner);
+	spin_unlock_irq(get_ccwdev_lock(device->cdev));
+	return 0;
+}
+
+/*
+ * Execute a magnetic tape command a number of times.
+ */
+int
+tape_mtop(struct tape_device *device, int mt_op, int mt_count)
+{
+	tape_mtop_fn fn;
+	int rc;
+
+	DBF_EVENT(6, "TAPE:mtio\n");
+	DBF_EVENT(6, "TAPE:ioop: %x\n", mt_op);
+	DBF_EVENT(6, "TAPE:arg:	 %x\n", mt_count);
+
+	if (mt_op < 0 || mt_op >= TAPE_NR_MTOPS)
+		return -EINVAL;
+	fn = device->discipline->mtop_array[mt_op];
+	if (fn == NULL)
+		return -EINVAL;
+
+	/* We assume that the backends can handle count up to 500. */
+	if (mt_op == MTBSR  || mt_op == MTFSR  || mt_op == MTFSF  ||
+	    mt_op == MTBSF  || mt_op == MTFSFM || mt_op == MTBSFM) {
+		rc = 0;
+		for (; mt_count > 500; mt_count -= 500)
+			if ((rc = fn(device, 500)) != 0)
+				break;
+		if (rc == 0)
+			rc = fn(device, mt_count);
+	} else
+		rc = fn(device, mt_count);
+	return rc;
+
+}
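+
+/*
+ * Example of the chunking above: MTFSR with mt_count == 1200 is issued as
+ * fn(device, 500), fn(device, 500) and finally fn(device, 200); the first
+ * non-zero return code terminates the sequence.
+ */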
+
+/*
+ * Tape init function.
+ */
+static int
+tape_init (void)
+{
+	TAPE_DBF_AREA = debug_register("tape", 2, 2, 4 * sizeof(long));
+	debug_register_view(TAPE_DBF_AREA, &debug_sprintf_view);
+#ifdef DBF_LIKE_HELL
+	debug_set_level(TAPE_DBF_AREA, 6);
+#endif
+	DBF_EVENT(3, "tape init\n");
+	tape_proc_init();
+	tapechar_init();
+	return 0;
+}
+
+/*
+ * Tape exit function.
+ */
+static void
+tape_exit(void)
+{
+	DBF_EVENT(6, "tape exit\n");
+
+	/* Get rid of the frontends */
+	tapechar_exit();
+	tape_proc_cleanup();
+	debug_unregister(TAPE_DBF_AREA);
+}
+
+MODULE_AUTHOR("(C) 2001 IBM Deutschland Entwicklung GmbH by Carsten Otte and "
+	      "Michael Holzheu (cotte@de.ibm.com,holzheu@de.ibm.com)");
+MODULE_DESCRIPTION("Linux on zSeries channel attached tape device driver");
+MODULE_LICENSE("GPL");
+
+module_init(tape_init);
+module_exit(tape_exit);
+
+EXPORT_SYMBOL(tape_generic_remove);
+EXPORT_SYMBOL(tape_generic_probe);
+EXPORT_SYMBOL(tape_generic_online);
+EXPORT_SYMBOL(tape_generic_offline);
+EXPORT_SYMBOL(tape_generic_pm_suspend);
+EXPORT_SYMBOL(tape_put_device);
+EXPORT_SYMBOL(tape_get_device);
+EXPORT_SYMBOL(tape_state_verbose);
+EXPORT_SYMBOL(tape_op_verbose);
+EXPORT_SYMBOL(tape_state_set);
+EXPORT_SYMBOL(tape_med_state_set);
+EXPORT_SYMBOL(tape_alloc_request);
+EXPORT_SYMBOL(tape_free_request);
+EXPORT_SYMBOL(tape_dump_sense_dbf);
+EXPORT_SYMBOL(tape_do_io);
+EXPORT_SYMBOL(tape_do_io_async);
+EXPORT_SYMBOL(tape_do_io_interruptible);
+EXPORT_SYMBOL(tape_cancel_io);
+EXPORT_SYMBOL(tape_mtop);
diff --git a/drivers/s390/char/tape_proc.c b/drivers/s390/char/tape_proc.c
new file mode 100644
index 0000000..32a14ee
--- /dev/null
+++ b/drivers/s390/char/tape_proc.c
@@ -0,0 +1,129 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ *    tape device driver for S/390 and zSeries tapes.
+ *
+ *  S390 and zSeries version
+ *    Copyright IBM Corp. 2001
+ *    Author(s): Carsten Otte <cotte@de.ibm.com>
+ *		 Michael Holzheu <holzheu@de.ibm.com>
+ *		 Tuan Ngo-Anh <ngoanh@de.ibm.com>
+ *
+ * PROCFS Functions
+ */
+
+#define KMSG_COMPONENT "tape"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <linux/module.h>
+#include <linux/vmalloc.h>
+#include <linux/seq_file.h>
+#include <linux/proc_fs.h>
+
+#define TAPE_DBF_AREA	tape_core_dbf
+
+#include "tape.h"
+
+static const char *tape_med_st_verbose[MS_SIZE] =
+{
+	[MS_UNKNOWN] = "UNKNOWN ",
+	[MS_LOADED] = "LOADED  ",
+	[MS_UNLOADED] = "UNLOADED"
+};
+
+/* our proc tapedevices entry */
+static struct proc_dir_entry *tape_proc_devices;
+
+/*
+ * Show function for /proc/tapedevices
+ */
+static int tape_proc_show(struct seq_file *m, void *v)
+{
+	struct tape_device *device;
+	struct tape_request *request;
+	const char *str;
+	unsigned long n;
+
+	n = (unsigned long) v - 1;
+	if (!n) {
+		seq_printf(m, "TapeNo\tBusID      CuType/Model\t"
+			"DevType/Model\tBlkSize\tState\tOp\tMedState\n");
+	}
+	device = tape_find_device(n);
+	if (IS_ERR(device))
+		return 0;
+	spin_lock_irq(get_ccwdev_lock(device->cdev));
+	seq_printf(m, "%d\t", (int) n);
+	seq_printf(m, "%-10.10s ", dev_name(&device->cdev->dev));
+	seq_printf(m, "%04X/", device->cdev->id.cu_type);
+	seq_printf(m, "%02X\t", device->cdev->id.cu_model);
+	seq_printf(m, "%04X/", device->cdev->id.dev_type);
+	seq_printf(m, "%02X\t\t", device->cdev->id.dev_model);
+	if (device->char_data.block_size == 0)
+		seq_printf(m, "auto\t");
+	else
+		seq_printf(m, "%i\t", device->char_data.block_size);
+	if (device->tape_state >= 0 &&
+	    device->tape_state < TS_SIZE)
+		str = tape_state_verbose[device->tape_state];
+	else
+		str = "UNKNOWN";
+	seq_printf(m, "%s\t", str);
+	if (!list_empty(&device->req_queue)) {
+		request = list_entry(device->req_queue.next,
+				     struct tape_request, list);
+		str = tape_op_verbose[request->op];
+	} else
+		str = "---";
+	seq_printf(m, "%s\t", str);
+	seq_printf(m, "%s\n", tape_med_st_verbose[device->medium_state]);
+	spin_unlock_irq(get_ccwdev_lock(device->cdev));
+	tape_put_device(device);
+	return 0;
+}
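+
+/*
+ * Illustrative /proc/tapedevices output (all values made up):
+ *
+ * TapeNo	BusID      CuType/Model	DevType/Model	BlkSize	State	Op	MedState
+ * 0	0.0.0180   3490/10	3490/40		auto	IN_USE	NOP	LOADED
+ */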
+
+static void *tape_proc_start(struct seq_file *m, loff_t *pos)
+{
+	if (*pos >= 256 / TAPE_MINORS_PER_DEV)
+		return NULL;
+	return (void *)((unsigned long) *pos + 1);
+}
+
+static void *tape_proc_next(struct seq_file *m, void *v, loff_t *pos)
+{
+	++*pos;
+	return tape_proc_start(m, pos);
+}
+
+static void tape_proc_stop(struct seq_file *m, void *v)
+{
+}
+
+static const struct seq_operations tape_proc_seq = {
+	.start		= tape_proc_start,
+	.next		= tape_proc_next,
+	.stop		= tape_proc_stop,
+	.show		= tape_proc_show,
+};
+
+/*
+ * Initialize procfs stuff on startup
+ */
+void
+tape_proc_init(void)
+{
+	tape_proc_devices = proc_create_seq("tapedevices",
+			S_IFREG | S_IRUGO | S_IWUSR, NULL,  &tape_proc_seq);
+	if (tape_proc_devices == NULL) {
+		return;
+	}
+}
+
+/*
+ * Cleanup all stuff registered to the procfs
+ */
+void
+tape_proc_cleanup(void)
+{
+	if (tape_proc_devices != NULL)
+		remove_proc_entry("tapedevices", NULL);
+}
diff --git a/drivers/s390/char/tape_std.c b/drivers/s390/char/tape_std.c
new file mode 100644
index 0000000..1f5fab6
--- /dev/null
+++ b/drivers/s390/char/tape_std.c
@@ -0,0 +1,745 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ *    standard tape device functions for ibm tapes.
+ *
+ *  S390 and zSeries version
+ *    Copyright IBM Corp. 2001, 2002
+ *    Author(s): Carsten Otte <cotte@de.ibm.com>
+ *		 Michael Holzheu <holzheu@de.ibm.com>
+ *		 Tuan Ngo-Anh <ngoanh@de.ibm.com>
+ *		 Martin Schwidefsky <schwidefsky@de.ibm.com>
+ *		 Stefan Bader <shbader@de.ibm.com>
+ */
+
+#define KMSG_COMPONENT "tape"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <linux/stddef.h>
+#include <linux/kernel.h>
+#include <linux/bio.h>
+#include <linux/timer.h>
+
+#include <asm/types.h>
+#include <asm/idals.h>
+#include <asm/ebcdic.h>
+#include <asm/tape390.h>
+
+#define TAPE_DBF_AREA	tape_core_dbf
+
+#include "tape.h"
+#include "tape_std.h"
+
+/*
+ * tape_std_assign
+ */
+static void
+tape_std_assign_timeout(struct timer_list *t)
+{
+	struct tape_request *	request = from_timer(request, t, timer);
+	struct tape_device *	device = request->device;
+	int rc;
+
+	BUG_ON(!device);
+
+	DBF_EVENT(3, "%08x: Assignment timeout. Device busy.\n",
+			device->cdev_id);
+	rc = tape_cancel_io(device, request);
+	if (rc)
+		DBF_EVENT(3, "(%08x): Assign timeout: Cancel failed with rc = "
+			  "%i\n", device->cdev_id, rc);
+}
+
+int
+tape_std_assign(struct tape_device *device)
+{
+	int                  rc;
+	struct tape_request *request;
+
+	request = tape_alloc_request(2, 11);
+	if (IS_ERR(request))
+		return PTR_ERR(request);
+
+	request->op = TO_ASSIGN;
+	tape_ccw_cc(request->cpaddr, ASSIGN, 11, request->cpdata);
+	tape_ccw_end(request->cpaddr + 1, NOP, 0, NULL);
+
+	/*
+	 * The assign command sometimes blocks if the device is assigned
+	 * to another host (actually this shouldn't happen but it does).
+	 * So we set up a timeout for this call.
+	 */
+	timer_setup(&request->timer, tape_std_assign_timeout, 0);
+	mod_timer(&request->timer, jiffies + 2 * HZ);
+
+	rc = tape_do_io_interruptible(device, request);
+
+	del_timer_sync(&request->timer);
+
+	if (rc != 0) {
+		DBF_EVENT(3, "%08x: assign failed - device might be busy\n",
+			device->cdev_id);
+	} else {
+		DBF_EVENT(3, "%08x: Tape assigned\n", device->cdev_id);
+	}
+	tape_free_request(request);
+	return rc;
+}
+
+/*
+ * tape_std_unassign
+ */
+int
+tape_std_unassign (struct tape_device *device)
+{
+	int                  rc;
+	struct tape_request *request;
+
+	if (device->tape_state == TS_NOT_OPER) {
+		DBF_EVENT(3, "(%08x): Can't unassign device\n",
+			device->cdev_id);
+		return -EIO;
+	}
+
+	request = tape_alloc_request(2, 11);
+	if (IS_ERR(request))
+		return PTR_ERR(request);
+
+	request->op = TO_UNASSIGN;
+	tape_ccw_cc(request->cpaddr, UNASSIGN, 11, request->cpdata);
+	tape_ccw_end(request->cpaddr + 1, NOP, 0, NULL);
+
+	if ((rc = tape_do_io(device, request)) != 0) {
+		DBF_EVENT(3, "%08x: Unassign failed\n", device->cdev_id);
+	} else {
+		DBF_EVENT(3, "%08x: Tape unassigned\n", device->cdev_id);
+	}
+	tape_free_request(request);
+	return rc;
+}
+
+/*
+ * TAPE390_DISPLAY: Show a string on the tape display.
+ */
+int
+tape_std_display(struct tape_device *device, struct display_struct *disp)
+{
+	struct tape_request *request;
+	int rc;
+
+	request = tape_alloc_request(2, 17);
+	if (IS_ERR(request)) {
+		DBF_EVENT(3, "TAPE: load display failed\n");
+		return PTR_ERR(request);
+	}
+	request->op = TO_DIS;
+
+	*(unsigned char *) request->cpdata = disp->cntrl;
+	DBF_EVENT(5, "TAPE: display cntrl=%04x\n", disp->cntrl);
+	memcpy(((unsigned char *) request->cpdata) + 1, disp->message1, 8);
+	memcpy(((unsigned char *) request->cpdata) + 9, disp->message2, 8);
+	ASCEBC(((unsigned char*) request->cpdata) + 1, 16);
+
+	tape_ccw_cc(request->cpaddr, LOAD_DISPLAY, 17, request->cpdata);
+	tape_ccw_end(request->cpaddr + 1, NOP, 0, NULL);
+
+	rc = tape_do_io_interruptible(device, request);
+	tape_free_request(request);
+	return rc;
+}
+
+/*
+ * Read block id.
+ */
+int
+tape_std_read_block_id(struct tape_device *device, __u64 *id)
+{
+	struct tape_request *request;
+	int rc;
+
+	request = tape_alloc_request(3, 8);
+	if (IS_ERR(request))
+		return PTR_ERR(request);
+	request->op = TO_RBI;
+	/* setup ccws */
+	tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1, device->modeset_byte);
+	tape_ccw_cc(request->cpaddr + 1, READ_BLOCK_ID, 8, request->cpdata);
+	tape_ccw_end(request->cpaddr + 2, NOP, 0, NULL);
+	/* execute it */
+	rc = tape_do_io(device, request);
+	if (rc == 0)
+		/* Get result from read buffer. */
+		*id = *(__u64 *) request->cpdata;
+	tape_free_request(request);
+	return rc;
+}
+
+int
+tape_std_terminate_write(struct tape_device *device)
+{
+	int rc;
+
+	if(device->required_tapemarks == 0)
+		return 0;
+
+	DBF_LH(5, "tape%d: terminate write %dxEOF\n", device->first_minor,
+		device->required_tapemarks);
+
+	rc = tape_mtop(device, MTWEOF, device->required_tapemarks);
+	if (rc)
+		return rc;
+
+	device->required_tapemarks = 0;
+	return tape_mtop(device, MTBSR, 1);
+}
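+
+/*
+ * Note on the sequence above: the pending tapemarks close the last written
+ * file, then MTBSR 1 steps back over the final tapemark so that the next
+ * write overwrites it. Two consecutive tapemarks mark the logical end of
+ * volume (see tape_std_mteom() below).
+ */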
+
+/*
+ * MTLOAD: Loads the tape.
+ * The default implementation just waits until the tape medium state
+ * changes to MS_LOADED.
+ */
+int
+tape_std_mtload(struct tape_device *device, int count)
+{
+	return wait_event_interruptible(device->state_change_wq,
+		(device->medium_state == MS_LOADED));
+}
+
+/*
+ * MTSETBLK: Set block size.
+ */
+int
+tape_std_mtsetblk(struct tape_device *device, int count)
+{
+	struct idal_buffer *new;
+
+	DBF_LH(6, "tape_std_mtsetblk(%d)\n", count);
+	if (count <= 0) {
+		/*
+		 * Just set block_size to 0. tapechar_read/tapechar_write
+		 * will realloc the idal buffer if a bigger one than the
+		 * current is needed.
+		 */
+		device->char_data.block_size = 0;
+		return 0;
+	}
+	if (device->char_data.idal_buf != NULL &&
+	    device->char_data.idal_buf->size == count)
+		/* We already have an idal buffer of that size. */
+		return 0;
+
+	if (count > MAX_BLOCKSIZE) {
+		DBF_EVENT(3, "Invalid block size (%d > %d) given.\n",
+			count, MAX_BLOCKSIZE);
+		return -EINVAL;
+	}
+
+	/* Allocate a new idal buffer. */
+	new = idal_buffer_alloc(count, 0);
+	if (IS_ERR(new))
+		return -ENOMEM;
+	if (device->char_data.idal_buf != NULL)
+		idal_buffer_free(device->char_data.idal_buf);
+	device->char_data.idal_buf = new;
+	device->char_data.block_size = count;
+
+	DBF_LH(6, "new blocksize is %d\n", device->char_data.block_size);
+
+	return 0;
+}
+
+/*
+ * MTRESET: Set block size to 0.
+ */
+int
+tape_std_mtreset(struct tape_device *device, int count)
+{
+	DBF_EVENT(6, "TCHAR:devreset:\n");
+	device->char_data.block_size = 0;
+	return 0;
+}
+
+/*
+ * MTFSF: Forward space over 'count' file marks. The tape is positioned
+ * at the EOT (End of Tape) side of the file mark.
+ */
+int
+tape_std_mtfsf(struct tape_device *device, int mt_count)
+{
+	struct tape_request *request;
+	struct ccw1 *ccw;
+
+	request = tape_alloc_request(mt_count + 2, 0);
+	if (IS_ERR(request))
+		return PTR_ERR(request);
+	request->op = TO_FSF;
+	/* setup ccws */
+	ccw = tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1,
+			  device->modeset_byte);
+	ccw = tape_ccw_repeat(ccw, FORSPACEFILE, mt_count);
+	ccw = tape_ccw_end(ccw, NOP, 0, NULL);
+
+	/* execute it */
+	return tape_do_io_free(device, request);
+}
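+
+/*
+ * The channel program built above for mt_count == 2 (sketch):
+ *
+ *	MODE_SET_DB --cc--> FORSPACEFILE --cc--> FORSPACEFILE --cc--> NOP
+ *
+ * tape_ccw_repeat() emits one command-chained FORSPACEFILE CCW per file
+ * mark to skip, which is why mt_count + 2 CCWs are allocated. The other
+ * space and write-tapemark operations below follow the same pattern.
+ */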
+
+/*
+ * MTFSR: Forward space over 'count' tape blocks (blocksize is set
+ * via MTSETBLK).
+ */
+int
+tape_std_mtfsr(struct tape_device *device, int mt_count)
+{
+	struct tape_request *request;
+	struct ccw1 *ccw;
+	int rc;
+
+	request = tape_alloc_request(mt_count + 2, 0);
+	if (IS_ERR(request))
+		return PTR_ERR(request);
+	request->op = TO_FSB;
+	/* setup ccws */
+	ccw = tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1,
+			  device->modeset_byte);
+	ccw = tape_ccw_repeat(ccw, FORSPACEBLOCK, mt_count);
+	ccw = tape_ccw_end(ccw, NOP, 0, NULL);
+
+	/* execute it */
+	rc = tape_do_io(device, request);
+	if (rc == 0 && request->rescnt > 0) {
+		DBF_LH(3, "FSR over tapemark\n");
+		rc = 1;
+	}
+	tape_free_request(request);
+
+	return rc;
+}
+
+/*
+ * MTBSR: Backward space over 'count' tape blocks
+ * (blocksize is set via MTSETBLK).
+ */
+int
+tape_std_mtbsr(struct tape_device *device, int mt_count)
+{
+	struct tape_request *request;
+	struct ccw1 *ccw;
+	int rc;
+
+	request = tape_alloc_request(mt_count + 2, 0);
+	if (IS_ERR(request))
+		return PTR_ERR(request);
+	request->op = TO_BSB;
+	/* setup ccws */
+	ccw = tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1,
+			  device->modeset_byte);
+	ccw = tape_ccw_repeat(ccw, BACKSPACEBLOCK, mt_count);
+	ccw = tape_ccw_end(ccw, NOP, 0, NULL);
+
+	/* execute it */
+	rc = tape_do_io(device, request);
+	if (rc == 0 && request->rescnt > 0) {
+		DBF_LH(3, "BSR over tapemark\n");
+		rc = 1;
+	}
+	tape_free_request(request);
+
+	return rc;
+}
+
+/*
+ * MTWEOF: Write 'count' file marks at the current position.
+ */
+int
+tape_std_mtweof(struct tape_device *device, int mt_count)
+{
+	struct tape_request *request;
+	struct ccw1 *ccw;
+
+	request = tape_alloc_request(mt_count + 2, 0);
+	if (IS_ERR(request))
+		return PTR_ERR(request);
+	request->op = TO_WTM;
+	/* setup ccws */
+	ccw = tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1,
+			  device->modeset_byte);
+	ccw = tape_ccw_repeat(ccw, WRITETAPEMARK, mt_count);
+	ccw = tape_ccw_end(ccw, NOP, 0, NULL);
+
+	/* execute it */
+	return tape_do_io_free(device, request);
+}
+
+/*
+ * MTBSFM: Backward space over 'count' file marks.
+ * The tape is positioned at the BOT (Begin Of Tape) side of the
+ * last skipped file mark.
+ */
+int
+tape_std_mtbsfm(struct tape_device *device, int mt_count)
+{
+	struct tape_request *request;
+	struct ccw1 *ccw;
+
+	request = tape_alloc_request(mt_count + 2, 0);
+	if (IS_ERR(request))
+		return PTR_ERR(request);
+	request->op = TO_BSF;
+	/* setup ccws */
+	ccw = tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1,
+			  device->modeset_byte);
+	ccw = tape_ccw_repeat(ccw, BACKSPACEFILE, mt_count);
+	ccw = tape_ccw_end(ccw, NOP, 0, NULL);
+
+	/* execute it */
+	return tape_do_io_free(device, request);
+}
+
+/*
+ * MTBSF: Backward space over 'count' file marks. The tape is positioned at
+ * the EOT (End of Tape) side of the last skipped file mark.
+ */
+int
+tape_std_mtbsf(struct tape_device *device, int mt_count)
+{
+	struct tape_request *request;
+	struct ccw1 *ccw;
+	int rc;
+
+	request = tape_alloc_request(mt_count + 2, 0);
+	if (IS_ERR(request))
+		return PTR_ERR(request);
+	request->op = TO_BSF;
+	/* setup ccws */
+	ccw = tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1,
+			  device->modeset_byte);
+	ccw = tape_ccw_repeat(ccw, BACKSPACEFILE, mt_count);
+	ccw = tape_ccw_end(ccw, NOP, 0, NULL);
+	/* execute it */
+	rc = tape_do_io_free(device, request);
+	if (rc == 0) {
+		rc = tape_mtop(device, MTFSR, 1);
+		if (rc > 0)
+			rc = 0;
+	}
+	return rc;
+}
+
+/*
+ * MTFSFM: Forward space over 'count' file marks.
+ * The tape is positioned at the BOT (Begin Of Tape) side
+ * of the last skipped file mark.
+ */
+int
+tape_std_mtfsfm(struct tape_device *device, int mt_count)
+{
+	struct tape_request *request;
+	struct ccw1 *ccw;
+	int rc;
+
+	request = tape_alloc_request(mt_count + 2, 0);
+	if (IS_ERR(request))
+		return PTR_ERR(request);
+	request->op = TO_FSF;
+	/* setup ccws */
+	ccw = tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1,
+			  device->modeset_byte);
+	ccw = tape_ccw_repeat(ccw, FORSPACEFILE, mt_count);
+	ccw = tape_ccw_end(ccw, NOP, 0, NULL);
+	/* execute it */
+	rc = tape_do_io_free(device, request);
+	if (rc == 0) {
+		rc = tape_mtop(device, MTBSR, 1);
+		if (rc > 0)
+			rc = 0;
+	}
+
+	return rc;
+}
+
+/*
+ * MTREW: Rewind the tape.
+ */
+int
+tape_std_mtrew(struct tape_device *device, int mt_count)
+{
+	struct tape_request *request;
+
+	request = tape_alloc_request(3, 0);
+	if (IS_ERR(request))
+		return PTR_ERR(request);
+	request->op = TO_REW;
+	/* setup ccws */
+	tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1,
+		    device->modeset_byte);
+	tape_ccw_cc(request->cpaddr + 1, REWIND, 0, NULL);
+	tape_ccw_end(request->cpaddr + 2, NOP, 0, NULL);
+
+	/* execute it */
+	return tape_do_io_free(device, request);
+}
+
+/*
+ * MTOFFL: Rewind the tape and put the drive off-line.
+ * Implements a 'rewind unload'.
+ */
+int
+tape_std_mtoffl(struct tape_device *device, int mt_count)
+{
+	struct tape_request *request;
+
+	request = tape_alloc_request(3, 0);
+	if (IS_ERR(request))
+		return PTR_ERR(request);
+	request->op = TO_RUN;
+	/* setup ccws */
+	tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1, device->modeset_byte);
+	tape_ccw_cc(request->cpaddr + 1, REWIND_UNLOAD, 0, NULL);
+	tape_ccw_end(request->cpaddr + 2, NOP, 0, NULL);
+
+	/* execute it */
+	return tape_do_io_free(device, request);
+}
+
+/*
+ * MTNOP: 'No operation'.
+ */
+int
+tape_std_mtnop(struct tape_device *device, int mt_count)
+{
+	struct tape_request *request;
+
+	request = tape_alloc_request(2, 0);
+	if (IS_ERR(request))
+		return PTR_ERR(request);
+	request->op = TO_NOP;
+	/* setup ccws */
+	tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1, device->modeset_byte);
+	tape_ccw_end(request->cpaddr + 1, NOP, 0, NULL);
+	/* execute it */
+	return tape_do_io_free(device, request);
+}
+
+/*
+ * MTEOM: positions at the end of the portion of the tape already used
+ * for recording data. MTEOM positions after the last file mark, ready for
+ * appending another file.
+ */
+int
+tape_std_mteom(struct tape_device *device, int mt_count)
+{
+	int rc;
+
+	/*
+	 * Seek to the beginning of tape (rewind).
+	 */
+	if ((rc = tape_mtop(device, MTREW, 1)) < 0)
+		return rc;
+
+	/*
+	 * The logical end of volume is given by two sequential tapemarks.
+	 * Look for this by skipping to the next file (over one tapemark)
+	 * and then test for another one (fsr returns 1 if a tapemark was
+	 * encountered).
+	 */
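+	/*
+	 * Illustration (TM = tapemark):
+	 *   | file 0 | TM | file 1 | TM | TM | unrecorded ...
+	 * MTFSF steps over one tapemark; the following MTFSR returns 1
+	 * only if it immediately runs into another tapemark, i.e. the
+	 * second of the two sequential marks. The final MTBSR backs up
+	 * over that mark so new data appends after the last file.
+	 */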
+	do {
+		if ((rc = tape_mtop(device, MTFSF, 1)) < 0)
+			return rc;
+		if ((rc = tape_mtop(device, MTFSR, 1)) < 0)
+			return rc;
+	} while (rc == 0);
+
+	return tape_mtop(device, MTBSR, 1);
+}
+
+/*
+ * MTRETEN: Retension the tape, i.e. forward space to end of tape and rewind.
+ */
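+/*
+ * The channel program loops: mode set, forward space file, NOP and a
+ * TIC (transfer in channel) back to its own start, so it keeps spacing
+ * over files until the drive reports the physical end of the tape.
+ * It is started interruptibly and its error status is deliberately
+ * ignored; the tape is simply rewound afterwards.
+ */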
+int
+tape_std_mtreten(struct tape_device *device, int mt_count)
+{
+	struct tape_request *request;
+
+	request = tape_alloc_request(4, 0);
+	if (IS_ERR(request))
+		return PTR_ERR(request);
+	request->op = TO_FSF;
+	/* setup ccws */
+	tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1, device->modeset_byte);
+	tape_ccw_cc(request->cpaddr + 1, FORSPACEFILE, 0, NULL);
+	tape_ccw_cc(request->cpaddr + 2, NOP, 0, NULL);
+	tape_ccw_end(request->cpaddr + 3, CCW_CMD_TIC, 0, request->cpaddr);
+	/* execute it, MTRETEN rc gets ignored */
+	tape_do_io_interruptible(device, request);
+	tape_free_request(request);
+	return tape_mtop(device, MTREW, 1);
+}
+
+/*
+ * MTERASE: erases the tape.
+ */
+int
+tape_std_mterase(struct tape_device *device, int mt_count)
+{
+	struct tape_request *request;
+
+	request = tape_alloc_request(6, 0);
+	if (IS_ERR(request))
+		return PTR_ERR(request);
+	request->op = TO_DSE;
+	/* setup ccws */
+	tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1, device->modeset_byte);
+	tape_ccw_cc(request->cpaddr + 1, REWIND, 0, NULL);
+	tape_ccw_cc(request->cpaddr + 2, ERASE_GAP, 0, NULL);
+	tape_ccw_cc(request->cpaddr + 3, DATA_SEC_ERASE, 0, NULL);
+	tape_ccw_cc(request->cpaddr + 4, REWIND, 0, NULL);
+	tape_ccw_end(request->cpaddr + 5, NOP, 0, NULL);
+
+	/* execute it */
+	return tape_do_io_free(device, request);
+}
+
+/*
+ * MTUNLOAD: Rewind the tape and unload it.
+ */
+int
+tape_std_mtunload(struct tape_device *device, int mt_count)
+{
+	return tape_mtop(device, MTOFFL, mt_count);
+}
+
+/*
+ * MTCOMPRESSION: enable or disable compression.
+ * Switches IDRC (Improved Data Recording Capability) on/off.
+ */
+int
+tape_std_mtcompression(struct tape_device *device, int mt_count)
+{
+	struct tape_request *request;
+
+	if (mt_count < 0 || mt_count > 1) {
+		DBF_EXCEPTION(6, "xcom parm\n");
+		return -EINVAL;
+	}
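+	/*
+	 * Bit 0x08 of the mode set byte selects IDRC. Every channel
+	 * program built by this driver starts with a MODE_SET_DB ccw
+	 * that re-sends this byte, so changing it here affects all
+	 * subsequent I/O on the device.
+	 */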
+	request = tape_alloc_request(2, 0);
+	if (IS_ERR(request))
+		return PTR_ERR(request);
+	request->op = TO_NOP;
+	/* setup ccws */
+	if (mt_count == 0)
+		*device->modeset_byte &= ~0x08;
+	else
+		*device->modeset_byte |= 0x08;
+	tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1, device->modeset_byte);
+	tape_ccw_end(request->cpaddr + 1, NOP, 0, NULL);
+	/* execute it */
+	return tape_do_io_free(device, request);
+}
+
+/*
+ * Read Block
+ */
+struct tape_request *
+tape_std_read_block(struct tape_device *device, size_t count)
+{
+	struct tape_request *request;
+
+	/*
+	 * We have to alloc 4 ccws in order to be able to transform request
+	 * into a read backward request in error case.
+	 */
+	request = tape_alloc_request(4, 0);
+	if (IS_ERR(request)) {
+		DBF_EXCEPTION(6, "xrbl fail");
+		return request;
+	}
+	request->op = TO_RFO;
+	tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1, device->modeset_byte);
+	tape_ccw_end_idal(request->cpaddr + 1, READ_FORWARD,
+			  device->char_data.idal_buf);
+	DBF_EVENT(6, "xrbl ccwg\n");
+	return request;
+}
+
+/*
+ * Read Block backward transformation function.
+ */
+void
+tape_std_read_backward(struct tape_device *device, struct tape_request *request)
+{
+	/*
+	 * We have allocated 4 ccws in tape_std_read, so we can now
+	 * transform the request to a read backward, followed by a
+	 * forward space block.
+	 */
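+	/*
+	 * Resulting channel program:
+	 *   cpaddr + 0: MODE_SET_DB
+	 *   cpaddr + 1: READ_BACKWARD (idal buffer)
+	 *   cpaddr + 2: FORSPACEBLOCK (reposition after the block)
+	 *   cpaddr + 3: NOP
+	 */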
+	request->op = TO_RBA;
+	tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1, device->modeset_byte);
+	tape_ccw_cc_idal(request->cpaddr + 1, READ_BACKWARD,
+			 device->char_data.idal_buf);
+	tape_ccw_cc(request->cpaddr + 2, FORSPACEBLOCK, 0, NULL);
+	tape_ccw_end(request->cpaddr + 3, NOP, 0, NULL);
+	DBF_EVENT(6, "xrop ccwg");}
+
+/*
+ * Write Block
+ */
+struct tape_request *
+tape_std_write_block(struct tape_device *device, size_t count)
+{
+	struct tape_request *request;
+
+	request = tape_alloc_request(2, 0);
+	if (IS_ERR(request)) {
+		DBF_EXCEPTION(6, "xwbl fail\n");
+		return request;
+	}
+	request->op = TO_WRI;
+	tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1, device->modeset_byte);
+	tape_ccw_end_idal(request->cpaddr + 1, WRITE_CMD,
+			  device->char_data.idal_buf);
+	DBF_EVENT(6, "xwbl ccwg\n");
+	return request;
+}
+
+/*
+ * This routine is called by the frontend after an ENOSPC on write.
+ */
+void
+tape_std_process_eov(struct tape_device *device)
+{
+	/*
+	 * End of volume: We have to backspace the last written record, then
+	 * we TRY to write a tapemark and then backspace over the written TM
+	 */
+	if (tape_mtop(device, MTBSR, 1) == 0 &&
+	    tape_mtop(device, MTWEOF, 1) == 0) {
+		tape_mtop(device, MTBSR, 1);
+	}
+}
+
+EXPORT_SYMBOL(tape_std_assign);
+EXPORT_SYMBOL(tape_std_unassign);
+EXPORT_SYMBOL(tape_std_display);
+EXPORT_SYMBOL(tape_std_read_block_id);
+EXPORT_SYMBOL(tape_std_mtload);
+EXPORT_SYMBOL(tape_std_mtsetblk);
+EXPORT_SYMBOL(tape_std_mtreset);
+EXPORT_SYMBOL(tape_std_mtfsf);
+EXPORT_SYMBOL(tape_std_mtfsr);
+EXPORT_SYMBOL(tape_std_mtbsr);
+EXPORT_SYMBOL(tape_std_mtweof);
+EXPORT_SYMBOL(tape_std_mtbsfm);
+EXPORT_SYMBOL(tape_std_mtbsf);
+EXPORT_SYMBOL(tape_std_mtfsfm);
+EXPORT_SYMBOL(tape_std_mtrew);
+EXPORT_SYMBOL(tape_std_mtoffl);
+EXPORT_SYMBOL(tape_std_mtnop);
+EXPORT_SYMBOL(tape_std_mteom);
+EXPORT_SYMBOL(tape_std_mtreten);
+EXPORT_SYMBOL(tape_std_mterase);
+EXPORT_SYMBOL(tape_std_mtunload);
+EXPORT_SYMBOL(tape_std_mtcompression);
+EXPORT_SYMBOL(tape_std_read_block);
+EXPORT_SYMBOL(tape_std_read_backward);
+EXPORT_SYMBOL(tape_std_write_block);
+EXPORT_SYMBOL(tape_std_process_eov);
diff --git a/drivers/s390/char/tape_std.h b/drivers/s390/char/tape_std.h
new file mode 100644
index 0000000..53ec8e2
--- /dev/null
+++ b/drivers/s390/char/tape_std.h
@@ -0,0 +1,155 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ *    Standard tape device functions for IBM tapes.
+ *
+ *    Copyright IBM Corp. 2001, 2006
+ *    Author(s): Carsten Otte <cotte@de.ibm.com>
+ *		 Tuan Ngo-Anh <ngoanh@de.ibm.com>
+ *		 Martin Schwidefsky <schwidefsky@de.ibm.com>
+ */
+
+#ifndef _TAPE_STD_H
+#define _TAPE_STD_H
+
+#include <asm/tape390.h>
+
+/*
+ * Biggest block size to handle. Currently 64K because we only build
+ * channel programs without data chaining.
+ */
+#define MAX_BLOCKSIZE   65535
+
+/*
+ * The CCW commands for the Tape type of command.
+ */
+#define INVALID_00		0x00	/* Invalid cmd */
+#define BACKSPACEBLOCK		0x27	/* Back Space block */
+#define BACKSPACEFILE		0x2f	/* Back Space file */
+#define DATA_SEC_ERASE		0x97	/* Data security erase */
+#define ERASE_GAP		0x17	/* Erase Gap */
+#define FORSPACEBLOCK		0x37	/* Forward space block */
+#define FORSPACEFILE		0x3F	/* Forward Space file */
+#define FORCE_STREAM_CNT	0xEB	/* Forced streaming count # */
+#define NOP			0x03	/* No operation	*/
+#define READ_FORWARD		0x02	/* Read forward */
+#define REWIND			0x07	/* Rewind */
+#define REWIND_UNLOAD		0x0F	/* Rewind and Unload */
+#define SENSE			0x04	/* Sense */
+#define NEW_MODE_SET		0xEB	/* Guess it is Mode set */
+#define WRITE_CMD		0x01	/* Write */
+#define WRITETAPEMARK		0x1F	/* Write Tape Mark */
+
+#define ASSIGN			0xB7	/* 3420 REJECT,3480 OK	*/
+#define CONTROL_ACCESS		0xE3	/* Set high speed */
+#define DIAG_MODE_SET		0x0B	/* 3420 NOP, 3480 REJECT */
+#define LOAD_DISPLAY		0x9F	/* 3420 REJECT,3480 OK */
+#define LOCATE			0x4F	/* 3420 REJ, 3480 NOP */
+#define LOOP_WRITE_TO_READ	0x8B	/* 3480 REJECT */
+#define MODE_SET_DB		0xDB	/* 3420 REJECT,3480 OK */
+#define MODE_SET_C3		0xC3	/* for 3420 */
+#define MODE_SET_CB		0xCB	/* for 3420 */
+#define MODE_SET_D3		0xD3	/* for 3420 */
+#define READ_BACKWARD		0x0C	/* */
+#define READ_BLOCK_ID		0x22	/* 3420 REJECT,3480 OK */
+#define READ_BUFFER		0x12	/* 3420 REJECT,3480 OK */
+#define READ_BUFF_LOG		0x24	/* 3420 REJECT,3480 OK */
+#define RELEASE			0xD4	/* 3420 NOP, 3480 REJECT */
+#define REQ_TRK_IN_ERROR	0x1B	/* 3420 NOP, 3480 REJECT */
+#define RESERVE			0xF4	/* 3420 NOP, 3480 REJECT */
+#define SENSE_GROUP_ID		0x34	/* 3420 REJECT,3480 OK */
+#define SENSE_ID		0xE4	/* 3420 REJECT,3480 OK */
+#define READ_DEV_CHAR		0x64	/* Read device characteristics */
+#define SET_DIAGNOSE		0x4B	/* 3420 NOP, 3480 REJECT */
+#define SET_GROUP_ID		0xAF	/* 3420 REJECT,3480 OK */
+#define SET_TAPE_WRITE_IMMED	0xC3	/* for 3480 */
+#define SUSPEND			0x5B	/* 3420 REJ, 3480 NOP */
+#define SYNC			0x43	/* Synchronize (flush buffer) */
+#define UNASSIGN		0xC7	/* 3420 REJECT,3480 OK */
+#define PERF_SUBSYS_FUNC	0x77	/* 3490 CMD */
+#define READ_CONFIG_DATA	0xFA	/* 3490 CMD */
+#define READ_MESSAGE_ID		0x4E	/* 3490 CMD */
+#define READ_SUBSYS_DATA	0x3E	/* 3490 CMD */
+#define SET_INTERFACE_ID	0x73	/* 3490 CMD */
+
+#define SENSE_COMMAND_REJECT		0x80
+#define SENSE_INTERVENTION_REQUIRED	0x40
+#define SENSE_BUS_OUT_CHECK		0x20
+#define SENSE_EQUIPMENT_CHECK		0x10
+#define SENSE_DATA_CHECK		0x08
+#define SENSE_OVERRUN			0x04
+#define SENSE_DEFERRED_UNIT_CHECK	0x02
+#define SENSE_ASSIGNED_ELSEWHERE	0x01
+
+#define SENSE_LOCATE_FAILURE		0x80
+#define SENSE_DRIVE_ONLINE		0x40
+#define SENSE_RESERVED			0x20
+#define SENSE_RECORD_SEQUENCE_ERR	0x10
+#define SENSE_BEGINNING_OF_TAPE		0x08
+#define SENSE_WRITE_MODE		0x04
+#define SENSE_WRITE_PROTECT		0x02
+#define SENSE_NOT_CAPABLE		0x01
+
+#define SENSE_CHANNEL_ADAPTER_CODE	0xE0
+#define SENSE_CHANNEL_ADAPTER_LOC	0x10
+#define SENSE_REPORTING_CU		0x08
+#define SENSE_AUTOMATIC_LOADER		0x04
+#define SENSE_TAPE_SYNC_MODE		0x02
+#define SENSE_TAPE_POSITIONING		0x01
+
+/* discipline functions */
+struct tape_request *tape_std_read_block(struct tape_device *, size_t);
+void tape_std_read_backward(struct tape_device *device,
+			    struct tape_request *request);
+struct tape_request *tape_std_write_block(struct tape_device *, size_t);
+void tape_std_check_locate(struct tape_device *, struct tape_request *);
+
+/* Some non-mtop commands. */
+int tape_std_assign(struct tape_device *);
+int tape_std_unassign(struct tape_device *);
+int tape_std_read_block_id(struct tape_device *device, __u64 *id);
+int tape_std_display(struct tape_device *, struct display_struct *disp);
+int tape_std_terminate_write(struct tape_device *);
+
+/* Standard magnetic tape commands. */
+int tape_std_mtbsf(struct tape_device *, int);
+int tape_std_mtbsfm(struct tape_device *, int);
+int tape_std_mtbsr(struct tape_device *, int);
+int tape_std_mtcompression(struct tape_device *, int);
+int tape_std_mteom(struct tape_device *, int);
+int tape_std_mterase(struct tape_device *, int);
+int tape_std_mtfsf(struct tape_device *, int);
+int tape_std_mtfsfm(struct tape_device *, int);
+int tape_std_mtfsr(struct tape_device *, int);
+int tape_std_mtload(struct tape_device *, int);
+int tape_std_mtnop(struct tape_device *, int);
+int tape_std_mtoffl(struct tape_device *, int);
+int tape_std_mtreset(struct tape_device *, int);
+int tape_std_mtreten(struct tape_device *, int);
+int tape_std_mtrew(struct tape_device *, int);
+int tape_std_mtsetblk(struct tape_device *, int);
+int tape_std_mtunload(struct tape_device *, int);
+int tape_std_mtweof(struct tape_device *, int);
+
+/* Event handlers */
+void tape_std_default_handler(struct tape_device *);
+void tape_std_unexpect_uchk_handler(struct tape_device *);
+void tape_std_irq(struct tape_device *);
+void tape_std_process_eov(struct tape_device *);
+
+/* The error recovery functions: */
+void tape_std_error_recovery(struct tape_device *);
+void tape_std_error_recovery_has_failed(struct tape_device *,int error_id);
+void tape_std_error_recovery_succeded(struct tape_device *);
+void tape_std_error_recovery_do_retry(struct tape_device *);
+void tape_std_error_recovery_read_opposite(struct tape_device *);
+void tape_std_error_recovery_HWBUG(struct tape_device *, int condno);
+
+/* S390 tape types */
+enum s390_tape_type {
+        tape_3480,
+        tape_3490,
+        tape_3590,
+        tape_3592,
+};
+
+#endif // _TAPE_STD_H
diff --git a/drivers/s390/char/tty3270.c b/drivers/s390/char/tty3270.c
new file mode 100644
index 0000000..5b8af27
--- /dev/null
+++ b/drivers/s390/char/tty3270.c
@@ -0,0 +1,1980 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ *    IBM/3270 Driver - tty functions.
+ *
+ *  Author(s):
+ *    Original 3270 Code for 2.4 written by Richard Hitt (UTS Global)
+ *    Rewritten for 2.5 by Martin Schwidefsky <schwidefsky@de.ibm.com>
+ *	-- Copyright IBM Corp. 2003
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/kdev_t.h>
+#include <linux/tty.h>
+#include <linux/vt_kern.h>
+#include <linux/init.h>
+#include <linux/console.h>
+#include <linux/interrupt.h>
+#include <linux/workqueue.h>
+
+#include <linux/slab.h>
+#include <linux/bootmem.h>
+#include <linux/compat.h>
+
+#include <asm/ccwdev.h>
+#include <asm/cio.h>
+#include <asm/ebcdic.h>
+#include <linux/uaccess.h>
+
+#include "raw3270.h"
+#include "tty3270.h"
+#include "keyboard.h"
+
+#define TTY3270_CHAR_BUF_SIZE 256
+#define TTY3270_OUTPUT_BUFFER_SIZE 1024
+#define TTY3270_STRING_PAGES 5
+
+struct tty_driver *tty3270_driver;
+static int tty3270_max_index;
+
+static struct raw3270_fn tty3270_fn;
+
+struct tty3270_cell {
+	unsigned char character;
+	unsigned char highlight;
+	unsigned char f_color;
+};
+
+struct tty3270_line {
+	struct tty3270_cell *cells;
+	int len;
+};
+
+#define ESCAPE_NPAR 8
+
+/*
+ * The main tty view data structure.
+ * FIXME:
+ * 1) describe line orientation & lines list concept against screen
+ * 2) describe conversion of screen to lines
+ * 3) describe line format.
+ */
+struct tty3270 {
+	struct raw3270_view view;
+	struct tty_port port;
+	void **freemem_pages;		/* Array of pages used for freemem. */
+	struct list_head freemem;	/* List of free memory for strings. */
+
+	/* Output stuff. */
+	struct list_head lines;		/* List of lines. */
+	struct list_head update;	/* List of lines to update. */
+	unsigned char wcc;		/* Write control character. */
+	int nr_lines;			/* # lines in list. */
+	int nr_up;			/* # lines up in history. */
+	unsigned long update_flags;	/* Update indication bits. */
+	struct string *status;		/* Lower right of display. */
+	struct raw3270_request *write;	/* Single write request. */
+	struct timer_list timer;	/* Output delay timer. */
+
+	/* Current tty screen. */
+	unsigned int cx, cy;		/* Current output position. */
+	unsigned int highlight;		/* Blink/reverse/underscore */
+	unsigned int f_color;		/* Foreground color */
+	struct tty3270_line *screen;
+	unsigned int n_model, n_cols, n_rows;	/* New model & size */
+	struct work_struct resize_work;
+
+	/* Input stuff. */
+	struct string *prompt;		/* Output string for input area. */
+	struct string *input;		/* Input string for read request. */
+	struct raw3270_request *read;	/* Single read request. */
+	struct raw3270_request *kreset;	/* Single keyboard reset request. */
+	unsigned char inattr;		/* Visible/invisible input. */
+	int throttle, attn;		/* tty throttle/unthrottle. */
+	struct tasklet_struct readlet;	/* Tasklet to issue read request. */
+	struct tasklet_struct hanglet;	/* Tasklet to hang up the tty. */
+	struct kbd_data *kbd;		/* key_maps stuff. */
+
+	/* Escape sequence parsing. */
+	int esc_state, esc_ques, esc_npar;
+	int esc_par[ESCAPE_NPAR];
+	unsigned int saved_cx, saved_cy;
+	unsigned int saved_highlight, saved_f_color;
+
+	/* Command recalling. */
+	struct list_head rcl_lines;	/* List of recallable lines. */
+	struct list_head *rcl_walk;	/* Point in rcl_lines list. */
+	int rcl_nr, rcl_max;		/* Number/max number of rcl_lines. */
+
+	/* Character array for put_char/flush_chars. */
+	unsigned int char_count;
+	char char_buf[TTY3270_CHAR_BUF_SIZE];
+};
+
+/* tty3270->update_flags. See tty3270_update for details. */
+#define TTY_UPDATE_ERASE	1	/* Use EWRITEA instead of WRITE. */
+#define TTY_UPDATE_LIST		2	/* Update lines in tty3270->update. */
+#define TTY_UPDATE_INPUT	4	/* Update input line. */
+#define TTY_UPDATE_STATUS	8	/* Update status line. */
+#define TTY_UPDATE_ALL		16	/* Recreate screen. */
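+/*
+ * The flags are OR'ed into tp->update_flags by the producers and are
+ * cleared in tty3270_update once the corresponding data has been packed
+ * into a write request; TTY_UPDATE_ALL forces a complete rebuild of the
+ * screen, including an erase write.
+ */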
+
+static void tty3270_update(struct timer_list *);
+static void tty3270_resize_work(struct work_struct *work);
+
+/*
+ * Set up a timeout for a device. On timeout, trigger an update.
+ */
+static void tty3270_set_timer(struct tty3270 *tp, int expires)
+{
+	mod_timer(&tp->timer, jiffies + expires);
+}
+
+/*
+ * The input area occupies the last two lines of the screen.
+ */
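+/*
+ * Prompt string layout (offsets into tp->prompt->string):
+ *   [0] TO_SBA with [1..2] the address of the prompt field,
+ *   [3] the prompt character ('>', 0x6e in EBCDIC),
+ *   [4] TO_SF with [5] the input attribute (TF_INPUT/TF_INPUTN,
+ *   or TF_INMDT when input text is present),
+ *   [6..] the input text, followed by TO_IC and, if the text does
+ *   not fill the area, a TO_RA order that blanks the rest of it.
+ */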
+static void
+tty3270_update_prompt(struct tty3270 *tp, char *input, int count)
+{
+	struct string *line;
+	unsigned int off;
+
+	line = tp->prompt;
+	if (count != 0)
+		line->string[5] = TF_INMDT;
+	else
+		line->string[5] = tp->inattr;
+	if (count > tp->view.cols * 2 - 11)
+		count = tp->view.cols * 2 - 11;
+	memcpy(line->string + 6, input, count);
+	line->string[6 + count] = TO_IC;
+	/* Clear to end of input line. */
+	if (count < tp->view.cols * 2 - 11) {
+		line->string[7 + count] = TO_RA;
+		line->string[10 + count] = 0;
+		off = tp->view.cols * tp->view.rows - 9;
+		raw3270_buffer_address(tp->view.dev, line->string+count+8, off);
+		line->len = 11 + count;
+	} else
+		line->len = 7 + count;
+	tp->update_flags |= TTY_UPDATE_INPUT;
+}
+
+static void
+tty3270_create_prompt(struct tty3270 *tp)
+{
+	static const unsigned char blueprint[] =
+		{ TO_SBA, 0, 0, 0x6e, TO_SF, TF_INPUT,
+		  /* empty input string */
+		  TO_IC, TO_RA, 0, 0, 0 };
+	struct string *line;
+	unsigned int offset;
+
+	line = alloc_string(&tp->freemem,
+			    sizeof(blueprint) + tp->view.cols * 2 - 9);
+	tp->prompt = line;
+	tp->inattr = TF_INPUT;
+	/* Copy blueprint to prompt line */
+	memcpy(line->string, blueprint, sizeof(blueprint));
+	line->len = sizeof(blueprint);
+	/* Set output offsets. */
+	offset = tp->view.cols * (tp->view.rows - 2);
+	raw3270_buffer_address(tp->view.dev, line->string + 1, offset);
+	offset = tp->view.cols * tp->view.rows - 9;
+	raw3270_buffer_address(tp->view.dev, line->string + 8, offset);
+
+	/* Allocate input string for reading. */
+	tp->input = alloc_string(&tp->freemem, tp->view.cols * 2 - 9 + 6);
+}
+
+/*
+ * The status line is the last line of the screen. It shows the string
+ * "Running"/"Holding" in the lower right corner of the screen.
+ */
+static void
+tty3270_update_status(struct tty3270 * tp)
+{
+	char *str;
+
+	str = (tp->nr_up != 0) ? "History" : "Running";
+	memcpy(tp->status->string + 8, str, 7);
+	codepage_convert(tp->view.ascebc, tp->status->string + 8, 7);
+	tp->update_flags |= TTY_UPDATE_STATUS;
+}
+
+static void
+tty3270_create_status(struct tty3270 * tp)
+{
+	static const unsigned char blueprint[] =
+		{ TO_SBA, 0, 0, TO_SF, TF_LOG, TO_SA, TAT_COLOR, TAC_GREEN,
+		  0, 0, 0, 0, 0, 0, 0, TO_SF, TF_LOG, TO_SA, TAT_COLOR,
+		  TAC_RESET };
+	struct string *line;
+	unsigned int offset;
+
+	line = alloc_string(&tp->freemem,sizeof(blueprint));
+	tp->status = line;
+	/* Copy blueprint to status line */
+	memcpy(line->string, blueprint, sizeof(blueprint));
+	/* Set address to start of status string (= last 9 characters). */
+	offset = tp->view.cols * tp->view.rows - 9;
+	raw3270_buffer_address(tp->view.dev, line->string + 1, offset);
+}
+
+/*
+ * Set output offsets to 3270 datastream fragment of a tty string.
+ * (TO_SBA offset at the start and TO_RA offset at the end of the string)
+ */
+static void
+tty3270_update_string(struct tty3270 *tp, struct string *line, int nr)
+{
+	unsigned char *cp;
+
+	raw3270_buffer_address(tp->view.dev, line->string + 1,
+			       tp->view.cols * nr);
+	cp = line->string + line->len - 4;
+	if (*cp == TO_RA)
+		raw3270_buffer_address(tp->view.dev, cp + 1,
+				       tp->view.cols * (nr + 1));
+}
+
+/*
+ * Rebuild update list to print all lines.
+ */
+static void
+tty3270_rebuild_update(struct tty3270 *tp)
+{
+	struct string *s, *n;
+	int line, nr_up;
+
+	/*
+	 * Throw away update list and create a new one,
+	 * containing all lines that will fit on the screen.
+	 */
+	list_for_each_entry_safe(s, n, &tp->update, update)
+		list_del_init(&s->update);
+	line = tp->view.rows - 3;
+	nr_up = tp->nr_up;
+	list_for_each_entry_reverse(s, &tp->lines, list) {
+		if (nr_up > 0) {
+			nr_up--;
+			continue;
+		}
+		tty3270_update_string(tp, s, line);
+		list_add(&s->update, &tp->update);
+		if (--line < 0)
+			break;
+	}
+	tp->update_flags |= TTY_UPDATE_LIST;
+}
+
+/*
+ * Alloc string for size bytes. If there is not enough room in
+ * freemem, free strings until there is room.
+ */
+static struct string *
+tty3270_alloc_string(struct tty3270 *tp, size_t size)
+{
+	struct string *s, *n;
+
+	s = alloc_string(&tp->freemem, size);
+	if (s)
+		return s;
+	list_for_each_entry_safe(s, n, &tp->lines, list) {
+		BUG_ON(tp->nr_lines <= tp->view.rows - 2);
+		list_del(&s->list);
+		if (!list_empty(&s->update))
+			list_del(&s->update);
+		tp->nr_lines--;
+		if (free_string(&tp->freemem, s) >= size)
+			break;
+	}
+	s = alloc_string(&tp->freemem, size);
+	BUG_ON(!s);
+	if (tp->nr_up != 0 &&
+	    tp->nr_up + tp->view.rows - 2 >= tp->nr_lines) {
+		tp->nr_up = tp->nr_lines - tp->view.rows + 2;
+		tty3270_rebuild_update(tp);
+		tty3270_update_status(tp);
+	}
+	return s;
+}
+
+/*
+ * Add an empty line to the list.
+ */
+static void
+tty3270_blank_line(struct tty3270 *tp)
+{
+	static const unsigned char blueprint[] =
+		{ TO_SBA, 0, 0, TO_SA, TAT_EXTHI, TAX_RESET,
+		  TO_SA, TAT_COLOR, TAC_RESET, TO_RA, 0, 0, 0 };
+	struct string *s;
+
+	s = tty3270_alloc_string(tp, sizeof(blueprint));
+	memcpy(s->string, blueprint, sizeof(blueprint));
+	s->len = sizeof(blueprint);
+	list_add_tail(&s->list, &tp->lines);
+	tp->nr_lines++;
+	if (tp->nr_up != 0)
+		tp->nr_up++;
+}
+
+/*
+ * Create a blank screen and remove all lines from the history.
+ */
+static void
+tty3270_blank_screen(struct tty3270 *tp)
+{
+	struct string *s, *n;
+	int i;
+
+	for (i = 0; i < tp->view.rows - 2; i++)
+		tp->screen[i].len = 0;
+	tp->nr_up = 0;
+	list_for_each_entry_safe(s, n, &tp->lines, list) {
+		list_del(&s->list);
+		if (!list_empty(&s->update))
+			list_del(&s->update);
+		tp->nr_lines--;
+		free_string(&tp->freemem, s);
+	}
+}
+
+/*
+ * Write request completion callback.
+ */
+static void
+tty3270_write_callback(struct raw3270_request *rq, void *data)
+{
+	struct tty3270 *tp = container_of(rq->view, struct tty3270, view);
+
+	if (rq->rc != 0) {
+		/* Write wasn't successful. Refresh all. */
+		tp->update_flags = TTY_UPDATE_ALL;
+		tty3270_set_timer(tp, 1);
+	}
+	raw3270_request_reset(rq);
+	xchg(&tp->write, rq);
+}
+
+/*
+ * Update 3270 display.
+ */
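+/*
+ * A single preallocated write request is reused for all updates: it is
+ * taken from tp->write with xchg() (leaving NULL while it is in flight)
+ * and handed back by the completion callback. Whatever does not fit
+ * into the request stays flagged in update_flags and is retried from
+ * the timer.
+ */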
+static void
+tty3270_update(struct timer_list *t)
+{
+	struct tty3270 *tp = from_timer(tp, t, timer);
+	static char invalid_sba[2] = { 0xff, 0xff };
+	struct raw3270_request *wrq;
+	unsigned long updated;
+	struct string *s, *n;
+	char *sba, *str;
+	int rc, len;
+
+	wrq = xchg(&tp->write, 0);
+	if (!wrq) {
+		tty3270_set_timer(tp, 1);
+		return;
+	}
+
+	spin_lock(&tp->view.lock);
+	updated = 0;
+	if (tp->update_flags & TTY_UPDATE_ALL) {
+		tty3270_rebuild_update(tp);
+		tty3270_update_status(tp);
+		tp->update_flags = TTY_UPDATE_ERASE | TTY_UPDATE_LIST |
+			TTY_UPDATE_INPUT | TTY_UPDATE_STATUS;
+	}
+	if (tp->update_flags & TTY_UPDATE_ERASE) {
+		/* Use erase write alternate to erase display. */
+		raw3270_request_set_cmd(wrq, TC_EWRITEA);
+		updated |= TTY_UPDATE_ERASE;
+	} else
+		raw3270_request_set_cmd(wrq, TC_WRITE);
+
+	raw3270_request_add_data(wrq, &tp->wcc, 1);
+	tp->wcc = TW_NONE;
+
+	/*
+	 * Update status line.
+	 */
+	if (tp->update_flags & TTY_UPDATE_STATUS)
+		if (raw3270_request_add_data(wrq, tp->status->string,
+					     tp->status->len) == 0)
+			updated |= TTY_UPDATE_STATUS;
+
+	/*
+	 * Write input line.
+	 */
+	if (tp->update_flags & TTY_UPDATE_INPUT)
+		if (raw3270_request_add_data(wrq, tp->prompt->string,
+					     tp->prompt->len) == 0)
+			updated |= TTY_UPDATE_INPUT;
+
+	sba = invalid_sba;
+
+	if (tp->update_flags & TTY_UPDATE_LIST) {
+		/* Write strings in the update list to the screen. */
+		list_for_each_entry_safe(s, n, &tp->update, update) {
+			str = s->string;
+			len = s->len;
+			/*
+			 * Skip TO_SBA at the start of the string if the
+			 * last output position matches the start address
+			 * of this line.
+			 */
+			if (s->string[1] == sba[0] && s->string[2] == sba[1])
+				str += 3, len -= 3;
+			if (raw3270_request_add_data(wrq, str, len) != 0)
+				break;
+			list_del_init(&s->update);
+			if (s->string[s->len - 4] == TO_RA)
+				sba = s->string + s->len - 3;
+			else
+				sba = invalid_sba;
+		}
+		if (list_empty(&tp->update))
+			updated |= TTY_UPDATE_LIST;
+	}
+	wrq->callback = tty3270_write_callback;
+	rc = raw3270_start(&tp->view, wrq);
+	if (rc == 0) {
+		tp->update_flags &= ~updated;
+		if (tp->update_flags)
+			tty3270_set_timer(tp, 1);
+	} else {
+		raw3270_request_reset(wrq);
+		xchg(&tp->write, wrq);
+	}
+	spin_unlock(&tp->view.lock);
+}
+
+/*
+ * Command recalling.
+ */
+static void
+tty3270_rcl_add(struct tty3270 *tp, char *input, int len)
+{
+	struct string *s;
+
+	tp->rcl_walk = NULL;
+	if (len <= 0)
+		return;
+	if (tp->rcl_nr >= tp->rcl_max) {
+		s = list_entry(tp->rcl_lines.next, struct string, list);
+		list_del(&s->list);
+		free_string(&tp->freemem, s);
+		tp->rcl_nr--;
+	}
+	s = tty3270_alloc_string(tp, len);
+	memcpy(s->string, input, len);
+	list_add_tail(&s->list, &tp->rcl_lines);
+	tp->rcl_nr++;
+}
+
+static void
+tty3270_rcl_backward(struct kbd_data *kbd)
+{
+	struct tty3270 *tp = container_of(kbd->port, struct tty3270, port);
+	struct string *s;
+
+	spin_lock_bh(&tp->view.lock);
+	if (tp->inattr == TF_INPUT) {
+		if (tp->rcl_walk && tp->rcl_walk->prev != &tp->rcl_lines)
+			tp->rcl_walk = tp->rcl_walk->prev;
+		else if (!list_empty(&tp->rcl_lines))
+			tp->rcl_walk = tp->rcl_lines.prev;
+		if (tp->rcl_walk) {
+			s = list_entry(tp->rcl_walk, struct string, list);
+			tty3270_update_prompt(tp, s->string, s->len);
+		} else
+			tty3270_update_prompt(tp, NULL, 0);
+		tty3270_set_timer(tp, 1);
+	}
+	spin_unlock_bh(&tp->view.lock);
+}
+
+/*
+ * Deactivate tty view.
+ */
+static void
+tty3270_exit_tty(struct kbd_data *kbd)
+{
+	struct tty3270 *tp = container_of(kbd->port, struct tty3270, port);
+
+	raw3270_deactivate_view(&tp->view);
+}
+
+/*
+ * Scroll forward in history.
+ */
+static void
+tty3270_scroll_forward(struct kbd_data *kbd)
+{
+	struct tty3270 *tp = container_of(kbd->port, struct tty3270, port);
+	int nr_up;
+
+	spin_lock_bh(&tp->view.lock);
+	nr_up = tp->nr_up - tp->view.rows + 2;
+	if (nr_up < 0)
+		nr_up = 0;
+	if (nr_up != tp->nr_up) {
+		tp->nr_up = nr_up;
+		tty3270_rebuild_update(tp);
+		tty3270_update_status(tp);
+		tty3270_set_timer(tp, 1);
+	}
+	spin_unlock_bh(&tp->view.lock);
+}
+
+/*
+ * Scroll backward in history.
+ */
+static void
+tty3270_scroll_backward(struct kbd_data *kbd)
+{
+	struct tty3270 *tp = container_of(kbd->port, struct tty3270, port);
+	int nr_up;
+
+	spin_lock_bh(&tp->view.lock);
+	nr_up = tp->nr_up + tp->view.rows - 2;
+	if (nr_up + tp->view.rows - 2 > tp->nr_lines)
+		nr_up = tp->nr_lines - tp->view.rows + 2;
+	if (nr_up != tp->nr_up) {
+		tp->nr_up = nr_up;
+		tty3270_rebuild_update(tp);
+		tty3270_update_status(tp);
+		tty3270_set_timer(tp, 1);
+	}
+	spin_unlock_bh(&tp->view.lock);
+}
+
+/*
+ * Pass input line to tty.
+ */
+static void
+tty3270_read_tasklet(struct raw3270_request *rrq)
+{
+	static char kreset_data = TW_KR;
+	struct tty3270 *tp = container_of(rrq->view, struct tty3270, view);
+	char *input;
+	int len;
+
+	spin_lock_bh(&tp->view.lock);
+	/*
+	 * Two AID keys are special: For 0x7d (enter) the input line
+	 * has to be emitted to the tty and for 0x6d (clear) the screen
+	 * needs to be redrawn.
+	 */
+	input = NULL;
+	len = 0;
+	if (tp->input->string[0] == 0x7d) {
+		/* Enter: write input to tty. */
+		input = tp->input->string + 6;
+		len = tp->input->len - 6 - rrq->rescnt;
+		if (tp->inattr != TF_INPUTN)
+			tty3270_rcl_add(tp, input, len);
+		if (tp->nr_up > 0) {
+			tp->nr_up = 0;
+			tty3270_rebuild_update(tp);
+			tty3270_update_status(tp);
+		}
+		/* Clear input area. */
+		tty3270_update_prompt(tp, NULL, 0);
+		tty3270_set_timer(tp, 1);
+	} else if (tp->input->string[0] == 0x6d) {
+		/* Display has been cleared. Redraw. */
+		tp->update_flags = TTY_UPDATE_ALL;
+		tty3270_set_timer(tp, 1);
+	}
+	spin_unlock_bh(&tp->view.lock);
+
+	/* Start keyboard reset command. */
+	raw3270_request_reset(tp->kreset);
+	raw3270_request_set_cmd(tp->kreset, TC_WRITE);
+	raw3270_request_add_data(tp->kreset, &kreset_data, 1);
+	raw3270_start(&tp->view, tp->kreset);
+
+	while (len-- > 0)
+		kbd_keycode(tp->kbd, *input++);
+	/* Emit keycode for AID byte. */
+	kbd_keycode(tp->kbd, 256 + tp->input->string[0]);
+
+	raw3270_request_reset(rrq);
+	xchg(&tp->read, rrq);
+	raw3270_put_view(&tp->view);
+}
+
+/*
+ * Read request completion callback.
+ */
+static void
+tty3270_read_callback(struct raw3270_request *rq, void *data)
+{
+	struct tty3270 *tp = container_of(rq->view, struct tty3270, view);
+	raw3270_get_view(rq->view);
+	/* Schedule tasklet to pass input to tty. */
+	tasklet_schedule(&tp->readlet);
+}
+
+/*
+ * Issue a read request. Call with device lock.
+ */
+static void
+tty3270_issue_read(struct tty3270 *tp, int lock)
+{
+	struct raw3270_request *rrq;
+	int rc;
+
+	rrq = xchg(&tp->read, 0);
+	if (!rrq)
+		/* Read already scheduled. */
+		return;
+	rrq->callback = tty3270_read_callback;
+	rrq->callback_data = tp;
+	raw3270_request_set_cmd(rrq, TC_READMOD);
+	raw3270_request_set_data(rrq, tp->input->string, tp->input->len);
+	/* Issue the read modified request. */
+	if (lock)
+		rc = raw3270_start(&tp->view, rrq);
+	else
+		rc = raw3270_start_irq(&tp->view, rrq);
+	if (rc) {
+		raw3270_request_reset(rrq);
+		xchg(&tp->read, rrq);
+	}
+}
+
+/*
+ * Hang up the tty
+ */
+static void
+tty3270_hangup_tasklet(struct tty3270 *tp)
+{
+	tty_port_tty_hangup(&tp->port, true);
+	raw3270_put_view(&tp->view);
+}
+
+/*
+ * Switch to the tty view.
+ */
+static int
+tty3270_activate(struct raw3270_view *view)
+{
+	struct tty3270 *tp = container_of(view, struct tty3270, view);
+
+	tp->update_flags = TTY_UPDATE_ALL;
+	tty3270_set_timer(tp, 1);
+	return 0;
+}
+
+static void
+tty3270_deactivate(struct raw3270_view *view)
+{
+	struct tty3270 *tp = container_of(view, struct tty3270, view);
+
+	del_timer(&tp->timer);
+}
+
+static void
+tty3270_irq(struct tty3270 *tp, struct raw3270_request *rq, struct irb *irb)
+{
+	/* Handle ATTN. Schedule tasklet to read aid. */
+	if (irb->scsw.cmd.dstat & DEV_STAT_ATTENTION) {
+		if (!tp->throttle)
+			tty3270_issue_read(tp, 0);
+		else
+			tp->attn = 1;
+	}
+
+	if (rq) {
+		if (irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK) {
+			rq->rc = -EIO;
+			raw3270_get_view(&tp->view);
+			tasklet_schedule(&tp->hanglet);
+		} else {
+			/* Normal end. Copy residual count. */
+			rq->rescnt = irb->scsw.cmd.count;
+		}
+	} else if (irb->scsw.cmd.dstat & DEV_STAT_DEV_END) {
+		/* Interrupt without an outstanding request -> update all */
+		tp->update_flags = TTY_UPDATE_ALL;
+		tty3270_set_timer(tp, 1);
+	}
+}
+
+/*
+ * Allocate tty3270 structure.
+ */
+static struct tty3270 *
+tty3270_alloc_view(void)
+{
+	struct tty3270 *tp;
+	int pages;
+
+	tp = kzalloc(sizeof(struct tty3270), GFP_KERNEL);
+	if (!tp)
+		goto out_err;
+	tp->freemem_pages =
+		kmalloc_array(TTY3270_STRING_PAGES, sizeof(void *),
+			      GFP_KERNEL);
+	if (!tp->freemem_pages)
+		goto out_tp;
+	INIT_LIST_HEAD(&tp->freemem);
+	INIT_LIST_HEAD(&tp->lines);
+	INIT_LIST_HEAD(&tp->update);
+	INIT_LIST_HEAD(&tp->rcl_lines);
+	tp->rcl_max = 20;
+
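+	/*
+	 * The read request later points its ccw directly at one of these
+	 * strings (raw3270_request_set_data), so the pages are allocated
+	 * with GFP_DMA to keep them 31-bit addressable.
+	 */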
+	for (pages = 0; pages < TTY3270_STRING_PAGES; pages++) {
+		tp->freemem_pages[pages] = (void *)
+			__get_free_pages(GFP_KERNEL|GFP_DMA, 0);
+		if (!tp->freemem_pages[pages])
+			goto out_pages;
+		add_string_memory(&tp->freemem,
+				  tp->freemem_pages[pages], PAGE_SIZE);
+	}
+	tp->write = raw3270_request_alloc(TTY3270_OUTPUT_BUFFER_SIZE);
+	if (IS_ERR(tp->write))
+		goto out_pages;
+	tp->read = raw3270_request_alloc(0);
+	if (IS_ERR(tp->read))
+		goto out_write;
+	tp->kreset = raw3270_request_alloc(1);
+	if (IS_ERR(tp->kreset))
+		goto out_read;
+	tp->kbd = kbd_alloc();
+	if (!tp->kbd)
+		goto out_reset;
+
+	tty_port_init(&tp->port);
+	timer_setup(&tp->timer, tty3270_update, 0);
+	tasklet_init(&tp->readlet,
+		     (void (*)(unsigned long)) tty3270_read_tasklet,
+		     (unsigned long) tp->read);
+	tasklet_init(&tp->hanglet,
+		     (void (*)(unsigned long)) tty3270_hangup_tasklet,
+		     (unsigned long) tp);
+	INIT_WORK(&tp->resize_work, tty3270_resize_work);
+
+	return tp;
+
+out_reset:
+	raw3270_request_free(tp->kreset);
+out_read:
+	raw3270_request_free(tp->read);
+out_write:
+	raw3270_request_free(tp->write);
+out_pages:
+	while (pages--)
+		free_pages((unsigned long) tp->freemem_pages[pages], 0);
+	kfree(tp->freemem_pages);
+	tty_port_destroy(&tp->port);
+out_tp:
+	kfree(tp);
+out_err:
+	return ERR_PTR(-ENOMEM);
+}
+
+/*
+ * Free tty3270 structure.
+ */
+static void
+tty3270_free_view(struct tty3270 *tp)
+{
+	int pages;
+
+	kbd_free(tp->kbd);
+	raw3270_request_free(tp->kreset);
+	raw3270_request_free(tp->read);
+	raw3270_request_free(tp->write);
+	for (pages = 0; pages < TTY3270_STRING_PAGES; pages++)
+		free_pages((unsigned long) tp->freemem_pages[pages], 0);
+	kfree(tp->freemem_pages);
+	tty_port_destroy(&tp->port);
+	kfree(tp);
+}
+
+/*
+ * Allocate tty3270 screen.
+ */
+static struct tty3270_line *
+tty3270_alloc_screen(unsigned int rows, unsigned int cols)
+{
+	struct tty3270_line *screen;
+	unsigned long size;
+	int lines;
+
+	size = sizeof(struct tty3270_line) * (rows - 2);
+	screen = kzalloc(size, GFP_KERNEL);
+	if (!screen)
+		goto out_err;
+	for (lines = 0; lines < rows - 2; lines++) {
+		size = sizeof(struct tty3270_cell) * cols;
+		screen[lines].cells = kzalloc(size, GFP_KERNEL);
+		if (!screen[lines].cells)
+			goto out_screen;
+	}
+	return screen;
+out_screen:
+	while (lines--)
+		kfree(screen[lines].cells);
+	kfree(screen);
+out_err:
+	return ERR_PTR(-ENOMEM);
+}
+
+/*
+ * Free tty3270 screen.
+ */
+static void
+tty3270_free_screen(struct tty3270_line *screen, unsigned int rows)
+{
+	int lines;
+
+	for (lines = 0; lines < rows - 2; lines++)
+		kfree(screen[lines].cells);
+	kfree(screen);
+}
+
+/*
+ * Resize tty3270 screen
+ */
+static void tty3270_resize_work(struct work_struct *work)
+{
+	struct tty3270 *tp = container_of(work, struct tty3270, resize_work);
+	struct tty3270_line *screen, *oscreen;
+	struct tty_struct *tty;
+	unsigned int orows;
+	struct winsize ws;
+
+	screen = tty3270_alloc_screen(tp->n_rows, tp->n_cols);
+	if (IS_ERR(screen))
+		return;
+	/* Switch to new output size */
+	spin_lock_bh(&tp->view.lock);
+	tty3270_blank_screen(tp);
+	oscreen = tp->screen;
+	orows = tp->view.rows;
+	tp->view.model = tp->n_model;
+	tp->view.rows = tp->n_rows;
+	tp->view.cols = tp->n_cols;
+	tp->screen = screen;
+	free_string(&tp->freemem, tp->prompt);
+	free_string(&tp->freemem, tp->status);
+	tty3270_create_prompt(tp);
+	tty3270_create_status(tp);
+	while (tp->nr_lines < tp->view.rows - 2)
+		tty3270_blank_line(tp);
+	tp->update_flags = TTY_UPDATE_ALL;
+	spin_unlock_bh(&tp->view.lock);
+	tty3270_free_screen(oscreen, orows);
+	tty3270_set_timer(tp, 1);
+	/* Inform the tty layer about the new size */
+	tty = tty_port_tty_get(&tp->port);
+	if (!tty)
+		return;
+	ws.ws_row = tp->view.rows - 2;
+	ws.ws_col = tp->view.cols;
+	tty_do_resize(tty, &ws);
+	tty_kref_put(tty);
+}
+
+static void
+tty3270_resize(struct raw3270_view *view, int model, int rows, int cols)
+{
+	struct tty3270 *tp = container_of(view, struct tty3270, view);
+
+	if (tp->n_model == model && tp->n_rows == rows && tp->n_cols == cols)
+		return;
+	tp->n_model = model;
+	tp->n_rows = rows;
+	tp->n_cols = cols;
+	schedule_work(&tp->resize_work);
+}
+
+/*
+ * Unlink tty3270 data structure from tty.
+ */
+static void
+tty3270_release(struct raw3270_view *view)
+{
+	struct tty3270 *tp = container_of(view, struct tty3270, view);
+	struct tty_struct *tty = tty_port_tty_get(&tp->port);
+
+	if (tty) {
+		tty->driver_data = NULL;
+		tty_port_tty_set(&tp->port, NULL);
+		tty_hangup(tty);
+		raw3270_put_view(&tp->view);
+		tty_kref_put(tty);
+	}
+}
+
+/*
+ * Free tty3270 data structure
+ */
+static void
+tty3270_free(struct raw3270_view *view)
+{
+	struct tty3270 *tp = container_of(view, struct tty3270, view);
+
+	del_timer_sync(&tp->timer);
+	tty3270_free_screen(tp->screen, tp->view.rows);
+	tty3270_free_view(tp);
+}
+
+/*
+ * Delayed freeing of tty3270 views.
+ */
+static void
+tty3270_del_views(void)
+{
+	int i;
+
+	for (i = RAW3270_FIRSTMINOR; i <= tty3270_max_index; i++) {
+		struct raw3270_view *view = raw3270_find_view(&tty3270_fn, i);
+		if (!IS_ERR(view))
+			raw3270_del_view(view);
+	}
+}
+
+static struct raw3270_fn tty3270_fn = {
+	.activate = tty3270_activate,
+	.deactivate = tty3270_deactivate,
+	.intv = (void *) tty3270_irq,
+	.release = tty3270_release,
+	.free = tty3270_free,
+	.resize = tty3270_resize
+};
+
+/*
+ * This routine is called the first time a 3270 tty is opened.
+ */
+static int tty3270_install(struct tty_driver *driver, struct tty_struct *tty)
+{
+	struct raw3270_view *view;
+	struct tty3270 *tp;
+	int i, rc;
+
+	/* Check if the tty3270 is already there. */
+	view = raw3270_find_view(&tty3270_fn, tty->index + RAW3270_FIRSTMINOR);
+	if (!IS_ERR(view)) {
+		tp = container_of(view, struct tty3270, view);
+		tty->driver_data = tp;
+		tty->winsize.ws_row = tp->view.rows - 2;
+		tty->winsize.ws_col = tp->view.cols;
+		tp->port.low_latency = 0;
+		tp->inattr = TF_INPUT;
+		goto port_install;
+	}
+	if (tty3270_max_index < tty->index + 1)
+		tty3270_max_index = tty->index + 1;
+
+	/* Allocate tty3270 structure on first open. */
+	tp = tty3270_alloc_view();
+	if (IS_ERR(tp))
+		return PTR_ERR(tp);
+
+	rc = raw3270_add_view(&tp->view, &tty3270_fn,
+			      tty->index + RAW3270_FIRSTMINOR);
+	if (rc) {
+		tty3270_free_view(tp);
+		return rc;
+	}
+
+	tp->screen = tty3270_alloc_screen(tp->view.rows, tp->view.cols);
+	if (IS_ERR(tp->screen)) {
+		rc = PTR_ERR(tp->screen);
+		raw3270_put_view(&tp->view);
+		raw3270_del_view(&tp->view);
+		tty3270_free_view(tp);
+		return rc;
+	}
+
+	tp->port.low_latency = 0;
+	tty->winsize.ws_row = tp->view.rows - 2;
+	tty->winsize.ws_col = tp->view.cols;
+
+	tty3270_create_prompt(tp);
+	tty3270_create_status(tp);
+	tty3270_update_status(tp);
+
+	/* Create blank line for every line in the tty output area. */
+	for (i = 0; i < tp->view.rows - 2; i++)
+		tty3270_blank_line(tp);
+
+	tp->kbd->port = &tp->port;
+	tp->kbd->fn_handler[KVAL(K_INCRCONSOLE)] = tty3270_exit_tty;
+	tp->kbd->fn_handler[KVAL(K_SCROLLBACK)] = tty3270_scroll_backward;
+	tp->kbd->fn_handler[KVAL(K_SCROLLFORW)] = tty3270_scroll_forward;
+	tp->kbd->fn_handler[KVAL(K_CONS)] = tty3270_rcl_backward;
+	kbd_ascebc(tp->kbd, tp->view.ascebc);
+
+	raw3270_activate_view(&tp->view);
+
+port_install:
+	rc = tty_port_install(&tp->port, driver, tty);
+	if (rc) {
+		raw3270_put_view(&tp->view);
+		return rc;
+	}
+
+	tty->driver_data = tp;
+
+	return 0;
+}
+
+/*
+ * This routine is called whenever a 3270 tty is opened.
+ */
+static int
+tty3270_open(struct tty_struct *tty, struct file *filp)
+{
+	struct tty3270 *tp = tty->driver_data;
+	struct tty_port *port = &tp->port;
+
+	port->count++;
+	tty_port_tty_set(port, tty);
+	return 0;
+}
+
+/*
+ * This routine is called when the 3270 tty is closed. We wait
+ * for the remaining request to be completed. Then we clean up.
+ */
+static void
+tty3270_close(struct tty_struct *tty, struct file * filp)
+{
+	struct tty3270 *tp = tty->driver_data;
+
+	if (tty->count > 1)
+		return;
+	if (tp)
+		tty_port_tty_set(&tp->port, NULL);
+}
+
+static void tty3270_cleanup(struct tty_struct *tty)
+{
+	struct tty3270 *tp = tty->driver_data;
+
+	if (tp) {
+		tty->driver_data = NULL;
+		raw3270_put_view(&tp->view);
+	}
+}
+
+/*
+ * We always have room.
+ */
+static int
+tty3270_write_room(struct tty_struct *tty)
+{
+	return INT_MAX;
+}
+
+/*
+ * Insert character into the screen at the current position with the
+ * current color and highlight. This function does NOT do cursor movement.
+ */
+static void tty3270_put_character(struct tty3270 *tp, char ch)
+{
+	struct tty3270_line *line;
+	struct tty3270_cell *cell;
+
+	line = tp->screen + tp->cy;
+	if (line->len <= tp->cx) {
+		while (line->len < tp->cx) {
+			cell = line->cells + line->len;
+			cell->character = tp->view.ascebc[' '];
+			cell->highlight = tp->highlight;
+			cell->f_color = tp->f_color;
+			line->len++;
+		}
+		line->len++;
+	}
+	cell = line->cells + tp->cx;
+	cell->character = tp->view.ascebc[(unsigned int) ch];
+	cell->highlight = tp->highlight;
+	cell->f_color = tp->f_color;
+}
+
+/*
+ * Convert a tty3270_line to a 3270 data fragment usable for output.
+ */
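+/*
+ * Resulting fragment, e.g. for a line with one highlight change:
+ *   TO_SBA <addr> <cells> TO_SA TAT_EXTHI <val> <cells>
+ *   TO_SA TAT_EXTHI TAX_RESET [TO_RA <addr> 0]
+ * The trailing TO_RA blanks the remainder of the screen line when the
+ * converted line is shorter than the screen width.
+ */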
+static void
+tty3270_convert_line(struct tty3270 *tp, int line_nr)
+{
+	struct tty3270_line *line;
+	struct tty3270_cell *cell;
+	struct string *s, *n;
+	unsigned char highlight;
+	unsigned char f_color;
+	char *cp;
+	int flen, i;
+
+	/* Determine how long the fragment will be. */
+	flen = 3;		/* Prefix (TO_SBA). */
+	line = tp->screen + line_nr;
+	flen += line->len;
+	highlight = TAX_RESET;
+	f_color = TAC_RESET;
+	for (i = 0, cell = line->cells; i < line->len; i++, cell++) {
+		if (cell->highlight != highlight) {
+			flen += 3;	/* TO_SA to switch highlight. */
+			highlight = cell->highlight;
+		}
+		if (cell->f_color != f_color) {
+			flen += 3;	/* TO_SA to switch color. */
+			f_color = cell->f_color;
+		}
+	}
+	if (highlight != TAX_RESET)
+		flen += 3;	/* TO_SA to reset highlight. */
+	if (f_color != TAC_RESET)
+		flen += 3;	/* TO_SA to reset color. */
+	if (line->len < tp->view.cols)
+		flen += 4;	/* Postfix (TO_RA). */
+
+	/* Find the line in the list. */
+	i = tp->view.rows - 2 - line_nr;
+	list_for_each_entry_reverse(s, &tp->lines, list)
+		if (--i <= 0)
+			break;
+	/*
+	 * Check if the line needs to get reallocated.
+	 */
+	if (s->len != flen) {
+		/* Reallocate string. */
+		n = tty3270_alloc_string(tp, flen);
+		list_add(&n->list, &s->list);
+		list_del_init(&s->list);
+		if (!list_empty(&s->update))
+			list_del_init(&s->update);
+		free_string(&tp->freemem, s);
+		s = n;
+	}
+
+	/* Write 3270 data fragment. */
+	cp = s->string;
+	*cp++ = TO_SBA;
+	*cp++ = 0;
+	*cp++ = 0;
+
+	highlight = TAX_RESET;
+	f_color = TAC_RESET;
+	for (i = 0, cell = line->cells; i < line->len; i++, cell++) {
+		if (cell->highlight != highlight) {
+			*cp++ = TO_SA;
+			*cp++ = TAT_EXTHI;
+			*cp++ = cell->highlight;
+			highlight = cell->highlight;
+		}
+		if (cell->f_color != f_color) {
+			*cp++ = TO_SA;
+			*cp++ = TAT_COLOR;
+			*cp++ = cell->f_color;
+			f_color = cell->f_color;
+		}
+		*cp++ = cell->character;
+	}
+	if (highlight != TAX_RESET) {
+		*cp++ = TO_SA;
+		*cp++ = TAT_EXTHI;
+		*cp++ = TAX_RESET;
+	}
+	if (f_color != TAC_RESET) {
+		*cp++ = TO_SA;
+		*cp++ = TAT_COLOR;
+		*cp++ = TAC_RESET;
+	}
+	if (line->len < tp->view.cols) {
+		*cp++ = TO_RA;
+		*cp++ = 0;
+		*cp++ = 0;
+		*cp++ = 0;
+	}
+
+	if (tp->nr_up + line_nr < tp->view.rows - 2) {
+		/* Line is currently visible on screen. */
+		tty3270_update_string(tp, s, line_nr);
+		/* Add line to update list. */
+		if (list_empty(&s->update)) {
+			list_add_tail(&s->update, &tp->update);
+			tp->update_flags |= TTY_UPDATE_LIST;
+		}
+	}
+}
+
+/*
+ * Do carriage return.
+ */
+static void
+tty3270_cr(struct tty3270 *tp)
+{
+	tp->cx = 0;
+}
+
+/*
+ * Do line feed.
+ */
+static void
+tty3270_lf(struct tty3270 *tp)
+{
+	struct tty3270_line temp;
+	int i;
+
+	tty3270_convert_line(tp, tp->cy);
+	if (tp->cy < tp->view.rows - 3) {
+		tp->cy++;
+		return;
+	}
+	/* Last line just filled up. Add new, blank line. */
+	tty3270_blank_line(tp);
+	temp = tp->screen[0];
+	temp.len = 0;
+	for (i = 0; i < tp->view.rows - 3; i++)
+		tp->screen[i] = tp->screen[i+1];
+	tp->screen[tp->view.rows - 3] = temp;
+	tty3270_rebuild_update(tp);
+}
+
+static void
+tty3270_ri(struct tty3270 *tp)
+{
+	if (tp->cy > 0) {
+	    tty3270_convert_line(tp, tp->cy);
+	    tp->cy--;
+	}
+}
+
+/*
+ * Insert characters at current position.
+ */
+static void
+tty3270_insert_characters(struct tty3270 *tp, int n)
+{
+	struct tty3270_line *line;
+	int k;
+
+	line = tp->screen + tp->cy;
+	while (line->len < tp->cx) {
+		line->cells[line->len].character = tp->view.ascebc[' '];
+		line->cells[line->len].highlight = TAX_RESET;
+		line->cells[line->len].f_color = TAC_RESET;
+		line->len++;
+	}
+	if (n > tp->view.cols - tp->cx)
+		n = tp->view.cols - tp->cx;
+	k = min_t(int, line->len - tp->cx, tp->view.cols - tp->cx - n);
+	while (k--)
+		line->cells[tp->cx + n + k] = line->cells[tp->cx + k];
+	line->len += n;
+	if (line->len > tp->view.cols)
+		line->len = tp->view.cols;
+	while (n-- > 0) {
+		line->cells[tp->cx + n].character = tp->view.ascebc[' '];
+		line->cells[tp->cx + n].highlight = tp->highlight;
+		line->cells[tp->cx + n].f_color = tp->f_color;
+	}
+}
+
+/*
+ * Delete characters at current position.
+ */
+static void
+tty3270_delete_characters(struct tty3270 *tp, int n)
+{
+	struct tty3270_line *line;
+	int i;
+
+	line = tp->screen + tp->cy;
+	if (line->len <= tp->cx)
+		return;
+	if (line->len - tp->cx <= n) {
+		line->len = tp->cx;
+		return;
+	}
+	for (i = tp->cx; i + n < line->len; i++)
+		line->cells[i] = line->cells[i + n];
+	line->len -= n;
+}
+
+/*
+ * Erase characters at current position.
+ */
+static void
+tty3270_erase_characters(struct tty3270 *tp, int n)
+{
+	struct tty3270_line *line;
+	struct tty3270_cell *cell;
+
+	line = tp->screen + tp->cy;
+	while (line->len > tp->cx && n-- > 0) {
+		cell = line->cells + tp->cx++;
+		cell->character = ' ';
+		cell->highlight = TAX_RESET;
+		cell->f_color = TAC_RESET;
+	}
+	tp->cx += n;
+	tp->cx = min_t(int, tp->cx, tp->view.cols - 1);
+}
+
+/*
+ * Erase line, 3 different cases:
+ *  Esc [ 0 K	Erase from current position to end of line inclusive
+ *  Esc [ 1 K	Erase from beginning of line to current position inclusive
+ *  Esc [ 2 K	Erase entire line (without moving cursor)
+ */
+static void
+tty3270_erase_line(struct tty3270 *tp, int mode)
+{
+	struct tty3270_line *line;
+	struct tty3270_cell *cell;
+	int i;
+
+	line = tp->screen + tp->cy;
+	if (mode == 0)
+		line->len = tp->cx;
+	else if (mode == 1) {
+		for (i = 0; i < tp->cx; i++) {
+			cell = line->cells + i;
+			cell->character = ' ';
+			cell->highlight = TAX_RESET;
+			cell->f_color = TAC_RESET;
+		}
+		if (line->len <= tp->cx)
+			line->len = tp->cx + 1;
+	} else if (mode == 2)
+		line->len = 0;
+	tty3270_convert_line(tp, tp->cy);
+}
+
+/*
+ * Erase display, 3 different cases:
+ *  Esc [ 0 J	Erase from current position to bottom of screen inclusive
+ *  Esc [ 1 J	Erase from top of screen to current position inclusive
+ *  Esc [ 2 J	Erase entire screen (without moving the cursor)
+ */
+static void
+tty3270_erase_display(struct tty3270 *tp, int mode)
+{
+	int i;
+
+	if (mode == 0) {
+		tty3270_erase_line(tp, 0);
+		for (i = tp->cy + 1; i < tp->view.rows - 2; i++) {
+			tp->screen[i].len = 0;
+			tty3270_convert_line(tp, i);
+		}
+	} else if (mode == 1) {
+		for (i = 0; i < tp->cy; i++) {
+			tp->screen[i].len = 0;
+			tty3270_convert_line(tp, i);
+		}
+		tty3270_erase_line(tp, 1);
+	} else if (mode == 2) {
+		for (i = 0; i < tp->view.rows - 2; i++) {
+			tp->screen[i].len = 0;
+			tty3270_convert_line(tp, i);
+		}
+	}
+	tty3270_rebuild_update(tp);
+}
+
+/*
+ * Set attributes found in an escape sequence.
+ *  Esc [ <attr> ; <attr> ; ... m
+ */
+static void
+tty3270_set_attributes(struct tty3270 *tp)
+{
+	static unsigned char f_colors[] = {
+		TAC_DEFAULT, TAC_RED, TAC_GREEN, TAC_YELLOW, TAC_BLUE,
+		TAC_PINK, TAC_TURQ, TAC_WHITE, 0, TAC_DEFAULT
+	};
+	int i, attr;
+
+	for (i = 0; i <= tp->esc_npar; i++) {
+		attr = tp->esc_par[i];
+		switch (attr) {
+		case 0:		/* Reset */
+			tp->highlight = TAX_RESET;
+			tp->f_color = TAC_RESET;
+			break;
+		/* Highlight. */
+		case 4:		/* Start underlining. */
+			tp->highlight = TAX_UNDER;
+			break;
+		case 5:		/* Start blink. */
+			tp->highlight = TAX_BLINK;
+			break;
+		case 7:		/* Start reverse. */
+			tp->highlight = TAX_REVER;
+			break;
+		case 24:	/* End underlining */
+			if (tp->highlight == TAX_UNDER)
+				tp->highlight = TAX_RESET;
+			break;
+		case 25:	/* End blink. */
+			if (tp->highlight == TAX_BLINK)
+				tp->highlight = TAX_RESET;
+			break;
+		case 27:	/* End reverse. */
+			if (tp->highlight == TAX_REVER)
+				tp->highlight = TAX_RESET;
+			break;
+		/* Foreground color. */
+		case 30:	/* Black */
+		case 31:	/* Red */
+		case 32:	/* Green */
+		case 33:	/* Yellow */
+		case 34:	/* Blue */
+		case 35:	/* Magenta */
+		case 36:	/* Cyan */
+		case 37:	/* White */
+		case 39:	/* Default */
+			tp->f_color = f_colors[attr - 30];
+			break;
+		}
+	}
+}
+
+static inline int
+tty3270_getpar(struct tty3270 *tp, int ix)
+{
+	return (tp->esc_par[ix] > 0) ? tp->esc_par[ix] : 1;
+}
+
+static void
+tty3270_goto_xy(struct tty3270 *tp, int cx, int cy)
+{
+	int max_cx = max(0, cx);
+	int max_cy = max(0, cy);
+
+	tp->cx = min_t(int, tp->view.cols - 1, max_cx);
+	cy = min_t(int, tp->view.rows - 3, max_cy);
+	if (cy != tp->cy) {
+		tty3270_convert_line(tp, tp->cy);
+		tp->cy = cy;
+	}
+}
+
+/*
+ * Process escape sequences. Known sequences:
+ *  Esc 7			Save Cursor Position
+ *  Esc 8			Restore Cursor Position
+ *  Esc [ Pn ; Pn ; .. m	Set attributes
+ *  Esc [ Pn ; Pn H		Cursor Position
+ *  Esc [ Pn ; Pn f		Cursor Position
+ *  Esc [ Pn A			Cursor Up
+ *  Esc [ Pn B			Cursor Down
+ *  Esc [ Pn C			Cursor Forward
+ *  Esc [ Pn D			Cursor Backward
+ *  Esc [ Pn G			Cursor Horizontal Absolute
+ *  Esc [ Pn X			Erase Characters
+ *  Esc [ Ps J			Erase in Display
+ *  Esc [ Ps K			Erase in Line
+ * // FIXME: add all the new ones.
+ *
+ *  Pn is a numeric parameter, a string of zero or more decimal digits.
+ *  Ps is a selective parameter.
+ */
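+/*
+ * Parser states: ESnormal (no sequence active), ESesc (Esc seen),
+ * ESsquare (after "Esc ["), ESgetpars (collecting numeric parameters).
+ * E.g. "Esc [ 1 ; 5 H" walks ESesc -> ESsquare -> ESgetpars and ends
+ * up with esc_par[] = { 1, 5 } before dispatching on the final 'H'.
+ */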
+static void
+tty3270_escape_sequence(struct tty3270 *tp, char ch)
+{
+	enum { ESnormal, ESesc, ESsquare, ESgetpars };
+
+	if (tp->esc_state == ESnormal) {
+		if (ch == 0x1b)
+			/* Starting new escape sequence. */
+			tp->esc_state = ESesc;
+		return;
+	}
+	if (tp->esc_state == ESesc) {
+		tp->esc_state = ESnormal;
+		switch (ch) {
+		case '[':
+			tp->esc_state = ESsquare;
+			break;
+		case 'E':
+			tty3270_cr(tp);
+			tty3270_lf(tp);
+			break;
+		case 'M':
+			tty3270_ri(tp);
+			break;
+		case 'D':
+			tty3270_lf(tp);
+			break;
+		case 'Z':		/* Respond ID. */
+			kbd_puts_queue(&tp->port, "\033[?6c");
+			break;
+		case '7':		/* Save cursor position. */
+			tp->saved_cx = tp->cx;
+			tp->saved_cy = tp->cy;
+			tp->saved_highlight = tp->highlight;
+			tp->saved_f_color = tp->f_color;
+			break;
+		case '8':		/* Restore cursor position. */
+			tty3270_convert_line(tp, tp->cy);
+			tty3270_goto_xy(tp, tp->saved_cx, tp->saved_cy);
+			tp->highlight = tp->saved_highlight;
+			tp->f_color = tp->saved_f_color;
+			break;
+		case 'c':		/* Reset terminal. */
+			tp->cx = tp->saved_cx = 0;
+			tp->cy = tp->saved_cy = 0;
+			tp->highlight = tp->saved_highlight = TAX_RESET;
+			tp->f_color = tp->saved_f_color = TAC_RESET;
+			tty3270_erase_display(tp, 2);
+			break;
+		}
+		return;
+	}
+	if (tp->esc_state == ESsquare) {
+		tp->esc_state = ESgetpars;
+		memset(tp->esc_par, 0, sizeof(tp->esc_par));
+		tp->esc_npar = 0;
+		tp->esc_ques = (ch == '?');
+		if (tp->esc_ques)
+			return;
+	}
+	if (tp->esc_state == ESgetpars) {
+		if (ch == ';' && tp->esc_npar < ESCAPE_NPAR - 1) {
+			tp->esc_npar++;
+			return;
+		}
+		if (ch >= '0' && ch <= '9') {
+			tp->esc_par[tp->esc_npar] *= 10;
+			tp->esc_par[tp->esc_npar] += ch - '0';
+			return;
+		}
+	}
+	tp->esc_state = ESnormal;
+	if (ch == 'n' && !tp->esc_ques) {
+		if (tp->esc_par[0] == 5)		/* Status report. */
+			kbd_puts_queue(&tp->port, "\033[0n");
+		else if (tp->esc_par[0] == 6) {	/* Cursor report. */
+			char buf[40];
+			sprintf(buf, "\033[%d;%dR", tp->cy + 1, tp->cx + 1);
+			kbd_puts_queue(&tp->port, buf);
+		}
+		return;
+	}
+	if (tp->esc_ques)
+		return;
+	switch (ch) {
+	case 'm':
+		tty3270_set_attributes(tp);
+		break;
+	case 'H':	/* Set cursor position. */
+	case 'f':
+		tty3270_goto_xy(tp, tty3270_getpar(tp, 1) - 1,
+				tty3270_getpar(tp, 0) - 1);
+		break;
+	case 'd':	/* Set y position. */
+		tty3270_goto_xy(tp, tp->cx, tty3270_getpar(tp, 0) - 1);
+		break;
+	case 'A':	/* Cursor up. */
+	case 'F':
+		tty3270_goto_xy(tp, tp->cx, tp->cy - tty3270_getpar(tp, 0));
+		break;
+	case 'B':	/* Cursor down. */
+	case 'e':
+	case 'E':
+		tty3270_goto_xy(tp, tp->cx, tp->cy + tty3270_getpar(tp, 0));
+		break;
+	case 'C':	/* Cursor forward. */
+	case 'a':
+		tty3270_goto_xy(tp, tp->cx + tty3270_getpar(tp, 0), tp->cy);
+		break;
+	case 'D':	/* Cursor backward. */
+		tty3270_goto_xy(tp, tp->cx - tty3270_getpar(tp, 0), tp->cy);
+		break;
+	case 'G':	/* Set x position. */
+	case '`':
+		tty3270_goto_xy(tp, tty3270_getpar(tp, 0), tp->cy);
+		break;
+	case 'X':	/* Erase Characters. */
+		tty3270_erase_characters(tp, tty3270_getpar(tp, 0));
+		break;
+	case 'J':	/* Erase display. */
+		tty3270_erase_display(tp, tp->esc_par[0]);
+		break;
+	case 'K':	/* Erase line. */
+		tty3270_erase_line(tp, tp->esc_par[0]);
+		break;
+	case 'P':	/* Delete characters. */
+		tty3270_delete_characters(tp, tty3270_getpar(tp, 0));
+		break;
+	case '@':	/* Insert characters. */
+		tty3270_insert_characters(tp, tty3270_getpar(tp, 0));
+		break;
+	case 's':	/* Save cursor position. */
+		tp->saved_cx = tp->cx;
+		tp->saved_cy = tp->cy;
+		tp->saved_highlight = tp->highlight;
+		tp->saved_f_color = tp->f_color;
+		break;
+	case 'u':	/* Restore cursor position. */
+		tty3270_convert_line(tp, tp->cy);
+		tty3270_goto_xy(tp, tp->saved_cx, tp->saved_cy);
+		tp->highlight = tp->saved_highlight;
+		tp->f_color = tp->saved_f_color;
+		break;
+	}
+}
+
+/*
+ * String write routine for 3270 ttys
+ */
+static void
+tty3270_do_write(struct tty3270 *tp, struct tty_struct *tty,
+		const unsigned char *buf, int count)
+{
+	int i_msg, i;
+
+	spin_lock_bh(&tp->view.lock);
+	for (i_msg = 0; !tty->stopped && i_msg < count; i_msg++) {
+		if (tp->esc_state != 0) {
+			/* Continue escape sequence. */
+			tty3270_escape_sequence(tp, buf[i_msg]);
+			continue;
+		}
+
+		switch (buf[i_msg]) {
+		case 0x07:		/* '\a' -- Alarm */
+			tp->wcc |= TW_PLUSALARM;
+			break;
+		case 0x08:		/* Backspace. */
+			if (tp->cx > 0) {
+				tp->cx--;
+				tty3270_put_character(tp, ' ');
+			}
+			break;
+		case 0x09:		/* '\t' -- Tabulate */
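+			/* Pad with blanks up to the next tab stop,
+			 * i.e. the next multiple of eight columns. */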
+			for (i = tp->cx % 8; i < 8; i++) {
+				if (tp->cx >= tp->view.cols) {
+					tty3270_cr(tp);
+					tty3270_lf(tp);
+					break;
+				}
+				tty3270_put_character(tp, ' ');
+				tp->cx++;
+			}
+			break;
+		case 0x0a:		/* '\n' -- New Line */
+			tty3270_cr(tp);
+			tty3270_lf(tp);
+			break;
+		case 0x0c:		/* '\f' -- Form Feed */
+			tty3270_erase_display(tp, 2);
+			tp->cx = tp->cy = 0;
+			break;
+		case 0x0d:		/* '\r' -- Carriage Return */
+			tp->cx = 0;
+			break;
+		case 0x0f:		/* SuSE "exit alternate mode" */
+			break;
+		case 0x1b:		/* Start escape sequence. */
+			tty3270_escape_sequence(tp, buf[i_msg]);
+			break;
+		default:		/* Insert normal character. */
+			if (tp->cx >= tp->view.cols) {
+				tty3270_cr(tp);
+				tty3270_lf(tp);
+			}
+			tty3270_put_character(tp, buf[i_msg]);
+			tp->cx++;
+			break;
+		}
+	}
+	/* Convert current line to 3270 data fragment. */
+	tty3270_convert_line(tp, tp->cy);
+
+	/* Set up a timer to update the display after 1/10 second */
+	if (!timer_pending(&tp->timer))
+		tty3270_set_timer(tp, HZ/10);
+
+	spin_unlock_bh(&tp->view.lock);
+}
+
+/*
+ * Write routine for 3270 ttys, called by the tty core.
+ */
+static int
+tty3270_write(struct tty_struct *tty,
+	      const unsigned char *buf, int count)
+{
+	struct tty3270 *tp;
+
+	tp = tty->driver_data;
+	if (!tp)
+		return 0;
+	if (tp->char_count > 0) {
+		tty3270_do_write(tp, tty, tp->char_buf, tp->char_count);
+		tp->char_count = 0;
+	}
+	tty3270_do_write(tp, tty, buf, count);
+	return count;
+}
+
+/*
+ * Put single characters to the ttys character buffer
+ */
+static int tty3270_put_char(struct tty_struct *tty, unsigned char ch)
+{
+	struct tty3270 *tp;
+
+	tp = tty->driver_data;
+	if (!tp || tp->char_count >= TTY3270_CHAR_BUF_SIZE)
+		return 0;
+	tp->char_buf[tp->char_count++] = ch;
+	return 1;
+}
+
+/*
+ * Flush all characters from the tty's character buffer put there
+ * by tty3270_put_char.
+ */
+static void
+tty3270_flush_chars(struct tty_struct *tty)
+{
+	struct tty3270 *tp;
+
+	tp = tty->driver_data;
+	if (!tp)
+		return;
+	if (tp->char_count > 0) {
+		tty3270_do_write(tp, tty, tp->char_buf, tp->char_count);
+		tp->char_count = 0;
+	}
+}
+
+/*
+ * Returns the number of characters in the output buffer. This is
+ * used in tty_wait_until_sent to wait until all characters have
+ * appeared on the screen.
+ */
+static int
+tty3270_chars_in_buffer(struct tty_struct *tty)
+{
+	return 0;
+}
+
+static void
+tty3270_flush_buffer(struct tty_struct *tty)
+{
+}
+
+/*
+ * Check for visible/invisible input switches
+ */
+static void
+tty3270_set_termios(struct tty_struct *tty, struct ktermios *old)
+{
+	struct tty3270 *tp;
+	int new;
+
+	tp = tty->driver_data;
+	if (!tp)
+		return;
+	spin_lock_bh(&tp->view.lock);
+	if (L_ICANON(tty)) {
+		new = L_ECHO(tty) ? TF_INPUT : TF_INPUTN;
+		if (new != tp->inattr) {
+			tp->inattr = new;
+			tty3270_update_prompt(tp, NULL, 0);
+			tty3270_set_timer(tp, 1);
+		}
+	}
+	spin_unlock_bh(&tp->view.lock);
+}
+
+/*
+ * Disable reading from a 3270 tty
+ */
+static void
+tty3270_throttle(struct tty_struct *tty)
+{
+	struct tty3270 *tp;
+
+	tp = tty->driver_data;
+	if (!tp)
+		return;
+	tp->throttle = 1;
+}
+
+/*
+ * Enable reading from a 3270 tty
+ */
+static void
+tty3270_unthrottle(struct tty_struct *tty)
+{
+	struct tty3270 *tp;
+
+	tp = tty->driver_data;
+	if (!tp)
+		return;
+	tp->throttle = 0;
+	if (tp->attn)
+		tty3270_issue_read(tp, 1);
+}
+
+/*
+ * Hang up the tty device.
+ */
+static void
+tty3270_hangup(struct tty_struct *tty)
+{
+	struct tty3270 *tp;
+
+	tp = tty->driver_data;
+	if (!tp)
+		return;
+	spin_lock_bh(&tp->view.lock);
+	tp->cx = tp->saved_cx = 0;
+	tp->cy = tp->saved_cy = 0;
+	tp->highlight = tp->saved_highlight = TAX_RESET;
+	tp->f_color = tp->saved_f_color = TAC_RESET;
+	tty3270_blank_screen(tp);
+	while (tp->nr_lines < tp->view.rows - 2)
+		tty3270_blank_line(tp);
+	tp->update_flags = TTY_UPDATE_ALL;
+	spin_unlock_bh(&tp->view.lock);
+	tty3270_set_timer(tp, 1);
+}
+
+static void
+tty3270_wait_until_sent(struct tty_struct *tty, int timeout)
+{
+}
+
+static int tty3270_ioctl(struct tty_struct *tty, unsigned int cmd,
+			 unsigned long arg)
+{
+	struct tty3270 *tp;
+
+	tp = tty->driver_data;
+	if (!tp)
+		return -ENODEV;
+	if (tty_io_error(tty))
+		return -EIO;
+	return kbd_ioctl(tp->kbd, cmd, arg);
+}
+
+#ifdef CONFIG_COMPAT
+static long tty3270_compat_ioctl(struct tty_struct *tty,
+				 unsigned int cmd, unsigned long arg)
+{
+	struct tty3270 *tp;
+
+	tp = tty->driver_data;
+	if (!tp)
+		return -ENODEV;
+	if (tty_io_error(tty))
+		return -EIO;
+	return kbd_ioctl(tp->kbd, cmd, (unsigned long)compat_ptr(arg));
+}
+#endif
+
+static const struct tty_operations tty3270_ops = {
+	.install = tty3270_install,
+	.cleanup = tty3270_cleanup,
+	.open = tty3270_open,
+	.close = tty3270_close,
+	.write = tty3270_write,
+	.put_char = tty3270_put_char,
+	.flush_chars = tty3270_flush_chars,
+	.write_room = tty3270_write_room,
+	.chars_in_buffer = tty3270_chars_in_buffer,
+	.flush_buffer = tty3270_flush_buffer,
+	.throttle = tty3270_throttle,
+	.unthrottle = tty3270_unthrottle,
+	.hangup = tty3270_hangup,
+	.wait_until_sent = tty3270_wait_until_sent,
+	.ioctl = tty3270_ioctl,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl = tty3270_compat_ioctl,
+#endif
+	.set_termios = tty3270_set_termios
+};
+
+static void tty3270_create_cb(int minor)
+{
+	tty_register_device(tty3270_driver, minor - RAW3270_FIRSTMINOR, NULL);
+}
+
+static void tty3270_destroy_cb(int minor)
+{
+	tty_unregister_device(tty3270_driver, minor - RAW3270_FIRSTMINOR);
+}
+
+static struct raw3270_notifier tty3270_notifier =
+{
+	.create = tty3270_create_cb,
+	.destroy = tty3270_destroy_cb,
+};
+
+/*
+ * 3270 tty registration code called from tty_init().
+ * Most kernel services (incl. kmalloc) are available at this point.
+ */
+static int __init tty3270_init(void)
+{
+	struct tty_driver *driver;
+	int ret;
+
+	driver = tty_alloc_driver(RAW3270_MAXDEVS,
+				  TTY_DRIVER_REAL_RAW |
+				  TTY_DRIVER_DYNAMIC_DEV |
+				  TTY_DRIVER_RESET_TERMIOS);
+	if (IS_ERR(driver))
+		return PTR_ERR(driver);
+
+	/*
+	 * Initialize the tty_driver structure
+	 * All callbacks are provided via tty3270_ops above.
+	 */
+	driver->driver_name = "tty3270";
+	driver->name = "3270/tty";
+	driver->major = IBM_TTY3270_MAJOR;
+	driver->minor_start = RAW3270_FIRSTMINOR;
+	driver->name_base = RAW3270_FIRSTMINOR;
+	driver->type = TTY_DRIVER_TYPE_SYSTEM;
+	driver->subtype = SYSTEM_TYPE_TTY;
+	driver->init_termios = tty_std_termios;
+	tty_set_operations(driver, &tty3270_ops);
+	ret = tty_register_driver(driver);
+	if (ret) {
+		put_tty_driver(driver);
+		return ret;
+	}
+	tty3270_driver = driver;
+	raw3270_register_notifier(&tty3270_notifier);
+	return 0;
+}
+
+static void __exit
+tty3270_exit(void)
+{
+	struct tty_driver *driver;
+
+	raw3270_unregister_notifier(&tty3270_notifier);
+	driver = tty3270_driver;
+	tty3270_driver = NULL;
+	tty_unregister_driver(driver);
+	put_tty_driver(driver);
+	tty3270_del_views();
+}
+
+MODULE_LICENSE("GPL");
+MODULE_ALIAS_CHARDEV_MAJOR(IBM_TTY3270_MAJOR);
+
+module_init(tty3270_init);
+module_exit(tty3270_exit);
diff --git a/drivers/s390/char/tty3270.h b/drivers/s390/char/tty3270.h
new file mode 100644
index 0000000..52ceed6
--- /dev/null
+++ b/drivers/s390/char/tty3270.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ *    Copyright IBM Corp. 2007
+ *
+ */
+
+#ifndef __DRIVERS_S390_CHAR_TTY3270_H
+#define __DRIVERS_S390_CHAR_TTY3270_H
+
+#include <linux/tty.h>
+#include <linux/tty_driver.h>
+
+extern struct tty_driver *tty3270_driver;
+
+#endif /* __DRIVERS_S390_CHAR_TTY3270_H */
diff --git a/drivers/s390/char/vmcp.c b/drivers/s390/char/vmcp.c
new file mode 100644
index 0000000..0fa1b6b
--- /dev/null
+++ b/drivers/s390/char/vmcp.c
@@ -0,0 +1,274 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright IBM Corp. 2004, 2010
+ * Interface implementation for communication with the z/VM control program
+ *
+ * Author(s): Christian Borntraeger <borntraeger@de.ibm.com>
+ *
+ * z/VM's CP offers the possibility to issue commands via diagnose code 8.
+ * This driver implements a character device that issues these commands and
+ * returns the answer of CP.
+ *
+ * The idea of this driver is based on cpint from Neale Ferguson and #CP in CMS
+ */
+
+#include <linux/fs.h>
+#include <linux/init.h>
+#include <linux/compat.h>
+#include <linux/kernel.h>
+#include <linux/miscdevice.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <linux/export.h>
+#include <linux/mutex.h>
+#include <linux/cma.h>
+#include <linux/mm.h>
+#include <asm/cpcmd.h>
+#include <asm/debug.h>
+#include <asm/vmcp.h>
+
+struct vmcp_session {
+	char *response;
+	unsigned int bufsize;
+	unsigned int cma_alloc : 1;
+	int resp_size;
+	int resp_code;
+	struct mutex mutex;
+};
+
+static debug_info_t *vmcp_debug;
+
+static unsigned long vmcp_cma_size __initdata = CONFIG_VMCP_CMA_SIZE * 1024 * 1024;
+static struct cma *vmcp_cma;
+
+static int __init early_parse_vmcp_cma(char *p)
+{
+	vmcp_cma_size = ALIGN(memparse(p, NULL), PAGE_SIZE);
+	return 0;
+}
+early_param("vmcp_cma", early_parse_vmcp_cma);
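+
+/*
+ * The size of the CMA area can be set on the kernel command line, e.g.
+ * (illustrative, any memparse suffix works): vmcp_cma=16m
+ */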
+
+void __init vmcp_cma_reserve(void)
+{
+	if (!MACHINE_IS_VM)
+		return;
+	cma_declare_contiguous(0, vmcp_cma_size, 0, 0, 0, false, "vmcp", &vmcp_cma);
+}
+
+static void vmcp_response_alloc(struct vmcp_session *session)
+{
+	struct page *page = NULL;
+	int nr_pages, order;
+
+	order = get_order(session->bufsize);
+	nr_pages = ALIGN(session->bufsize, PAGE_SIZE) >> PAGE_SHIFT;
+	/*
+	 * For anything below order 3 allocations rely on the buddy
+	 * allocator. If such low-order allocations can't be handled
+	 * anymore the system won't work anyway.
+	 */
+	if (order > 2)
+		page = cma_alloc(vmcp_cma, nr_pages, 0, false);
+	if (page) {
+		session->response = (char *)page_to_phys(page);
+		session->cma_alloc = 1;
+		return;
+	}
+	session->response = (char *)__get_free_pages(GFP_KERNEL | __GFP_RETRY_MAYFAIL, order);
+}
+
+static void vmcp_response_free(struct vmcp_session *session)
+{
+	int nr_pages, order;
+	struct page *page;
+
+	if (!session->response)
+		return;
+	order = get_order(session->bufsize);
+	nr_pages = ALIGN(session->bufsize, PAGE_SIZE) >> PAGE_SHIFT;
+	if (session->cma_alloc) {
+		page = phys_to_page((unsigned long)session->response);
+		cma_release(vmcp_cma, page, nr_pages);
+		session->cma_alloc = 0;
+	} else {
+		free_pages((unsigned long)session->response, order);
+	}
+	session->response = NULL;
+}
+
+static int vmcp_open(struct inode *inode, struct file *file)
+{
+	struct vmcp_session *session;
+
+	if (!capable(CAP_SYS_ADMIN))
+		return -EPERM;
+
+	session = kmalloc(sizeof(*session), GFP_KERNEL);
+	if (!session)
+		return -ENOMEM;
+
+	session->bufsize = PAGE_SIZE;
+	session->response = NULL;
+	session->resp_size = 0;
+	mutex_init(&session->mutex);
+	file->private_data = session;
+	return nonseekable_open(inode, file);
+}
+
+static int vmcp_release(struct inode *inode, struct file *file)
+{
+	struct vmcp_session *session;
+
+	session = file->private_data;
+	file->private_data = NULL;
+	vmcp_response_free(session);
+	kfree(session);
+	return 0;
+}
+
+static ssize_t
+vmcp_read(struct file *file, char __user *buff, size_t count, loff_t *ppos)
+{
+	ssize_t ret;
+	size_t size;
+	struct vmcp_session *session;
+
+	session = file->private_data;
+	if (mutex_lock_interruptible(&session->mutex))
+		return -ERESTARTSYS;
+	if (!session->response) {
+		mutex_unlock(&session->mutex);
+		return 0;
+	}
+	size = min_t(size_t, session->resp_size, session->bufsize);
+	ret = simple_read_from_buffer(buff, count, ppos,
+					session->response, size);
+
+	mutex_unlock(&session->mutex);
+
+	return ret;
+}
+
+static ssize_t
+vmcp_write(struct file *file, const char __user *buff, size_t count,
+	   loff_t *ppos)
+{
+	char *cmd;
+	struct vmcp_session *session;
+
+	if (count > 240)
+		return -EINVAL;
+	cmd = memdup_user_nul(buff, count);
+	if (IS_ERR(cmd))
+		return PTR_ERR(cmd);
+	session = file->private_data;
+	if (mutex_lock_interruptible(&session->mutex)) {
+		kfree(cmd);
+		return -ERESTARTSYS;
+	}
+	if (!session->response)
+		vmcp_response_alloc(session);
+	if (!session->response) {
+		mutex_unlock(&session->mutex);
+		kfree(cmd);
+		return -ENOMEM;
+	}
+	debug_text_event(vmcp_debug, 1, cmd);
+	session->resp_size = cpcmd(cmd, session->response, session->bufsize,
+				   &session->resp_code);
+	mutex_unlock(&session->mutex);
+	kfree(cmd);
+	*ppos = 0;		/* reset the file pointer after a command */
+	return count;
+}
+
+
+/*
+ * These ioctls are available, as the semantics of the diagnose 8 call
+ * do not fit very well into a Linux call. Diagnose X'08' is described in
+ * CP Programming Services SC24-6084-00
+ *
+ * VMCP_GETCODE: gives the CP return code back to user space
+ * VMCP_SETBUF: sets the response buffer for the next write call. Diagnose 8
+ * expects adjacent pages in real storage and, to make matters worse, we
+ * don't know the size of the response. Therefore we default to PAGE_SIZE
+ * and let userspace change the response size if it expects a bigger
+ * response.
+ */
+static long vmcp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+	struct vmcp_session *session;
+	int ret = -ENOTTY;
+	int __user *argp;
+
+	session = file->private_data;
+	if (is_compat_task())
+		argp = compat_ptr(arg);
+	else
+		argp = (int __user *)arg;
+	if (mutex_lock_interruptible(&session->mutex))
+		return -ERESTARTSYS;
+	switch (cmd) {
+	case VMCP_GETCODE:
+		ret = put_user(session->resp_code, argp);
+		break;
+	case VMCP_SETBUF:
+		vmcp_response_free(session);
+		ret = get_user(session->bufsize, argp);
+		if (ret)
+			session->bufsize = PAGE_SIZE;
+		if (!session->bufsize || get_order(session->bufsize) > 8) {
+			session->bufsize = PAGE_SIZE;
+			ret = -EINVAL;
+		}
+		break;
+	case VMCP_GETSIZE:
+		ret = put_user(session->resp_size, argp);
+		break;
+	default:
+		break;
+	}
+	mutex_unlock(&session->mutex);
+	return ret;
+}
+
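+/*
+ * Illustrative user-space usage (a hedged sketch, not part of the
+ * driver; the VMCP_* constants come from asm/vmcp.h):
+ *
+ *	char buf[4096];
+ *	int fd = open("/dev/vmcp", O_RDWR);
+ *	int cp_rc, size;
+ *
+ *	write(fd, "QUERY USERID", 12);	  issue the CP command
+ *	ioctl(fd, VMCP_GETCODE, &cp_rc);  CP return code of the command
+ *	ioctl(fd, VMCP_GETSIZE, &size);	  size of the full response
+ *	read(fd, buf, sizeof(buf));	  fetch the response text
+ */
+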
+static const struct file_operations vmcp_fops = {
+	.owner		= THIS_MODULE,
+	.open		= vmcp_open,
+	.release	= vmcp_release,
+	.read		= vmcp_read,
+	.write		= vmcp_write,
+	.unlocked_ioctl	= vmcp_ioctl,
+	.compat_ioctl	= vmcp_ioctl,
+	.llseek		= no_llseek,
+};
+
+static struct miscdevice vmcp_dev = {
+	.name	= "vmcp",
+	.minor	= MISC_DYNAMIC_MINOR,
+	.fops	= &vmcp_fops,
+};
+
+static int __init vmcp_init(void)
+{
+	int ret;
+
+	if (!MACHINE_IS_VM)
+		return 0;
+
+	vmcp_debug = debug_register("vmcp", 1, 1, 240);
+	if (!vmcp_debug)
+		return -ENOMEM;
+
+	ret = debug_register_view(vmcp_debug, &debug_hex_ascii_view);
+	if (ret) {
+		debug_unregister(vmcp_debug);
+		return ret;
+	}
+
+	ret = misc_register(&vmcp_dev);
+	if (ret)
+		debug_unregister(vmcp_debug);
+	return ret;
+}
+device_initcall(vmcp_init);
diff --git a/drivers/s390/char/vmlogrdr.c b/drivers/s390/char/vmlogrdr.c
new file mode 100644
index 0000000..069b9ef
--- /dev/null
+++ b/drivers/s390/char/vmlogrdr.c
@@ -0,0 +1,902 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ *	character device driver for reading z/VM system service records
+ *
+ *
+ *	Copyright IBM Corp. 2004, 2009
+ *	character device driver for reading z/VM system service records,
+ *	Version 1.0
+ *	Author(s): Xenia Tkatschow <xenia@us.ibm.com>
+ *		   Stefan Weinhuber <wein@de.ibm.com>
+ *
+ */
+
+#define KMSG_COMPONENT "vmlogrdr"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+#include <linux/atomic.h>
+#include <linux/uaccess.h>
+#include <asm/cpcmd.h>
+#include <asm/debug.h>
+#include <asm/ebcdic.h>
+#include <net/iucv/iucv.h>
+#include <linux/kmod.h>
+#include <linux/cdev.h>
+#include <linux/device.h>
+#include <linux/string.h>
+
+MODULE_AUTHOR
+	("(C) 2004 IBM Corporation by Xenia Tkatschow (xenia@us.ibm.com)\n"
+	 "                            Stefan Weinhuber (wein@de.ibm.com)");
+MODULE_DESCRIPTION ("Character device driver for reading z/VM "
+		    "system service records.");
+MODULE_LICENSE("GPL");
+
+
+/*
+ * The size of the buffer for iucv data transfer is one page,
+ * but in addition to the data we read from iucv we also
+ * place an integer and some characters into that buffer,
+ * so the maximum size for record data is a little less than
+ * one page.
+ */
+#define NET_BUFFER_SIZE	(PAGE_SIZE - sizeof(int) - sizeof(FENCE))
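+
+/*
+ * Layout of one complete record in the buffer, as assembled by
+ * vmlogrdr_receive_data() below (an illustrative sketch):
+ *
+ *   | int: record length + sizeof(FENCE) | record data | FENCE ("EOR") |
+ */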
+
+/*
+ * The elements that are concurrently accessed by bottom halves are
+ * connection_established, iucv_path_severed, local_interrupt_buffer
+ * and receive_ready. The first three can be protected by
+ * priv_lock.  receive_ready is atomic, so it can be incremented and
+ * decremented without holding a lock.
+ * The variable dev_in_use needs to be protected by the lock, since
+ * it's a flag used by open to make sure that the device is opened only
+ * by one user at the same time.
+ */
+struct vmlogrdr_priv_t {
+	char system_service[8];
+	char internal_name[8];
+	char recording_name[8];
+	struct iucv_path *path;
+	int connection_established;
+	int iucv_path_severed;
+	struct iucv_message local_interrupt_buffer;
+	atomic_t receive_ready;
+	int minor_num;
+	char *buffer;
+	char *current_position;
+	int remaining;
+	ulong residual_length;
+	int buffer_free;
+	int dev_in_use; /* 1: already opened, 0: not opened*/
+	spinlock_t priv_lock;
+	struct device  *device;
+	struct device  *class_device;
+	int autorecording;
+	int autopurge;
+};
+
+
+/*
+ * File operation structure for vmlogrdr devices
+ */
+static int vmlogrdr_open(struct inode *, struct file *);
+static int vmlogrdr_release(struct inode *, struct file *);
+static ssize_t vmlogrdr_read(struct file *filp, char __user *data,
+			     size_t count, loff_t *ppos);
+
+static const struct file_operations vmlogrdr_fops = {
+	.owner   = THIS_MODULE,
+	.open    = vmlogrdr_open,
+	.release = vmlogrdr_release,
+	.read    = vmlogrdr_read,
+	.llseek  = no_llseek,
+};
+
+
+static void vmlogrdr_iucv_path_complete(struct iucv_path *, u8 *ipuser);
+static void vmlogrdr_iucv_path_severed(struct iucv_path *, u8 *ipuser);
+static void vmlogrdr_iucv_message_pending(struct iucv_path *,
+					  struct iucv_message *);
+
+
+static struct iucv_handler vmlogrdr_iucv_handler = {
+	.path_complete	 = vmlogrdr_iucv_path_complete,
+	.path_severed	 = vmlogrdr_iucv_path_severed,
+	.message_pending = vmlogrdr_iucv_message_pending,
+};
+
+
+static DECLARE_WAIT_QUEUE_HEAD(conn_wait_queue);
+static DECLARE_WAIT_QUEUE_HEAD(read_wait_queue);
+
+/*
+ * pointer to system service private structure
+ * minor number 0 --> logrec
+ * minor number 1 --> account
+ * minor number 2 --> symptom
+ */
+
+static struct vmlogrdr_priv_t sys_ser[] = {
+	{ .system_service = "*LOGREC ",
+	  .internal_name  = "logrec",
+	  .recording_name = "EREP",
+	  .minor_num      = 0,
+	  .buffer_free    = 1,
+	  .priv_lock	  = __SPIN_LOCK_UNLOCKED(sys_ser[0].priv_lock),
+	  .autorecording  = 1,
+	  .autopurge      = 1,
+	},
+	{ .system_service = "*ACCOUNT",
+	  .internal_name  = "account",
+	  .recording_name = "ACCOUNT",
+	  .minor_num      = 1,
+	  .buffer_free    = 1,
+	  .priv_lock	  = __SPIN_LOCK_UNLOCKED(sys_ser[1].priv_lock),
+	  .autorecording  = 1,
+	  .autopurge      = 1,
+	},
+	{ .system_service = "*SYMPTOM",
+	  .internal_name  = "symptom",
+	  .recording_name = "SYMPTOM",
+	  .minor_num      = 2,
+	  .buffer_free    = 1,
+	  .priv_lock	  = __SPIN_LOCK_UNLOCKED(sys_ser[2].priv_lock),
+	  .autorecording  = 1,
+	  .autopurge      = 1,
+	}
+};
+
+#define MAXMINOR  ARRAY_SIZE(sys_ser)
+
+static char FENCE[] = "EOR";
+static int vmlogrdr_major;
+static struct cdev *vmlogrdr_cdev;
+static int recording_class_AB;
+
+
+static void vmlogrdr_iucv_path_complete(struct iucv_path *path, u8 *ipuser)
+{
+	struct vmlogrdr_priv_t *logptr = path->private;
+
+	spin_lock(&logptr->priv_lock);
+	logptr->connection_established = 1;
+	spin_unlock(&logptr->priv_lock);
+	wake_up(&conn_wait_queue);
+}
+
+
+static void vmlogrdr_iucv_path_severed(struct iucv_path *path, u8 *ipuser)
+{
+	struct vmlogrdr_priv_t *logptr = path->private;
+	u8 reason = (u8) ipuser[8];
+
+	pr_err("vmlogrdr: connection severed with reason %i\n", reason);
+
+	iucv_path_sever(path, NULL);
+	kfree(path);
+	logptr->path = NULL;
+
+	spin_lock(&logptr->priv_lock);
+	logptr->connection_established = 0;
+	logptr->iucv_path_severed = 1;
+	spin_unlock(&logptr->priv_lock);
+
+	wake_up(&conn_wait_queue);
+	/* just in case we're sleeping waiting for a record */
+	wake_up_interruptible(&read_wait_queue);
+}
+
+
+static void vmlogrdr_iucv_message_pending(struct iucv_path *path,
+					  struct iucv_message *msg)
+{
+	struct vmlogrdr_priv_t *logptr = path->private;
+
+	/*
+	 * This function is the bottom half so it should be quick.
+	 * Copy the external interrupt data into our local eib and increment
+	 * the usage count
+	 */
+	spin_lock(&logptr->priv_lock);
+	memcpy(&logptr->local_interrupt_buffer, msg, sizeof(*msg));
+	atomic_inc(&logptr->receive_ready);
+	spin_unlock(&logptr->priv_lock);
+	wake_up_interruptible(&read_wait_queue);
+}
+
+
+static int vmlogrdr_get_recording_class_AB(void)
+{
+	static const char cp_command[] = "QUERY COMMAND RECORDING ";
+	char cp_response[80];
+	char *tail;
+	int len, i;
+
+	cpcmd(cp_command, cp_response, sizeof(cp_response), NULL);
+	len = strnlen(cp_response, sizeof(cp_response));
+	/* now the parsing */
+	tail = strnchr(cp_response, len, '=');
+	if (!tail)
+		return 0;
+	tail++;
+	if (!strncmp("ANY", tail, 3))
+		return 1;
+	if (!strncmp("NONE", tail, 4))
+		return 0;
+	/*
+	 * expect comma separated list of classes here, if one of them
+	 * is A or B return 1 otherwise 0
+	 */
+	for (i = tail - cp_response; i < len; i++)
+		if (cp_response[i] == 'A' || cp_response[i] == 'B')
+			return 1;
+	return 0;
+}
+
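+/*
+ * Illustrative CP responses the parser above handles (the exact wording
+ * is assumed from the parsing logic, not verified z/VM output):
+ *
+ *   <...> = ANY          recording allowed for any privilege class
+ *   <...> = NONE
+ *   <...> = A,B          comma separated list of privilege classes
+ */
+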
+
+static int vmlogrdr_recording(struct vmlogrdr_priv_t * logptr,
+			      int action, int purge)
+{
+
+	char cp_command[80];
+	char cp_response[160];
+	char *onoff, *qid_string;
+	int rc;
+
+	onoff = ((action == 1) ? "ON" : "OFF");
+	qid_string = ((recording_class_AB == 1) ? " QID * " : "");
+
+	/*
+	 * The recording command needs to be called with option QID
+	 * for guests that have privilege classes A or B.
+	 * Purging has to be done as a separate step, because recording
+	 * can't be switched on as long as records are on the queue.
+	 * Doing both at the same time doesn't work.
+	 */
+	if (purge && (action == 1)) {
+		memset(cp_command, 0x00, sizeof(cp_command));
+		memset(cp_response, 0x00, sizeof(cp_response));
+		snprintf(cp_command, sizeof(cp_command),
+			 "RECORDING %s PURGE %s",
+			 logptr->recording_name,
+			 qid_string);
+		cpcmd(cp_command, cp_response, sizeof(cp_response), NULL);
+	}
+
+	memset(cp_command, 0x00, sizeof(cp_command));
+	memset(cp_response, 0x00, sizeof(cp_response));
+	snprintf(cp_command, sizeof(cp_command), "RECORDING %s %s %s",
+		logptr->recording_name,
+		onoff,
+		qid_string);
+	cpcmd(cp_command, cp_response, sizeof(cp_response), NULL);
+	/* The recording command will usually answer with 'Command complete'
+	 * on success, but when the specific service was never connected
+	 * before, there might be an additional informational message
+	 * 'HCPCRC8072I Recording entry not found' before the
+	 * 'Command complete'. So we use strstr rather than strncmp.
+	 */
+	if (strstr(cp_response,"Command complete"))
+		rc = 0;
+	else
+		rc = -EIO;
+	/*
+	 * If we turn recording off, we have to purge any remaining records
+	 * afterwards, as a large number of queued records may impact z/VM
+	 * performance.
+	 */
+	if (purge && (action == 0)) {
+		memset(cp_command, 0x00, sizeof(cp_command));
+		memset(cp_response, 0x00, sizeof(cp_response));
+		snprintf(cp_command, sizeof(cp_command),
+			 "RECORDING %s PURGE %s",
+			 logptr->recording_name,
+			 qid_string);
+		cpcmd(cp_command, cp_response, sizeof(cp_response), NULL);
+	}
+
+	return rc;
+}
+
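+/*
+ * For the *LOGREC service on a class-B guest the function above issues,
+ * for example (illustrative):
+ *
+ *   "RECORDING EREP PURGE  QID * "	(purge first, when requested)
+ *   "RECORDING EREP ON  QID * "	(then switch recording on)
+ */
+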
+
+static int vmlogrdr_open(struct inode *inode, struct file *filp)
+{
+	int dev_num = 0;
+	struct vmlogrdr_priv_t *logptr = NULL;
+	int connect_rc = 0;
+	int ret;
+
+	dev_num = iminor(inode);
+	if (dev_num >= MAXMINOR)
+		return -ENODEV;
+	logptr = &sys_ser[dev_num];
+
+	/*
+	 * Only allow the device to be opened for blocking reads.
+	 */
+	if (filp->f_flags & O_NONBLOCK)
+		return -EOPNOTSUPP;
+
+	/* Be sure this device hasn't already been opened */
+	spin_lock_bh(&logptr->priv_lock);
+	if (logptr->dev_in_use)	{
+		spin_unlock_bh(&logptr->priv_lock);
+		return -EBUSY;
+	}
+	logptr->dev_in_use = 1;
+	logptr->connection_established = 0;
+	logptr->iucv_path_severed = 0;
+	atomic_set(&logptr->receive_ready, 0);
+	logptr->buffer_free = 1;
+	spin_unlock_bh(&logptr->priv_lock);
+
+	/* set the file options */
+	filp->private_data = logptr;
+
+	/* start recording for this service*/
+	if (logptr->autorecording) {
+		ret = vmlogrdr_recording(logptr, 1, logptr->autopurge);
+		if (ret)
+			pr_warn("vmlogrdr: failed to start recording automatically\n");
+	}
+
+	/* create connection to the system service */
+	logptr->path = iucv_path_alloc(10, 0, GFP_KERNEL);
+	if (!logptr->path)
+		goto out_dev;
+	connect_rc = iucv_path_connect(logptr->path, &vmlogrdr_iucv_handler,
+				       logptr->system_service, NULL, NULL,
+				       logptr);
+	if (connect_rc) {
+		pr_err("vmlogrdr: iucv connection to %s "
+		       "failed with rc %i \n",
+		       logptr->system_service, connect_rc);
+		goto out_path;
+	}
+
+	/* We've issued the connect and now we must wait for a
+	 * ConnectionComplete or ConnectionSevered interrupt
+	 * before we can continue to process.
+	 */
+	wait_event(conn_wait_queue, (logptr->connection_established)
+		   || (logptr->iucv_path_severed));
+	if (logptr->iucv_path_severed)
+		goto out_record;
+	nonseekable_open(inode, filp);
+	return 0;
+
+out_record:
+	if (logptr->autorecording)
+		vmlogrdr_recording(logptr, 0, logptr->autopurge);
+out_path:
+	kfree(logptr->path);	/* kfree(NULL) is ok. */
+	logptr->path = NULL;
+out_dev:
+	logptr->dev_in_use = 0;
+	return -EIO;
+}
+
+
+static int vmlogrdr_release(struct inode *inode, struct file *filp)
+{
+	int ret;
+
+	struct vmlogrdr_priv_t *logptr = filp->private_data;
+
+	iucv_path_sever(logptr->path, NULL);
+	kfree(logptr->path);
+	logptr->path = NULL;
+	if (logptr->autorecording) {
+		ret = vmlogrdr_recording(logptr, 0, logptr->autopurge);
+		if (ret)
+			pr_warn("vmlogrdr: failed to stop recording automatically\n");
+	}
+	logptr->dev_in_use = 0;
+
+	return 0;
+}
+
+
+static int vmlogrdr_receive_data(struct vmlogrdr_priv_t *priv)
+{
+	int rc, *temp;
+	/* we need to keep track of two data sizes here:
+	 * The number of bytes we need to receive from iucv and
+	 * the total number of bytes we actually write into the buffer.
+	 */
+	int user_data_count, iucv_data_count;
+	char *buffer;
+
+	if (atomic_read(&priv->receive_ready)) {
+		spin_lock_bh(&priv->priv_lock);
+		if (priv->residual_length) {
+			/* receive second half of a record */
+			iucv_data_count = priv->residual_length;
+			user_data_count = 0;
+			buffer = priv->buffer;
+		} else {
+			/* receive a new record:
+			 * We need to return the total length of the record
+			 * + size of FENCE in the first 4 bytes of the buffer.
+			 */
+			iucv_data_count = priv->local_interrupt_buffer.length;
+			user_data_count = sizeof(int);
+			temp = (int *)priv->buffer;
+			*temp = iucv_data_count + sizeof(FENCE);
+			buffer = priv->buffer + sizeof(int);
+		}
+		/*
+		 * If the record is bigger than our buffer, we receive only
+		 * a part of it. We can get the rest later.
+		 */
+		if (iucv_data_count > NET_BUFFER_SIZE)
+			iucv_data_count = NET_BUFFER_SIZE;
+		rc = iucv_message_receive(priv->path,
+					  &priv->local_interrupt_buffer,
+					  0, buffer, iucv_data_count,
+					  &priv->residual_length);
+		spin_unlock_bh(&priv->priv_lock);
+		/* An rc of 5 indicates that the record was bigger than
+		 * the buffer, which is OK for us. A 9 indicates that the
+		 * record was purged before we could receive it.
+		 */
+		if (rc == 5)
+			rc = 0;
+		if (rc == 9)
+			atomic_set(&priv->receive_ready, 0);
+	} else {
+		rc = 1;
+	}
+	if (!rc) {
+		priv->buffer_free = 0;
+		user_data_count += iucv_data_count;
+		priv->current_position = priv->buffer;
+		if (priv->residual_length == 0) {
+			/* the whole record has been captured,
+			 * now add the fence */
+			atomic_dec(&priv->receive_ready);
+			buffer = priv->buffer + user_data_count;
+			memcpy(buffer, FENCE, sizeof(FENCE));
+			user_data_count += sizeof(FENCE);
+		}
+		priv->remaining = user_data_count;
+	}
+
+	return rc;
+}
+
+
+static ssize_t vmlogrdr_read(struct file *filp, char __user *data,
+			     size_t count, loff_t *ppos)
+{
+	int rc;
+	struct vmlogrdr_priv_t *priv = filp->private_data;
+
+	while (priv->buffer_free) {
+		rc = vmlogrdr_receive_data(priv);
+		if (rc) {
+			rc = wait_event_interruptible(read_wait_queue,
+					atomic_read(&priv->receive_ready));
+			if (rc)
+				return rc;
+		}
+	}
+	/* copy only up to end of record */
+	if (count > priv->remaining)
+		count = priv->remaining;
+
+	if (copy_to_user(data, priv->current_position, count))
+		return -EFAULT;
+
+	*ppos += count;
+	priv->current_position += count;
+	priv->remaining -= count;
+
+	/* if all data has been transferred, set buffer free */
+	if (priv->remaining == 0)
+		priv->buffer_free = 1;
+
+	return count;
+}
+
+static ssize_t vmlogrdr_autopurge_store(struct device *dev,
+					struct device_attribute *attr,
+					const char *buf, size_t count)
+{
+	struct vmlogrdr_priv_t *priv = dev_get_drvdata(dev);
+	ssize_t ret = count;
+
+	switch (buf[0]) {
+	case '0':
+		priv->autopurge = 0;
+		break;
+	case '1':
+		priv->autopurge = 1;
+		break;
+	default:
+		ret = -EINVAL;
+	}
+	return ret;
+}
+
+
+static ssize_t vmlogrdr_autopurge_show(struct device *dev,
+				       struct device_attribute *attr,
+				       char *buf)
+{
+	struct vmlogrdr_priv_t *priv = dev_get_drvdata(dev);
+
+	return sprintf(buf, "%u\n", priv->autopurge);
+}
+
+
+static DEVICE_ATTR(autopurge, 0644, vmlogrdr_autopurge_show,
+		   vmlogrdr_autopurge_store);
+
+
+static ssize_t vmlogrdr_purge_store(struct device *dev,
+				    struct device_attribute *attr,
+				    const char *buf, size_t count)
+{
+
+	char cp_command[80];
+	char cp_response[80];
+	struct vmlogrdr_priv_t *priv = dev_get_drvdata(dev);
+
+	if (buf[0] != '1')
+		return -EINVAL;
+
+	memset(cp_command, 0x00, sizeof(cp_command));
+	memset(cp_response, 0x00, sizeof(cp_response));
+
+	/*
+	 * The recording command needs to be called with option QID
+	 * for guests that have privilege classes A or B.
+	 * Other guests will not recognize the command and we have to
+	 * issue the same command without the QID parameter.
+	 */
+
+	if (recording_class_AB)
+		snprintf(cp_command, sizeof(cp_command),
+			 "RECORDING %s PURGE QID * ",
+			 priv->recording_name);
+	else
+		snprintf(cp_command, sizeof(cp_command),
+			 "RECORDING %s PURGE ",
+			 priv->recording_name);
+
+	cpcmd(cp_command, cp_response, sizeof(cp_response), NULL);
+
+	return count;
+}
+
+
+static DEVICE_ATTR(purge, 0200, NULL, vmlogrdr_purge_store);
+
+
+static ssize_t vmlogrdr_autorecording_store(struct device *dev,
+					    struct device_attribute *attr,
+					    const char *buf, size_t count)
+{
+	struct vmlogrdr_priv_t *priv = dev_get_drvdata(dev);
+	ssize_t ret = count;
+
+	switch (buf[0]) {
+	case '0':
+		priv->autorecording = 0;
+		break;
+	case '1':
+		priv->autorecording = 1;
+		break;
+	default:
+		ret = -EINVAL;
+	}
+	return ret;
+}
+
+
+static ssize_t vmlogrdr_autorecording_show(struct device *dev,
+					   struct device_attribute *attr,
+					   char *buf)
+{
+	struct vmlogrdr_priv_t *priv = dev_get_drvdata(dev);
+
+	return sprintf(buf, "%u\n", priv->autorecording);
+}
+
+
+static DEVICE_ATTR(autorecording, 0644, vmlogrdr_autorecording_show,
+		   vmlogrdr_autorecording_store);
+
+
+static ssize_t vmlogrdr_recording_store(struct device *dev,
+					struct device_attribute *attr,
+					const char *buf, size_t count)
+{
+	struct vmlogrdr_priv_t *priv = dev_get_drvdata(dev);
+	ssize_t ret;
+
+	switch (buf[0]) {
+	case '0':
+		ret = vmlogrdr_recording(priv, 0, 0);
+		break;
+	case '1':
+		ret = vmlogrdr_recording(priv, 1, 0);
+		break;
+	default:
+		ret = -EINVAL;
+	}
+	if (ret)
+		return ret;
+	else
+		return count;
+}
+
+
+static DEVICE_ATTR(recording, 0200, NULL, vmlogrdr_recording_store);
+
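+/*
+ * Illustrative sysfs usage (the path is assumed from the iucv bus
+ * layout and the internal_name of the service):
+ *
+ *   echo 1 > /sys/bus/iucv/devices/logrec/recording
+ *   echo 1 > /sys/bus/iucv/devices/logrec/purge
+ */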
+
+static ssize_t recording_status_show(struct device_driver *driver, char *buf)
+{
+	static const char cp_command[] = "QUERY RECORDING ";
+
+	cpcmd(cp_command, buf, 4096, NULL);
+	return strlen(buf);
+}
+static DRIVER_ATTR_RO(recording_status);
+static struct attribute *vmlogrdr_drv_attrs[] = {
+	&driver_attr_recording_status.attr,
+	NULL,
+};
+static struct attribute_group vmlogrdr_drv_attr_group = {
+	.attrs = vmlogrdr_drv_attrs,
+};
+static const struct attribute_group *vmlogrdr_drv_attr_groups[] = {
+	&vmlogrdr_drv_attr_group,
+	NULL,
+};
+
+static struct attribute *vmlogrdr_attrs[] = {
+	&dev_attr_autopurge.attr,
+	&dev_attr_purge.attr,
+	&dev_attr_autorecording.attr,
+	&dev_attr_recording.attr,
+	NULL,
+};
+static struct attribute_group vmlogrdr_attr_group = {
+	.attrs = vmlogrdr_attrs,
+};
+static const struct attribute_group *vmlogrdr_attr_groups[] = {
+	&vmlogrdr_attr_group,
+	NULL,
+};
+
+static int vmlogrdr_pm_prepare(struct device *dev)
+{
+	int rc;
+	struct vmlogrdr_priv_t *priv = dev_get_drvdata(dev);
+
+	rc = 0;
+	if (priv) {
+		spin_lock_bh(&priv->priv_lock);
+		if (priv->dev_in_use)
+			rc = -EBUSY;
+		spin_unlock_bh(&priv->priv_lock);
+	}
+	if (rc)
+		pr_err("vmlogrdr: device %s is busy. Refuse to suspend.\n",
+		       dev_name(dev));
+	return rc;
+}
+
+
+static const struct dev_pm_ops vmlogrdr_pm_ops = {
+	.prepare = vmlogrdr_pm_prepare,
+};
+
+static struct class *vmlogrdr_class;
+static struct device_driver vmlogrdr_driver = {
+	.name = "vmlogrdr",
+	.bus  = &iucv_bus,
+	.pm = &vmlogrdr_pm_ops,
+	.groups = vmlogrdr_drv_attr_groups,
+};
+
+static int vmlogrdr_register_driver(void)
+{
+	int ret;
+
+	/* Register with iucv driver */
+	ret = iucv_register(&vmlogrdr_iucv_handler, 1);
+	if (ret)
+		goto out;
+
+	ret = driver_register(&vmlogrdr_driver);
+	if (ret)
+		goto out_iucv;
+
+	vmlogrdr_class = class_create(THIS_MODULE, "vmlogrdr");
+	if (IS_ERR(vmlogrdr_class)) {
+		ret = PTR_ERR(vmlogrdr_class);
+		vmlogrdr_class = NULL;
+		goto out_driver;
+	}
+	return 0;
+
+out_driver:
+	driver_unregister(&vmlogrdr_driver);
+out_iucv:
+	iucv_unregister(&vmlogrdr_iucv_handler, 1);
+out:
+	return ret;
+}
+
+
+static void vmlogrdr_unregister_driver(void)
+{
+	class_destroy(vmlogrdr_class);
+	vmlogrdr_class = NULL;
+	driver_unregister(&vmlogrdr_driver);
+	iucv_unregister(&vmlogrdr_iucv_handler, 1);
+}
+
+
+static int vmlogrdr_register_device(struct vmlogrdr_priv_t *priv)
+{
+	struct device *dev;
+	int ret;
+
+	dev = kzalloc(sizeof(struct device), GFP_KERNEL);
+	if (!dev)
+		return -ENOMEM;
+	dev_set_name(dev, "%s", priv->internal_name);
+	dev->bus = &iucv_bus;
+	dev->parent = iucv_root;
+	dev->driver = &vmlogrdr_driver;
+	dev->groups = vmlogrdr_attr_groups;
+	dev_set_drvdata(dev, priv);
+	/*
+	 * The release function could be called after the
+	 * module has been unloaded. Its _only_ task is to
+	 * free the struct. Therefore, we specify kfree()
+	 * directly here. (Probably a little bit obfuscating
+	 * but legitimate ...).
+	 */
+	dev->release = (void (*)(struct device *))kfree;
+	ret = device_register(dev);
+	if (ret) {
+		put_device(dev);
+		return ret;
+	}
+
+	priv->class_device = device_create(vmlogrdr_class, dev,
+					   MKDEV(vmlogrdr_major,
+						 priv->minor_num),
+					   priv, "%s", dev_name(dev));
+	if (IS_ERR(priv->class_device)) {
+		ret = PTR_ERR(priv->class_device);
+		priv->class_device = NULL;
+		device_unregister(dev);
+		return ret;
+	}
+	priv->device = dev;
+	return 0;
+}
+
+
+static int vmlogrdr_unregister_device(struct vmlogrdr_priv_t *priv)
+{
+	device_destroy(vmlogrdr_class, MKDEV(vmlogrdr_major, priv->minor_num));
+	if (priv->device != NULL) {
+		device_unregister(priv->device);
+		priv->device = NULL;
+	}
+	return 0;
+}
+
+
+static int vmlogrdr_register_cdev(dev_t dev)
+{
+	int rc;
+
+	vmlogrdr_cdev = cdev_alloc();
+	if (!vmlogrdr_cdev)
+		return -ENOMEM;
+	vmlogrdr_cdev->owner = THIS_MODULE;
+	vmlogrdr_cdev->ops = &vmlogrdr_fops;
+	rc = cdev_add(vmlogrdr_cdev, dev, MAXMINOR);
+	if (!rc)
+		return 0;
+
+	/* cleanup: cdev is not fully registered, no cdev_del here! */
+	kobject_put(&vmlogrdr_cdev->kobj);
+	vmlogrdr_cdev = NULL;
+	return rc;
+}
+
+
+static void vmlogrdr_cleanup(void)
+{
+	int i;
+
+	if (vmlogrdr_cdev) {
+		cdev_del(vmlogrdr_cdev);
+		vmlogrdr_cdev = NULL;
+	}
+	for (i = 0; i < MAXMINOR; ++i) {
+		vmlogrdr_unregister_device(&sys_ser[i]);
+		free_page((unsigned long)sys_ser[i].buffer);
+	}
+	vmlogrdr_unregister_driver();
+	if (vmlogrdr_major) {
+		unregister_chrdev_region(MKDEV(vmlogrdr_major, 0), MAXMINOR);
+		vmlogrdr_major = 0;
+	}
+}
+
+
+static int __init vmlogrdr_init(void)
+{
+	int rc;
+	int i;
+	dev_t dev;
+
+	if (!MACHINE_IS_VM) {
+		pr_err("not running under VM, driver not loaded.\n");
+		return -ENODEV;
+	}
+
+	recording_class_AB = vmlogrdr_get_recording_class_AB();
+
+	rc = alloc_chrdev_region(&dev, 0, MAXMINOR, "vmlogrdr");
+	if (rc)
+		return rc;
+	vmlogrdr_major = MAJOR(dev);
+
+	rc = vmlogrdr_register_driver();
+	if (rc)
+		goto cleanup;
+
+	for (i = 0; i < MAXMINOR; ++i) {
+		sys_ser[i].buffer = (char *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
+		if (!sys_ser[i].buffer) {
+			rc = -ENOMEM;
+			break;
+		}
+		sys_ser[i].current_position = sys_ser[i].buffer;
+		rc = vmlogrdr_register_device(&sys_ser[i]);
+		if (rc)
+			break;
+	}
+	if (rc)
+		goto cleanup;
+
+	rc = vmlogrdr_register_cdev(dev);
+	if (rc)
+		goto cleanup;
+	return 0;
+
+cleanup:
+	vmlogrdr_cleanup();
+	return rc;
+}
+
+
+static void __exit vmlogrdr_exit(void)
+{
+	vmlogrdr_cleanup();
+}
+
+
+module_init(vmlogrdr_init);
+module_exit(vmlogrdr_exit);
diff --git a/drivers/s390/char/vmur.c b/drivers/s390/char/vmur.c
new file mode 100644
index 0000000..cbde65a
--- /dev/null
+++ b/drivers/s390/char/vmur.c
@@ -0,0 +1,1058 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Linux driver for System z and s390 unit record devices
+ * (z/VM virtual punch, reader, printer)
+ *
+ * Copyright IBM Corp. 2001, 2009
+ * Authors: Malcolm Beattie <beattiem@uk.ibm.com>
+ *	    Michael Holzheu <holzheu@de.ibm.com>
+ *	    Frank Munzert <munzert@de.ibm.com>
+ */
+
+#define KMSG_COMPONENT "vmur"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <linux/cdev.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+
+#include <linux/uaccess.h>
+#include <asm/cio.h>
+#include <asm/ccwdev.h>
+#include <asm/debug.h>
+#include <asm/diag.h>
+
+#include "vmur.h"
+
+/*
+ * Driver overview
+ *
+ * Unit record device support is implemented as a character device driver.
+ * We can fit at least 16 bits into a device minor number and use the
+ * simple method of mapping a character device number with minor abcd
+ * to the unit record device with devno abcd.
+ * I/O to virtual unit record devices is handled as follows:
+ * Reads: Diagnose code 0x14 (input spool file manipulation)
+ * is used to read spool data page-wise.
+ * Writes: The CCW used is WRITE_CCW_CMD (0x01). The device's record length
+ * is available by reading the sysfs attr reclen. Each write() to the device
+ * must specify an integral multiple (at most 511 records) of reclen.
+ */
+
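+/*
+ * Illustrative user-space usage (a hedged sketch; the node name depends
+ * on the device class and bus id, see ur_set_online() below):
+ *
+ *	char rec[80];
+ *	int fd = open("/dev/vmpun-0.0.000d", O_WRONLY);
+ *
+ *	memset(rec, ' ', sizeof(rec));
+ *	memcpy(rec, "HELLO", 5);
+ *	write(fd, rec, sizeof(rec));	count must be a multiple of reclen
+ *
+ * Reads from a vmrdr-* node return spool data page-wise; seeking is only
+ * allowed on the reader and only in multiples of PAGE_SIZE.
+ */
+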
+static char ur_banner[] = "z/VM virtual unit record device driver";
+
+MODULE_AUTHOR("IBM Corporation");
+MODULE_DESCRIPTION("s390 z/VM virtual unit record device driver");
+MODULE_LICENSE("GPL");
+
+static dev_t ur_first_dev_maj_min;
+static struct class *vmur_class;
+static struct debug_info *vmur_dbf;
+
+/* We put the device's record length (for writes) in the driver_info field */
+static struct ccw_device_id ur_ids[] = {
+	{ CCWDEV_CU_DI(READER_PUNCH_DEVTYPE, 80) },
+	{ CCWDEV_CU_DI(PRINTER_DEVTYPE, 132) },
+	{ /* end of list */ }
+};
+
+MODULE_DEVICE_TABLE(ccw, ur_ids);
+
+static int ur_probe(struct ccw_device *cdev);
+static void ur_remove(struct ccw_device *cdev);
+static int ur_set_online(struct ccw_device *cdev);
+static int ur_set_offline(struct ccw_device *cdev);
+static int ur_pm_suspend(struct ccw_device *cdev);
+
+static struct ccw_driver ur_driver = {
+	.driver = {
+		.name	= "vmur",
+		.owner	= THIS_MODULE,
+	},
+	.ids		= ur_ids,
+	.probe		= ur_probe,
+	.remove		= ur_remove,
+	.set_online	= ur_set_online,
+	.set_offline	= ur_set_offline,
+	.freeze		= ur_pm_suspend,
+	.int_class	= IRQIO_VMR,
+};
+
+static DEFINE_MUTEX(vmur_mutex);
+
+/*
+ * Allocation, freeing, getting and putting of urdev structures
+ *
+ * Each ur device (urd) contains a reference to its corresponding ccw device
+ * (cdev) using the urd->cdev pointer. Each ccw device has a reference to the
+ * ur device via the dev_get_drvdata(&cdev->dev) pointer.
+ *
+ * urd references:
+ * - ur_probe gets a urd reference, ur_remove drops the reference
+ *   dev_get_drvdata(&cdev->dev)
+ * - ur_open gets a urd reference, ur_release drops the reference
+ *   (urf->urd)
+ *
+ * cdev references:
+ * - urdev_alloc gets a cdev reference (urd->cdev)
+ * - urdev_free drops the cdev reference (urd->cdev)
+ *
+ * Setting and clearing of dev_get_drvdata(&cdev->dev) is protected by the ccwdev lock
+ */
+static struct urdev *urdev_alloc(struct ccw_device *cdev)
+{
+	struct urdev *urd;
+
+	urd = kzalloc(sizeof(struct urdev), GFP_KERNEL);
+	if (!urd)
+		return NULL;
+	urd->reclen = cdev->id.driver_info;
+	ccw_device_get_id(cdev, &urd->dev_id);
+	mutex_init(&urd->io_mutex);
+	init_waitqueue_head(&urd->wait);
+	spin_lock_init(&urd->open_lock);
+	refcount_set(&urd->ref_count,  1);
+	urd->cdev = cdev;
+	get_device(&cdev->dev);
+	return urd;
+}
+
+static void urdev_free(struct urdev *urd)
+{
+	TRACE("urdev_free: %p\n", urd);
+	if (urd->cdev)
+		put_device(&urd->cdev->dev);
+	kfree(urd);
+}
+
+static void urdev_get(struct urdev *urd)
+{
+	refcount_inc(&urd->ref_count);
+}
+
+static struct urdev *urdev_get_from_cdev(struct ccw_device *cdev)
+{
+	struct urdev *urd;
+	unsigned long flags;
+
+	spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
+	urd = dev_get_drvdata(&cdev->dev);
+	if (urd)
+		urdev_get(urd);
+	spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
+	return urd;
+}
+
+static struct urdev *urdev_get_from_devno(u16 devno)
+{
+	char bus_id[16];
+	struct ccw_device *cdev;
+	struct urdev *urd;
+
+	sprintf(bus_id, "0.0.%04x", devno);
+	cdev = get_ccwdev_by_busid(&ur_driver, bus_id);
+	if (!cdev)
+		return NULL;
+	urd = urdev_get_from_cdev(cdev);
+	put_device(&cdev->dev);
+	return urd;
+}
+
+static void urdev_put(struct urdev *urd)
+{
+	if (refcount_dec_and_test(&urd->ref_count))
+		urdev_free(urd);
+}
+
+/*
+ * State and contents of ur devices can be changed by class D users issuing
+ * CP commands such as PURGE or TRANSFER, while the Linux guest is suspended.
+ * Also the Linux guest might be logged off, which causes all active spool
+ * files to be closed.
+ * So we cannot guarantee that spool files are still the same when the Linux
+ * guest is resumed. In order to avoid unpredictable results at resume time
+ * we simply refuse to suspend if a ur device node is open.
+ */
+static int ur_pm_suspend(struct ccw_device *cdev)
+{
+	struct urdev *urd = dev_get_drvdata(&cdev->dev);
+
+	TRACE("ur_pm_suspend: cdev=%p\n", cdev);
+	if (urd->open_flag) {
+		pr_err("Unit record device %s is busy, %s refusing to "
+		       "suspend.\n", dev_name(&cdev->dev), ur_banner);
+		return -EBUSY;
+	}
+	return 0;
+}
+
+/*
+ * Low-level functions to do I/O to a ur device.
+ *     alloc_chan_prog
+ *     free_chan_prog
+ *     do_ur_io
+ *     ur_int_handler
+ *
+ * alloc_chan_prog allocates and builds the channel program
+ * free_chan_prog frees memory of the channel program
+ *
+ * do_ur_io issues the channel program to the device and blocks waiting
+ * on a completion event it publishes at urd->io_done. The function
+ * serialises itself on the device's mutex so that only one I/O
+ * is issued at a time (and that I/O is synchronous).
+ *
+ * ur_int_handler catches the "I/O done" interrupt, writes the
+ * subchannel status word into the scsw member of the urdev structure
+ * and complete()s the io_done to wake the waiting do_ur_io.
+ *
+ * The caller of do_ur_io is responsible for kfree()ing the channel program
+ * address pointer that alloc_chan_prog returned.
+ */
+
+static void free_chan_prog(struct ccw1 *cpa)
+{
+	struct ccw1 *ptr = cpa;
+
+	while (ptr->cda) {
+		kfree((void *)(addr_t) ptr->cda);
+		ptr++;
+	}
+	kfree(cpa);
+}
+
+/*
+ * alloc_chan_prog
+ * The channel program we use is write commands chained together
+ * with a final NOP CCW command-chained on (which ensures that CE and DE
+ * are presented together in a single interrupt instead of as separate
+ * interrupts unless an incorrect length indication kicks in first). The
+ * data length in each CCW is reclen.
+ */
+static struct ccw1 *alloc_chan_prog(const char __user *ubuf, int rec_count,
+				    int reclen)
+{
+	struct ccw1 *cpa;
+	void *kbuf;
+	int i;
+
+	TRACE("alloc_chan_prog(%p, %i, %i)\n", ubuf, rec_count, reclen);
+
+	/*
+	 * We chain a NOP onto the writes to force CE+DE together.
+	 * That means we allocate room for CCWs to cover count/reclen
+	 * records plus a NOP.
+	 */
+	cpa = kcalloc(rec_count + 1, sizeof(struct ccw1),
+		      GFP_KERNEL | GFP_DMA);
+	if (!cpa)
+		return ERR_PTR(-ENOMEM);
+
+	for (i = 0; i < rec_count; i++) {
+		cpa[i].cmd_code = WRITE_CCW_CMD;
+		cpa[i].flags = CCW_FLAG_CC | CCW_FLAG_SLI;
+		cpa[i].count = reclen;
+		kbuf = kmalloc(reclen, GFP_KERNEL | GFP_DMA);
+		if (!kbuf) {
+			free_chan_prog(cpa);
+			return ERR_PTR(-ENOMEM);
+		}
+		cpa[i].cda = (u32)(addr_t) kbuf;
+		if (copy_from_user(kbuf, ubuf, reclen)) {
+			free_chan_prog(cpa);
+			return ERR_PTR(-EFAULT);
+		}
+		ubuf += reclen;
+	}
+	/* The following NOP CCW forces CE+DE to be presented together */
+	cpa[i].cmd_code = CCW_CMD_NOOP;
+	return cpa;
+}
+
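+/*
+ * For rec_count == 2 the chain built above looks like (illustrative):
+ *
+ *   WRITE(reclen, CC|SLI) -> WRITE(reclen, CC|SLI) -> NOP
+ *
+ * so channel end and device end are presented in a single interrupt.
+ */
+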
+static int do_ur_io(struct urdev *urd, struct ccw1 *cpa)
+{
+	int rc;
+	struct ccw_device *cdev = urd->cdev;
+	DECLARE_COMPLETION_ONSTACK(event);
+
+	TRACE("do_ur_io: cpa=%p\n", cpa);
+
+	rc = mutex_lock_interruptible(&urd->io_mutex);
+	if (rc)
+		return rc;
+
+	urd->io_done = &event;
+
+	spin_lock_irq(get_ccwdev_lock(cdev));
+	rc = ccw_device_start(cdev, cpa, 1, 0, 0);
+	spin_unlock_irq(get_ccwdev_lock(cdev));
+
+	TRACE("do_ur_io: ccw_device_start returned %d\n", rc);
+	if (rc)
+		goto out;
+
+	wait_for_completion(&event);
+	TRACE("do_ur_io: I/O complete\n");
+	rc = 0;
+
+out:
+	mutex_unlock(&urd->io_mutex);
+	return rc;
+}
+
+/*
+ * ur interrupt handler, called from the ccw_device layer
+ */
+static void ur_int_handler(struct ccw_device *cdev, unsigned long intparm,
+			   struct irb *irb)
+{
+	struct urdev *urd;
+
+	if (!IS_ERR(irb)) {
+		TRACE("ur_int_handler: intparm=0x%lx cstat=%02x dstat=%02x res=%u\n",
+		      intparm, irb->scsw.cmd.cstat, irb->scsw.cmd.dstat,
+		      irb->scsw.cmd.count);
+	}
+	if (!intparm) {
+		TRACE("ur_int_handler: unsolicited interrupt\n");
+		return;
+	}
+	urd = dev_get_drvdata(&cdev->dev);
+	BUG_ON(!urd);
+	/* On special conditions irb is an error pointer */
+	if (IS_ERR(irb))
+		urd->io_request_rc = PTR_ERR(irb);
+	else if (irb->scsw.cmd.dstat == (DEV_STAT_CHN_END | DEV_STAT_DEV_END))
+		urd->io_request_rc = 0;
+	else
+		urd->io_request_rc = -EIO;
+
+	complete(urd->io_done);
+}
+
+/*
+ * reclen sysfs attribute - The record length to be used for write CCWs
+ */
+static ssize_t ur_attr_reclen_show(struct device *dev,
+				   struct device_attribute *attr, char *buf)
+{
+	struct urdev *urd;
+	int rc;
+
+	urd = urdev_get_from_cdev(to_ccwdev(dev));
+	if (!urd)
+		return -ENODEV;
+	rc = sprintf(buf, "%zu\n", urd->reclen);
+	urdev_put(urd);
+	return rc;
+}
+
+static DEVICE_ATTR(reclen, 0444, ur_attr_reclen_show, NULL);
+
+static int ur_create_attributes(struct device *dev)
+{
+	return device_create_file(dev, &dev_attr_reclen);
+}
+
+static void ur_remove_attributes(struct device *dev)
+{
+	device_remove_file(dev, &dev_attr_reclen);
+}
+
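+/*
+ * Illustrative sysfs usage (the bus path is assumed from the ccw bus
+ * layout):
+ *
+ *   cat /sys/bus/ccw/devices/0.0.000d/reclen
+ */
+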
+/*
+ * diagnose code 0x210 - retrieve device information
+ * cc=0  normal completion, we have a real device
+ * cc=1  CP paging error
+ * cc=2  The virtual device exists, but is not associated with a real device
+ * cc=3  Invalid device address, or the virtual device does not exist
+ */
+static int get_urd_class(struct urdev *urd)
+{
+	static struct diag210 ur_diag210;
+	int cc;
+
+	ur_diag210.vrdcdvno = urd->dev_id.devno;
+	ur_diag210.vrdclen = sizeof(struct diag210);
+
+	cc = diag210(&ur_diag210);
+	switch (cc) {
+	case 0:
+		return -EOPNOTSUPP;	/* real device, not supported */
+	case 2:
+		return ur_diag210.vrdcvcla; /* virtual device class */
+	case 3:
+		return -ENODEV;
+	default:
+		return -EIO;
+	}
+}
+
+/*
+ * Allocation and freeing of urfile structures
+ */
+static struct urfile *urfile_alloc(struct urdev *urd)
+{
+	struct urfile *urf;
+
+	urf = kzalloc(sizeof(struct urfile), GFP_KERNEL);
+	if (!urf)
+		return NULL;
+	urf->urd = urd;
+
+	TRACE("urfile_alloc: urd=%p urf=%p rl=%zu\n", urd, urf,
+	      urf->dev_reclen);
+
+	return urf;
+}
+
+static void urfile_free(struct urfile *urf)
+{
+	TRACE("urfile_free: urf=%p urd=%p\n", urf, urf->urd);
+	kfree(urf);
+}
+
+/*
+ * The fops implementation of the character device driver
+ */
+static ssize_t do_write(struct urdev *urd, const char __user *udata,
+			size_t count, size_t reclen, loff_t *ppos)
+{
+	struct ccw1 *cpa;
+	int rc;
+
+	cpa = alloc_chan_prog(udata, count / reclen, reclen);
+	if (IS_ERR(cpa))
+		return PTR_ERR(cpa);
+
+	rc = do_ur_io(urd, cpa);
+	if (rc)
+		goto fail_kfree_cpa;
+
+	if (urd->io_request_rc) {
+		rc = urd->io_request_rc;
+		goto fail_kfree_cpa;
+	}
+	*ppos += count;
+	rc = count;
+
+fail_kfree_cpa:
+	free_chan_prog(cpa);
+	return rc;
+}
+
+static ssize_t ur_write(struct file *file, const char __user *udata,
+			size_t count, loff_t *ppos)
+{
+	struct urfile *urf = file->private_data;
+
+	TRACE("ur_write: count=%zu\n", count);
+
+	if (count == 0)
+		return 0;
+
+	if (count % urf->dev_reclen)
+		return -EINVAL;	/* count must be a multiple of reclen */
+
+	if (count > urf->dev_reclen * MAX_RECS_PER_IO)
+		count = urf->dev_reclen * MAX_RECS_PER_IO;
+
+	return do_write(urf->urd, udata, count, urf->dev_reclen, ppos);
+}
+
+/*
+ * diagnose code 0x14 subcode 0x0028 - position spool file to designated
+ *				       record
+ * cc=0  normal completion
+ * cc=2  no file active on the virtual reader or device not ready
+ * cc=3  record specified is beyond EOF
+ */
+static int diag_position_to_record(int devno, int record)
+{
+	int cc;
+
+	cc = diag14(record, devno, 0x28);
+	switch (cc) {
+	case 0:
+		return 0;
+	case 2:
+		return -ENOMEDIUM;
+	case 3:
+		return -ENODATA; /* position beyond end of file */
+	default:
+		return -EIO;
+	}
+}
+
+/*
+ * diagnose code 0x14 subcode 0x0000 - read next spool file buffer
+ * cc=0  normal completion
+ * cc=1  EOF reached
+ * cc=2  no file active on the virtual reader, and no file eligible
+ * cc=3  file already active on the virtual reader or specified virtual
+ *	 reader does not exist or is not a reader
+ */
+static int diag_read_file(int devno, char *buf)
+{
+	int cc;
+
+	cc = diag14((unsigned long) buf, devno, 0x00);
+	switch (cc) {
+	case 0:
+		return 0;
+	case 1:
+		return -ENODATA;
+	case 2:
+		return -ENOMEDIUM;
+	default:
+		return -EIO;
+	}
+}
+
+static ssize_t diag14_read(struct file *file, char __user *ubuf, size_t count,
+			   loff_t *offs)
+{
+	size_t len, copied, res;
+	char *buf;
+	int rc;
+	u16 reclen;
+	struct urdev *urd;
+
+	urd = ((struct urfile *) file->private_data)->urd;
+	reclen = ((struct urfile *) file->private_data)->file_reclen;
+
+	rc = diag_position_to_record(urd->dev_id.devno, *offs / PAGE_SIZE + 1);
+	if (rc == -ENODATA)
+		return 0;
+	if (rc)
+		return rc;
+
+	len = min((size_t) PAGE_SIZE, count);
+	buf = (char *) __get_free_page(GFP_KERNEL | GFP_DMA);
+	if (!buf)
+		return -ENOMEM;
+
+	copied = 0;
+	res = (size_t) (*offs % PAGE_SIZE);
+	do {
+		rc = diag_read_file(urd->dev_id.devno, buf);
+		if (rc == -ENODATA)
+			break;
+		if (rc)
+			goto fail;
+		if (reclen && (copied == 0) && (*offs < PAGE_SIZE))
+			*((u16 *) &buf[FILE_RECLEN_OFFSET]) = reclen;
+		len = min(count - copied, PAGE_SIZE - res);
+		if (copy_to_user(ubuf + copied, buf + res, len)) {
+			rc = -EFAULT;
+			goto fail;
+		}
+		res = 0;
+		copied += len;
+	} while (copied != count);
+
+	*offs += copied;
+	rc = copied;
+fail:
+	free_page((unsigned long) buf);
+	return rc;
+}
+
+static ssize_t ur_read(struct file *file, char __user *ubuf, size_t count,
+		       loff_t *offs)
+{
+	struct urdev *urd;
+	int rc;
+
+	TRACE("ur_read: count=%zu ppos=%li\n", count, (unsigned long) *offs);
+
+	if (count == 0)
+		return 0;
+
+	urd = ((struct urfile *) file->private_data)->urd;
+	rc = mutex_lock_interruptible(&urd->io_mutex);
+	if (rc)
+		return rc;
+	rc = diag14_read(file, ubuf, count, offs);
+	mutex_unlock(&urd->io_mutex);
+	return rc;
+}
+
+/*
+ * diagnose code 0x14 subcode 0x0fff - retrieve next file descriptor
+ * cc=0  normal completion
+ * cc=1  no files on reader queue or no subsequent file
+ * cc=2  spid specified is invalid
+ */
+static int diag_read_next_file_info(struct file_control_block *buf, int spid)
+{
+	int cc;
+
+	cc = diag14((unsigned long) buf, spid, 0xfff);
+	switch (cc) {
+	case 0:
+		return 0;
+	default:
+		return -ENODATA;
+	}
+}
+
+static int verify_uri_device(struct urdev *urd)
+{
+	struct file_control_block *fcb;
+	char *buf;
+	int rc;
+
+	fcb = kmalloc(sizeof(*fcb), GFP_KERNEL | GFP_DMA);
+	if (!fcb)
+		return -ENOMEM;
+
+	/* check for empty reader device (beginning of chain) */
+	rc = diag_read_next_file_info(fcb, 0);
+	if (rc)
+		goto fail_free_fcb;
+
+	/* if file is in hold status, we do not read it */
+	if (fcb->file_stat & (FLG_SYSTEM_HOLD | FLG_USER_HOLD)) {
+		rc = -EPERM;
+		goto fail_free_fcb;
+	}
+
+	/* open file on virtual reader	*/
+	buf = (char *) __get_free_page(GFP_KERNEL | GFP_DMA);
+	if (!buf) {
+		rc = -ENOMEM;
+		goto fail_free_fcb;
+	}
+	rc = diag_read_file(urd->dev_id.devno, buf);
+	if ((rc != 0) && (rc != -ENODATA)) /* EOF does not hurt */
+		goto fail_free_buf;
+
+	/* check if the file on top of the queue is open now */
+	rc = diag_read_next_file_info(fcb, 0);
+	if (rc)
+		goto fail_free_buf;
+	if (!(fcb->file_stat & FLG_IN_USE)) {
+		rc = -EMFILE;
+		goto fail_free_buf;
+	}
+	rc = 0;
+
+fail_free_buf:
+	free_page((unsigned long) buf);
+fail_free_fcb:
+	kfree(fcb);
+	return rc;
+}
+
+static int verify_device(struct urdev *urd)
+{
+	switch (urd->class) {
+	case DEV_CLASS_UR_O:
+		return 0; /* no check needed here */
+	case DEV_CLASS_UR_I:
+		return verify_uri_device(urd);
+	default:
+		return -EOPNOTSUPP;
+	}
+}
+
+static int get_uri_file_reclen(struct urdev *urd)
+{
+	struct file_control_block *fcb;
+	int rc;
+
+	fcb = kmalloc(sizeof(*fcb), GFP_KERNEL | GFP_DMA);
+	if (!fcb)
+		return -ENOMEM;
+	rc = diag_read_next_file_info(fcb, 0);
+	if (rc)
+		goto fail_free;
+	if (fcb->file_stat & FLG_CP_DUMP)
+		rc = 0;
+	else
+		rc = fcb->rec_len;
+
+fail_free:
+	kfree(fcb);
+	return rc;
+}
+
+static int get_file_reclen(struct urdev *urd)
+{
+	switch (urd->class) {
+	case DEV_CLASS_UR_O:
+		return 0;
+	case DEV_CLASS_UR_I:
+		return get_uri_file_reclen(urd);
+	default:
+		return -EOPNOTSUPP;
+	}
+}
+
+static int ur_open(struct inode *inode, struct file *file)
+{
+	u16 devno;
+	struct urdev *urd;
+	struct urfile *urf;
+	unsigned short accmode;
+	int rc;
+
+	accmode = file->f_flags & O_ACCMODE;
+
+	if (accmode == O_RDWR)
+		return -EACCES;
+	/*
+	 * We treat the minor number as the devno of the ur device
+	 * to find in the driver tree.
+	 */
+	devno = MINOR(file_inode(file)->i_rdev);
+
+	urd = urdev_get_from_devno(devno);
+	if (!urd) {
+		rc = -ENXIO;
+		goto out;
+	}
+
+	spin_lock(&urd->open_lock);
+	while (urd->open_flag) {
+		spin_unlock(&urd->open_lock);
+		if (file->f_flags & O_NONBLOCK) {
+			rc = -EBUSY;
+			goto fail_put;
+		}
+		if (wait_event_interruptible(urd->wait, urd->open_flag == 0)) {
+			rc = -ERESTARTSYS;
+			goto fail_put;
+		}
+		spin_lock(&urd->open_lock);
+	}
+	urd->open_flag++;
+	spin_unlock(&urd->open_lock);
+
+	TRACE("ur_open\n");
+
+	if (((accmode == O_RDONLY) && (urd->class != DEV_CLASS_UR_I)) ||
+	    ((accmode == O_WRONLY) && (urd->class != DEV_CLASS_UR_O))) {
+		TRACE("ur_open: unsupported dev class (%d)\n", urd->class);
+		rc = -EACCES;
+		goto fail_unlock;
+	}
+
+	rc = verify_device(urd);
+	if (rc)
+		goto fail_unlock;
+
+	urf = urfile_alloc(urd);
+	if (!urf) {
+		rc = -ENOMEM;
+		goto fail_unlock;
+	}
+
+	urf->dev_reclen = urd->reclen;
+	rc = get_file_reclen(urd);
+	if (rc < 0)
+		goto fail_urfile_free;
+	urf->file_reclen = rc;
+	file->private_data = urf;
+	return 0;
+
+fail_urfile_free:
+	urfile_free(urf);
+fail_unlock:
+	spin_lock(&urd->open_lock);
+	urd->open_flag--;
+	spin_unlock(&urd->open_lock);
+fail_put:
+	urdev_put(urd);
+out:
+	return rc;
+}
+
+static int ur_release(struct inode *inode, struct file *file)
+{
+	struct urfile *urf = file->private_data;
+
+	TRACE("ur_release\n");
+	spin_lock(&urf->urd->open_lock);
+	urf->urd->open_flag--;
+	spin_unlock(&urf->urd->open_lock);
+	wake_up_interruptible(&urf->urd->wait);
+	urdev_put(urf->urd);
+	urfile_free(urf);
+	return 0;
+}
+
+static loff_t ur_llseek(struct file *file, loff_t offset, int whence)
+{
+	if ((file->f_flags & O_ACCMODE) != O_RDONLY)
+		return -ESPIPE; /* seek allowed only for reader */
+	if (offset % PAGE_SIZE)
+		return -ESPIPE; /* only multiples of 4K allowed */
+	return no_seek_end_llseek(file, offset, whence);
+}
+
+static const struct file_operations ur_fops = {
+	.owner	 = THIS_MODULE,
+	.open	 = ur_open,
+	.release = ur_release,
+	.read	 = ur_read,
+	.write	 = ur_write,
+	.llseek  = ur_llseek,
+};
+
+/*
+ * ccw_device infrastructure:
+ *     ur_probe creates the struct urdev (with refcount = 1), the device
+ *     attributes, sets up the interrupt handler and validates the virtual
+ *     unit record device.
+ *     ur_remove removes the device attributes and drops the reference to
+ *     struct urdev.
+ *
+ *     ur_probe, ur_remove, ur_set_online and ur_set_offline are serialized
+ *     by the vmur_mutex lock.
+ *
+ *     urd->char_device is used as an indication that the online function
+ *     has been completed successfully.
+ */
+static int ur_probe(struct ccw_device *cdev)
+{
+	struct urdev *urd;
+	int rc;
+
+	TRACE("ur_probe: cdev=%p\n", cdev);
+
+	mutex_lock(&vmur_mutex);
+	urd = urdev_alloc(cdev);
+	if (!urd) {
+		rc = -ENOMEM;
+		goto fail_unlock;
+	}
+
+	rc = ur_create_attributes(&cdev->dev);
+	if (rc) {
+		rc = -ENOMEM;
+		goto fail_urdev_put;
+	}
+	cdev->handler = ur_int_handler;
+
+	/* validate virtual unit record device */
+	urd->class = get_urd_class(urd);
+	if (urd->class < 0) {
+		rc = urd->class;
+		goto fail_remove_attr;
+	}
+	if ((urd->class != DEV_CLASS_UR_I) && (urd->class != DEV_CLASS_UR_O)) {
+		rc = -EOPNOTSUPP;
+		goto fail_remove_attr;
+	}
+	spin_lock_irq(get_ccwdev_lock(cdev));
+	dev_set_drvdata(&cdev->dev, urd);
+	spin_unlock_irq(get_ccwdev_lock(cdev));
+
+	mutex_unlock(&vmur_mutex);
+	return 0;
+
+fail_remove_attr:
+	ur_remove_attributes(&cdev->dev);
+fail_urdev_put:
+	urdev_put(urd);
+fail_unlock:
+	mutex_unlock(&vmur_mutex);
+	return rc;
+}
+
+static int ur_set_online(struct ccw_device *cdev)
+{
+	struct urdev *urd;
+	int minor, major, rc;
+	char node_id[16];
+
+	TRACE("ur_set_online: cdev=%p\n", cdev);
+
+	mutex_lock(&vmur_mutex);
+	urd = urdev_get_from_cdev(cdev);
+	if (!urd) {
+		/* ur_remove already deleted our urd */
+		rc = -ENODEV;
+		goto fail_unlock;
+	}
+
+	if (urd->char_device) {
+		/* Another ur_set_online was faster */
+		rc = -EBUSY;
+		goto fail_urdev_put;
+	}
+
+	minor = urd->dev_id.devno;
+	major = MAJOR(ur_first_dev_maj_min);
+
+	urd->char_device = cdev_alloc();
+	if (!urd->char_device) {
+		rc = -ENOMEM;
+		goto fail_urdev_put;
+	}
+
+	urd->char_device->ops = &ur_fops;
+	urd->char_device->owner = ur_fops.owner;
+
+	rc = cdev_add(urd->char_device, MKDEV(major, minor), 1);
+	if (rc)
+		goto fail_free_cdev;
+	if (urd->cdev->id.cu_type == READER_PUNCH_DEVTYPE) {
+		if (urd->class == DEV_CLASS_UR_I)
+			sprintf(node_id, "vmrdr-%s", dev_name(&cdev->dev));
+		if (urd->class == DEV_CLASS_UR_O)
+			sprintf(node_id, "vmpun-%s", dev_name(&cdev->dev));
+	} else if (urd->cdev->id.cu_type == PRINTER_DEVTYPE) {
+		sprintf(node_id, "vmprt-%s", dev_name(&cdev->dev));
+	} else {
+		rc = -EOPNOTSUPP;
+		goto fail_free_cdev;
+	}
+
+	urd->device = device_create(vmur_class, &cdev->dev,
+				    urd->char_device->dev, NULL, "%s", node_id);
+	if (IS_ERR(urd->device)) {
+		rc = PTR_ERR(urd->device);
+		TRACE("ur_set_online: device_create rc=%d\n", rc);
+		goto fail_free_cdev;
+	}
+	urdev_put(urd);
+	mutex_unlock(&vmur_mutex);
+	return 0;
+
+fail_free_cdev:
+	cdev_del(urd->char_device);
+	urd->char_device = NULL;
+fail_urdev_put:
+	urdev_put(urd);
+fail_unlock:
+	mutex_unlock(&vmur_mutex);
+	return rc;
+}
+
+static int ur_set_offline_force(struct ccw_device *cdev, int force)
+{
+	struct urdev *urd;
+	int rc;
+
+	TRACE("ur_set_offline: cdev=%p\n", cdev);
+	urd = urdev_get_from_cdev(cdev);
+	if (!urd)
+		/* ur_remove already deleted our urd */
+		return -ENODEV;
+	if (!urd->char_device) {
+		/* Another ur_set_offline was faster */
+		rc = -EBUSY;
+		goto fail_urdev_put;
+	}
+	if (!force && (refcount_read(&urd->ref_count) > 2)) {
+		/* There is still a user of urd (e.g. ur_open) */
+		TRACE("ur_set_offline: BUSY\n");
+		rc = -EBUSY;
+		goto fail_urdev_put;
+	}
+	device_destroy(vmur_class, urd->char_device->dev);
+	cdev_del(urd->char_device);
+	urd->char_device = NULL;
+	rc = 0;
+
+fail_urdev_put:
+	urdev_put(urd);
+	return rc;
+}
+
+static int ur_set_offline(struct ccw_device *cdev)
+{
+	int rc;
+
+	mutex_lock(&vmur_mutex);
+	rc = ur_set_offline_force(cdev, 0);
+	mutex_unlock(&vmur_mutex);
+	return rc;
+}
+
+static void ur_remove(struct ccw_device *cdev)
+{
+	unsigned long flags;
+
+	TRACE("ur_remove\n");
+
+	mutex_lock(&vmur_mutex);
+
+	if (cdev->online)
+		ur_set_offline_force(cdev, 1);
+	ur_remove_attributes(&cdev->dev);
+
+	spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
+	urdev_put(dev_get_drvdata(&cdev->dev));
+	dev_set_drvdata(&cdev->dev, NULL);
+	spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
+
+	mutex_unlock(&vmur_mutex);
+}
+
+/*
+ * Module initialisation and cleanup
+ */
+static int __init ur_init(void)
+{
+	int rc;
+	dev_t dev;
+
+	if (!MACHINE_IS_VM) {
+		pr_err("The %s cannot be loaded without z/VM\n",
+		       ur_banner);
+		return -ENODEV;
+	}
+
+	vmur_dbf = debug_register("vmur", 4, 1, 4 * sizeof(long));
+	if (!vmur_dbf)
+		return -ENOMEM;
+	rc = debug_register_view(vmur_dbf, &debug_sprintf_view);
+	if (rc)
+		goto fail_free_dbf;
+
+	debug_set_level(vmur_dbf, 6);
+
+	vmur_class = class_create(THIS_MODULE, "vmur");
+	if (IS_ERR(vmur_class)) {
+		rc = PTR_ERR(vmur_class);
+		goto fail_free_dbf;
+	}
+
+	rc = ccw_driver_register(&ur_driver);
+	if (rc)
+		goto fail_class_destroy;
+
+	rc = alloc_chrdev_region(&dev, 0, NUM_MINORS, "vmur");
+	if (rc) {
+		pr_err("Kernel function alloc_chrdev_region failed with "
+		       "error code %d\n", rc);
+		goto fail_unregister_driver;
+	}
+	ur_first_dev_maj_min = MKDEV(MAJOR(dev), 0);
+
+	pr_info("%s loaded.\n", ur_banner);
+	return 0;
+
+fail_unregister_driver:
+	ccw_driver_unregister(&ur_driver);
+fail_class_destroy:
+	class_destroy(vmur_class);
+fail_free_dbf:
+	debug_unregister(vmur_dbf);
+	return rc;
+}
+
+static void __exit ur_exit(void)
+{
+	unregister_chrdev_region(ur_first_dev_maj_min, NUM_MINORS);
+	ccw_driver_unregister(&ur_driver);
+	class_destroy(vmur_class);
+	debug_unregister(vmur_dbf);
+	pr_info("%s unloaded.\n", ur_banner);
+}
+
+module_init(ur_init);
+module_exit(ur_exit);
diff --git a/drivers/s390/char/vmur.h b/drivers/s390/char/vmur.h
new file mode 100644
index 0000000..608b071
--- /dev/null
+++ b/drivers/s390/char/vmur.h
@@ -0,0 +1,113 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Linux driver for System z and s390 unit record devices
+ * (z/VM virtual punch, reader, printer)
+ *
+ * Copyright IBM Corp. 2001, 2007
+ * Authors: Malcolm Beattie <beattiem@uk.ibm.com>
+ *	    Michael Holzheu <holzheu@de.ibm.com>
+ *	    Frank Munzert <munzert@de.ibm.com>
+ */
+
+#ifndef _VMUR_H_
+#define _VMUR_H_
+
+#include <linux/refcount.h>
+
+#define DEV_CLASS_UR_I 0x20 /* diag210 unit record input device class */
+#define DEV_CLASS_UR_O 0x10 /* diag210 unit record output device class */
+/*
+ * we only support z/VM's default unit record devices:
+ * both in SPOOL directory control statement and in CP DEFINE statement
+ *	RDR defaults to 2540 reader
+ *	PUN defaults to 2540 punch
+ *	PRT defaults to 1403 printer
+ */
+#define READER_PUNCH_DEVTYPE	0x2540
+#define PRINTER_DEVTYPE		0x1403
+
+/* z/VM spool file control block SFBLOK */
+struct file_control_block {
+	char reserved_1[8];
+	char user_owner[8];
+	char user_orig[8];
+	__s32 data_recs;
+	__s16 rec_len;
+	__s16 file_num;
+	__u8  file_stat;
+	__u8  dev_type;
+	char  reserved_2[6];
+	char  file_name[12];
+	char  file_type[12];
+	char  create_date[8];
+	char  create_time[8];
+	char  reserved_3[6];
+	__u8  file_class;
+	__u8  sfb_lok;
+	__u64 distr_code;
+	__u32 reserved_4;
+	__u8  current_starting_copy_number;
+	__u8  sfblock_cntrl_flags;
+	__u8  reserved_5;
+	__u8  more_status_flags;
+	char  rest[200];
+} __attribute__ ((packed));
+
+#define FLG_SYSTEM_HOLD	0x04
+#define FLG_CP_DUMP	0x10
+#define FLG_USER_HOLD	0x20
+#define FLG_IN_USE	0x80
+
+/*
+ * A struct urdev is created for each ur device that is made available
+ * via the ccw_device driver model.
+ */
+struct urdev {
+	struct ccw_device *cdev;	/* Backpointer to ccw device */
+	struct mutex io_mutex;		/* Serialises device IO */
+	struct completion *io_done;	/* do_ur_io waits; irq completes */
+	struct device *device;
+	struct cdev *char_device;
+	struct ccw_dev_id dev_id;	/* device id */
+	size_t reclen;			/* Record length for *write* CCWs */
+	int class;			/* VM device class */
+	int io_request_rc;		/* return code from I/O request */
+	refcount_t ref_count;		/* reference counter */
+	wait_queue_head_t wait;		/* wait queue to serialize open */
+	int open_flag;			/* "urdev is open" flag */
+	spinlock_t open_lock;		/* serialize critical sections */
+};
+
+/*
+ * A struct urfile is allocated at open() time for each device and
+ * freed on release().
+ */
+struct urfile {
+	struct urdev *urd;
+	unsigned int flags;
+	size_t dev_reclen;
+	__u16 file_reclen;
+};
+
+/*
+ * Device major/minor definitions.
+ */
+
+#define UR_MAJOR 0	/* get dynamic major */
+/*
+ * We map minor numbers directly to device numbers (0-FFFF) for simplicity.
+ * This avoids having to allocate (and manage) slot numbers.
+ */
+#define NUM_MINORS 65536
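+
+/*
+ * Illustrative example: the virtual reader 0.0.000c gets minor 0x000c
+ * under the dynamically allocated major, and device_create() publishes
+ * it as /dev/vmrdr-0.0.000c once udev has processed the uevent.
+ */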
+
+/* Limiting each I/O to 511 records limits chan prog to 4KB (511 r/w + 1 NOP) */
+#define MAX_RECS_PER_IO		511
+#define WRITE_CCW_CMD		0x01
+
+#define TRACE(x...) debug_sprintf_event(vmur_dbf, 1, x)
+#define CCWDEV_CU_DI(cutype, di) \
+		CCW_DEVICE(cutype, 0x00), .driver_info = (di)
+
+#define FILE_RECLEN_OFFSET	4064 /* reclen offset in spool data block */
+
+#endif /* _VMUR_H_ */
diff --git a/drivers/s390/char/zcore.c b/drivers/s390/char/zcore.c
new file mode 100644
index 0000000..76d3c50
--- /dev/null
+++ b/drivers/s390/char/zcore.c
@@ -0,0 +1,366 @@
+// SPDX-License-Identifier: GPL-1.0+
+/*
+ * zcore module to export memory content and register sets for creating system
+ * dumps on SCSI disks (zfcpdump). It provides the debugfs files memmap, reipl
+ * and hsa below the "zcore" directory; see the file operations below.
+ *
+ * For more information please refer to Documentation/s390/zfcpdump.txt
+ *
+ * Copyright IBM Corp. 2003, 2008
+ * Author(s): Michael Holzheu
+ */
+
+#define KMSG_COMPONENT "zdump"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/debugfs.h>
+#include <linux/memblock.h>
+
+#include <asm/asm-offsets.h>
+#include <asm/ipl.h>
+#include <asm/sclp.h>
+#include <asm/setup.h>
+#include <linux/uaccess.h>
+#include <asm/debug.h>
+#include <asm/processor.h>
+#include <asm/irqflags.h>
+#include <asm/checksum.h>
+#include <asm/os_info.h>
+#include <asm/switch_to.h>
+#include "sclp.h"
+
+#define TRACE(x...) debug_sprintf_event(zcore_dbf, 1, x)
+
+#define CHUNK_INFO_SIZE	34 /* two 16-digit hex numbers, each followed by a blank */
+
+enum arch_id {
+	ARCH_S390	= 0,
+	ARCH_S390X	= 1,
+};
+
+struct ipib_info {
+	unsigned long	ipib;
+	u32		checksum;
+}  __attribute__((packed));
+
+static struct debug_info *zcore_dbf;
+static int hsa_available;
+static struct dentry *zcore_dir;
+static struct dentry *zcore_memmap_file;
+static struct dentry *zcore_reipl_file;
+static struct dentry *zcore_hsa_file;
+static struct ipl_parameter_block *ipl_block;
+
+static char hsa_buf[PAGE_SIZE] __aligned(PAGE_SIZE);
+
+/*
+ * Copy memory from HSA to user memory (not reentrant):
+ *
+ * @dest:  User buffer where memory should be copied to
+ * @src:   Start address within HSA where data should be copied from
+ * @count: Number of bytes to copy
+ */
+int memcpy_hsa_user(void __user *dest, unsigned long src, size_t count)
+{
+	unsigned long offset, bytes;
+
+	if (!hsa_available)
+		return -ENODATA;
+
+	while (count) {
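+		/* Copy one 4K block; HSA offset 0 corresponds to SDIAS block number 2. */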
+		if (sclp_sdias_copy(hsa_buf, src / PAGE_SIZE + 2, 1)) {
+			TRACE("sclp_sdias_copy() failed\n");
+			return -EIO;
+		}
+		offset = src % PAGE_SIZE;
+		bytes = min(PAGE_SIZE - offset, count);
+		if (copy_to_user(dest, hsa_buf + offset, bytes))
+			return -EFAULT;
+		src += bytes;
+		dest += bytes;
+		count -= bytes;
+	}
+	return 0;
+}
+
+/*
+ * Copy memory from HSA to kernel memory (not reentrant):
+ *
+ * @dest:  Kernel buffer where memory should be copied to
+ * @src:   Start address within HSA where data should be copied from
+ * @count: Number of bytes to copy
+ */
+int memcpy_hsa_kernel(void *dest, unsigned long src, size_t count)
+{
+	unsigned long offset, bytes;
+
+	if (!hsa_available)
+		return -ENODATA;
+
+	while (count) {
+		if (sclp_sdias_copy(hsa_buf, src / PAGE_SIZE + 2, 1)) {
+			TRACE("sclp_sdias_copy() failed\n");
+			return -EIO;
+		}
+		offset = src % PAGE_SIZE;
+		bytes = min(PAGE_SIZE - offset, count);
+		memcpy(dest, hsa_buf + offset, bytes);
+		src += bytes;
+		dest += bytes;
+		count -= bytes;
+	}
+	return 0;
+}
+
+static int __init init_cpu_info(void)
+{
+	struct save_area *sa;
+
+	/* get info for boot cpu from lowcore, stored in the HSA */
+	sa = save_area_boot_cpu();
+	if (!sa)
+		return -ENOMEM;
+	if (memcpy_hsa_kernel(hsa_buf, __LC_FPREGS_SAVE_AREA, 512) < 0) {
+		TRACE("could not copy from HSA\n");
+		return -EIO;
+	}
+	save_area_add_regs(sa, hsa_buf); /* vx registers are saved in smp.c */
+	return 0;
+}
+
+/*
+ * Release the HSA
+ */
+static void release_hsa(void)
+{
+	diag308(DIAG308_REL_HSA, NULL);
+	hsa_available = 0;
+}
+
+static ssize_t zcore_memmap_read(struct file *filp, char __user *buf,
+				 size_t count, loff_t *ppos)
+{
+	return simple_read_from_buffer(buf, count, ppos, filp->private_data,
+				       memblock.memory.cnt * CHUNK_INFO_SIZE);
+}
+
+static int zcore_memmap_open(struct inode *inode, struct file *filp)
+{
+	struct memblock_region *reg;
+	char *buf;
+	int i = 0;
+
+	buf = kcalloc(memblock.memory.cnt, CHUNK_INFO_SIZE, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+	for_each_memblock(memory, reg) {
+		sprintf(buf + (i++ * CHUNK_INFO_SIZE), "%016llx %016llx ",
+			(unsigned long long) reg->base,
+			(unsigned long long) reg->size);
+	}
+	filp->private_data = buf;
+	return nonseekable_open(inode, filp);
+}
+
+static int zcore_memmap_release(struct inode *inode, struct file *filp)
+{
+	kfree(filp->private_data);
+	return 0;
+}
+
+static const struct file_operations zcore_memmap_fops = {
+	.owner		= THIS_MODULE,
+	.read		= zcore_memmap_read,
+	.open		= zcore_memmap_open,
+	.release	= zcore_memmap_release,
+	.llseek		= no_llseek,
+};
+
+static ssize_t zcore_reipl_write(struct file *filp, const char __user *buf,
+				 size_t count, loff_t *ppos)
+{
+	if (ipl_block) {
+		diag308(DIAG308_SET, ipl_block);
+		diag308(DIAG308_LOAD_CLEAR, NULL);
+	}
+	return count;
+}
+
+static int zcore_reipl_open(struct inode *inode, struct file *filp)
+{
+	return nonseekable_open(inode, filp);
+}
+
+static int zcore_reipl_release(struct inode *inode, struct file *filp)
+{
+	return 0;
+}
+
+static const struct file_operations zcore_reipl_fops = {
+	.owner		= THIS_MODULE,
+	.write		= zcore_reipl_write,
+	.open		= zcore_reipl_open,
+	.release	= zcore_reipl_release,
+	.llseek		= no_llseek,
+};
+
+static ssize_t zcore_hsa_read(struct file *filp, char __user *buf,
+			      size_t count, loff_t *ppos)
+{
+	static char str[18];
+
+	if (hsa_available)
+		snprintf(str, sizeof(str), "%lx\n", sclp.hsa_size);
+	else
+		snprintf(str, sizeof(str), "0\n");
+	return simple_read_from_buffer(buf, count, ppos, str, strlen(str));
+}
+
+static ssize_t zcore_hsa_write(struct file *filp, const char __user *buf,
+			       size_t count, loff_t *ppos)
+{
+	char value;
+
+	if (*ppos != 0)
+		return -EPIPE;
+	if (copy_from_user(&value, buf, 1))
+		return -EFAULT;
+	if (value != '0')
+		return -EINVAL;
+	release_hsa();
+	return count;
+}
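+
+/*
+ * Usage sketch (illustrative): once the dump tool has saved the HSA
+ * contents, it can hand the memory back with
+ *	echo 0 > /sys/kernel/debug/zcore/hsa
+ * (assuming debugfs is mounted at /sys/kernel/debug).
+ */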
+
+static const struct file_operations zcore_hsa_fops = {
+	.owner		= THIS_MODULE,
+	.write		= zcore_hsa_write,
+	.read		= zcore_hsa_read,
+	.open		= nonseekable_open,
+	.llseek		= no_llseek,
+};
+
+static int __init check_sdias(void)
+{
+	if (!sclp.hsa_size) {
+		TRACE("Could not determine HSA size\n");
+		return -ENODEV;
+	}
+	return 0;
+}
+
+/*
+ * Provide IPL parameter information block from either HSA or memory
+ * for future reipl
+ */
+static int __init zcore_reipl_init(void)
+{
+	struct ipib_info ipib_info;
+	int rc;
+
+	rc = memcpy_hsa_kernel(&ipib_info, __LC_DUMP_REIPL, sizeof(ipib_info));
+	if (rc)
+		return rc;
+	if (ipib_info.ipib == 0)
+		return 0;
+	ipl_block = (void *) __get_free_page(GFP_KERNEL);
+	if (!ipl_block)
+		return -ENOMEM;
+	if (ipib_info.ipib < sclp.hsa_size)
+		rc = memcpy_hsa_kernel(ipl_block, ipib_info.ipib, PAGE_SIZE);
+	else
+		rc = memcpy_real(ipl_block, (void *) ipib_info.ipib, PAGE_SIZE);
+	if (rc || (__force u32)csum_partial(ipl_block, ipl_block->hdr.len, 0) !=
+	    ipib_info.checksum) {
+		TRACE("Checksum does not match\n");
+		free_page((unsigned long) ipl_block);
+		ipl_block = NULL;
+	}
+	return 0;
+}
+
+static int __init zcore_init(void)
+{
+	unsigned char arch;
+	int rc;
+
+	if (ipl_info.type != IPL_TYPE_FCP_DUMP)
+		return -ENODATA;
+	if (OLDMEM_BASE)
+		return -ENODATA;
+
+	zcore_dbf = debug_register("zcore", 4, 1, 4 * sizeof(long));
+	debug_register_view(zcore_dbf, &debug_sprintf_view);
+	debug_set_level(zcore_dbf, 6);
+
+	TRACE("devno:  %x\n", ipl_info.data.fcp.dev_id.devno);
+	TRACE("wwpn:   %llx\n", (unsigned long long) ipl_info.data.fcp.wwpn);
+	TRACE("lun:    %llx\n", (unsigned long long) ipl_info.data.fcp.lun);
+
+	rc = sclp_sdias_init();
+	if (rc)
+		goto fail;
+
+	rc = check_sdias();
+	if (rc)
+		goto fail;
+	hsa_available = 1;
+
+	rc = memcpy_hsa_kernel(&arch, __LC_AR_MODE_ID, 1);
+	if (rc)
+		goto fail;
+
+	if (arch == ARCH_S390) {
+		pr_alert("The 64-bit dump tool cannot be used for a "
+			 "32-bit system\n");
+		rc = -EINVAL;
+		goto fail;
+	}
+
+	pr_alert("The dump process started for a 64-bit operating system\n");
+	rc = init_cpu_info();
+	if (rc)
+		goto fail;
+
+	rc = zcore_reipl_init();
+	if (rc)
+		goto fail;
+
+	zcore_dir = debugfs_create_dir("zcore", NULL);
+	if (!zcore_dir) {
+		rc = -ENOMEM;
+		goto fail;
+	}
+	zcore_memmap_file = debugfs_create_file("memmap", S_IRUSR, zcore_dir,
+						NULL, &zcore_memmap_fops);
+	if (!zcore_memmap_file) {
+		rc = -ENOMEM;
+		goto fail_dir;
+	}
+	zcore_reipl_file = debugfs_create_file("reipl", S_IRUSR, zcore_dir,
+						NULL, &zcore_reipl_fops);
+	if (!zcore_reipl_file) {
+		rc = -ENOMEM;
+		goto fail_memmap_file;
+	}
+	zcore_hsa_file = debugfs_create_file("hsa", S_IRUSR|S_IWUSR, zcore_dir,
+					     NULL, &zcore_hsa_fops);
+	if (!zcore_hsa_file) {
+		rc = -ENOMEM;
+		goto fail_reipl_file;
+	}
+	return 0;
+
+fail_reipl_file:
+	debugfs_remove(zcore_reipl_file);
+fail_memmap_file:
+	debugfs_remove(zcore_memmap_file);
+fail_dir:
+	debugfs_remove(zcore_dir);
+fail:
+	diag308(DIAG308_REL_HSA, NULL);
+	return rc;
+}
+subsys_initcall(zcore_init);
diff --git a/drivers/s390/cio/Makefile b/drivers/s390/cio/Makefile
new file mode 100644
index 0000000..f230516
--- /dev/null
+++ b/drivers/s390/cio/Makefile
@@ -0,0 +1,24 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for the S/390 common i/o drivers
+#
+
+# The following is required for define_trace.h to find ./trace.h
+CFLAGS_trace.o := -I$(src)
+CFLAGS_vfio_ccw_fsm.o := -I$(src)
+
+obj-y += airq.o blacklist.o chsc.o cio.o css.o chp.o idset.o isc.o \
+	fcx.o itcw.o crw.o ccwreq.o trace.o ioasm.o
+ccw_device-objs += device.o device_fsm.o device_ops.o
+ccw_device-objs += device_id.o device_pgid.o device_status.o
+obj-y += ccw_device.o cmf.o
+obj-$(CONFIG_CHSC_SCH) += chsc_sch.o
+obj-$(CONFIG_EADM_SCH) += eadm_sch.o
+obj-$(CONFIG_SCM_BUS) += scm.o
+obj-$(CONFIG_CCWGROUP) += ccwgroup.o
+
+qdio-objs := qdio_main.o qdio_thinint.o qdio_debug.o qdio_setup.o
+obj-$(CONFIG_QDIO) += qdio.o
+
+vfio_ccw-objs += vfio_ccw_drv.o vfio_ccw_cp.o vfio_ccw_ops.o vfio_ccw_fsm.o
+obj-$(CONFIG_VFIO_CCW) += vfio_ccw.o
diff --git a/drivers/s390/cio/airq.c b/drivers/s390/cio/airq.c
new file mode 100644
index 0000000..a45011e
--- /dev/null
+++ b/drivers/s390/cio/airq.c
@@ -0,0 +1,277 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ *    Support for adapter interruptions
+ *
+ *    Copyright IBM Corp. 1999, 2007
+ *    Author(s): Ingo Adlung <adlung@de.ibm.com>
+ *		 Cornelia Huck <cornelia.huck@de.ibm.com>
+ *		 Arnd Bergmann <arndb@de.ibm.com>
+ *		 Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
+ */
+
+#include <linux/init.h>
+#include <linux/irq.h>
+#include <linux/kernel_stat.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/rculist.h>
+#include <linux/slab.h>
+
+#include <asm/airq.h>
+#include <asm/isc.h>
+
+#include "cio.h"
+#include "cio_debug.h"
+#include "ioasm.h"
+
+static DEFINE_SPINLOCK(airq_lists_lock);
+static struct hlist_head airq_lists[MAX_ISC+1];
+
+/**
+ * register_adapter_interrupt() - register adapter interrupt handler
+ * @airq: pointer to adapter interrupt descriptor
+ *
+ * Returns 0 on success, -EINVAL if the handler is missing or the ISC is
+ * out of range, or -ENOMEM if no summary indicator byte could be allocated.
+ */
+int register_adapter_interrupt(struct airq_struct *airq)
+{
+	char dbf_txt[32];
+
+	if (!airq->handler || airq->isc > MAX_ISC)
+		return -EINVAL;
+	if (!airq->lsi_ptr) {
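+		/*
+		 * No summary indicator provided - allocate a single
+		 * indicator byte on behalf of the driver.
+		 */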
+		airq->lsi_ptr = kzalloc(1, GFP_KERNEL);
+		if (!airq->lsi_ptr)
+			return -ENOMEM;
+		airq->flags |= AIRQ_PTR_ALLOCATED;
+	}
+	if (!airq->lsi_mask)
+		airq->lsi_mask = 0xff;
+	snprintf(dbf_txt, sizeof(dbf_txt), "rairq:%p", airq);
+	CIO_TRACE_EVENT(4, dbf_txt);
+	isc_register(airq->isc);
+	spin_lock(&airq_lists_lock);
+	hlist_add_head_rcu(&airq->list, &airq_lists[airq->isc]);
+	spin_unlock(&airq_lists_lock);
+	return 0;
+}
+EXPORT_SYMBOL(register_adapter_interrupt);
+
+/**
+ * unregister_adapter_interrupt - unregister adapter interrupt handler
+ * @airq: pointer to adapter interrupt descriptor
+ */
+void unregister_adapter_interrupt(struct airq_struct *airq)
+{
+	char dbf_txt[32];
+
+	if (hlist_unhashed(&airq->list))
+		return;
+	snprintf(dbf_txt, sizeof(dbf_txt), "urairq:%p", airq);
+	CIO_TRACE_EVENT(4, dbf_txt);
+	spin_lock(&airq_lists_lock);
+	hlist_del_rcu(&airq->list);
+	spin_unlock(&airq_lists_lock);
+	synchronize_rcu();
+	isc_unregister(airq->isc);
+	if (airq->flags & AIRQ_PTR_ALLOCATED) {
+		kfree(airq->lsi_ptr);
+		airq->lsi_ptr = NULL;
+		airq->flags &= ~AIRQ_PTR_ALLOCATED;
+	}
+}
+EXPORT_SYMBOL(unregister_adapter_interrupt);
+
+static irqreturn_t do_airq_interrupt(int irq, void *dummy)
+{
+	struct tpi_info *tpi_info;
+	struct airq_struct *airq;
+	struct hlist_head *head;
+
+	set_cpu_flag(CIF_NOHZ_DELAY);
+	tpi_info = (struct tpi_info *) &get_irq_regs()->int_code;
+	trace_s390_cio_adapter_int(tpi_info);
+	head = &airq_lists[tpi_info->isc];
+	rcu_read_lock();
+	hlist_for_each_entry_rcu(airq, head, list)
+		if ((*airq->lsi_ptr & airq->lsi_mask) != 0)
+			airq->handler(airq);
+	rcu_read_unlock();
+
+	return IRQ_HANDLED;
+}
+
+static struct irqaction airq_interrupt = {
+	.name	 = "AIO",
+	.handler = do_airq_interrupt,
+};
+
+void __init init_airq_interrupts(void)
+{
+	irq_set_chip_and_handler(THIN_INTERRUPT,
+				 &dummy_irq_chip, handle_percpu_irq);
+	setup_irq(THIN_INTERRUPT, &airq_interrupt);
+}
+
+/**
+ * airq_iv_create - create an interrupt vector
+ * @bits: number of bits in the interrupt vector
+ * @flags: allocation flags
+ *
+ * Returns a pointer to an interrupt vector structure, or NULL if the
+ * allocation failed
+ */
+struct airq_iv *airq_iv_create(unsigned long bits, unsigned long flags)
+{
+	struct airq_iv *iv;
+	unsigned long size;
+
+	iv = kzalloc(sizeof(*iv), GFP_KERNEL);
+	if (!iv)
+		goto out;
+	iv->bits = bits;
+	size = BITS_TO_LONGS(bits) * sizeof(unsigned long);
+	iv->vector = kzalloc(size, GFP_KERNEL);
+	if (!iv->vector)
+		goto out_free;
+	if (flags & AIRQ_IV_ALLOC) {
+		iv->avail = kmalloc(size, GFP_KERNEL);
+		if (!iv->avail)
+			goto out_free;
+		memset(iv->avail, 0xff, size);
+		iv->end = 0;
+	} else
+		iv->end = bits;
+	if (flags & AIRQ_IV_BITLOCK) {
+		iv->bitlock = kzalloc(size, GFP_KERNEL);
+		if (!iv->bitlock)
+			goto out_free;
+	}
+	if (flags & AIRQ_IV_PTR) {
+		size = bits * sizeof(unsigned long);
+		iv->ptr = kzalloc(size, GFP_KERNEL);
+		if (!iv->ptr)
+			goto out_free;
+	}
+	if (flags & AIRQ_IV_DATA) {
+		size = bits * sizeof(unsigned int);
+		iv->data = kzalloc(size, GFP_KERNEL);
+		if (!iv->data)
+			goto out_free;
+	}
+	spin_lock_init(&iv->lock);
+	return iv;
+
+out_free:
+	kfree(iv->ptr);
+	kfree(iv->bitlock);
+	kfree(iv->avail);
+	kfree(iv->vector);
+	kfree(iv);
+out:
+	return NULL;
+}
+EXPORT_SYMBOL(airq_iv_create);
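+
+/*
+ * Minimal usage sketch (illustrative; the flags and accessors such as
+ * airq_iv_set_data() come from asm/airq.h):
+ *
+ *	struct airq_iv *iv;
+ *	unsigned long bit;
+ *
+ *	iv = airq_iv_create(64, AIRQ_IV_ALLOC | AIRQ_IV_DATA);
+ *	if (iv) {
+ *		bit = airq_iv_alloc(iv, 1);
+ *		if (bit != -1UL)
+ *			airq_iv_set_data(iv, bit, data);
+ *	}
+ */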
+
+/**
+ * airq_iv_release - release an interrupt vector
+ * @iv: pointer to interrupt vector structure
+ */
+void airq_iv_release(struct airq_iv *iv)
+{
+	kfree(iv->data);
+	kfree(iv->ptr);
+	kfree(iv->bitlock);
+	kfree(iv->vector);
+	kfree(iv->avail);
+	kfree(iv);
+}
+EXPORT_SYMBOL(airq_iv_release);
+
+/**
+ * airq_iv_alloc - allocate irq bits from an interrupt vector
+ * @iv: pointer to an interrupt vector structure
+ * @num: number of consecutive irq bits to allocate
+ *
+ * Returns the bit number of the first irq in the allocated block of irqs,
+ * or -1UL if no bit is available or the AIRQ_IV_ALLOC flag has not been
+ * specified
+ */
+unsigned long airq_iv_alloc(struct airq_iv *iv, unsigned long num)
+{
+	unsigned long bit, i, flags;
+
+	if (!iv->avail || num == 0)
+		return -1UL;
+	spin_lock_irqsave(&iv->lock, flags);
+	bit = find_first_bit_inv(iv->avail, iv->bits);
+	while (bit + num <= iv->bits) {
+		for (i = 1; i < num; i++)
+			if (!test_bit_inv(bit + i, iv->avail))
+				break;
+		if (i >= num) {
+			/* Found a suitable block of irqs */
+			for (i = 0; i < num; i++)
+				clear_bit_inv(bit + i, iv->avail);
+			if (bit + num >= iv->end)
+				iv->end = bit + num + 1;
+			break;
+		}
+		bit = find_next_bit_inv(iv->avail, iv->bits, bit + i + 1);
+	}
+	if (bit + num > iv->bits)
+		bit = -1UL;
+	spin_unlock_irqrestore(&iv->lock, flags);
+	return bit;
+}
+EXPORT_SYMBOL(airq_iv_alloc);
+
+/**
+ * airq_iv_free - free irq bits of an interrupt vector
+ * @iv: pointer to interrupt vector structure
+ * @bit: number of the first irq bit to free
+ * @num: number of consecutive irq bits to free
+ */
+void airq_iv_free(struct airq_iv *iv, unsigned long bit, unsigned long num)
+{
+	unsigned long i, flags;
+
+	if (!iv->avail || num == 0)
+		return;
+	spin_lock_irqsave(&iv->lock, flags);
+	for (i = 0; i < num; i++) {
+		/* Clear (possibly left over) interrupt bit */
+		clear_bit_inv(bit + i, iv->vector);
+		/* Make the bit positions available again */
+		set_bit_inv(bit + i, iv->avail);
+	}
+	if (bit + num >= iv->end) {
+		/* Find new end of bit-field */
+		while (iv->end > 0 && !test_bit_inv(iv->end - 1, iv->avail))
+			iv->end--;
+	}
+	spin_unlock_irqrestore(&iv->lock, flags);
+}
+EXPORT_SYMBOL(airq_iv_free);
+
+/**
+ * airq_iv_scan - scan interrupt vector for non-zero bits
+ * @iv: pointer to interrupt vector structure
+ * @start: bit number to start the search
+ * @end: bit number to end the search
+ *
+ * Returns the bit number of the next non-zero interrupt bit, or
+ * -1UL if the scan completed without finding any more non-zero bits.
+ */
+unsigned long airq_iv_scan(struct airq_iv *iv, unsigned long start,
+			   unsigned long end)
+{
+	unsigned long bit;
+
+	/* Find the next non-zero bit starting from 'start'. */
+	bit = find_next_bit_inv(iv->vector, end, start);
+	if (bit >= end)
+		return -1UL;
+	clear_bit_inv(bit, iv->vector);
+	return bit;
+}
+EXPORT_SYMBOL(airq_iv_scan);
diff --git a/drivers/s390/cio/blacklist.c b/drivers/s390/cio/blacklist.c
new file mode 100644
index 0000000..2a3f874
--- /dev/null
+++ b/drivers/s390/cio/blacklist.c
@@ -0,0 +1,423 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ *   S/390 common I/O routines -- blacklisting of specific devices
+ *
+ *    Copyright IBM Corp. 1999, 2013
+ *    Author(s): Ingo Adlung (adlung@de.ibm.com)
+ *		 Cornelia Huck (cornelia.huck@de.ibm.com)
+ *		 Arnd Bergmann (arndb@de.ibm.com)
+ */
+
+#define KMSG_COMPONENT "cio"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <linux/init.h>
+#include <linux/vmalloc.h>
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+#include <linux/ctype.h>
+#include <linux/device.h>
+
+#include <linux/uaccess.h>
+#include <asm/cio.h>
+#include <asm/ipl.h>
+
+#include "blacklist.h"
+#include "cio.h"
+#include "cio_debug.h"
+#include "css.h"
+#include "device.h"
+
+/*
+ * "Blacklisting" of certain devices:
+ * Device numbers given on the command line as cio_ignore=... won't be known
+ * to Linux.
+ *
+ * These can be single devices or ranges of devices.
+ */
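+
+/*
+ * Example (illustrative):
+ *	cio_ignore=all,!0.0.0200-0.0.02ff,!ipldev
+ * blacklists every device except the range 0.0.0200-0.0.02ff and the
+ * device the system was IPLed from; entries are processed in order and
+ * a leading '!' inverts the action.
+ */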
+
+/* 65536 bits for each set to indicate if a devno is blacklisted or not */
+#define __BL_DEV_WORDS ((__MAX_SUBCHANNEL + (8*sizeof(long) - 1)) / \
+			 (8*sizeof(long)))
+static unsigned long bl_dev[__MAX_SSID + 1][__BL_DEV_WORDS];
+typedef enum {add, free} range_action;
+
+/*
+ * Function: blacklist_range
+ * (Un-)blacklist the devices from-to
+ */
+static int blacklist_range(range_action action, unsigned int from_ssid,
+			   unsigned int to_ssid, unsigned int from,
+			   unsigned int to, int msgtrigger)
+{
+	if ((from_ssid > to_ssid) || ((from_ssid == to_ssid) && (from > to))) {
+		if (msgtrigger)
+			pr_warn("0.%x.%04x to 0.%x.%04x is not a valid range for cio_ignore\n",
+				from_ssid, from, to_ssid, to);
+
+		return 1;
+	}
+
+	while ((from_ssid < to_ssid) || ((from_ssid == to_ssid) &&
+	       (from <= to))) {
+		if (action == add)
+			set_bit(from, bl_dev[from_ssid]);
+		else
+			clear_bit(from, bl_dev[from_ssid]);
+		from++;
+		if (from > __MAX_SUBCHANNEL) {
+			from_ssid++;
+			from = 0;
+		}
+	}
+
+	return 0;
+}
+
+static int pure_hex(char **cp, unsigned int *val, int min_digit,
+		    int max_digit, int max_val)
+{
+	int diff;
+
+	diff = 0;
+	*val = 0;
+
+	while (diff <= max_digit) {
+		int value = hex_to_bin(**cp);
+
+		if (value < 0)
+			break;
+		*val = *val * 16 + value;
+		(*cp)++;
+		diff++;
+	}
+
+	if ((diff < min_digit) || (diff > max_digit) || (*val > max_val))
+		return 1;
+
+	return 0;
+}
+
+static int parse_busid(char *str, unsigned int *cssid, unsigned int *ssid,
+		       unsigned int *devno, int msgtrigger)
+{
+	char *str_work;
+	int val, rc, ret;
+
+	rc = 1;
+
+	if (*str == '\0')
+		goto out;
+
+	/* old style: plain hexadecimal device number, e.g. "abcd" */
+	str_work = str;
+	val = simple_strtoul(str, &str_work, 16);
+
+	if (*str_work == '\0') {
+		if (val <= __MAX_SUBCHANNEL) {
+			*devno = val;
+			*ssid = 0;
+			*cssid = 0;
+			rc = 0;
+		}
+		goto out;
+	}
+
+	/* new style: "cssid.ssid.devno" bus id, e.g. "0.0.abcd" */
+	str_work = str;
+	ret = pure_hex(&str_work, cssid, 1, 2, __MAX_CSSID);
+	if (ret || (str_work[0] != '.'))
+		goto out;
+	str_work++;
+	ret = pure_hex(&str_work, ssid, 1, 1, __MAX_SSID);
+	if (ret || (str_work[0] != '.'))
+		goto out;
+	str_work++;
+	ret = pure_hex(&str_work, devno, 4, 4, __MAX_SUBCHANNEL);
+	if (ret || (str_work[0] != '\0'))
+		goto out;
+
+	rc = 0;
+out:
+	if (rc && msgtrigger)
+		pr_warn("%s is not a valid device for the cio_ignore kernel parameter\n",
+			str);
+
+	return rc;
+}
+
+static int blacklist_parse_parameters(char *str, range_action action,
+				      int msgtrigger)
+{
+	unsigned int from_cssid, to_cssid, from_ssid, to_ssid, from, to;
+	int rc, totalrc;
+	char *parm;
+	range_action ra;
+
+	totalrc = 0;
+
+	while ((parm = strsep(&str, ","))) {
+		rc = 0;
+		ra = action;
+		if (*parm == '!') {
+			if (ra == add)
+				ra = free;
+			else
+				ra = add;
+			parm++;
+		}
+		if (strcmp(parm, "all") == 0) {
+			from_cssid = 0;
+			from_ssid = 0;
+			from = 0;
+			to_cssid = __MAX_CSSID;
+			to_ssid = __MAX_SSID;
+			to = __MAX_SUBCHANNEL;
+		} else if (strcmp(parm, "ipldev") == 0) {
+			if (ipl_info.type == IPL_TYPE_CCW) {
+				from_cssid = 0;
+				from_ssid = ipl_info.data.ccw.dev_id.ssid;
+				from = ipl_info.data.ccw.dev_id.devno;
+			} else if (ipl_info.type == IPL_TYPE_FCP ||
+				   ipl_info.type == IPL_TYPE_FCP_DUMP) {
+				from_cssid = 0;
+				from_ssid = ipl_info.data.fcp.dev_id.ssid;
+				from = ipl_info.data.fcp.dev_id.devno;
+			} else {
+				continue;
+			}
+			to_cssid = from_cssid;
+			to_ssid = from_ssid;
+			to = from;
+		} else if (strcmp(parm, "condev") == 0) {
+			if (console_devno == -1)
+				continue;
+
+			from_cssid = to_cssid = 0;
+			from_ssid = to_ssid = 0;
+			from = to = console_devno;
+		} else {
+			rc = parse_busid(strsep(&parm, "-"), &from_cssid,
+					 &from_ssid, &from, msgtrigger);
+			if (!rc) {
+				if (parm != NULL)
+					rc = parse_busid(parm, &to_cssid,
+							 &to_ssid, &to,
+							 msgtrigger);
+				else {
+					to_cssid = from_cssid;
+					to_ssid = from_ssid;
+					to = from;
+				}
+			}
+		}
+		if (!rc) {
+			rc = blacklist_range(ra, from_ssid, to_ssid, from, to,
+					     msgtrigger);
+			if (rc)
+				totalrc = -EINVAL;
+		} else
+			totalrc = -EINVAL;
+	}
+
+	return totalrc;
+}
+
+static int __init
+blacklist_setup (char *str)
+{
+	CIO_MSG_EVENT(6, "Reading blacklist parameters\n");
+	if (blacklist_parse_parameters(str, add, 1))
+		return 0;
+	return 1;
+}
+
+__setup ("cio_ignore=", blacklist_setup);
+
+/* Checking if devices are blacklisted */
+
+/*
+ * Function: is_blacklisted
+ * Returns 1 if the given device number can be found in the blacklist,
+ * otherwise 0.
+ * Used by validate_subchannel()
+ */
+int
+is_blacklisted (int ssid, int devno)
+{
+	return test_bit (devno, bl_dev[ssid]);
+}
+
+#ifdef CONFIG_PROC_FS
+/*
+ * Function: blacklist_parse_proc_parameters
+ * parse the stuff which is piped to /proc/cio_ignore
+ */
+static int blacklist_parse_proc_parameters(char *buf)
+{
+	int rc;
+	char *parm;
+
+	parm = strsep(&buf, " ");
+
+	if (strcmp("free", parm) == 0) {
+		rc = blacklist_parse_parameters(buf, free, 0);
+		css_schedule_eval_all_unreg(0);
+	} else if (strcmp("add", parm) == 0)
+		rc = blacklist_parse_parameters(buf, add, 0);
+	else if (strcmp("purge", parm) == 0)
+		return ccw_purge_blacklisted();
+	else
+		return -EINVAL;
+
+	return rc;
+}
+
+/* Iterator struct for all devices. */
+struct ccwdev_iter {
+	int devno;
+	int ssid;
+	int in_range;
+};
+
+static void *
+cio_ignore_proc_seq_start(struct seq_file *s, loff_t *offset)
+{
+	struct ccwdev_iter *iter = s->private;
+
+	if (*offset >= (__MAX_SUBCHANNEL + 1) * (__MAX_SSID + 1))
+		return NULL;
+	memset(iter, 0, sizeof(*iter));
+	iter->ssid = *offset / (__MAX_SUBCHANNEL + 1);
+	iter->devno = *offset % (__MAX_SUBCHANNEL + 1);
+	return iter;
+}
+
+static void
+cio_ignore_proc_seq_stop(struct seq_file *s, void *it)
+{
+}
+
+static void *
+cio_ignore_proc_seq_next(struct seq_file *s, void *it, loff_t *offset)
+{
+	struct ccwdev_iter *iter;
+
+	if (*offset >= (__MAX_SUBCHANNEL + 1) * (__MAX_SSID + 1))
+		return NULL;
+	iter = it;
+	if (iter->devno == __MAX_SUBCHANNEL) {
+		iter->devno = 0;
+		iter->ssid++;
+		if (iter->ssid > __MAX_SSID)
+			return NULL;
+	} else
+		iter->devno++;
+	(*offset)++;
+	return iter;
+}
+
+static int
+cio_ignore_proc_seq_show(struct seq_file *s, void *it)
+{
+	struct ccwdev_iter *iter;
+
+	iter = it;
+	if (!is_blacklisted(iter->ssid, iter->devno))
+		/* Not blacklisted, nothing to output. */
+		return 0;
+	if (!iter->in_range) {
+		/* First device in range. */
+		if ((iter->devno == __MAX_SUBCHANNEL) ||
+		    !is_blacklisted(iter->ssid, iter->devno + 1)) {
+			/* Singular device. */
+			seq_printf(s, "0.%x.%04x\n", iter->ssid, iter->devno);
+			return 0;
+		}
+		iter->in_range = 1;
+		seq_printf(s, "0.%x.%04x-", iter->ssid, iter->devno);
+		return 0;
+	}
+	if ((iter->devno == __MAX_SUBCHANNEL) ||
+	    !is_blacklisted(iter->ssid, iter->devno + 1)) {
+		/* Last device in range. */
+		iter->in_range = 0;
+		seq_printf(s, "0.%x.%04x\n", iter->ssid, iter->devno);
+	}
+	return 0;
+}
+
+static ssize_t
+cio_ignore_write(struct file *file, const char __user *user_buf,
+		 size_t user_len, loff_t *offset)
+{
+	char *buf;
+	ssize_t rc, ret, i;
+
+	if (*offset)
+		return -EINVAL;
+	if (user_len > 65536)
+		user_len = 65536;
+	buf = vzalloc(user_len + 1); /* maybe better use the stack? */
+	if (buf == NULL)
+		return -ENOMEM;
+
+	if (strncpy_from_user (buf, user_buf, user_len) < 0) {
+		rc = -EFAULT;
+		goto out_free;
+	}
+
+	i = user_len - 1;
+	while ((i >= 0) && (isspace(buf[i]) || (buf[i] == 0))) {
+		buf[i] = '\0';
+		i--;
+	}
+	ret = blacklist_parse_proc_parameters(buf);
+	if (ret)
+		rc = ret;
+	else
+		rc = user_len;
+
+out_free:
+	vfree (buf);
+	return rc;
+}
+
+static const struct seq_operations cio_ignore_proc_seq_ops = {
+	.start = cio_ignore_proc_seq_start,
+	.stop  = cio_ignore_proc_seq_stop,
+	.next  = cio_ignore_proc_seq_next,
+	.show  = cio_ignore_proc_seq_show,
+};
+
+static int
+cio_ignore_proc_open(struct inode *inode, struct file *file)
+{
+	return seq_open_private(file, &cio_ignore_proc_seq_ops,
+				sizeof(struct ccwdev_iter));
+}
+
+static const struct file_operations cio_ignore_proc_fops = {
+	.open    = cio_ignore_proc_open,
+	.read    = seq_read,
+	.llseek  = seq_lseek,
+	.release = seq_release_private,
+	.write   = cio_ignore_write,
+};
+
+static int
+cio_ignore_proc_init (void)
+{
+	struct proc_dir_entry *entry;
+
+	entry = proc_create("cio_ignore", S_IFREG | S_IRUGO | S_IWUSR, NULL,
+			    &cio_ignore_proc_fops);
+	if (!entry)
+		return -ENOENT;
+	return 0;
+}
+
+__initcall (cio_ignore_proc_init);
+
+#endif /* CONFIG_PROC_FS */
diff --git a/drivers/s390/cio/blacklist.h b/drivers/s390/cio/blacklist.h
new file mode 100644
index 0000000..140e3e4
--- /dev/null
+++ b/drivers/s390/cio/blacklist.h
@@ -0,0 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef S390_BLACKLIST_H
+#define S390_BLACKLIST_H
+
+extern int is_blacklisted (int ssid, int devno);
+
+#endif
diff --git a/drivers/s390/cio/ccwgroup.c b/drivers/s390/cio/ccwgroup.c
new file mode 100644
index 0000000..93b2862
--- /dev/null
+++ b/drivers/s390/cio/ccwgroup.c
@@ -0,0 +1,656 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ *  bus driver for ccwgroup
+ *
+ *  Copyright IBM Corp. 2002, 2012
+ *
+ *  Author(s): Arnd Bergmann (arndb@de.ibm.com)
+ *	       Cornelia Huck (cornelia.huck@de.ibm.com)
+ */
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/list.h>
+#include <linux/device.h>
+#include <linux/init.h>
+#include <linux/ctype.h>
+#include <linux/dcache.h>
+
+#include <asm/cio.h>
+#include <asm/ccwdev.h>
+#include <asm/ccwgroup.h>
+
+#include "device.h"
+
+#define CCW_BUS_ID_SIZE		10
+
+/* In Linux 2.4, we had a channel device layer called "chandev"
+ * that did all sorts of obscure stuff for networking devices.
+ * This is another driver that serves as a replacement for just
+ * one of its functions, namely the translation of single subchannels
+ * to devices that use multiple subchannels.
+ */
+
+static struct bus_type ccwgroup_bus_type;
+
+static void __ccwgroup_remove_symlinks(struct ccwgroup_device *gdev)
+{
+	int i;
+	char str[16];
+
+	for (i = 0; i < gdev->count; i++) {
+		sprintf(str, "cdev%d", i);
+		sysfs_remove_link(&gdev->dev.kobj, str);
+		sysfs_remove_link(&gdev->cdev[i]->dev.kobj, "group_device");
+	}
+}
+
+/*
+ * Remove references from ccw devices to ccw group device and from
+ * ccw group device to ccw devices.
+ */
+static void __ccwgroup_remove_cdev_refs(struct ccwgroup_device *gdev)
+{
+	struct ccw_device *cdev;
+	int i;
+
+	for (i = 0; i < gdev->count; i++) {
+		cdev = gdev->cdev[i];
+		if (!cdev)
+			continue;
+		spin_lock_irq(cdev->ccwlock);
+		dev_set_drvdata(&cdev->dev, NULL);
+		spin_unlock_irq(cdev->ccwlock);
+		gdev->cdev[i] = NULL;
+		put_device(&cdev->dev);
+	}
+}
+
+/**
+ * ccwgroup_set_online() - enable a ccwgroup device
+ * @gdev: target ccwgroup device
+ *
+ * This function attempts to put the ccwgroup device into the online state.
+ * Returns:
+ *  %0 on success and a negative error value on failure.
+ */
+int ccwgroup_set_online(struct ccwgroup_device *gdev)
+{
+	struct ccwgroup_driver *gdrv = to_ccwgroupdrv(gdev->dev.driver);
+	int ret = -EINVAL;
+
+	if (atomic_cmpxchg(&gdev->onoff, 0, 1) != 0)
+		return -EAGAIN;
+	if (gdev->state == CCWGROUP_ONLINE)
+		goto out;
+	if (gdrv->set_online)
+		ret = gdrv->set_online(gdev);
+	if (ret)
+		goto out;
+
+	gdev->state = CCWGROUP_ONLINE;
+out:
+	atomic_set(&gdev->onoff, 0);
+	return ret;
+}
+EXPORT_SYMBOL(ccwgroup_set_online);
+
+/**
+ * ccwgroup_set_offline() - disable a ccwgroup device
+ * @gdev: target ccwgroup device
+ *
+ * This function attempts to put the ccwgroup device into the offline state.
+ * Returns:
+ *  %0 on success and a negative error value on failure.
+ */
+int ccwgroup_set_offline(struct ccwgroup_device *gdev)
+{
+	struct ccwgroup_driver *gdrv = to_ccwgroupdrv(gdev->dev.driver);
+	int ret = -EINVAL;
+
+	if (atomic_cmpxchg(&gdev->onoff, 0, 1) != 0)
+		return -EAGAIN;
+	if (gdev->state == CCWGROUP_OFFLINE)
+		goto out;
+	if (gdrv->set_offline)
+		ret = gdrv->set_offline(gdev);
+	if (ret)
+		goto out;
+
+	gdev->state = CCWGROUP_OFFLINE;
+out:
+	atomic_set(&gdev->onoff, 0);
+	return ret;
+}
+EXPORT_SYMBOL(ccwgroup_set_offline);
+
+static ssize_t ccwgroup_online_store(struct device *dev,
+				     struct device_attribute *attr,
+				     const char *buf, size_t count)
+{
+	struct ccwgroup_device *gdev = to_ccwgroupdev(dev);
+	unsigned long value;
+	int ret;
+
+	device_lock(dev);
+	if (!dev->driver) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	ret = kstrtoul(buf, 0, &value);
+	if (ret)
+		goto out;
+
+	if (value == 1)
+		ret = ccwgroup_set_online(gdev);
+	else if (value == 0)
+		ret = ccwgroup_set_offline(gdev);
+	else
+		ret = -EINVAL;
+out:
+	device_unlock(dev);
+	return (ret == 0) ? count : ret;
+}
+
+static ssize_t ccwgroup_online_show(struct device *dev,
+				    struct device_attribute *attr,
+				    char *buf)
+{
+	struct ccwgroup_device *gdev = to_ccwgroupdev(dev);
+	int online;
+
+	online = (gdev->state == CCWGROUP_ONLINE) ? 1 : 0;
+
+	return scnprintf(buf, PAGE_SIZE, "%d\n", online);
+}
+
+/*
+ * Provide an 'ungroup' attribute so the user can remove group devices no
+ * longer needed or accidentally created. Saves memory :)
+ */
+static void ccwgroup_ungroup(struct ccwgroup_device *gdev)
+{
+	mutex_lock(&gdev->reg_mutex);
+	if (device_is_registered(&gdev->dev)) {
+		__ccwgroup_remove_symlinks(gdev);
+		device_unregister(&gdev->dev);
+		__ccwgroup_remove_cdev_refs(gdev);
+	}
+	mutex_unlock(&gdev->reg_mutex);
+}
+
+static ssize_t ccwgroup_ungroup_store(struct device *dev,
+				      struct device_attribute *attr,
+				      const char *buf, size_t count)
+{
+	struct ccwgroup_device *gdev = to_ccwgroupdev(dev);
+	int rc = 0;
+
+	/* Prevent concurrent online/offline processing and ungrouping. */
+	if (atomic_cmpxchg(&gdev->onoff, 0, 1) != 0)
+		return -EAGAIN;
+	if (gdev->state != CCWGROUP_OFFLINE) {
+		rc = -EINVAL;
+		goto out;
+	}
+
+	if (device_remove_file_self(dev, attr))
+		ccwgroup_ungroup(gdev);
+	else
+		rc = -ENODEV;
+out:
+	if (rc) {
+		/* Release onoff "lock" when ungrouping failed. */
+		atomic_set(&gdev->onoff, 0);
+		return rc;
+	}
+	return count;
+}
+static DEVICE_ATTR(ungroup, 0200, NULL, ccwgroup_ungroup_store);
+static DEVICE_ATTR(online, 0644, ccwgroup_online_show, ccwgroup_online_store);
+
+static struct attribute *ccwgroup_attrs[] = {
+	&dev_attr_online.attr,
+	&dev_attr_ungroup.attr,
+	NULL,
+};
+static struct attribute_group ccwgroup_attr_group = {
+	.attrs = ccwgroup_attrs,
+};
+static const struct attribute_group *ccwgroup_attr_groups[] = {
+	&ccwgroup_attr_group,
+	NULL,
+};
+
+static void ccwgroup_ungroup_workfn(struct work_struct *work)
+{
+	struct ccwgroup_device *gdev =
+		container_of(work, struct ccwgroup_device, ungroup_work);
+
+	ccwgroup_ungroup(gdev);
+	put_device(&gdev->dev);
+}
+
+static void ccwgroup_release(struct device *dev)
+{
+	kfree(to_ccwgroupdev(dev));
+}
+
+static int __ccwgroup_create_symlinks(struct ccwgroup_device *gdev)
+{
+	char str[16];
+	int i, rc;
+
+	for (i = 0; i < gdev->count; i++) {
+		rc = sysfs_create_link(&gdev->cdev[i]->dev.kobj,
+				       &gdev->dev.kobj, "group_device");
+		if (rc) {
+			for (--i; i >= 0; i--)
+				sysfs_remove_link(&gdev->cdev[i]->dev.kobj,
+						  "group_device");
+			return rc;
+		}
+	}
+	for (i = 0; i < gdev->count; i++) {
+		sprintf(str, "cdev%d", i);
+		rc = sysfs_create_link(&gdev->dev.kobj,
+				       &gdev->cdev[i]->dev.kobj, str);
+		if (rc) {
+			for (--i; i >= 0; i--) {
+				sprintf(str, "cdev%d", i);
+				sysfs_remove_link(&gdev->dev.kobj, str);
+			}
+			for (i = 0; i < gdev->count; i++)
+				sysfs_remove_link(&gdev->cdev[i]->dev.kobj,
+						  "group_device");
+			return rc;
+		}
+	}
+	return 0;
+}
+
+static int __get_next_id(const char **buf, struct ccw_dev_id *id)
+{
+	unsigned int cssid, ssid, devno;
+	int ret = 0, len;
+	char *start, *end;
+
+	start = (char *)*buf;
+	end = strchr(start, ',');
+	if (!end) {
+		/* Last entry. Strip trailing newline, if applicable. */
+		end = strchr(start, '\n');
+		if (end)
+			*end = '\0';
+		len = strlen(start) + 1;
+	} else {
+		len = end - start + 1;
+		end++;
+	}
+	if (len <= CCW_BUS_ID_SIZE) {
+		if (sscanf(start, "%2x.%1x.%04x", &cssid, &ssid, &devno) != 3)
+			ret = -EINVAL;
+	} else
+		ret = -EINVAL;
+
+	if (!ret) {
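+		/*
+		 * Only ssid/devno are kept; the cssid is syntax-checked but
+		 * struct ccw_dev_id has no cssid field.
+		 */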
+		id->ssid = ssid;
+		id->devno = devno;
+	}
+	*buf = end;
+	return ret;
+}
+
+/**
+ * ccwgroup_create_dev() - create and register a ccw group device
+ * @parent: parent device for the new device
+ * @gdrv: driver for the new group device
+ * @num_devices: number of slave devices
+ * @buf: buffer containing comma separated bus ids of slave devices
+ *
+ * Create and register a new ccw group device as a child of @parent. Slave
+ * devices are obtained from the list of bus ids given in @buf.
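+ * An illustrative @buf for a group of three: "0.0.0600,0.0.0601,0.0.0602".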
+ * Returns:
+ *  %0 on success and an error code on failure.
+ * Context:
+ *  non-atomic
+ */
+int ccwgroup_create_dev(struct device *parent, struct ccwgroup_driver *gdrv,
+			int num_devices, const char *buf)
+{
+	struct ccwgroup_device *gdev;
+	struct ccw_dev_id dev_id;
+	int rc, i;
+
+	if (num_devices < 1)
+		return -EINVAL;
+
+	gdev = kzalloc(struct_size(gdev, cdev, num_devices), GFP_KERNEL);
+	if (!gdev)
+		return -ENOMEM;
+
+	atomic_set(&gdev->onoff, 0);
+	mutex_init(&gdev->reg_mutex);
+	mutex_lock(&gdev->reg_mutex);
+	INIT_WORK(&gdev->ungroup_work, ccwgroup_ungroup_workfn);
+	gdev->count = num_devices;
+	gdev->dev.bus = &ccwgroup_bus_type;
+	gdev->dev.parent = parent;
+	gdev->dev.release = ccwgroup_release;
+	device_initialize(&gdev->dev);
+
+	for (i = 0; i < num_devices && buf; i++) {
+		rc = __get_next_id(&buf, &dev_id);
+		if (rc != 0)
+			goto error;
+		gdev->cdev[i] = get_ccwdev_by_dev_id(&dev_id);
+		/*
+		 * All devices have to be of the same type in
+		 * order to be grouped.
+		 */
+		if (!gdev->cdev[i] || !gdev->cdev[i]->drv ||
+		    gdev->cdev[i]->drv != gdev->cdev[0]->drv ||
+		    gdev->cdev[i]->id.driver_info !=
+		    gdev->cdev[0]->id.driver_info) {
+			rc = -EINVAL;
+			goto error;
+		}
+		/* Don't allow a device to belong to more than one group. */
+		spin_lock_irq(gdev->cdev[i]->ccwlock);
+		if (dev_get_drvdata(&gdev->cdev[i]->dev)) {
+			spin_unlock_irq(gdev->cdev[i]->ccwlock);
+			rc = -EINVAL;
+			goto error;
+		}
+		dev_set_drvdata(&gdev->cdev[i]->dev, gdev);
+		spin_unlock_irq(gdev->cdev[i]->ccwlock);
+	}
+	/* Check for sufficient number of bus ids. */
+	if (i < num_devices) {
+		rc = -EINVAL;
+		goto error;
+	}
+	/* Check for trailing stuff. */
+	if (i == num_devices && strlen(buf) > 0) {
+		rc = -EINVAL;
+		goto error;
+	}
+	/* Check if the devices are bound to the required ccw driver. */
+	if (gdrv && gdrv->ccw_driver &&
+	    gdev->cdev[0]->drv != gdrv->ccw_driver) {
+		rc = -EINVAL;
+		goto error;
+	}
+
+	dev_set_name(&gdev->dev, "%s", dev_name(&gdev->cdev[0]->dev));
+	gdev->dev.groups = ccwgroup_attr_groups;
+
+	if (gdrv) {
+		gdev->dev.driver = &gdrv->driver;
+		rc = gdrv->setup ? gdrv->setup(gdev) : 0;
+		if (rc)
+			goto error;
+	}
+	rc = device_add(&gdev->dev);
+	if (rc)
+		goto error;
+	rc = __ccwgroup_create_symlinks(gdev);
+	if (rc) {
+		device_del(&gdev->dev);
+		goto error;
+	}
+	mutex_unlock(&gdev->reg_mutex);
+	return 0;
+error:
+	for (i = 0; i < num_devices; i++)
+		if (gdev->cdev[i]) {
+			spin_lock_irq(gdev->cdev[i]->ccwlock);
+			if (dev_get_drvdata(&gdev->cdev[i]->dev) == gdev)
+				dev_set_drvdata(&gdev->cdev[i]->dev, NULL);
+			spin_unlock_irq(gdev->cdev[i]->ccwlock);
+			put_device(&gdev->cdev[i]->dev);
+			gdev->cdev[i] = NULL;
+		}
+	mutex_unlock(&gdev->reg_mutex);
+	put_device(&gdev->dev);
+	return rc;
+}
+EXPORT_SYMBOL(ccwgroup_create_dev);
+
+static int ccwgroup_notifier(struct notifier_block *nb, unsigned long action,
+			     void *data)
+{
+	struct ccwgroup_device *gdev = to_ccwgroupdev(data);
+
+	if (action == BUS_NOTIFY_UNBIND_DRIVER) {
+		get_device(&gdev->dev);
+		schedule_work(&gdev->ungroup_work);
+	}
+
+	return NOTIFY_OK;
+}
+
+static struct notifier_block ccwgroup_nb = {
+	.notifier_call = ccwgroup_notifier
+};
+
+static int __init init_ccwgroup(void)
+{
+	int ret;
+
+	ret = bus_register(&ccwgroup_bus_type);
+	if (ret)
+		return ret;
+
+	ret = bus_register_notifier(&ccwgroup_bus_type, &ccwgroup_nb);
+	if (ret)
+		bus_unregister(&ccwgroup_bus_type);
+
+	return ret;
+}
+
+static void __exit cleanup_ccwgroup(void)
+{
+	bus_unregister_notifier(&ccwgroup_bus_type, &ccwgroup_nb);
+	bus_unregister(&ccwgroup_bus_type);
+}
+
+module_init(init_ccwgroup);
+module_exit(cleanup_ccwgroup);
+
+/************************** driver stuff ******************************/
+
+static int ccwgroup_remove(struct device *dev)
+{
+	struct ccwgroup_device *gdev = to_ccwgroupdev(dev);
+	struct ccwgroup_driver *gdrv = to_ccwgroupdrv(dev->driver);
+
+	if (!dev->driver)
+		return 0;
+	if (gdrv->remove)
+		gdrv->remove(gdev);
+
+	return 0;
+}
+
+static void ccwgroup_shutdown(struct device *dev)
+{
+	struct ccwgroup_device *gdev = to_ccwgroupdev(dev);
+	struct ccwgroup_driver *gdrv = to_ccwgroupdrv(dev->driver);
+
+	if (!dev->driver)
+		return;
+	if (gdrv->shutdown)
+		gdrv->shutdown(gdev);
+}
+
+static int ccwgroup_pm_prepare(struct device *dev)
+{
+	struct ccwgroup_device *gdev = to_ccwgroupdev(dev);
+	struct ccwgroup_driver *gdrv = to_ccwgroupdrv(gdev->dev.driver);
+
+	/* Fail while device is being set online/offline. */
+	if (atomic_read(&gdev->onoff))
+		return -EAGAIN;
+
+	if (!gdev->dev.driver || gdev->state != CCWGROUP_ONLINE)
+		return 0;
+
+	return gdrv->prepare ? gdrv->prepare(gdev) : 0;
+}
+
+static void ccwgroup_pm_complete(struct device *dev)
+{
+	struct ccwgroup_device *gdev = to_ccwgroupdev(dev);
+	struct ccwgroup_driver *gdrv = to_ccwgroupdrv(dev->driver);
+
+	if (!gdev->dev.driver || gdev->state != CCWGROUP_ONLINE)
+		return;
+
+	if (gdrv->complete)
+		gdrv->complete(gdev);
+}
+
+static int ccwgroup_pm_freeze(struct device *dev)
+{
+	struct ccwgroup_device *gdev = to_ccwgroupdev(dev);
+	struct ccwgroup_driver *gdrv = to_ccwgroupdrv(gdev->dev.driver);
+
+	if (!gdev->dev.driver || gdev->state != CCWGROUP_ONLINE)
+		return 0;
+
+	return gdrv->freeze ? gdrv->freeze(gdev) : 0;
+}
+
+static int ccwgroup_pm_thaw(struct device *dev)
+{
+	struct ccwgroup_device *gdev = to_ccwgroupdev(dev);
+	struct ccwgroup_driver *gdrv = to_ccwgroupdrv(gdev->dev.driver);
+
+	if (!gdev->dev.driver || gdev->state != CCWGROUP_ONLINE)
+		return 0;
+
+	return gdrv->thaw ? gdrv->thaw(gdev) : 0;
+}
+
+static int ccwgroup_pm_restore(struct device *dev)
+{
+	struct ccwgroup_device *gdev = to_ccwgroupdev(dev);
+	struct ccwgroup_driver *gdrv = to_ccwgroupdrv(gdev->dev.driver);
+
+	if (!gdev->dev.driver || gdev->state != CCWGROUP_ONLINE)
+		return 0;
+
+	return gdrv->restore ? gdrv->restore(gdev) : 0;
+}
+
+static const struct dev_pm_ops ccwgroup_pm_ops = {
+	.prepare = ccwgroup_pm_prepare,
+	.complete = ccwgroup_pm_complete,
+	.freeze = ccwgroup_pm_freeze,
+	.thaw = ccwgroup_pm_thaw,
+	.restore = ccwgroup_pm_restore,
+};
+
+static struct bus_type ccwgroup_bus_type = {
+	.name   = "ccwgroup",
+	.remove = ccwgroup_remove,
+	.shutdown = ccwgroup_shutdown,
+	.pm = &ccwgroup_pm_ops,
+};
+
+bool dev_is_ccwgroup(struct device *dev)
+{
+	return dev->bus == &ccwgroup_bus_type;
+}
+EXPORT_SYMBOL(dev_is_ccwgroup);
+
+/**
+ * ccwgroup_driver_register() - register a ccw group driver
+ * @cdriver: driver to be registered
+ *
+ * This function is mainly a wrapper around driver_register().
+ */
+int ccwgroup_driver_register(struct ccwgroup_driver *cdriver)
+{
+	/* register our new driver with the core */
+	cdriver->driver.bus = &ccwgroup_bus_type;
+
+	return driver_register(&cdriver->driver);
+}
+EXPORT_SYMBOL(ccwgroup_driver_register);
+
+static int __ccwgroup_match_all(struct device *dev, void *data)
+{
+	return 1;
+}
+
+/**
+ * ccwgroup_driver_unregister() - deregister a ccw group driver
+ * @cdriver: driver to be deregistered
+ *
+ * This function is mainly a wrapper around driver_unregister().
+ */
+void ccwgroup_driver_unregister(struct ccwgroup_driver *cdriver)
+{
+	struct device *dev;
+
+	/* We don't want ccwgroup devices to live longer than their driver. */
+	while ((dev = driver_find_device(&cdriver->driver, NULL, NULL,
+					 __ccwgroup_match_all))) {
+		struct ccwgroup_device *gdev = to_ccwgroupdev(dev);
+
+		ccwgroup_ungroup(gdev);
+		put_device(dev);
+	}
+	driver_unregister(&cdriver->driver);
+}
+EXPORT_SYMBOL(ccwgroup_driver_unregister);
+
+/**
+ * ccwgroup_probe_ccwdev() - probe function for slave devices
+ * @cdev: ccw device to be probed
+ *
+ * This is a dummy probe function for ccw devices that are slave devices in
+ * a ccw group device.
+ * Returns:
+ *  always %0
+ */
+int ccwgroup_probe_ccwdev(struct ccw_device *cdev)
+{
+	return 0;
+}
+EXPORT_SYMBOL(ccwgroup_probe_ccwdev);
+
+/**
+ * ccwgroup_remove_ccwdev() - remove function for slave devices
+ * @cdev: ccw device to be removed
+ *
+ * This is a remove function for ccw devices that are slave devices in a ccw
+ * group device. It sets the ccw device offline and also deregisters the
+ * embedding ccw group device.
+ */
+void ccwgroup_remove_ccwdev(struct ccw_device *cdev)
+{
+	struct ccwgroup_device *gdev;
+
+	/* Ignore offlining errors, device is gone anyway. */
+	ccw_device_set_offline(cdev);
+	/* If one of its devices is gone, the whole group is done for. */
+	spin_lock_irq(cdev->ccwlock);
+	gdev = dev_get_drvdata(&cdev->dev);
+	if (!gdev) {
+		spin_unlock_irq(cdev->ccwlock);
+		return;
+	}
+	/* Get ccwgroup device reference for local processing. */
+	get_device(&gdev->dev);
+	spin_unlock_irq(cdev->ccwlock);
+	/* Unregister group device. */
+	ccwgroup_ungroup(gdev);
+	/* Release ccwgroup device reference for local processing. */
+	put_device(&gdev->dev);
+}
+EXPORT_SYMBOL(ccwgroup_remove_ccwdev);
+MODULE_LICENSE("GPL");
diff --git a/drivers/s390/cio/ccwreq.c b/drivers/s390/cio/ccwreq.c
new file mode 100644
index 0000000..603268a
--- /dev/null
+++ b/drivers/s390/cio/ccwreq.c
@@ -0,0 +1,367 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ *  Handling of internal CCW device requests.
+ *
+ *    Copyright IBM Corp. 2009, 2011
+ *    Author(s): Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
+ */
+
+#define KMSG_COMPONENT "cio"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <linux/types.h>
+#include <linux/err.h>
+#include <asm/ccwdev.h>
+#include <asm/cio.h>
+
+#include "io_sch.h"
+#include "cio.h"
+#include "device.h"
+#include "cio_debug.h"
+
+/**
+ * lpm_adjust - adjust path mask
+ * @lpm: path mask to adjust
+ * @mask: mask of available paths
+ *
+ * Shift @lpm right until @lpm and @mask have at least one bit in common or
+ * until @lpm is zero. Return the resulting lpm.
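+ * For example, lpm_adjust(0x80, 0x20) returns 0x20.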
+ */
+int lpm_adjust(int lpm, int mask)
+{
+	while (lpm && ((lpm & mask) == 0))
+		lpm >>= 1;
+	return lpm;
+}
+
+/*
+ * Adjust path mask to use next path and reset retry count. Return resulting
+ * path mask.
+ */
+static u16 ccwreq_next_path(struct ccw_device *cdev)
+{
+	struct ccw_request *req = &cdev->private->req;
+
+	if (!req->singlepath) {
+		req->mask = 0;
+		goto out;
+	}
+	req->retries	= req->maxretries;
+	req->mask	= lpm_adjust(req->mask >> 1, req->lpm);
+out:
+	return req->mask;
+}
+
+/*
+ * Clean up device state and report to callback.
+ */
+static void ccwreq_stop(struct ccw_device *cdev, int rc)
+{
+	struct ccw_request *req = &cdev->private->req;
+
+	if (req->done)
+		return;
+	req->done = 1;
+	ccw_device_set_timeout(cdev, 0);
+	memset(&cdev->private->irb, 0, sizeof(struct irb));
+	if (rc && rc != -ENODEV && req->drc)
+		rc = req->drc;
+	req->callback(cdev, req->data, rc);
+}
+
+/*
+ * (Re-)Start the operation until retries and paths are exhausted.
+ */
+static void ccwreq_do(struct ccw_device *cdev)
+{
+	struct ccw_request *req = &cdev->private->req;
+	struct subchannel *sch = to_subchannel(cdev->dev.parent);
+	struct ccw1 *cp = req->cp;
+	int rc = -EACCES;
+
+	while (req->mask) {
+		if (req->retries-- == 0) {
+			/* Retries exhausted, try next path. */
+			ccwreq_next_path(cdev);
+			continue;
+		}
+		/* Perform start function. */
+		memset(&cdev->private->irb, 0, sizeof(struct irb));
+		rc = cio_start(sch, cp, (u8) req->mask);
+		if (rc == 0) {
+			/* I/O started successfully. */
+			ccw_device_set_timeout(cdev, req->timeout);
+			return;
+		}
+		if (rc == -ENODEV) {
+			/* Permanent device error. */
+			break;
+		}
+		if (rc == -EACCES) {
+			/* Permanent path error. */
+			ccwreq_next_path(cdev);
+			continue;
+		}
+		/* Temporary improper status. */
+		rc = cio_clear(sch);
+		if (rc)
+			break;
+		return;
+	}
+	ccwreq_stop(cdev, rc);
+}
+
+/**
+ * ccw_request_start - perform I/O request
+ * @cdev: ccw device
+ *
+ * Perform the I/O request specified by cdev->req.
+ */
+void ccw_request_start(struct ccw_device *cdev)
+{
+	struct ccw_request *req = &cdev->private->req;
+
+	if (req->singlepath) {
+		/* Try all paths twice to counter link flapping. */
+		req->mask = 0x8080;
+	} else
+		req->mask = req->lpm;
+
+	req->retries	= req->maxretries;
+	req->mask	= lpm_adjust(req->mask, req->lpm);
+	req->drc	= 0;
+	req->done	= 0;
+	req->cancel	= 0;
+	if (!req->mask)
+		goto out_nopath;
+	ccwreq_do(cdev);
+	return;
+
+out_nopath:
+	ccwreq_stop(cdev, -EACCES);
+}
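+
+/*
+ * Walk-through of the 0x8080 doubling (illustrative only): for
+ * req->lpm = 0xc0 the successive masks are 0x8080, 0x4040, 0x0080 and
+ * 0x0040, each truncated to u8 for cio_start(), i.e. the request is tried
+ * on path 0x80, then 0x40, then once more on each before -EACCES is
+ * reported.
+ */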
+
+/**
+ * ccw_request_cancel - cancel running I/O request
+ * @cdev: ccw device
+ *
+ * Cancel the I/O request specified by cdev->req. Return non-zero if request
+ * has already finished, zero otherwise.
+ */
+int ccw_request_cancel(struct ccw_device *cdev)
+{
+	struct subchannel *sch = to_subchannel(cdev->dev.parent);
+	struct ccw_request *req = &cdev->private->req;
+	int rc;
+
+	if (req->done)
+		return 1;
+	req->cancel = 1;
+	rc = cio_clear(sch);
+	if (rc)
+		ccwreq_stop(cdev, rc);
+	return 0;
+}
+
+/*
+ * Return the status of the internal I/O started on the specified ccw device.
+ * Perform BASIC SENSE if required.
+ */
+static enum io_status ccwreq_status(struct ccw_device *cdev, struct irb *lcirb)
+{
+	struct irb *irb = &cdev->private->irb;
+	struct cmd_scsw *scsw = &irb->scsw.cmd;
+	enum uc_todo todo;
+
+	/* Perform BASIC SENSE if needed. */
+	if (ccw_device_accumulate_and_sense(cdev, lcirb))
+		return IO_RUNNING;
+	/* Check for halt/clear interrupt. */
+	if (scsw->fctl & (SCSW_FCTL_HALT_FUNC | SCSW_FCTL_CLEAR_FUNC))
+		return IO_KILLED;
+	/* Check for path error. */
+	if (scsw->cc == 3 || scsw->pno)
+		return IO_PATH_ERROR;
+	/* Handle BASIC SENSE data. */
+	if (irb->esw.esw0.erw.cons) {
+		CIO_TRACE_EVENT(2, "sensedata");
+		CIO_HEX_EVENT(2, &cdev->private->dev_id,
+			      sizeof(struct ccw_dev_id));
+		CIO_HEX_EVENT(2, &cdev->private->irb.ecw, SENSE_MAX_COUNT);
+		/* Check for command reject. */
+		if (irb->ecw[0] & SNS0_CMD_REJECT)
+			return IO_REJECTED;
+		/* Ask the driver what to do */
+		if (cdev->drv && cdev->drv->uc_handler) {
+			todo = cdev->drv->uc_handler(cdev, lcirb);
+			CIO_TRACE_EVENT(2, "uc_response");
+			CIO_HEX_EVENT(2, &todo, sizeof(todo));
+			switch (todo) {
+			case UC_TODO_RETRY:
+				return IO_STATUS_ERROR;
+			case UC_TODO_RETRY_ON_NEW_PATH:
+				return IO_PATH_ERROR;
+			case UC_TODO_STOP:
+				return IO_REJECTED;
+			default:
+				return IO_STATUS_ERROR;
+			}
+		}
+		/* Assume that unexpected SENSE data implies an error. */
+		return IO_STATUS_ERROR;
+	}
+	/* Check for channel errors. */
+	if (scsw->cstat != 0)
+		return IO_STATUS_ERROR;
+	/* Check for device errors. */
+	if (scsw->dstat & ~(DEV_STAT_CHN_END | DEV_STAT_DEV_END))
+		return IO_STATUS_ERROR;
+	/* Check for final state. */
+	if (!(scsw->dstat & DEV_STAT_DEV_END))
+		return IO_RUNNING;
+	/* Check for other improper status. */
+	if (scsw->cc == 1 && (scsw->stctl & SCSW_STCTL_ALERT_STATUS))
+		return IO_STATUS_ERROR;
+	return IO_DONE;
+}
+
+/*
+ * Log ccw request status.
+ */
+static void ccwreq_log_status(struct ccw_device *cdev, enum io_status status)
+{
+	struct ccw_request *req = &cdev->private->req;
+	struct {
+		struct ccw_dev_id dev_id;
+		u16 retries;
+		u8 lpm;
+		u8 status;
+	} __packed data;
+
+	data.dev_id	= cdev->private->dev_id;
+	data.retries	= req->retries;
+	data.lpm	= (u8) req->mask;
+	data.status	= (u8) status;
+	CIO_TRACE_EVENT(2, "reqstat");
+	CIO_HEX_EVENT(2, &data, sizeof(data));
+}
+
+/**
+ * ccw_request_handler - interrupt handler for I/O request procedure
+ * @cdev: ccw device
+ *
+ * Handle interrupt during I/O request procedure.
+ */
+void ccw_request_handler(struct ccw_device *cdev)
+{
+	struct irb *irb = this_cpu_ptr(&cio_irb);
+	struct ccw_request *req = &cdev->private->req;
+	enum io_status status;
+	int rc = -EOPNOTSUPP;
+
+	/* Check status of I/O request. */
+	status = ccwreq_status(cdev, irb);
+	if (req->filter)
+		status = req->filter(cdev, req->data, irb, status);
+	if (status != IO_RUNNING)
+		ccw_device_set_timeout(cdev, 0);
+	if (status != IO_DONE && status != IO_RUNNING)
+		ccwreq_log_status(cdev, status);
+	switch (status) {
+	case IO_DONE:
+		break;
+	case IO_RUNNING:
+		return;
+	case IO_REJECTED:
+		goto err;
+	case IO_PATH_ERROR:
+		goto out_next_path;
+	case IO_STATUS_ERROR:
+		goto out_restart;
+	case IO_KILLED:
+		/* Check if request was cancelled on purpose. */
+		if (req->cancel) {
+			rc = -EIO;
+			goto err;
+		}
+		goto out_restart;
+	}
+	/* Check back with request initiator. */
+	if (!req->check)
+		goto out;
+	switch (req->check(cdev, req->data)) {
+	case 0:
+		break;
+	case -EAGAIN:
+		goto out_restart;
+	case -EACCES:
+		goto out_next_path;
+	default:
+		goto err;
+	}
+out:
+	ccwreq_stop(cdev, 0);
+	return;
+
+out_next_path:
+	/* Try next path and restart I/O. */
+	if (!ccwreq_next_path(cdev)) {
+		rc = -EACCES;
+		goto err;
+	}
+out_restart:
+	/* Restart. */
+	ccwreq_do(cdev);
+	return;
+err:
+	ccwreq_stop(cdev, rc);
+}
+
+/**
+ * ccw_request_timeout - timeout handler for I/O request procedure
+ * @cdev: ccw device
+ *
+ * Handle timeout during I/O request procedure.
+ */
+void ccw_request_timeout(struct ccw_device *cdev)
+{
+	struct subchannel *sch = to_subchannel(cdev->dev.parent);
+	struct ccw_request *req = &cdev->private->req;
+	int rc = -ENODEV, chp;
+
+	if (cio_update_schib(sch))
+		goto err;
+
+	for (chp = 0; chp < 8; chp++) {
+		if ((0x80 >> chp) & sch->schib.pmcw.lpum)
+			pr_warn("%s: No interrupt was received within %lus (CS=%02x, DS=%02x, CHPID=%x.%02x)\n",
+				dev_name(&cdev->dev), req->timeout / HZ,
+				scsw_cstat(&sch->schib.scsw),
+				scsw_dstat(&sch->schib.scsw),
+				sch->schid.cssid,
+				sch->schib.pmcw.chpid[chp]);
+	}
+
+	if (!ccwreq_next_path(cdev)) {
+		/* set the final return code for this request */
+		req->drc = -ETIME;
+	}
+	rc = cio_clear(sch);
+	if (rc)
+		goto err;
+	return;
+
+err:
+	ccwreq_stop(cdev, rc);
+}
+
+/**
+ * ccw_request_notoper - notoper handler for I/O request procedure
+ * @cdev: ccw device
+ *
+ * Handle notoper during I/O request procedure.
+ */
+void ccw_request_notoper(struct ccw_device *cdev)
+{
+	ccwreq_stop(cdev, -ENODEV);
+}
diff --git a/drivers/s390/cio/chp.c b/drivers/s390/cio/chp.c
new file mode 100644
index 0000000..51038ec
--- /dev/null
+++ b/drivers/s390/cio/chp.c
@@ -0,0 +1,831 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ *    Copyright IBM Corp. 1999, 2010
+ *    Author(s): Cornelia Huck (cornelia.huck@de.ibm.com)
+ *		 Arnd Bergmann (arndb@de.ibm.com)
+ *		 Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
+ */
+
+#include <linux/bug.h>
+#include <linux/workqueue.h>
+#include <linux/spinlock.h>
+#include <linux/export.h>
+#include <linux/sched.h>
+#include <linux/init.h>
+#include <linux/jiffies.h>
+#include <linux/wait.h>
+#include <linux/mutex.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <asm/chpid.h>
+#include <asm/sclp.h>
+#include <asm/crw.h>
+
+#include "cio.h"
+#include "css.h"
+#include "ioasm.h"
+#include "cio_debug.h"
+#include "chp.h"
+
+#define to_channelpath(device) container_of(device, struct channel_path, dev)
+#define CHP_INFO_UPDATE_INTERVAL	(1 * HZ)
+
+enum cfg_task_t {
+	cfg_none,
+	cfg_configure,
+	cfg_deconfigure
+};
+
+/* Map for pending configure tasks. */
+static enum cfg_task_t chp_cfg_task[__MAX_CSSID + 1][__MAX_CHPID + 1];
+static DEFINE_SPINLOCK(cfg_lock);
+
+/* Map for channel-path status. */
+static struct sclp_chp_info chp_info;
+static DEFINE_MUTEX(info_lock);
+
+/* Time after which channel-path status may be outdated. */
+static unsigned long chp_info_expires;
+
+static struct work_struct cfg_work;
+
+/* Wait queue for configure completion events. */
+static wait_queue_head_t cfg_wait_queue;
+
+/* Set vary state for given chpid. */
+static void set_chp_logically_online(struct chp_id chpid, int onoff)
+{
+	chpid_to_chp(chpid)->state = onoff;
+}
+
+/* On success return 0 if channel-path is varied offline, 1 if it is varied
+ * online. Return -ENODEV if channel-path is not registered. */
+int chp_get_status(struct chp_id chpid)
+{
+	return (chpid_to_chp(chpid) ? chpid_to_chp(chpid)->state : -ENODEV);
+}
+
+/**
+ * chp_get_sch_opm - return opm for subchannel
+ * @sch: subchannel
+ *
+ * Calculate and return the operational path mask (opm) based on the chpids
+ * used by the subchannel and the status of the associated channel-paths.
+ */
+u8 chp_get_sch_opm(struct subchannel *sch)
+{
+	struct chp_id chpid;
+	int opm;
+	int i;
+
+	opm = 0;
+	chp_id_init(&chpid);
+	for (i = 0; i < 8; i++) {
+		opm <<= 1;
+		chpid.id = sch->schib.pmcw.chpid[i];
+		if (chp_get_status(chpid) != 0)
+			opm |= 1;
+	}
+	return opm;
+}
+EXPORT_SYMBOL_GPL(chp_get_sch_opm);
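+
+/*
+ * Example (illustrative only): if pmcw.chpid[0] and pmcw.chpid[2] name
+ * channel paths that are registered and varied online while the remaining
+ * paths are varied offline, the loop yields opm = 0xa0 - chpid[0] maps to
+ * the most significant bit. Note that chp_get_status() returns -ENODEV
+ * for an unregistered chpid, which is also != 0 and thus leaves that path
+ * in the opm.
+ */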
+
+/**
+ * chp_is_registered - check if a channel-path is registered
+ * @chpid: channel-path ID
+ *
+ * Return non-zero if a channel-path with the given chpid is registered,
+ * zero otherwise.
+ */
+int chp_is_registered(struct chp_id chpid)
+{
+	return chpid_to_chp(chpid) != NULL;
+}
+
+/*
+ * Function: s390_vary_chpid
+ * Varies the specified chpid online or offline
+ */
+static int s390_vary_chpid(struct chp_id chpid, int on)
+{
+	char dbf_text[15];
+	int status;
+
+	sprintf(dbf_text, on ? "varyon%x.%02x" : "varyoff%x.%02x", chpid.cssid,
+		chpid.id);
+	CIO_TRACE_EVENT(2, dbf_text);
+
+	status = chp_get_status(chpid);
+	if (!on && !status)
+		return 0;
+
+	set_chp_logically_online(chpid, on);
+	chsc_chp_vary(chpid, on);
+	return 0;
+}
+
+/*
+ * Channel measurement related functions
+ */
+static ssize_t chp_measurement_chars_read(struct file *filp,
+					  struct kobject *kobj,
+					  struct bin_attribute *bin_attr,
+					  char *buf, loff_t off, size_t count)
+{
+	struct channel_path *chp;
+	struct device *device;
+
+	device = container_of(kobj, struct device, kobj);
+	chp = to_channelpath(device);
+	if (chp->cmg == -1)
+		return 0;
+
+	return memory_read_from_buffer(buf, count, &off, &chp->cmg_chars,
+				       sizeof(chp->cmg_chars));
+}
+
+static const struct bin_attribute chp_measurement_chars_attr = {
+	.attr = {
+		.name = "measurement_chars",
+		.mode = S_IRUSR,
+	},
+	.size = sizeof(struct cmg_chars),
+	.read = chp_measurement_chars_read,
+};
+
+static void chp_measurement_copy_block(struct cmg_entry *buf,
+				       struct channel_subsystem *css,
+				       struct chp_id chpid)
+{
+	void *area;
+	struct cmg_entry *entry, reference_buf;
+	int idx;
+
+	if (chpid.id < 128) {
+		area = css->cub_addr1;
+		idx = chpid.id;
+	} else {
+		area = css->cub_addr2;
+		idx = chpid.id - 128;
+	}
+	entry = area + (idx * sizeof(struct cmg_entry));
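+	/*
+	 * The measurement block can be updated by the channel subsystem
+	 * while it is being copied. Copy the entry twice and compare the
+	 * leading word of both copies: a mismatch means the entry changed
+	 * in between, so retry until a consistent snapshot is read.
+	 */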
+	do {
+		memcpy(buf, entry, sizeof(*entry));
+		memcpy(&reference_buf, entry, sizeof(*entry));
+	} while (reference_buf.values[0] != buf->values[0]);
+}
+
+static ssize_t chp_measurement_read(struct file *filp, struct kobject *kobj,
+				    struct bin_attribute *bin_attr,
+				    char *buf, loff_t off, size_t count)
+{
+	struct channel_path *chp;
+	struct channel_subsystem *css;
+	struct device *device;
+	unsigned int size;
+
+	device = container_of(kobj, struct device, kobj);
+	chp = to_channelpath(device);
+	css = to_css(chp->dev.parent);
+
+	size = sizeof(struct cmg_entry);
+
+	/* Only allow single reads. */
+	if (off || count < size)
+		return 0;
+	chp_measurement_copy_block((struct cmg_entry *)buf, css, chp->chpid);
+	count = size;
+	return count;
+}
+
+static const struct bin_attribute chp_measurement_attr = {
+	.attr = {
+		.name = "measurement",
+		.mode = S_IRUSR,
+	},
+	.size = sizeof(struct cmg_entry),
+	.read = chp_measurement_read,
+};
+
+void chp_remove_cmg_attr(struct channel_path *chp)
+{
+	device_remove_bin_file(&chp->dev, &chp_measurement_chars_attr);
+	device_remove_bin_file(&chp->dev, &chp_measurement_attr);
+}
+
+int chp_add_cmg_attr(struct channel_path *chp)
+{
+	int ret;
+
+	ret = device_create_bin_file(&chp->dev, &chp_measurement_chars_attr);
+	if (ret)
+		return ret;
+	ret = device_create_bin_file(&chp->dev, &chp_measurement_attr);
+	if (ret)
+		device_remove_bin_file(&chp->dev, &chp_measurement_chars_attr);
+	return ret;
+}
+
+/*
+ * Files for the channel path entries.
+ */
+static ssize_t chp_status_show(struct device *dev,
+			       struct device_attribute *attr, char *buf)
+{
+	struct channel_path *chp = to_channelpath(dev);
+	int status;
+
+	mutex_lock(&chp->lock);
+	status = chp->state;
+	mutex_unlock(&chp->lock);
+
+	return status ? sprintf(buf, "online\n") : sprintf(buf, "offline\n");
+}
+
+static ssize_t chp_status_write(struct device *dev,
+				struct device_attribute *attr,
+				const char *buf, size_t count)
+{
+	struct channel_path *cp = to_channelpath(dev);
+	char cmd[10];
+	int num_args;
+	int error;
+
+	num_args = sscanf(buf, "%5s", cmd);
+	if (!num_args)
+		return count;
+
+	if (!strncasecmp(cmd, "on", 2) || !strcmp(cmd, "1")) {
+		mutex_lock(&cp->lock);
+		error = s390_vary_chpid(cp->chpid, 1);
+		mutex_unlock(&cp->lock);
+	} else if (!strncasecmp(cmd, "off", 3) || !strcmp(cmd, "0")) {
+		mutex_lock(&cp->lock);
+		error = s390_vary_chpid(cp->chpid, 0);
+		mutex_unlock(&cp->lock);
+	} else
+		error = -EINVAL;
+
+	return error < 0 ? error : count;
+}
+
+static DEVICE_ATTR(status, 0644, chp_status_show, chp_status_write);
+
+static ssize_t chp_configure_show(struct device *dev,
+				  struct device_attribute *attr, char *buf)
+{
+	struct channel_path *cp;
+	int status;
+
+	cp = to_channelpath(dev);
+	status = chp_info_get_status(cp->chpid);
+	if (status < 0)
+		return status;
+
+	return snprintf(buf, PAGE_SIZE, "%d\n", status);
+}
+
+static int cfg_wait_idle(void);
+
+static ssize_t chp_configure_write(struct device *dev,
+				   struct device_attribute *attr,
+				   const char *buf, size_t count)
+{
+	struct channel_path *cp;
+	int val;
+	char delim;
+
+	if (sscanf(buf, "%d %c", &val, &delim) != 1)
+		return -EINVAL;
+	if (val != 0 && val != 1)
+		return -EINVAL;
+	cp = to_channelpath(dev);
+	chp_cfg_schedule(cp->chpid, val);
+	cfg_wait_idle();
+
+	return count;
+}
+
+static DEVICE_ATTR(configure, 0644, chp_configure_show, chp_configure_write);
+
+static ssize_t chp_type_show(struct device *dev, struct device_attribute *attr,
+			     char *buf)
+{
+	struct channel_path *chp = to_channelpath(dev);
+	u8 type;
+
+	mutex_lock(&chp->lock);
+	type = chp->desc.desc;
+	mutex_unlock(&chp->lock);
+	return sprintf(buf, "%x\n", type);
+}
+
+static DEVICE_ATTR(type, 0444, chp_type_show, NULL);
+
+static ssize_t chp_cmg_show(struct device *dev, struct device_attribute *attr,
+			    char *buf)
+{
+	struct channel_path *chp = to_channelpath(dev);
+
+	if (!chp)
+		return 0;
+	if (chp->cmg == -1) /* channel measurements not available */
+		return sprintf(buf, "unknown\n");
+	return sprintf(buf, "%x\n", chp->cmg);
+}
+
+static DEVICE_ATTR(cmg, 0444, chp_cmg_show, NULL);
+
+static ssize_t chp_shared_show(struct device *dev,
+			       struct device_attribute *attr, char *buf)
+{
+	struct channel_path *chp = to_channelpath(dev);
+
+	if (!chp)
+		return 0;
+	if (chp->shared == -1) /* channel measurements not available */
+		return sprintf(buf, "unknown\n");
+	return sprintf(buf, "%x\n", chp->shared);
+}
+
+static DEVICE_ATTR(shared, 0444, chp_shared_show, NULL);
+
+static ssize_t chp_chid_show(struct device *dev, struct device_attribute *attr,
+			     char *buf)
+{
+	struct channel_path *chp = to_channelpath(dev);
+	ssize_t rc;
+
+	mutex_lock(&chp->lock);
+	if (chp->desc_fmt1.flags & 0x10)
+		rc = sprintf(buf, "%04x\n", chp->desc_fmt1.chid);
+	else
+		rc = 0;
+	mutex_unlock(&chp->lock);
+
+	return rc;
+}
+static DEVICE_ATTR(chid, 0444, chp_chid_show, NULL);
+
+static ssize_t chp_chid_external_show(struct device *dev,
+				      struct device_attribute *attr, char *buf)
+{
+	struct channel_path *chp = to_channelpath(dev);
+	ssize_t rc;
+
+	mutex_lock(&chp->lock);
+	if (chp->desc_fmt1.flags & 0x10)
+		rc = sprintf(buf, "%x\n", chp->desc_fmt1.flags & 0x8 ? 1 : 0);
+	else
+		rc = 0;
+	mutex_unlock(&chp->lock);
+
+	return rc;
+}
+static DEVICE_ATTR(chid_external, 0444, chp_chid_external_show, NULL);
+
+static ssize_t util_string_read(struct file *filp, struct kobject *kobj,
+				struct bin_attribute *attr, char *buf,
+				loff_t off, size_t count)
+{
+	struct channel_path *chp = to_channelpath(kobj_to_dev(kobj));
+	ssize_t rc;
+
+	mutex_lock(&chp->lock);
+	rc = memory_read_from_buffer(buf, count, &off, chp->desc_fmt3.util_str,
+				     sizeof(chp->desc_fmt3.util_str));
+	mutex_unlock(&chp->lock);
+
+	return rc;
+}
+static BIN_ATTR_RO(util_string,
+		   sizeof(((struct channel_path_desc_fmt3 *)0)->util_str));
+
+static struct bin_attribute *chp_bin_attrs[] = {
+	&bin_attr_util_string,
+	NULL,
+};
+
+static struct attribute *chp_attrs[] = {
+	&dev_attr_status.attr,
+	&dev_attr_configure.attr,
+	&dev_attr_type.attr,
+	&dev_attr_cmg.attr,
+	&dev_attr_shared.attr,
+	&dev_attr_chid.attr,
+	&dev_attr_chid_external.attr,
+	NULL,
+};
+static struct attribute_group chp_attr_group = {
+	.attrs = chp_attrs,
+	.bin_attrs = chp_bin_attrs,
+};
+static const struct attribute_group *chp_attr_groups[] = {
+	&chp_attr_group,
+	NULL,
+};
+
+static void chp_release(struct device *dev)
+{
+	struct channel_path *cp;
+
+	cp = to_channelpath(dev);
+	kfree(cp);
+}
+
+/**
+ * chp_update_desc - update channel-path description
+ * @chp: channel-path
+ *
+ * Update the channel-path description of the specified channel-path
+ * including channel measurement related information.
+ * Return zero on success, non-zero otherwise.
+ */
+int chp_update_desc(struct channel_path *chp)
+{
+	int rc;
+
+	rc = chsc_determine_fmt0_channel_path_desc(chp->chpid, &chp->desc);
+	if (rc)
+		return rc;
+
+	/*
+	 * Fetching the following data is optional. Not all machines or
+	 * hypervisors implement the required chsc commands.
+	 */
+	chsc_determine_fmt1_channel_path_desc(chp->chpid, &chp->desc_fmt1);
+	chsc_determine_fmt3_channel_path_desc(chp->chpid, &chp->desc_fmt3);
+	chsc_get_channel_measurement_chars(chp);
+
+	return 0;
+}
+
+/**
+ * chp_new - register a new channel-path
+ * @chpid: channel-path ID
+ *
+ * Create and register data structure representing new channel-path. Return
+ * zero on success, non-zero otherwise.
+ */
+int chp_new(struct chp_id chpid)
+{
+	struct channel_subsystem *css = css_by_id(chpid.cssid);
+	struct channel_path *chp;
+	int ret = 0;
+
+	mutex_lock(&css->mutex);
+	if (chp_is_registered(chpid))
+		goto out;
+
+	chp = kzalloc(sizeof(struct channel_path), GFP_KERNEL);
+	if (!chp) {
+		ret = -ENOMEM;
+		goto out;
+	}
+	/* fill in status, etc. */
+	chp->chpid = chpid;
+	chp->state = 1;
+	chp->dev.parent = &css->device;
+	chp->dev.groups = chp_attr_groups;
+	chp->dev.release = chp_release;
+	mutex_init(&chp->lock);
+
+	/* Obtain channel path description and fill it in. */
+	ret = chp_update_desc(chp);
+	if (ret)
+		goto out_free;
+	if ((chp->desc.flags & 0x80) == 0) {
+		ret = -ENODEV;
+		goto out_free;
+	}
+	dev_set_name(&chp->dev, "chp%x.%02x", chpid.cssid, chpid.id);
+
+	/* make it known to the system */
+	ret = device_register(&chp->dev);
+	if (ret) {
+		CIO_MSG_EVENT(0, "Could not register chp%x.%02x: %d\n",
+			      chpid.cssid, chpid.id, ret);
+		put_device(&chp->dev);
+		goto out;
+	}
+
+	if (css->cm_enabled) {
+		ret = chp_add_cmg_attr(chp);
+		if (ret) {
+			device_unregister(&chp->dev);
+			goto out;
+		}
+	}
+	css->chps[chpid.id] = chp;
+	goto out;
+out_free:
+	kfree(chp);
+out:
+	mutex_unlock(&css->mutex);
+	return ret;
+}
+
+/**
+ * chp_get_chp_desc - return newly allocated channel-path description
+ * @chpid: channel-path ID
+ *
+ * On success return a newly allocated copy of the channel-path description
+ * data associated with the given channel-path ID. Return %NULL on error.
+ */
+struct channel_path_desc_fmt0 *chp_get_chp_desc(struct chp_id chpid)
+{
+	struct channel_path *chp;
+	struct channel_path_desc_fmt0 *desc;
+
+	chp = chpid_to_chp(chpid);
+	if (!chp)
+		return NULL;
+	desc = kmalloc(sizeof(*desc), GFP_KERNEL);
+	if (!desc)
+		return NULL;
+
+	mutex_lock(&chp->lock);
+	memcpy(desc, &chp->desc, sizeof(*desc));
+	mutex_unlock(&chp->lock);
+	return desc;
+}
+
+/**
+ * chp_process_crw - process channel-path status change
+ * @crw0: channel report-word to handle
+ * @crw1: second channel-report word (always NULL)
+ * @overflow: crw overflow indication
+ *
+ * Handle channel-report-words indicating that the status of a channel-path
+ * has changed.
+ */
+static void chp_process_crw(struct crw *crw0, struct crw *crw1,
+			    int overflow)
+{
+	struct chp_id chpid;
+
+	if (overflow) {
+		css_schedule_eval_all();
+		return;
+	}
+	CIO_CRW_EVENT(2, "CRW reports slct=%d, oflw=%d, "
+		      "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n",
+		      crw0->slct, crw0->oflw, crw0->chn, crw0->rsc, crw0->anc,
+		      crw0->erc, crw0->rsid);
+	/*
+	 * Check for solicited machine checks. These are
+	 * created by reset channel path and need not be
+	 * handled here.
+	 */
+	if (crw0->slct) {
+		CIO_CRW_EVENT(2, "solicited machine check for "
+			      "channel path %02X\n", crw0->rsid);
+		return;
+	}
+	chp_id_init(&chpid);
+	chpid.id = crw0->rsid;
+	switch (crw0->erc) {
+	case CRW_ERC_IPARM: /* Path has come. */
+	case CRW_ERC_INIT:
+		chp_new(chpid);
+		chsc_chp_online(chpid);
+		break;
+	case CRW_ERC_PERRI: /* Path has gone. */
+	case CRW_ERC_PERRN:
+		chsc_chp_offline(chpid);
+		break;
+	default:
+		CIO_CRW_EVENT(2, "Don't know how to handle erc=%x\n",
+			      crw0->erc);
+	}
+}
+
+int chp_ssd_get_mask(struct chsc_ssd_info *ssd, struct chp_link *link)
+{
+	int i;
+	int mask;
+
+	for (i = 0; i < 8; i++) {
+		mask = 0x80 >> i;
+		if (!(ssd->path_mask & mask))
+			continue;
+		if (!chp_id_is_equal(&ssd->chpid[i], &link->chpid))
+			continue;
+		if ((ssd->fla_valid_mask & mask) &&
+		    ((ssd->fla[i] & link->fla_mask) != link->fla))
+			continue;
+		return mask;
+	}
+	return 0;
+}
+EXPORT_SYMBOL_GPL(chp_ssd_get_mask);
+
+static inline int info_bit_num(struct chp_id id)
+{
+	return id.id + id.cssid * (__MAX_CHPID + 1);
+}
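+
+/*
+ * Example (illustrative only): with __MAX_CHPID being 255, each css
+ * occupies a block of 256 bits, so chpid 1.2a maps to bit
+ * 256 + 0x2a = 298 of the chp_info bitmaps.
+ */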
+
+/* Force chp_info refresh on next call to info_update(). */
+static void info_expire(void)
+{
+	mutex_lock(&info_lock);
+	chp_info_expires = jiffies - 1;
+	mutex_unlock(&info_lock);
+}
+
+/* Ensure that chp_info is up-to-date. */
+static int info_update(void)
+{
+	int rc;
+
+	mutex_lock(&info_lock);
+	rc = 0;
+	if (time_after(jiffies, chp_info_expires)) {
+		/* Data is too old, update. */
+		rc = sclp_chp_read_info(&chp_info);
+		chp_info_expires = jiffies + CHP_INFO_UPDATE_INTERVAL;
+	}
+	mutex_unlock(&info_lock);
+
+	return rc;
+}
+
+/**
+ * chp_info_get_status - retrieve configure status of a channel-path
+ * @chpid: channel-path ID
+ *
+ * On success, return 0 for standby, 1 for configured, 2 for reserved,
+ * 3 for not recognized. Return negative error code on error.
+ */
+int chp_info_get_status(struct chp_id chpid)
+{
+	int rc;
+	int bit;
+
+	rc = info_update();
+	if (rc)
+		return rc;
+
+	bit = info_bit_num(chpid);
+	mutex_lock(&info_lock);
+	if (!chp_test_bit(chp_info.recognized, bit))
+		rc = CHP_STATUS_NOT_RECOGNIZED;
+	else if (chp_test_bit(chp_info.configured, bit))
+		rc = CHP_STATUS_CONFIGURED;
+	else if (chp_test_bit(chp_info.standby, bit))
+		rc = CHP_STATUS_STANDBY;
+	else
+		rc = CHP_STATUS_RESERVED;
+	mutex_unlock(&info_lock);
+
+	return rc;
+}
+
+/* Return configure task for chpid. */
+static enum cfg_task_t cfg_get_task(struct chp_id chpid)
+{
+	return chp_cfg_task[chpid.cssid][chpid.id];
+}
+
+/* Set configure task for chpid. */
+static void cfg_set_task(struct chp_id chpid, enum cfg_task_t cfg)
+{
+	chp_cfg_task[chpid.cssid][chpid.id] = cfg;
+}
+
+/* Fetch the first configure task. Set chpid accordingly. */
+static enum cfg_task_t chp_cfg_fetch_task(struct chp_id *chpid)
+{
+	enum cfg_task_t t = cfg_none;
+
+	chp_id_for_each(chpid) {
+		t = cfg_get_task(*chpid);
+		if (t != cfg_none)
+			break;
+	}
+
+	return t;
+}
+
+/* Perform one configure/deconfigure request. Reschedule the work function
+ * until the last request has been processed. */
+static void cfg_func(struct work_struct *work)
+{
+	struct chp_id chpid;
+	enum cfg_task_t t;
+	int rc;
+
+	spin_lock(&cfg_lock);
+	t = chp_cfg_fetch_task(&chpid);
+	spin_unlock(&cfg_lock);
+
+	switch (t) {
+	case cfg_configure:
+		rc = sclp_chp_configure(chpid);
+		if (rc)
+			CIO_MSG_EVENT(2, "chp: sclp_chp_configure(%x.%02x)="
+				      "%d\n", chpid.cssid, chpid.id, rc);
+		else {
+			info_expire();
+			chsc_chp_online(chpid);
+		}
+		break;
+	case cfg_deconfigure:
+		rc = sclp_chp_deconfigure(chpid);
+		if (rc)
+			CIO_MSG_EVENT(2, "chp: sclp_chp_deconfigure(%x.%02x)="
+				      "%d\n", chpid.cssid, chpid.id, rc);
+		else {
+			info_expire();
+			chsc_chp_offline(chpid);
+		}
+		break;
+	case cfg_none:
+		/* Get updated information after last change. */
+		info_update();
+		wake_up_interruptible(&cfg_wait_queue);
+		return;
+	}
+	spin_lock(&cfg_lock);
+	if (t == cfg_get_task(chpid))
+		cfg_set_task(chpid, cfg_none);
+	spin_unlock(&cfg_lock);
+	schedule_work(&cfg_work);
+}
+
+/**
+ * chp_cfg_schedule - schedule chpid configuration request
+ * @chpid: channel-path ID
+ * @configure: Non-zero for configure, zero for deconfigure
+ *
+ * Schedule a channel-path configuration/deconfiguration request.
+ */
+void chp_cfg_schedule(struct chp_id chpid, int configure)
+{
+	CIO_MSG_EVENT(2, "chp_cfg_sched%x.%02x=%d\n", chpid.cssid, chpid.id,
+		      configure);
+	spin_lock(&cfg_lock);
+	cfg_set_task(chpid, configure ? cfg_configure : cfg_deconfigure);
+	spin_unlock(&cfg_lock);
+	schedule_work(&cfg_work);
+}
+
+/**
+ * chp_cfg_cancel_deconfigure - cancel chpid deconfiguration request
+ * @chpid: channel-path ID
+ *
+ * Cancel an active channel-path deconfiguration request if it has not yet
+ * been performed.
+ */
+void chp_cfg_cancel_deconfigure(struct chp_id chpid)
+{
+	CIO_MSG_EVENT(2, "chp_cfg_cancel:%x.%02x\n", chpid.cssid, chpid.id);
+	spin_lock(&cfg_lock);
+	if (cfg_get_task(chpid) == cfg_deconfigure)
+		cfg_set_task(chpid, cfg_none);
+	spin_unlock(&cfg_lock);
+}
+
+static bool cfg_idle(void)
+{
+	struct chp_id chpid;
+	enum cfg_task_t t;
+
+	spin_lock(&cfg_lock);
+	t = chp_cfg_fetch_task(&chpid);
+	spin_unlock(&cfg_lock);
+
+	return t == cfg_none;
+}
+
+static int cfg_wait_idle(void)
+{
+	if (wait_event_interruptible(cfg_wait_queue, cfg_idle()))
+		return -ERESTARTSYS;
+	return 0;
+}
+
+static int __init chp_init(void)
+{
+	struct chp_id chpid;
+	int state, ret;
+
+	ret = crw_register_handler(CRW_RSC_CPATH, chp_process_crw);
+	if (ret)
+		return ret;
+	INIT_WORK(&cfg_work, cfg_func);
+	init_waitqueue_head(&cfg_wait_queue);
+	if (info_update())
+		return 0;
+	/* Register available channel-paths. */
+	chp_id_for_each(&chpid) {
+		state = chp_info_get_status(chpid);
+		if (state == CHP_STATUS_CONFIGURED ||
+		    state == CHP_STATUS_STANDBY)
+			chp_new(chpid);
+	}
+
+	return 0;
+}
+
+subsys_initcall(chp_init);
diff --git a/drivers/s390/cio/chp.h b/drivers/s390/cio/chp.h
new file mode 100644
index 0000000..20259f3
--- /dev/null
+++ b/drivers/s390/cio/chp.h
@@ -0,0 +1,74 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ *    Copyright IBM Corp. 2007, 2010
+ *    Author(s): Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
+ */
+
+#ifndef S390_CHP_H
+#define S390_CHP_H
+
+#include <linux/types.h>
+#include <linux/device.h>
+#include <linux/mutex.h>
+#include <asm/chpid.h>
+#include "chsc.h"
+#include "css.h"
+
+#define CHP_STATUS_STANDBY		0
+#define CHP_STATUS_CONFIGURED		1
+#define CHP_STATUS_RESERVED		2
+#define CHP_STATUS_NOT_RECOGNIZED	3
+
+#define CHP_ONLINE 0
+#define CHP_OFFLINE 1
+#define CHP_VARY_ON 2
+#define CHP_VARY_OFF 3
+
+struct chp_link {
+	struct chp_id chpid;
+	u32 fla_mask;
+	u16 fla;
+};
+
+static inline int chp_test_bit(u8 *bitmap, int num)
+{
+	int byte = num >> 3;
+	int mask = 128 >> (num & 7);
+
+	return (bitmap[byte] & mask) ? 1 : 0;
+}
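+
+/*
+ * Example (illustrative only): bit numbering is MSB-first within each
+ * byte, so chp_test_bit(bitmap, 0) tests bit 0x80 of bitmap[0] and
+ * chp_test_bit(bitmap, 9) tests bit 0x40 of bitmap[1].
+ */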
+
+struct channel_path {
+	struct device dev;
+	struct chp_id chpid;
+	struct mutex lock; /* Serialize access to below members. */
+	int state;
+	struct channel_path_desc_fmt0 desc;
+	struct channel_path_desc_fmt1 desc_fmt1;
+	struct channel_path_desc_fmt3 desc_fmt3;
+	/* Channel-measurement related stuff: */
+	int cmg;
+	int shared;
+	struct cmg_chars cmg_chars;
+};
+
+/* Return channel_path struct for given chpid. */
+static inline struct channel_path *chpid_to_chp(struct chp_id chpid)
+{
+	return css_by_id(chpid.cssid)->chps[chpid.id];
+}
+
+int chp_get_status(struct chp_id chpid);
+u8 chp_get_sch_opm(struct subchannel *sch);
+int chp_is_registered(struct chp_id chpid);
+struct channel_path_desc_fmt0 *chp_get_chp_desc(struct chp_id chpid);
+void chp_remove_cmg_attr(struct channel_path *chp);
+int chp_add_cmg_attr(struct channel_path *chp);
+int chp_update_desc(struct channel_path *chp);
+int chp_new(struct chp_id chpid);
+void chp_cfg_schedule(struct chp_id chpid, int configure);
+void chp_cfg_cancel_deconfigure(struct chp_id chpid);
+int chp_info_get_status(struct chp_id chpid);
+int chp_ssd_get_mask(struct chsc_ssd_info *, struct chp_link *);
+#endif /* S390_CHP_H */
diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c
new file mode 100644
index 0000000..a0baee2
--- /dev/null
+++ b/drivers/s390/cio/chsc.c
@@ -0,0 +1,1384 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ *   S/390 common I/O routines -- channel subsystem call
+ *
+ *    Copyright IBM Corp. 1999,2012
+ *    Author(s): Ingo Adlung (adlung@de.ibm.com)
+ *		 Cornelia Huck (cornelia.huck@de.ibm.com)
+ *		 Arnd Bergmann (arndb@de.ibm.com)
+ */
+
+#define KMSG_COMPONENT "cio"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/mutex.h>
+#include <linux/pci.h>
+
+#include <asm/cio.h>
+#include <asm/chpid.h>
+#include <asm/chsc.h>
+#include <asm/crw.h>
+#include <asm/isc.h>
+#include <asm/ebcdic.h>
+
+#include "css.h"
+#include "cio.h"
+#include "cio_debug.h"
+#include "ioasm.h"
+#include "chp.h"
+#include "chsc.h"
+
+static void *sei_page;
+static void *chsc_page;
+static DEFINE_SPINLOCK(chsc_page_lock);
+
+/**
+ * chsc_error_from_response() - convert a chsc response to an error
+ * @response: chsc response code
+ *
+ * Returns an appropriate Linux error code for @response.
+ */
+int chsc_error_from_response(int response)
+{
+	switch (response) {
+	case 0x0001:
+		return 0;
+	case 0x0002:
+	case 0x0003:
+	case 0x0006:
+	case 0x0007:
+	case 0x0008:
+	case 0x000a:
+	case 0x0104:
+		return -EINVAL;
+	case 0x0004:
+		return -EOPNOTSUPP;
+	case 0x000b:
+	case 0x0107:		/* "Channel busy" for the op 0x003d */
+		return -EBUSY;
+	case 0x0100:
+	case 0x0102:
+		return -ENOMEM;
+	default:
+		return -EIO;
+	}
+}
+EXPORT_SYMBOL_GPL(chsc_error_from_response);
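+
+/*
+ * Usage sketch (illustrative only, 'area' is a stand-in for any chsc
+ * request/response block): callers first issue the chsc instruction and
+ * then translate the response code, e.g.
+ *
+ *	if (chsc(area))
+ *		return -EIO;
+ *	return chsc_error_from_response(area->response.code);
+ *
+ * so that a response code of 0x0001 yields 0 while e.g. 0x0004 yields
+ * -EOPNOTSUPP.
+ */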
+
+struct chsc_ssd_area {
+	struct chsc_header request;
+	u16 :10;
+	u16 ssid:2;
+	u16 :4;
+	u16 f_sch;	  /* first subchannel */
+	u16 :16;
+	u16 l_sch;	  /* last subchannel */
+	u32 :32;
+	struct chsc_header response;
+	u32 :32;
+	u8 sch_valid : 1;
+	u8 dev_valid : 1;
+	u8 st	     : 3; /* subchannel type */
+	u8 zeroes    : 3;
+	u8  unit_addr;	  /* unit address */
+	u16 devno;	  /* device number */
+	u8 path_mask;
+	u8 fla_valid_mask;
+	u16 sch;	  /* subchannel */
+	u8 chpid[8];	  /* chpids 0-7 */
+	u16 fla[8];	  /* full link addresses 0-7 */
+} __packed __aligned(PAGE_SIZE);
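+
+/*
+ * Request/response block for the store-subchannel-description chsc
+ * command (request code 0x0004, see chsc_get_ssd_info() below). The
+ * PAGE_SIZE alignment matches the 4K communication block the chsc
+ * instruction operates on.
+ */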
+
+int chsc_get_ssd_info(struct subchannel_id schid, struct chsc_ssd_info *ssd)
+{
+	struct chsc_ssd_area *ssd_area;
+	unsigned long flags;
+	int ccode;
+	int ret;
+	int i;
+	int mask;
+
+	spin_lock_irqsave(&chsc_page_lock, flags);
+	memset(chsc_page, 0, PAGE_SIZE);
+	ssd_area = chsc_page;
+	ssd_area->request.length = 0x0010;
+	ssd_area->request.code = 0x0004;
+	ssd_area->ssid = schid.ssid;
+	ssd_area->f_sch = schid.sch_no;
+	ssd_area->l_sch = schid.sch_no;
+
+	ccode = chsc(ssd_area);
+	/* Check response. */
+	if (ccode > 0) {
+		ret = (ccode == 3) ? -ENODEV : -EBUSY;
+		goto out;
+	}
+	ret = chsc_error_from_response(ssd_area->response.code);
+	if (ret != 0) {
+		CIO_MSG_EVENT(2, "chsc: ssd failed for 0.%x.%04x (rc=%04x)\n",
+			      schid.ssid, schid.sch_no,
+			      ssd_area->response.code);
+		goto out;
+	}
+	if (!ssd_area->sch_valid) {
+		ret = -ENODEV;
+		goto out;
+	}
+	/* Copy data */
+	ret = 0;
+	memset(ssd, 0, sizeof(struct chsc_ssd_info));
+	if ((ssd_area->st != SUBCHANNEL_TYPE_IO) &&
+	    (ssd_area->st != SUBCHANNEL_TYPE_MSG))
+		goto out;
+	ssd->path_mask = ssd_area->path_mask;
+	ssd->fla_valid_mask = ssd_area->fla_valid_mask;
+	for (i = 0; i < 8; i++) {
+		mask = 0x80 >> i;
+		if (ssd_area->path_mask & mask) {
+			chp_id_init(&ssd->chpid[i]);
+			ssd->chpid[i].id = ssd_area->chpid[i];
+		}
+		if (ssd_area->fla_valid_mask & mask)
+			ssd->fla[i] = ssd_area->fla[i];
+	}
+out:
+	spin_unlock_irqrestore(&chsc_page_lock, flags);
+	return ret;
+}
+
+/**
+ * chsc_ssqd() - store subchannel QDIO data (SSQD)
+ * @schid: id of the subchannel on which SSQD is performed
+ * @ssqd: request and response block for SSQD
+ *
+ * Returns 0 on success.
+ */
+int chsc_ssqd(struct subchannel_id schid, struct chsc_ssqd_area *ssqd)
+{
+	memset(ssqd, 0, sizeof(*ssqd));
+	ssqd->request.length = 0x0010;
+	ssqd->request.code = 0x0024;
+	ssqd->first_sch = schid.sch_no;
+	ssqd->last_sch = schid.sch_no;
+	ssqd->ssid = schid.ssid;
+
+	if (chsc(ssqd))
+		return -EIO;
+
+	return chsc_error_from_response(ssqd->response.code);
+}
+EXPORT_SYMBOL_GPL(chsc_ssqd);
+
+/**
+ * chsc_sadc() - set adapter device controls (SADC)
+ * @schid: id of the subchannel on which SADC is performed
+ * @scssc: request and response block for SADC
+ * @summary_indicator_addr: summary indicator address
+ * @subchannel_indicator_addr: subchannel indicator address
+ *
+ * Returns 0 on success.
+ */
+int chsc_sadc(struct subchannel_id schid, struct chsc_scssc_area *scssc,
+	      u64 summary_indicator_addr, u64 subchannel_indicator_addr)
+{
+	memset(scssc, 0, sizeof(*scssc));
+	scssc->request.length = 0x0fe0;
+	scssc->request.code = 0x0021;
+	scssc->operation_code = 0;
+
+	scssc->summary_indicator_addr = summary_indicator_addr;
+	scssc->subchannel_indicator_addr = subchannel_indicator_addr;
+
+	scssc->ks = PAGE_DEFAULT_KEY >> 4;
+	scssc->kc = PAGE_DEFAULT_KEY >> 4;
+	scssc->isc = QDIO_AIRQ_ISC;
+	scssc->schid = schid;
+
+	/* enable the time delay disablement facility */
+	if (css_general_characteristics.aif_tdd)
+		scssc->word_with_d_bit = 0x10000000;
+
+	if (chsc(scssc))
+		return -EIO;
+
+	return chsc_error_from_response(scssc->response.code);
+}
+EXPORT_SYMBOL_GPL(chsc_sadc);
+
+static int s390_subchannel_remove_chpid(struct subchannel *sch, void *data)
+{
+	spin_lock_irq(sch->lock);
+	if (sch->driver && sch->driver->chp_event)
+		if (sch->driver->chp_event(sch, data, CHP_OFFLINE) != 0)
+			goto out_unreg;
+	spin_unlock_irq(sch->lock);
+	return 0;
+
+out_unreg:
+	sch->lpm = 0;
+	spin_unlock_irq(sch->lock);
+	css_schedule_eval(sch->schid);
+	return 0;
+}
+
+void chsc_chp_offline(struct chp_id chpid)
+{
+	struct channel_path *chp = chpid_to_chp(chpid);
+	struct chp_link link;
+	char dbf_txt[15];
+
+	sprintf(dbf_txt, "chpr%x.%02x", chpid.cssid, chpid.id);
+	CIO_TRACE_EVENT(2, dbf_txt);
+
+	if (chp_get_status(chpid) <= 0)
+		return;
+	memset(&link, 0, sizeof(struct chp_link));
+	link.chpid = chpid;
+	/* Wait until previous actions have settled. */
+	css_wait_for_slow_path();
+
+	mutex_lock(&chp->lock);
+	chp_update_desc(chp);
+	mutex_unlock(&chp->lock);
+
+	for_each_subchannel_staged(s390_subchannel_remove_chpid, NULL, &link);
+}
+
+static int __s390_process_res_acc(struct subchannel *sch, void *data)
+{
+	spin_lock_irq(sch->lock);
+	if (sch->driver && sch->driver->chp_event)
+		sch->driver->chp_event(sch, data, CHP_ONLINE);
+	spin_unlock_irq(sch->lock);
+
+	return 0;
+}
+
+static void s390_process_res_acc(struct chp_link *link)
+{
+	char dbf_txt[15];
+
+	sprintf(dbf_txt, "accpr%x.%02x", link->chpid.cssid,
+		link->chpid.id);
+	CIO_TRACE_EVENT(2, dbf_txt);
+	if (link->fla != 0) {
+		sprintf(dbf_txt, "fla%x", link->fla);
+		CIO_TRACE_EVENT(2, dbf_txt);
+	}
+	/* Wait until previous actions have settled. */
+	css_wait_for_slow_path();
+	/*
+	 * I/O resources may have become accessible.
+	 * Scan through all subchannels that may be concerned and
+	 * do a validation on those.
+	 * The more information we have, the less scanning
+	 * we will have to do.
+	 */
+	for_each_subchannel_staged(__s390_process_res_acc, NULL, link);
+	css_schedule_reprobe();
+}
+
+struct chsc_sei_nt0_area {
+	u8  flags;
+	u8  vf;				/* validity flags */
+	u8  rs;				/* reporting source */
+	u8  cc;				/* content code */
+	u16 fla;			/* full link address */
+	u16 rsid;			/* reporting source id */
+	u32 reserved1;
+	u32 reserved2;
+	/* ccdf has to be big enough for a link-incident record */
+	u8  ccdf[PAGE_SIZE - 24 - 16];	/* content-code dependent field */
+} __packed;
+
+struct chsc_sei_nt2_area {
+	u8  flags;			/* p and v bit */
+	u8  reserved1;
+	u8  reserved2;
+	u8  cc;				/* content code */
+	u32 reserved3[13];
+	u8  ccdf[PAGE_SIZE - 24 - 56];	/* content-code dependent field */
+} __packed;
+
+#define CHSC_SEI_NT0	(1ULL << 63)
+#define CHSC_SEI_NT2	(1ULL << 61)
+
+struct chsc_sei {
+	struct chsc_header request;
+	u32 reserved1;
+	u64 ntsm;			/* notification type mask */
+	struct chsc_header response;
+	u32 :24;
+	u8 nt;
+	union {
+		struct chsc_sei_nt0_area nt0_area;
+		struct chsc_sei_nt2_area nt2_area;
+		u8 nt_area[PAGE_SIZE - 24];
+	} u;
+} __packed __aligned(PAGE_SIZE);
+
+/*
+ * Node Descriptor as defined in SA22-7204, "Common I/O-Device Commands"
+ */
+
+#define ND_VALIDITY_VALID	0
+#define ND_VALIDITY_OUTDATED	1
+#define ND_VALIDITY_INVALID	2
+
+struct node_descriptor {
+	/* Flags. */
+	union {
+		struct {
+			u32 validity:3;
+			u32 reserved:5;
+		} __packed;
+		u8 byte0;
+	} __packed;
+
+	/* Node parameters. */
+	u32 params:24;
+
+	/* Node ID. */
+	char type[6];
+	char model[3];
+	char manufacturer[3];
+	char plant[2];
+	char seq[12];
+	u16 tag;
+} __packed;
+
+/*
+ * Link Incident Record as defined in SA22-7202, "ESCON I/O Interface"
+ */
+
+#define LIR_IQ_CLASS_INFO		0
+#define LIR_IQ_CLASS_DEGRADED		1
+#define LIR_IQ_CLASS_NOT_OPERATIONAL	2
+
+struct lir {
+	struct {
+		u32 null:1;
+		u32 reserved:3;
+		u32 class:2;
+		u32 reserved2:2;
+	} __packed iq;
+	u32 ic:8;
+	u32 reserved:16;
+	struct node_descriptor incident_node;
+	struct node_descriptor attached_node;
+	u8 reserved2[32];
+} __packed;
+
+#define PARAMS_LEN	10	/* PARAMS=xx,xxxxxx */
+#define NODEID_LEN	35	/* NODEID=tttttt/mdl,mmm.ppssssssssssss,xxxx */
+
+/* Copy EBCDIC text, convert to ASCII and optionally add delimiter. */
+static char *store_ebcdic(char *dest, const char *src, unsigned long len,
+			  char delim)
+{
+	memcpy(dest, src, len);
+	EBCASC(dest, len);
+
+	if (delim)
+		dest[len++] = delim;
+
+	return dest + len;
+}
+
+/* Format node ID and parameters for output in LIR log message. */
+static void format_node_data(char *params, char *id, struct node_descriptor *nd)
+{
+	memset(params, 0, PARAMS_LEN);
+	memset(id, 0, NODEID_LEN);
+
+	if (nd->validity != ND_VALIDITY_VALID) {
+		strncpy(params, "n/a", PARAMS_LEN - 1);
+		strncpy(id, "n/a", NODEID_LEN - 1);
+		return;
+	}
+
+	/* PARAMS=xx,xxxxxx */
+	snprintf(params, PARAMS_LEN, "%02x,%06x", nd->byte0, nd->params);
+	/* NODEID=tttttt/mdl,mmm.ppssssssssssss,xxxx */
+	id = store_ebcdic(id, nd->type, sizeof(nd->type), '/');
+	id = store_ebcdic(id, nd->model, sizeof(nd->model), ',');
+	id = store_ebcdic(id, nd->manufacturer, sizeof(nd->manufacturer), '.');
+	id = store_ebcdic(id, nd->plant, sizeof(nd->plant), 0);
+	id = store_ebcdic(id, nd->seq, sizeof(nd->seq), ',');
+	sprintf(id, "%04X", nd->tag);
+}
+
+static void chsc_process_sei_link_incident(struct chsc_sei_nt0_area *sei_area)
+{
+	struct lir *lir = (struct lir *) &sei_area->ccdf;
+	char iuparams[PARAMS_LEN], iunodeid[NODEID_LEN], auparams[PARAMS_LEN],
+	     aunodeid[NODEID_LEN];
+
+	CIO_CRW_EVENT(4, "chsc: link incident (rs=%02x, rs_id=%04x, iq=%02x)\n",
+		      sei_area->rs, sei_area->rsid, sei_area->ccdf[0]);
+
+	/* Ignore NULL Link Incident Records. */
+	if (lir->iq.null)
+		return;
+
+	/* Inform user that a link requires maintenance actions because it has
+	 * become degraded or not operational. Note that this log message is
+	 * the primary intention behind a Link Incident Record. */
+
+	format_node_data(iuparams, iunodeid, &lir->incident_node);
+	format_node_data(auparams, aunodeid, &lir->attached_node);
+
+	switch (lir->iq.class) {
+	case LIR_IQ_CLASS_DEGRADED:
+		pr_warn("Link degraded: RS=%02x RSID=%04x IC=%02x "
+			"IUPARAMS=%s IUNODEID=%s AUPARAMS=%s AUNODEID=%s\n",
+			sei_area->rs, sei_area->rsid, lir->ic, iuparams,
+			iunodeid, auparams, aunodeid);
+		break;
+	case LIR_IQ_CLASS_NOT_OPERATIONAL:
+		pr_err("Link stopped: RS=%02x RSID=%04x IC=%02x "
+		       "IUPARAMS=%s IUNODEID=%s AUPARAMS=%s AUNODEID=%s\n",
+		       sei_area->rs, sei_area->rsid, lir->ic, iuparams,
+		       iunodeid, auparams, aunodeid);
+		break;
+	default:
+		break;
+	}
+}
+
+static void chsc_process_sei_res_acc(struct chsc_sei_nt0_area *sei_area)
+{
+	struct channel_path *chp;
+	struct chp_link link;
+	struct chp_id chpid;
+	int status;
+
+	CIO_CRW_EVENT(4, "chsc: resource accessibility event (rs=%02x, "
+		      "rs_id=%04x)\n", sei_area->rs, sei_area->rsid);
+	if (sei_area->rs != 4)
+		return;
+	chp_id_init(&chpid);
+	chpid.id = sei_area->rsid;
+	/* allocate a new channel path structure, if needed */
+	status = chp_get_status(chpid);
+	if (!status)
+		return;
+
+	if (status < 0) {
+		chp_new(chpid);
+	} else {
+		chp = chpid_to_chp(chpid);
+		mutex_lock(&chp->lock);
+		chp_update_desc(chp);
+		mutex_unlock(&chp->lock);
+	}
+	memset(&link, 0, sizeof(struct chp_link));
+	link.chpid = chpid;
+	if ((sei_area->vf & 0xc0) != 0) {
+		link.fla = sei_area->fla;
+		if ((sei_area->vf & 0xc0) == 0xc0)
+			/* full link address */
+			link.fla_mask = 0xffff;
+		else
+			/* link address */
+			link.fla_mask = 0xff00;
+	}
+	s390_process_res_acc(&link);
+}
+
+static void chsc_process_sei_chp_avail(struct chsc_sei_nt0_area *sei_area)
+{
+	struct channel_path *chp;
+	struct chp_id chpid;
+	u8 *data;
+	int num;
+
+	CIO_CRW_EVENT(4, "chsc: channel path availability information\n");
+	if (sei_area->rs != 0)
+		return;
+	data = sei_area->ccdf;
+	chp_id_init(&chpid);
+	for (num = 0; num <= __MAX_CHPID; num++) {
+		if (!chp_test_bit(data, num))
+			continue;
+		chpid.id = num;
+
+		CIO_CRW_EVENT(4, "Update information for channel path "
+			      "%x.%02x\n", chpid.cssid, chpid.id);
+		chp = chpid_to_chp(chpid);
+		if (!chp) {
+			chp_new(chpid);
+			continue;
+		}
+		mutex_lock(&chp->lock);
+		chp_update_desc(chp);
+		mutex_unlock(&chp->lock);
+	}
+}
+
+struct chp_config_data {
+	u8 map[32];
+	u8 op;
+	u8 pc;
+};
+
+static void chsc_process_sei_chp_config(struct chsc_sei_nt0_area *sei_area)
+{
+	struct chp_config_data *data;
+	struct chp_id chpid;
+	int num;
+	char *events[3] = {"configure", "deconfigure", "cancel deconfigure"};
+
+	CIO_CRW_EVENT(4, "chsc: channel-path-configuration notification\n");
+	if (sei_area->rs != 0)
+		return;
+	data = (struct chp_config_data *) &(sei_area->ccdf);
+	chp_id_init(&chpid);
+	for (num = 0; num <= __MAX_CHPID; num++) {
+		if (!chp_test_bit(data->map, num))
+			continue;
+		chpid.id = num;
+		pr_notice("Processing %s for channel path %x.%02x\n",
+			  events[data->op], chpid.cssid, chpid.id);
+		switch (data->op) {
+		case 0:
+			chp_cfg_schedule(chpid, 1);
+			break;
+		case 1:
+			chp_cfg_schedule(chpid, 0);
+			break;
+		case 2:
+			chp_cfg_cancel_deconfigure(chpid);
+			break;
+		}
+	}
+}
+
+static void chsc_process_sei_scm_change(struct chsc_sei_nt0_area *sei_area)
+{
+	int ret;
+
+	CIO_CRW_EVENT(4, "chsc: scm change notification\n");
+	if (sei_area->rs != 7)
+		return;
+
+	ret = scm_update_information();
+	if (ret)
+		CIO_CRW_EVENT(0, "chsc: updating change notification"
+			      " failed (rc=%d).\n", ret);
+}
+
+static void chsc_process_sei_scm_avail(struct chsc_sei_nt0_area *sei_area)
+{
+	int ret;
+
+	CIO_CRW_EVENT(4, "chsc: scm available information\n");
+	if (sei_area->rs != 7)
+		return;
+
+	ret = scm_process_availability_information();
+	if (ret)
+		CIO_CRW_EVENT(0, "chsc: process availability information"
+			      " failed (rc=%d).\n", ret);
+}
+
+static void chsc_process_sei_nt2(struct chsc_sei_nt2_area *sei_area)
+{
+	switch (sei_area->cc) {
+	case 1:
+		zpci_event_error(sei_area->ccdf);
+		break;
+	case 2:
+		zpci_event_availability(sei_area->ccdf);
+		break;
+	default:
+		CIO_CRW_EVENT(2, "chsc: sei nt2 unhandled cc=%d\n",
+			      sei_area->cc);
+		break;
+	}
+}
+
+static void chsc_process_sei_nt0(struct chsc_sei_nt0_area *sei_area)
+{
+	/* which kind of information was stored? */
+	switch (sei_area->cc) {
+	case 1: /* link incident*/
+		chsc_process_sei_link_incident(sei_area);
+		break;
+	case 2: /* i/o resource accessibility */
+		chsc_process_sei_res_acc(sei_area);
+		break;
+	case 7: /* channel-path-availability information */
+		chsc_process_sei_chp_avail(sei_area);
+		break;
+	case 8: /* channel-path-configuration notification */
+		chsc_process_sei_chp_config(sei_area);
+		break;
+	case 12: /* scm change notification */
+		chsc_process_sei_scm_change(sei_area);
+		break;
+	case 14: /* scm available notification */
+		chsc_process_sei_scm_avail(sei_area);
+		break;
+	default: /* other stuff */
+		CIO_CRW_EVENT(2, "chsc: sei nt0 unhandled cc=%d\n",
+			      sei_area->cc);
+		break;
+	}
+
+	/* Check if we might have lost some information. */
+	if (sei_area->flags & 0x40) {
+		CIO_CRW_EVENT(2, "chsc: event overflow\n");
+		css_schedule_eval_all();
+	}
+}
+
+static void chsc_process_event_information(struct chsc_sei *sei, u64 ntsm)
+{
+	static int ntsm_unsupported;
+
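+	/*
+	 * Issue store-event-information repeatedly: flag 0x80 in the nt0
+	 * area indicates that more event information is pending. A
+	 * response code of 0x0003 with a non-zero ntsm marks firmware
+	 * that does not support notification-type masks; in that case
+	 * retry without an ntsm and remember this for subsequent calls.
+	 */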
+	while (true) {
+		memset(sei, 0, sizeof(*sei));
+		sei->request.length = 0x0010;
+		sei->request.code = 0x000e;
+		if (!ntsm_unsupported)
+			sei->ntsm = ntsm;
+
+		if (chsc(sei))
+			break;
+
+		if (sei->response.code != 0x0001) {
+			CIO_CRW_EVENT(2, "chsc: sei failed (rc=%04x, ntsm=%llx)\n",
+				      sei->response.code, sei->ntsm);
+
+			if (sei->response.code == 3 && sei->ntsm) {
+				/* Fallback for old firmware. */
+				ntsm_unsupported = 1;
+				continue;
+			}
+			break;
+		}
+
+		CIO_CRW_EVENT(2, "chsc: sei successful (nt=%d)\n", sei->nt);
+		switch (sei->nt) {
+		case 0:
+			chsc_process_sei_nt0(&sei->u.nt0_area);
+			break;
+		case 2:
+			chsc_process_sei_nt2(&sei->u.nt2_area);
+			break;
+		default:
+			CIO_CRW_EVENT(2, "chsc: unhandled nt: %d\n", sei->nt);
+			break;
+		}
+
+		if (!(sei->u.nt0_area.flags & 0x80))
+			break;
+	}
+}
+
+/*
+ * Handle channel subsystem related CRWs.
+ * Use store event information to find out what's going on.
+ *
+ * Note: Access to sei_page is serialized through machine check handler
+ * thread, so no need for locking.
+ */
+static void chsc_process_crw(struct crw *crw0, struct crw *crw1, int overflow)
+{
+	struct chsc_sei *sei = sei_page;
+
+	if (overflow) {
+		css_schedule_eval_all();
+		return;
+	}
+	CIO_CRW_EVENT(2, "CRW reports slct=%d, oflw=%d, "
+		      "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n",
+		      crw0->slct, crw0->oflw, crw0->chn, crw0->rsc, crw0->anc,
+		      crw0->erc, crw0->rsid);
+
+	CIO_TRACE_EVENT(2, "prcss");
+	chsc_process_event_information(sei, CHSC_SEI_NT0 | CHSC_SEI_NT2);
+}
+
+void chsc_chp_online(struct chp_id chpid)
+{
+	struct channel_path *chp = chpid_to_chp(chpid);
+	struct chp_link link;
+	char dbf_txt[15];
+
+	sprintf(dbf_txt, "cadd%x.%02x", chpid.cssid, chpid.id);
+	CIO_TRACE_EVENT(2, dbf_txt);
+
+	if (chp_get_status(chpid) != 0) {
+		memset(&link, 0, sizeof(struct chp_link));
+		link.chpid = chpid;
+		/* Wait until previous actions have settled. */
+		css_wait_for_slow_path();
+
+		mutex_lock(&chp->lock);
+		chp_update_desc(chp);
+		mutex_unlock(&chp->lock);
+
+		for_each_subchannel_staged(__s390_process_res_acc, NULL,
+					   &link);
+		css_schedule_reprobe();
+	}
+}
+
+static void __s390_subchannel_vary_chpid(struct subchannel *sch,
+					 struct chp_id chpid, int on)
+{
+	unsigned long flags;
+	struct chp_link link;
+
+	memset(&link, 0, sizeof(struct chp_link));
+	link.chpid = chpid;
+	spin_lock_irqsave(sch->lock, flags);
+	if (sch->driver && sch->driver->chp_event)
+		sch->driver->chp_event(sch, &link,
+				       on ? CHP_VARY_ON : CHP_VARY_OFF);
+	spin_unlock_irqrestore(sch->lock, flags);
+}
+
+static int s390_subchannel_vary_chpid_off(struct subchannel *sch, void *data)
+{
+	struct chp_id *chpid = data;
+
+	__s390_subchannel_vary_chpid(sch, *chpid, 0);
+	return 0;
+}
+
+static int s390_subchannel_vary_chpid_on(struct subchannel *sch, void *data)
+{
+	struct chp_id *chpid = data;
+
+	__s390_subchannel_vary_chpid(sch, *chpid, 1);
+	return 0;
+}
+
+/**
+ * chsc_chp_vary - propagate channel-path vary operation to subchannels
+ * @chpid: channel-path ID
+ * @on: non-zero for vary online, zero for vary offline
+ */
+int chsc_chp_vary(struct chp_id chpid, int on)
+{
+	struct channel_path *chp = chpid_to_chp(chpid);
+
+	/* Wait until previous actions have settled. */
+	css_wait_for_slow_path();
+	/*
+	 * Redo PathVerification on the devices the chpid connects to
+	 */
+	if (on) {
+		/* Try to update the channel path description. */
+		chp_update_desc(chp);
+		for_each_subchannel_staged(s390_subchannel_vary_chpid_on,
+					   NULL, &chpid);
+		css_schedule_reprobe();
+	} else
+		for_each_subchannel_staged(s390_subchannel_vary_chpid_off,
+					   NULL, &chpid);
+
+	return 0;
+}
+
+static void
+chsc_remove_cmg_attr(struct channel_subsystem *css)
+{
+	int i;
+
+	for (i = 0; i <= __MAX_CHPID; i++) {
+		if (!css->chps[i])
+			continue;
+		chp_remove_cmg_attr(css->chps[i]);
+	}
+}
+
+static int
+chsc_add_cmg_attr(struct channel_subsystem *css)
+{
+	int i, ret;
+
+	ret = 0;
+	for (i = 0; i <= __MAX_CHPID; i++) {
+		if (!css->chps[i])
+			continue;
+		ret = chp_add_cmg_attr(css->chps[i]);
+		if (ret)
+			goto cleanup;
+	}
+	return ret;
+cleanup:
+	for (--i; i >= 0; i--) {
+		if (!css->chps[i])
+			continue;
+		chp_remove_cmg_attr(css->chps[i]);
+	}
+	return ret;
+}
+
+int __chsc_do_secm(struct channel_subsystem *css, int enable)
+{
+	struct {
+		struct chsc_header request;
+		u32 operation_code : 2;
+		u32 : 30;
+		u32 key : 4;
+		u32 : 28;
+		u32 zeroes1;
+		u32 cub_addr1;
+		u32 zeroes2;
+		u32 cub_addr2;
+		u32 reserved[13];
+		struct chsc_header response;
+		u32 status : 8;
+		u32 : 4;
+		u32 fmt : 4;
+		u32 : 16;
+	} *secm_area;
+	unsigned long flags;
+	int ret, ccode;
+
+	spin_lock_irqsave(&chsc_page_lock, flags);
+	memset(chsc_page, 0, PAGE_SIZE);
+	secm_area = chsc_page;
+	secm_area->request.length = 0x0050;
+	secm_area->request.code = 0x0016;
+
+	secm_area->key = PAGE_DEFAULT_KEY >> 4;
+	secm_area->cub_addr1 = (u64)(unsigned long)css->cub_addr1;
+	secm_area->cub_addr2 = (u64)(unsigned long)css->cub_addr2;
+
+	secm_area->operation_code = enable ? 0 : 1;
+
+	ccode = chsc(secm_area);
+	if (ccode > 0) {
+		ret = (ccode == 3) ? -ENODEV : -EBUSY;
+		goto out;
+	}
+
+	switch (secm_area->response.code) {
+	case 0x0102:
+	case 0x0103:
+		ret = -EINVAL;
+		break;
+	default:
+		ret = chsc_error_from_response(secm_area->response.code);
+	}
+	if (ret != 0)
+		CIO_CRW_EVENT(2, "chsc: secm failed (rc=%04x)\n",
+			      secm_area->response.code);
+out:
+	spin_unlock_irqrestore(&chsc_page_lock, flags);
+	return ret;
+}
+
+int
+chsc_secm(struct channel_subsystem *css, int enable)
+{
+	int ret;
+
+	if (enable && !css->cm_enabled) {
+		css->cub_addr1 = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
+		css->cub_addr2 = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
+		if (!css->cub_addr1 || !css->cub_addr2) {
+			free_page((unsigned long)css->cub_addr1);
+			free_page((unsigned long)css->cub_addr2);
+			return -ENOMEM;
+		}
+	}
+	ret = __chsc_do_secm(css, enable);
+	if (!ret) {
+		css->cm_enabled = enable;
+		if (css->cm_enabled) {
+			ret = chsc_add_cmg_attr(css);
+			if (ret) {
+				__chsc_do_secm(css, 0);
+				css->cm_enabled = 0;
+			}
+		} else
+			chsc_remove_cmg_attr(css);
+	}
+	if (!css->cm_enabled) {
+		free_page((unsigned long)css->cub_addr1);
+		free_page((unsigned long)css->cub_addr2);
+	}
+	return ret;
+}
+
+int chsc_determine_channel_path_desc(struct chp_id chpid, int fmt, int rfmt,
+				     int c, int m, void *page)
+{
+	struct chsc_scpd *scpd_area;
+	int ccode, ret;
+
+	if ((rfmt == 1 || rfmt == 0) && c == 1 &&
+	    !css_general_characteristics.fcs)
+		return -EINVAL;
+	if ((rfmt == 2) && !css_general_characteristics.cib)
+		return -EINVAL;
+	if ((rfmt == 3) && !css_general_characteristics.util_str)
+		return -EINVAL;
+
+	memset(page, 0, PAGE_SIZE);
+	scpd_area = page;
+	scpd_area->request.length = 0x0010;
+	scpd_area->request.code = 0x0002;
+	scpd_area->cssid = chpid.cssid;
+	scpd_area->first_chpid = chpid.id;
+	scpd_area->last_chpid = chpid.id;
+	scpd_area->m = m;
+	scpd_area->c = c;
+	scpd_area->fmt = fmt;
+	scpd_area->rfmt = rfmt;
+
+	ccode = chsc(scpd_area);
+	if (ccode > 0)
+		return (ccode == 3) ? -ENODEV : -EBUSY;
+
+	ret = chsc_error_from_response(scpd_area->response.code);
+	if (ret)
+		CIO_CRW_EVENT(2, "chsc: scpd failed (rc=%04x)\n",
+			      scpd_area->response.code);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(chsc_determine_channel_path_desc);
+
+#define chsc_det_chp_desc(FMT, c)					\
+int chsc_determine_fmt##FMT##_channel_path_desc(			\
+	struct chp_id chpid, struct channel_path_desc_fmt##FMT *desc)	\
+{									\
+	struct chsc_scpd *scpd_area;					\
+	unsigned long flags;						\
+	int ret;							\
+									\
+	spin_lock_irqsave(&chsc_page_lock, flags);			\
+	scpd_area = chsc_page;						\
+	ret = chsc_determine_channel_path_desc(chpid, 0, FMT, c, 0,	\
+					       scpd_area);		\
+	if (ret)							\
+		goto out;						\
+									\
+	memcpy(desc, scpd_area->data, sizeof(*desc));			\
+out:									\
+	spin_unlock_irqrestore(&chsc_page_lock, flags);			\
+	return ret;							\
+}
+
+chsc_det_chp_desc(0, 0)
+chsc_det_chp_desc(1, 1)
+chsc_det_chp_desc(3, 0)
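+
+/*
+ * The three instantiations above expand to
+ * chsc_determine_fmt0_channel_path_desc(), ...fmt1... and ...fmt3...,
+ * the helpers chp_update_desc() uses to fill struct channel_path.
+ */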
+
+static void
+chsc_initialize_cmg_chars(struct channel_path *chp, u8 cmcv,
+			  struct cmg_chars *chars)
+{
+	int i, mask;
+
+	for (i = 0; i < NR_MEASUREMENT_CHARS; i++) {
+		mask = 0x80 >> (i + 3);
+		if (cmcv & mask)
+			chp->cmg_chars.values[i] = chars->values[i];
+		else
+			chp->cmg_chars.values[i] = 0;
+	}
+}
+
+int chsc_get_channel_measurement_chars(struct channel_path *chp)
+{
+	unsigned long flags;
+	int ccode, ret;
+
+	struct {
+		struct chsc_header request;
+		u32 : 24;
+		u32 first_chpid : 8;
+		u32 : 24;
+		u32 last_chpid : 8;
+		u32 zeroes1;
+		struct chsc_header response;
+		u32 zeroes2;
+		u32 not_valid : 1;
+		u32 shared : 1;
+		u32 : 22;
+		u32 chpid : 8;
+		u32 cmcv : 5;
+		u32 : 11;
+		u32 cmgq : 8;
+		u32 cmg : 8;
+		u32 zeroes3;
+		u32 data[NR_MEASUREMENT_CHARS];
+	} *scmc_area;
+
+	chp->shared = -1;
+	chp->cmg = -1;
+
+	if (!css_chsc_characteristics.scmc || !css_chsc_characteristics.secm)
+		return -EINVAL;
+
+	spin_lock_irqsave(&chsc_page_lock, flags);
+	memset(chsc_page, 0, PAGE_SIZE);
+	scmc_area = chsc_page;
+	scmc_area->request.length = 0x0010;
+	scmc_area->request.code = 0x0022;
+	scmc_area->first_chpid = chp->chpid.id;
+	scmc_area->last_chpid = chp->chpid.id;
+
+	ccode = chsc(scmc_area);
+	if (ccode > 0) {
+		ret = (ccode == 3) ? -ENODEV : -EBUSY;
+		goto out;
+	}
+
+	ret = chsc_error_from_response(scmc_area->response.code);
+	if (ret) {
+		CIO_CRW_EVENT(2, "chsc: scmc failed (rc=%04x)\n",
+			      scmc_area->response.code);
+		goto out;
+	}
+	if (scmc_area->not_valid)
+		goto out;
+
+	chp->cmg = scmc_area->cmg;
+	chp->shared = scmc_area->shared;
+	if (chp->cmg != 2 && chp->cmg != 3) {
+		/* No cmg-dependent data. */
+		goto out;
+	}
+	chsc_initialize_cmg_chars(chp, scmc_area->cmcv,
+				  (struct cmg_chars *) &scmc_area->data);
+out:
+	spin_unlock_irqrestore(&chsc_page_lock, flags);
+	return ret;
+}
+
+int __init chsc_init(void)
+{
+	int ret;
+
+	sei_page = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
+	chsc_page = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
+	if (!sei_page || !chsc_page) {
+		ret = -ENOMEM;
+		goto out_err;
+	}
+	ret = crw_register_handler(CRW_RSC_CSS, chsc_process_crw);
+	if (ret)
+		goto out_err;
+	return ret;
+out_err:
+	free_page((unsigned long)chsc_page);
+	free_page((unsigned long)sei_page);
+	return ret;
+}
+
+void __init chsc_init_cleanup(void)
+{
+	crw_unregister_handler(CRW_RSC_CSS);
+	free_page((unsigned long)chsc_page);
+	free_page((unsigned long)sei_page);
+}
+
+int __chsc_enable_facility(struct chsc_sda_area *sda_area, int operation_code)
+{
+	int ret;
+
+	sda_area->request.length = 0x0400;
+	sda_area->request.code = 0x0031;
+	sda_area->operation_code = operation_code;
+
+	ret = chsc(sda_area);
+	if (ret > 0) {
+		ret = (ret == 3) ? -ENODEV : -EBUSY;
+		goto out;
+	}
+
+	switch (sda_area->response.code) {
+	case 0x0101:
+		ret = -EOPNOTSUPP;
+		break;
+	default:
+		ret = chsc_error_from_response(sda_area->response.code);
+	}
+out:
+	return ret;
+}
+
+int chsc_enable_facility(int operation_code)
+{
+	struct chsc_sda_area *sda_area;
+	unsigned long flags;
+	int ret;
+
+	spin_lock_irqsave(&chsc_page_lock, flags);
+	memset(chsc_page, 0, PAGE_SIZE);
+	sda_area = chsc_page;
+
+	ret = __chsc_enable_facility(sda_area, operation_code);
+	if (ret != 0)
+		CIO_CRW_EVENT(2, "chsc: sda (oc=%x) failed (rc=%04x)\n",
+			      operation_code, sda_area->response.code);
+
+	spin_unlock_irqrestore(&chsc_page_lock, flags);
+	return ret;
+}
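+
+/*
+ * Usage sketch: callers pass one of the CHSC_SDA_OC_* operation codes
+ * defined in chsc.h, e.g. to enable the multiple-subchannel-set facility:
+ *
+ *	if (chsc_enable_facility(CHSC_SDA_OC_MSS) == 0)
+ *		... MSS is available from now on ...
+ */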
+
+int __init chsc_get_cssid(int idx)
+{
+	struct {
+		struct chsc_header request;
+		u8 atype;
+		u32 : 24;
+		u32 reserved1[6];
+		struct chsc_header response;
+		u32 reserved2[3];
+		struct {
+			u8 cssid;
+			u32 : 24;
+		} list[0];
+	} *sdcal_area;
+	int ret;
+
+	spin_lock_irq(&chsc_page_lock);
+	memset(chsc_page, 0, PAGE_SIZE);
+	sdcal_area = chsc_page;
+	sdcal_area->request.length = 0x0020;
+	sdcal_area->request.code = 0x0034;
+	sdcal_area->atype = 4;
+
+	ret = chsc(sdcal_area);
+	if (ret) {
+		ret = (ret == 3) ? -ENODEV : -EBUSY;
+		goto exit;
+	}
+
+	ret = chsc_error_from_response(sdcal_area->response.code);
+	if (ret) {
+		CIO_CRW_EVENT(2, "chsc: sdcal failed (rc=%04x)\n",
+			      sdcal_area->response.code);
+		goto exit;
+	}
+
+	if ((addr_t) &sdcal_area->list[idx] <
+	    (addr_t) &sdcal_area->response + sdcal_area->response.length)
+		ret = sdcal_area->list[idx].cssid;
+	else
+		ret = -ENODEV;
+exit:
+	spin_unlock_irq(&chsc_page_lock);
+	return ret;
+}
+
+struct css_general_char css_general_characteristics;
+struct css_chsc_char css_chsc_characteristics;
+
+int __init
+chsc_determine_css_characteristics(void)
+{
+	unsigned long flags;
+	int result;
+	struct {
+		struct chsc_header request;
+		u32 reserved1;
+		u32 reserved2;
+		u32 reserved3;
+		struct chsc_header response;
+		u32 reserved4;
+		u32 general_char[510];
+		u32 chsc_char[508];
+	} *scsc_area;
+
+	spin_lock_irqsave(&chsc_page_lock, flags);
+	memset(chsc_page, 0, PAGE_SIZE);
+	scsc_area = chsc_page;
+	scsc_area->request.length = 0x0010;
+	scsc_area->request.code = 0x0010;
+
+	result = chsc(scsc_area);
+	if (result) {
+		result = (result == 3) ? -ENODEV : -EBUSY;
+		goto exit;
+	}
+
+	result = chsc_error_from_response(scsc_area->response.code);
+	if (result == 0) {
+		memcpy(&css_general_characteristics, scsc_area->general_char,
+		       sizeof(css_general_characteristics));
+		memcpy(&css_chsc_characteristics, scsc_area->chsc_char,
+		       sizeof(css_chsc_characteristics));
+	} else
+		CIO_CRW_EVENT(2, "chsc: scsc failed (rc=%04x)\n",
+			      scsc_area->response.code);
+exit:
+	spin_unlock_irqrestore(&chsc_page_lock, flags);
+	return result;
+}
+
+EXPORT_SYMBOL_GPL(css_general_characteristics);
+EXPORT_SYMBOL_GPL(css_chsc_characteristics);
+
+int chsc_sstpc(void *page, unsigned int op, u16 ctrl, u64 *clock_delta)
+{
+	struct {
+		struct chsc_header request;
+		unsigned int rsvd0;
+		unsigned int op : 8;
+		unsigned int rsvd1 : 8;
+		unsigned int ctrl : 16;
+		unsigned int rsvd2[5];
+		struct chsc_header response;
+		unsigned int rsvd3[3];
+		u64 clock_delta;
+		unsigned int rsvd4[2];
+	} *rr;
+	int rc;
+
+	memset(page, 0, PAGE_SIZE);
+	rr = page;
+	rr->request.length = 0x0020;
+	rr->request.code = 0x0033;
+	rr->op = op;
+	rr->ctrl = ctrl;
+	rc = chsc(rr);
+	if (rc)
+		return -EIO;
+	rc = (rr->response.code == 0x0001) ? 0 : -EIO;
+	if (clock_delta)
+		*clock_delta = rr->clock_delta;
+	return rc;
+}
+
+int chsc_sstpi(void *page, void *result, size_t size)
+{
+	struct {
+		struct chsc_header request;
+		unsigned int rsvd0[3];
+		struct chsc_header response;
+		char data[];
+	} *rr;
+	int rc;
+
+	memset(page, 0, PAGE_SIZE);
+	rr = page;
+	rr->request.length = 0x0010;
+	rr->request.code = 0x0038;
+	rc = chsc(rr);
+	if (rc)
+		return -EIO;
+	memcpy(result, &rr->data, size);
+	return (rr->response.code == 0x0001) ? 0 : -EIO;
+}
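+
+/*
+ * Usage sketch: chsc_sstpc() and chsc_sstpi() expect a caller-provided
+ * 4K page for the request/response block (they clear it themselves), e.g.:
+ *
+ *	void *page = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
+ *	char info[64];	(hypothetical result buffer)
+ *
+ *	if (page && chsc_sstpi(page, info, sizeof(info)) == 0)
+ *		... evaluate the returned STP information ...
+ *	free_page((unsigned long)page);
+ */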
+
+int chsc_siosl(struct subchannel_id schid)
+{
+	struct {
+		struct chsc_header request;
+		u32 word1;
+		struct subchannel_id sid;
+		u32 word3;
+		struct chsc_header response;
+		u32 word[11];
+	} *siosl_area;
+	unsigned long flags;
+	int ccode;
+	int rc;
+
+	spin_lock_irqsave(&chsc_page_lock, flags);
+	memset(chsc_page, 0, PAGE_SIZE);
+	siosl_area = chsc_page;
+	siosl_area->request.length = 0x0010;
+	siosl_area->request.code = 0x0046;
+	siosl_area->word1 = 0x80000000;
+	siosl_area->sid = schid;
+
+	ccode = chsc(siosl_area);
+	if (ccode > 0) {
+		if (ccode == 3)
+			rc = -ENODEV;
+		else
+			rc = -EBUSY;
+		CIO_MSG_EVENT(2, "chsc: chsc failed for 0.%x.%04x (ccode=%d)\n",
+			      schid.ssid, schid.sch_no, ccode);
+		goto out;
+	}
+	rc = chsc_error_from_response(siosl_area->response.code);
+	if (rc)
+		CIO_MSG_EVENT(2, "chsc: siosl failed for 0.%x.%04x (rc=%04x)\n",
+			      schid.ssid, schid.sch_no,
+			      siosl_area->response.code);
+	else
+		CIO_MSG_EVENT(4, "chsc: siosl succeeded for 0.%x.%04x\n",
+			      schid.ssid, schid.sch_no);
+out:
+	spin_unlock_irqrestore(&chsc_page_lock, flags);
+	return rc;
+}
+EXPORT_SYMBOL_GPL(chsc_siosl);
+
+/**
+ * chsc_scm_info() - store SCM information (SSI)
+ * @scm_area: request and response block for SSI
+ * @token: continuation token
+ *
+ * Returns 0 on success.
+ */
+int chsc_scm_info(struct chsc_scm_info *scm_area, u64 token)
+{
+	int ccode, ret;
+
+	memset(scm_area, 0, sizeof(*scm_area));
+	scm_area->request.length = 0x0020;
+	scm_area->request.code = 0x004C;
+	scm_area->reqtok = token;
+
+	ccode = chsc(scm_area);
+	if (ccode > 0) {
+		ret = (ccode == 3) ? -ENODEV : -EBUSY;
+		goto out;
+	}
+	ret = chsc_error_from_response(scm_area->response.code);
+	if (ret != 0)
+		CIO_MSG_EVENT(2, "chsc: scm info failed (rc=%04x)\n",
+			      scm_area->response.code);
+out:
+	return ret;
+}
+EXPORT_SYMBOL_GPL(chsc_scm_info);
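+
+/*
+ * Usage sketch: when more SCM address-list entries exist than fit into a
+ * single response block, the response carries a continuation token in
+ * restok, so callers typically loop:
+ *
+ *	u64 token = 0;
+ *
+ *	do {
+ *		ret = chsc_scm_info(scm_area, token);
+ *		if (ret)
+ *			break;
+ *		... process scm_area->scmal[] ...
+ *		token = scm_area->restok;
+ *	} while (token);
+ */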
+
+/**
+ * chsc_pnso_brinfo() - Perform Network-Subchannel Operation, Bridge Info.
+ * @schid:		id of the subchannel on which PNSO is performed
+ * @brinfo_area:	request and response block for the operation
+ * @resume_token:	resume token for multiblock response
+ * @cnc:		Boolean change-notification control
+ *
+ * brinfo_area must be allocated by the caller with get_zeroed_page(GFP_KERNEL)
+ *
+ * Returns 0 on success.
+ */
+int chsc_pnso_brinfo(struct subchannel_id schid,
+		struct chsc_pnso_area *brinfo_area,
+		struct chsc_brinfo_resume_token resume_token,
+		int cnc)
+{
+	memset(brinfo_area, 0, sizeof(*brinfo_area));
+	brinfo_area->request.length = 0x0030;
+	brinfo_area->request.code = 0x003d; /* network-subchannel operation */
+	brinfo_area->m	   = schid.m;
+	brinfo_area->ssid  = schid.ssid;
+	brinfo_area->sch   = schid.sch_no;
+	brinfo_area->cssid = schid.cssid;
+	brinfo_area->oc    = 0; /* Store-network-bridging-information list */
+	brinfo_area->resume_token = resume_token;
+	brinfo_area->n	   = (cnc != 0);
+	if (chsc(brinfo_area))
+		return -EIO;
+	return chsc_error_from_response(brinfo_area->response.code);
+}
+EXPORT_SYMBOL_GPL(chsc_pnso_brinfo);
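+
+/*
+ * Usage sketch: as with SSI above, a multiblock response is continued by
+ * feeding the returned resume token back into the next call:
+ *
+ *	struct chsc_brinfo_resume_token token = { };
+ *
+ *	do {
+ *		rc = chsc_pnso_brinfo(schid, brinfo_area, token, 0);
+ *		if (rc)
+ *			break;
+ *		... process brinfo_area->entries ...
+ *		token = brinfo_area->naihdr.resume_token;
+ *	} while (token.t1 || token.t2);
+ */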
diff --git a/drivers/s390/cio/chsc.h b/drivers/s390/cio/chsc.h
new file mode 100644
index 0000000..78aba8d
--- /dev/null
+++ b/drivers/s390/cio/chsc.h
@@ -0,0 +1,264 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef S390_CHSC_H
+#define S390_CHSC_H
+
+#include <linux/types.h>
+#include <linux/device.h>
+#include <asm/css_chars.h>
+#include <asm/chpid.h>
+#include <asm/chsc.h>
+#include <asm/schid.h>
+#include <asm/qdio.h>
+
+#define CHSC_SDA_OC_MSS   0x2
+
+#define NR_MEASUREMENT_CHARS 5
+struct cmg_chars {
+	u32 values[NR_MEASUREMENT_CHARS];
+};
+
+#define NR_MEASUREMENT_ENTRIES 8
+struct cmg_entry {
+	u32 values[NR_MEASUREMENT_ENTRIES];
+};
+
+struct channel_path_desc_fmt1 {
+	u8 flags;
+	u8 lsn;
+	u8 desc;
+	u8 chpid;
+	u32:24;
+	u8 chpp;
+	u32 unused[2];
+	u16 chid;
+	u32:16;
+	u16 mdc;
+	u16:13;
+	u8 r:1;
+	u8 s:1;
+	u8 f:1;
+	u32 zeros[2];
+};
+
+struct channel_path_desc_fmt3 {
+	struct channel_path_desc_fmt1 fmt1_desc;
+	u8 util_str[64];
+};
+
+struct channel_path;
+
+struct css_chsc_char {
+	u64 res;
+	u64 : 20;
+	u32 secm : 1; /* bit 84 */
+	u32 : 1;
+	u32 scmc : 1; /* bit 86 */
+	u32 : 20;
+	u32 scssc : 1;  /* bit 107 */
+	u32 scsscf : 1; /* bit 108 */
+	u32:7;
+	u32 pnso:1; /* bit 116 */
+	u32:11;
+} __packed;
+
+extern struct css_chsc_char css_chsc_characteristics;
+
+struct chsc_ssd_info {
+	u8 path_mask;
+	u8 fla_valid_mask;
+	struct chp_id chpid[8];
+	u16 fla[8];
+};
+
+struct chsc_ssqd_area {
+	struct chsc_header request;
+	u16:10;
+	u8 ssid:2;
+	u8 fmt:4;
+	u16 first_sch;
+	u16:16;
+	u16 last_sch;
+	u32:32;
+	struct chsc_header response;
+	u32:32;
+	struct qdio_ssqd_desc qdio_ssqd;
+} __packed __aligned(PAGE_SIZE);
+
+struct chsc_scssc_area {
+	struct chsc_header request;
+	u16 operation_code;
+	u16:16;
+	u32:32;
+	u32:32;
+	u64 summary_indicator_addr;
+	u64 subchannel_indicator_addr;
+	u32 ks:4;
+	u32 kc:4;
+	u32:21;
+	u32 isc:3;
+	u32 word_with_d_bit;
+	u32:32;
+	struct subchannel_id schid;
+	u32 reserved[1004];
+	struct chsc_header response;
+	u32:32;
+} __packed __aligned(PAGE_SIZE);
+
+struct chsc_scpd {
+	struct chsc_header request;
+	u32:2;
+	u32 m:1;
+	u32 c:1;
+	u32 fmt:4;
+	u32 cssid:8;
+	u32:4;
+	u32 rfmt:4;
+	u32 first_chpid:8;
+	u32:24;
+	u32 last_chpid:8;
+	u32 zeroes1;
+	struct chsc_header response;
+	u32:32;
+	u8 data[0];
+} __packed __aligned(PAGE_SIZE);
+
+struct chsc_sda_area {
+	struct chsc_header request;
+	u8 :4;
+	u8 format:4;
+	u8 :8;
+	u16 operation_code;
+	u32 :32;
+	u32 :32;
+	u32 operation_data_area[252];
+	struct chsc_header response;
+	u32 :4;
+	u32 format2:4;
+	u32 :24;
+} __packed __aligned(PAGE_SIZE);
+
+extern int chsc_get_ssd_info(struct subchannel_id schid,
+			     struct chsc_ssd_info *ssd);
+extern int chsc_determine_css_characteristics(void);
+extern int chsc_init(void);
+extern void chsc_init_cleanup(void);
+
+int __chsc_enable_facility(struct chsc_sda_area *sda_area, int operation_code);
+extern int chsc_enable_facility(int);
+struct channel_subsystem;
+extern int chsc_secm(struct channel_subsystem *, int);
+int __chsc_do_secm(struct channel_subsystem *css, int enable);
+
+int chsc_chp_vary(struct chp_id chpid, int on);
+int chsc_determine_channel_path_desc(struct chp_id chpid, int fmt, int rfmt,
+				     int c, int m, void *page);
+int chsc_determine_fmt0_channel_path_desc(struct chp_id chpid,
+					  struct channel_path_desc_fmt0 *desc);
+int chsc_determine_fmt1_channel_path_desc(struct chp_id chpid,
+					  struct channel_path_desc_fmt1 *desc);
+int chsc_determine_fmt3_channel_path_desc(struct chp_id chpid,
+					  struct channel_path_desc_fmt3 *desc);
+void chsc_chp_online(struct chp_id chpid);
+void chsc_chp_offline(struct chp_id chpid);
+int chsc_get_channel_measurement_chars(struct channel_path *chp);
+int chsc_ssqd(struct subchannel_id schid, struct chsc_ssqd_area *ssqd);
+int chsc_sadc(struct subchannel_id schid, struct chsc_scssc_area *scssc,
+	      u64 summary_indicator_addr, u64 subchannel_indicator_addr);
+int chsc_error_from_response(int response);
+
+int chsc_siosl(struct subchannel_id schid);
+
+/* Functions and definitions to query storage-class memory. */
+struct sale {
+	u64 sa;
+	u32 p:4;
+	u32 op_state:4;
+	u32 data_state:4;
+	u32 rank:4;
+	u32 r:1;
+	u32:7;
+	u32 rid:8;
+	u32:32;
+} __packed;
+
+struct chsc_scm_info {
+	struct chsc_header request;
+	u32:32;
+	u64 reqtok;
+	u32 reserved1[4];
+	struct chsc_header response;
+	u64:56;
+	u8 rq;
+	u32 mbc;
+	u64 msa;
+	u16 is;
+	u16 mmc;
+	u32 mci;
+	u64 nr_scm_ini;
+	u64 nr_scm_unini;
+	u32 reserved2[10];
+	u64 restok;
+	struct sale scmal[248];
+} __packed __aligned(PAGE_SIZE);
+
+int chsc_scm_info(struct chsc_scm_info *scm_area, u64 token);
+
+struct chsc_brinfo_resume_token {
+	u64 t1;
+	u64 t2;
+} __packed;
+
+struct chsc_brinfo_naihdr {
+	struct chsc_brinfo_resume_token resume_token;
+	u32:32;
+	u32 instance;
+	u32:24;
+	u8 naids;
+	u32 reserved[3];
+} __packed;
+
+struct chsc_pnso_area {
+	struct chsc_header request;
+	u8:2;
+	u8 m:1;
+	u8:5;
+	u8:2;
+	u8 ssid:2;
+	u8 fmt:4;
+	u16 sch;
+	u8:8;
+	u8 cssid;
+	u16:16;
+	u8 oc;
+	u32:24;
+	struct chsc_brinfo_resume_token resume_token;
+	u32 n:1;
+	u32:31;
+	u32 reserved[3];
+	struct chsc_header response;
+	u32:32;
+	struct chsc_brinfo_naihdr naihdr;
+	union {
+		struct qdio_brinfo_entry_l3_ipv6 l3_ipv6[0];
+		struct qdio_brinfo_entry_l3_ipv4 l3_ipv4[0];
+		struct qdio_brinfo_entry_l2	 l2[0];
+	} entries;
+} __packed __aligned(PAGE_SIZE);
+
+int chsc_pnso_brinfo(struct subchannel_id schid,
+		struct chsc_pnso_area *brinfo_area,
+		struct chsc_brinfo_resume_token resume_token,
+		int cnc);
+
+int __init chsc_get_cssid(int idx);
+
+#ifdef CONFIG_SCM_BUS
+int scm_update_information(void);
+int scm_process_availability_information(void);
+#else /* CONFIG_SCM_BUS */
+static inline int scm_update_information(void) { return 0; }
+static inline int scm_process_availability_information(void) { return 0; }
+#endif /* CONFIG_SCM_BUS */
+
+
+#endif
diff --git a/drivers/s390/cio/chsc_sch.c b/drivers/s390/cio/chsc_sch.c
new file mode 100644
index 0000000..8d9f366
--- /dev/null
+++ b/drivers/s390/cio/chsc_sch.c
@@ -0,0 +1,1012 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Driver for s390 chsc subchannels
+ *
+ * Copyright IBM Corp. 2008, 2011
+ *
+ * Author(s): Cornelia Huck <cornelia.huck@de.ibm.com>
+ *
+ */
+
+#include <linux/slab.h>
+#include <linux/compat.h>
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/uaccess.h>
+#include <linux/miscdevice.h>
+#include <linux/kernel_stat.h>
+
+#include <asm/cio.h>
+#include <asm/chsc.h>
+#include <asm/isc.h>
+
+#include "cio.h"
+#include "cio_debug.h"
+#include "css.h"
+#include "chsc_sch.h"
+#include "ioasm.h"
+
+static debug_info_t *chsc_debug_msg_id;
+static debug_info_t *chsc_debug_log_id;
+
+static struct chsc_request *on_close_request;
+static struct chsc_async_area *on_close_chsc_area;
+static DEFINE_MUTEX(on_close_mutex);
+
+#define CHSC_MSG(imp, args...) do {					\
+		debug_sprintf_event(chsc_debug_msg_id, imp, ##args);	\
+	} while (0)
+
+#define CHSC_LOG(imp, txt) do {					\
+		debug_text_event(chsc_debug_log_id, imp, txt);	\
+	} while (0)
+
+static void CHSC_LOG_HEX(int level, void *data, int length)
+{
+	debug_event(chsc_debug_log_id, level, data, length);
+}
+
+MODULE_AUTHOR("IBM Corporation");
+MODULE_DESCRIPTION("driver for s390 chsc subchannels");
+MODULE_LICENSE("GPL");
+
+static void chsc_subchannel_irq(struct subchannel *sch)
+{
+	struct chsc_private *private = dev_get_drvdata(&sch->dev);
+	struct chsc_request *request = private->request;
+	struct irb *irb = this_cpu_ptr(&cio_irb);
+
+	CHSC_LOG(4, "irb");
+	CHSC_LOG_HEX(4, irb, sizeof(*irb));
+	inc_irq_stat(IRQIO_CSC);
+
+	/* Copy irb to provided request and set done. */
+	if (!request) {
+		CHSC_MSG(0, "Interrupt on sch 0.%x.%04x with no request\n",
+			 sch->schid.ssid, sch->schid.sch_no);
+		return;
+	}
+	private->request = NULL;
+	memcpy(&request->irb, irb, sizeof(*irb));
+	cio_update_schib(sch);
+	complete(&request->completion);
+	put_device(&sch->dev);
+}
+
+static int chsc_subchannel_probe(struct subchannel *sch)
+{
+	struct chsc_private *private;
+	int ret;
+
+	CHSC_MSG(6, "Detected chsc subchannel 0.%x.%04x\n",
+		 sch->schid.ssid, sch->schid.sch_no);
+	sch->isc = CHSC_SCH_ISC;
+	private = kzalloc(sizeof(*private), GFP_KERNEL);
+	if (!private)
+		return -ENOMEM;
+	dev_set_drvdata(&sch->dev, private);
+	ret = cio_enable_subchannel(sch, (u32)(unsigned long)sch);
+	if (ret) {
+		CHSC_MSG(0, "Failed to enable 0.%x.%04x: %d\n",
+			 sch->schid.ssid, sch->schid.sch_no, ret);
+		dev_set_drvdata(&sch->dev, NULL);
+		kfree(private);
+	} else {
+		if (dev_get_uevent_suppress(&sch->dev)) {
+			dev_set_uevent_suppress(&sch->dev, 0);
+			kobject_uevent(&sch->dev.kobj, KOBJ_ADD);
+		}
+	}
+	return ret;
+}
+
+static int chsc_subchannel_remove(struct subchannel *sch)
+{
+	struct chsc_private *private;
+
+	cio_disable_subchannel(sch);
+	private = dev_get_drvdata(&sch->dev);
+	dev_set_drvdata(&sch->dev, NULL);
+	if (private->request) {
+		complete(&private->request->completion);
+		put_device(&sch->dev);
+	}
+	kfree(private);
+	return 0;
+}
+
+static void chsc_subchannel_shutdown(struct subchannel *sch)
+{
+	cio_disable_subchannel(sch);
+}
+
+static int chsc_subchannel_prepare(struct subchannel *sch)
+{
+	int cc;
+	struct schib schib;
+	/*
+	 * Don't allow suspend while the subchannel is not idle
+	 * since we don't have a way to clear the subchannel and
+	 * cannot disable it with a request running.
+	 */
+	cc = stsch(sch->schid, &schib);
+	if (!cc && scsw_stctl(&schib.scsw))
+		return -EAGAIN;
+	return 0;
+}
+
+static int chsc_subchannel_freeze(struct subchannel *sch)
+{
+	return cio_disable_subchannel(sch);
+}
+
+static int chsc_subchannel_restore(struct subchannel *sch)
+{
+	return cio_enable_subchannel(sch, (u32)(unsigned long)sch);
+}
+
+static struct css_device_id chsc_subchannel_ids[] = {
+	{ .match_flags = 0x1, .type = SUBCHANNEL_TYPE_CHSC, },
+	{ /* end of list */ },
+};
+MODULE_DEVICE_TABLE(css, chsc_subchannel_ids);
+
+static struct css_driver chsc_subchannel_driver = {
+	.drv = {
+		.owner = THIS_MODULE,
+		.name = "chsc_subchannel",
+	},
+	.subchannel_type = chsc_subchannel_ids,
+	.irq = chsc_subchannel_irq,
+	.probe = chsc_subchannel_probe,
+	.remove = chsc_subchannel_remove,
+	.shutdown = chsc_subchannel_shutdown,
+	.prepare = chsc_subchannel_prepare,
+	.freeze = chsc_subchannel_freeze,
+	.thaw = chsc_subchannel_restore,
+	.restore = chsc_subchannel_restore,
+};
+
+static int __init chsc_init_dbfs(void)
+{
+	chsc_debug_msg_id = debug_register("chsc_msg", 8, 1, 4 * sizeof(long));
+	if (!chsc_debug_msg_id)
+		goto out;
+	debug_register_view(chsc_debug_msg_id, &debug_sprintf_view);
+	debug_set_level(chsc_debug_msg_id, 2);
+	chsc_debug_log_id = debug_register("chsc_log", 16, 1, 16);
+	if (!chsc_debug_log_id)
+		goto out;
+	debug_register_view(chsc_debug_log_id, &debug_hex_ascii_view);
+	debug_set_level(chsc_debug_log_id, 2);
+	return 0;
+out:
+	debug_unregister(chsc_debug_msg_id);
+	return -ENOMEM;
+}
+
+static void chsc_remove_dbfs(void)
+{
+	debug_unregister(chsc_debug_log_id);
+	debug_unregister(chsc_debug_msg_id);
+}
+
+static int __init chsc_init_sch_driver(void)
+{
+	return css_driver_register(&chsc_subchannel_driver);
+}
+
+static void chsc_cleanup_sch_driver(void)
+{
+	css_driver_unregister(&chsc_subchannel_driver);
+}
+
+static DEFINE_SPINLOCK(chsc_lock);
+
+static int chsc_subchannel_match_next_free(struct device *dev, void *data)
+{
+	struct subchannel *sch = to_subchannel(dev);
+
+	return sch->schib.pmcw.ena && !scsw_fctl(&sch->schib.scsw);
+}
+
+static struct subchannel *chsc_get_next_subchannel(struct subchannel *sch)
+{
+	struct device *dev;
+
+	dev = driver_find_device(&chsc_subchannel_driver.drv,
+				 sch ? &sch->dev : NULL, NULL,
+				 chsc_subchannel_match_next_free);
+	return dev ? to_subchannel(dev) : NULL;
+}
+
+/**
+ * chsc_async() - try to start a chsc request asynchronously
+ * @chsc_area: request to be started
+ * @request: request structure to associate
+ *
+ * Tries to start a chsc request on one of the existing chsc subchannels.
+ * Returns:
+ *  %0 if the request was performed synchronously
+ *  %-EINPROGRESS if the request was successfully started
+ *  %-EBUSY if all chsc subchannels are busy
+ *  %-ENODEV if no chsc subchannels are available
+ * Context:
+ *  interrupts disabled, chsc_lock held
+ */
+static int chsc_async(struct chsc_async_area *chsc_area,
+		      struct chsc_request *request)
+{
+	int cc;
+	struct chsc_private *private;
+	struct subchannel *sch = NULL;
+	int ret = -ENODEV;
+	char dbf[10];
+
+	chsc_area->header.key = PAGE_DEFAULT_KEY >> 4;
+	while ((sch = chsc_get_next_subchannel(sch))) {
+		spin_lock(sch->lock);
+		private = dev_get_drvdata(&sch->dev);
+		if (private->request) {
+			spin_unlock(sch->lock);
+			ret = -EBUSY;
+			continue;
+		}
+		chsc_area->header.sid = sch->schid;
+		CHSC_LOG(2, "schid");
+		CHSC_LOG_HEX(2, &sch->schid, sizeof(sch->schid));
+		cc = chsc(chsc_area);
+		snprintf(dbf, sizeof(dbf), "cc:%d", cc);
+		CHSC_LOG(2, dbf);
+		switch (cc) {
+		case 0:
+			ret = 0;
+			break;
+		case 1:
+			sch->schib.scsw.cmd.fctl |= SCSW_FCTL_START_FUNC;
+			ret = -EINPROGRESS;
+			private->request = request;
+			break;
+		case 2:
+			ret = -EBUSY;
+			break;
+		default:
+			ret = -ENODEV;
+		}
+		spin_unlock(sch->lock);
+		CHSC_MSG(2, "chsc on 0.%x.%04x returned cc=%d\n",
+			 sch->schid.ssid, sch->schid.sch_no, cc);
+		if (ret == -EINPROGRESS)
+			return -EINPROGRESS;
+		put_device(&sch->dev);
+		if (ret == 0)
+			return 0;
+	}
+	return ret;
+}
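+
+/*
+ * Call pattern: chsc_async() must run with chsc_lock held and interrupts
+ * disabled; chsc_ioctl_start() below is the canonical caller:
+ *
+ *	spin_lock_irq(&chsc_lock);
+ *	ret = chsc_async(chsc_area, request);
+ *	spin_unlock_irq(&chsc_lock);
+ *	if (ret == -EINPROGRESS)
+ *		wait_for_completion(&request->completion);
+ */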
+
+static void chsc_log_command(void *chsc_area)
+{
+	char dbf[10];
+
+	snprintf(dbf, sizeof(dbf), "CHSC:%x", ((uint16_t *)chsc_area)[1]);
+	CHSC_LOG(0, dbf);
+	CHSC_LOG_HEX(0, chsc_area, 32);
+}
+
+static int chsc_examine_irb(struct chsc_request *request)
+{
+	int backed_up;
+
+	if (!(scsw_stctl(&request->irb.scsw) & SCSW_STCTL_STATUS_PEND))
+		return -EIO;
+	backed_up = scsw_cstat(&request->irb.scsw) & SCHN_STAT_CHAIN_CHECK;
+	request->irb.scsw.cmd.cstat &= ~SCHN_STAT_CHAIN_CHECK;
+	if (scsw_cstat(&request->irb.scsw) == 0)
+		return 0;
+	if (!backed_up)
+		return 0;
+	if (scsw_cstat(&request->irb.scsw) & SCHN_STAT_PROG_CHECK)
+		return -EIO;
+	if (scsw_cstat(&request->irb.scsw) & SCHN_STAT_PROT_CHECK)
+		return -EPERM;
+	if (scsw_cstat(&request->irb.scsw) & SCHN_STAT_CHN_DATA_CHK)
+		return -EAGAIN;
+	if (scsw_cstat(&request->irb.scsw) & SCHN_STAT_CHN_CTRL_CHK)
+		return -EAGAIN;
+	return -EIO;
+}
+
+static int chsc_ioctl_start(void __user *user_area)
+{
+	struct chsc_request *request;
+	struct chsc_async_area *chsc_area;
+	int ret;
+	char dbf[10];
+
+	if (!css_general_characteristics.dynio)
+		/* It makes no sense to try. */
+		return -EOPNOTSUPP;
+	chsc_area = (void *)get_zeroed_page(GFP_DMA | GFP_KERNEL);
+	if (!chsc_area)
+		return -ENOMEM;
+	request = kzalloc(sizeof(*request), GFP_KERNEL);
+	if (!request) {
+		ret = -ENOMEM;
+		goto out_free;
+	}
+	init_completion(&request->completion);
+	if (copy_from_user(chsc_area, user_area, PAGE_SIZE)) {
+		ret = -EFAULT;
+		goto out_free;
+	}
+	chsc_log_command(chsc_area);
+	spin_lock_irq(&chsc_lock);
+	ret = chsc_async(chsc_area, request);
+	spin_unlock_irq(&chsc_lock);
+	if (ret == -EINPROGRESS) {
+		wait_for_completion(&request->completion);
+		ret = chsc_examine_irb(request);
+	}
+	/* copy area back to user */
+	if (!ret)
+		if (copy_to_user(user_area, chsc_area, PAGE_SIZE))
+			ret = -EFAULT;
+out_free:
+	snprintf(dbf, sizeof(dbf), "ret:%d", ret);
+	CHSC_LOG(0, dbf);
+	kfree(request);
+	free_page((unsigned long)chsc_area);
+	return ret;
+}
+
+static int chsc_ioctl_on_close_set(void __user *user_area)
+{
+	char dbf[13];
+	int ret;
+
+	mutex_lock(&on_close_mutex);
+	if (on_close_chsc_area) {
+		ret = -EBUSY;
+		goto out_unlock;
+	}
+	on_close_request = kzalloc(sizeof(*on_close_request), GFP_KERNEL);
+	if (!on_close_request) {
+		ret = -ENOMEM;
+		goto out_unlock;
+	}
+	on_close_chsc_area = (void *)get_zeroed_page(GFP_DMA | GFP_KERNEL);
+	if (!on_close_chsc_area) {
+		ret = -ENOMEM;
+		goto out_free_request;
+	}
+	if (copy_from_user(on_close_chsc_area, user_area, PAGE_SIZE)) {
+		ret = -EFAULT;
+		goto out_free_chsc;
+	}
+	ret = 0;
+	goto out_unlock;
+
+out_free_chsc:
+	free_page((unsigned long)on_close_chsc_area);
+	on_close_chsc_area = NULL;
+out_free_request:
+	kfree(on_close_request);
+	on_close_request = NULL;
+out_unlock:
+	mutex_unlock(&on_close_mutex);
+	snprintf(dbf, sizeof(dbf), "ocsret:%d", ret);
+	CHSC_LOG(0, dbf);
+	return ret;
+}
+
+static int chsc_ioctl_on_close_remove(void)
+{
+	char dbf[13];
+	int ret;
+
+	mutex_lock(&on_close_mutex);
+	if (!on_close_chsc_area) {
+		ret = -ENOENT;
+		goto out_unlock;
+	}
+	free_page((unsigned long)on_close_chsc_area);
+	on_close_chsc_area = NULL;
+	kfree(on_close_request);
+	on_close_request = NULL;
+	ret = 0;
+out_unlock:
+	mutex_unlock(&on_close_mutex);
+	snprintf(dbf, sizeof(dbf), "ocrret:%d", ret);
+	CHSC_LOG(0, dbf);
+	return ret;
+}
+
+static int chsc_ioctl_start_sync(void __user *user_area)
+{
+	struct chsc_sync_area *chsc_area;
+	int ret, ccode;
+
+	chsc_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
+	if (!chsc_area)
+		return -ENOMEM;
+	if (copy_from_user(chsc_area, user_area, PAGE_SIZE)) {
+		ret = -EFAULT;
+		goto out_free;
+	}
+	if (chsc_area->header.code & 0x4000) {
+		ret = -EINVAL;
+		goto out_free;
+	}
+	chsc_log_command(chsc_area);
+	ccode = chsc(chsc_area);
+	if (ccode != 0) {
+		ret = -EIO;
+		goto out_free;
+	}
+	if (copy_to_user(user_area, chsc_area, PAGE_SIZE))
+		ret = -EFAULT;
+	else
+		ret = 0;
+out_free:
+	free_page((unsigned long)chsc_area);
+	return ret;
+}
+
+static int chsc_ioctl_info_channel_path(void __user *user_cd)
+{
+	struct chsc_chp_cd *cd;
+	int ret, ccode;
+	struct {
+		struct chsc_header request;
+		u32 : 2;
+		u32 m : 1;
+		u32 : 1;
+		u32 fmt1 : 4;
+		u32 cssid : 8;
+		u32 : 8;
+		u32 first_chpid : 8;
+		u32 : 24;
+		u32 last_chpid : 8;
+		u32 : 32;
+		struct chsc_header response;
+		u8 data[PAGE_SIZE - 20];
+	} __attribute__ ((packed)) *scpcd_area;
+
+	scpcd_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
+	if (!scpcd_area)
+		return -ENOMEM;
+	cd = kzalloc(sizeof(*cd), GFP_KERNEL);
+	if (!cd) {
+		ret = -ENOMEM;
+		goto out_free;
+	}
+	if (copy_from_user(cd, user_cd, sizeof(*cd))) {
+		ret = -EFAULT;
+		goto out_free;
+	}
+	scpcd_area->request.length = 0x0010;
+	scpcd_area->request.code = 0x0028;
+	scpcd_area->m = cd->m;
+	scpcd_area->fmt1 = cd->fmt;
+	scpcd_area->cssid = cd->chpid.cssid;
+	scpcd_area->first_chpid = cd->chpid.id;
+	scpcd_area->last_chpid = cd->chpid.id;
+
+	ccode = chsc(scpcd_area);
+	if (ccode != 0) {
+		ret = -EIO;
+		goto out_free;
+	}
+	if (scpcd_area->response.code != 0x0001) {
+		ret = -EIO;
+		CHSC_MSG(0, "scpcd: response code=%x\n",
+			 scpcd_area->response.code);
+		goto out_free;
+	}
+	memcpy(&cd->cpcb, &scpcd_area->response, scpcd_area->response.length);
+	if (copy_to_user(user_cd, cd, sizeof(*cd)))
+		ret = -EFAULT;
+	else
+		ret = 0;
+out_free:
+	kfree(cd);
+	free_page((unsigned long)scpcd_area);
+	return ret;
+}
+
+static int chsc_ioctl_info_cu(void __user *user_cd)
+{
+	struct chsc_cu_cd *cd;
+	int ret, ccode;
+	struct {
+		struct chsc_header request;
+		u32 : 2;
+		u32 m : 1;
+		u32 : 1;
+		u32 fmt1 : 4;
+		u32 cssid : 8;
+		u32 : 8;
+		u32 first_cun : 8;
+		u32 : 24;
+		u32 last_cun : 8;
+		u32 : 32;
+		struct chsc_header response;
+		u8 data[PAGE_SIZE - 20];
+	} __attribute__ ((packed)) *scucd_area;
+
+	scucd_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
+	if (!scucd_area)
+		return -ENOMEM;
+	cd = kzalloc(sizeof(*cd), GFP_KERNEL);
+	if (!cd) {
+		ret = -ENOMEM;
+		goto out_free;
+	}
+	if (copy_from_user(cd, user_cd, sizeof(*cd))) {
+		ret = -EFAULT;
+		goto out_free;
+	}
+	scucd_area->request.length = 0x0010;
+	scucd_area->request.code = 0x0026;
+	scucd_area->m = cd->m;
+	scucd_area->fmt1 = cd->fmt;
+	scucd_area->cssid = cd->cssid;
+	scucd_area->first_cun = cd->cun;
+	scucd_area->last_cun = cd->cun;
+
+	ccode = chsc(scucd_area);
+	if (ccode != 0) {
+		ret = -EIO;
+		goto out_free;
+	}
+	if (scucd_area->response.code != 0x0001) {
+		ret = -EIO;
+		CHSC_MSG(0, "scucd: response code=%x\n",
+			 scucd_area->response.code);
+		goto out_free;
+	}
+	memcpy(&cd->cucb, &scucd_area->response, scucd_area->response.length);
+	if (copy_to_user(user_cd, cd, sizeof(*cd)))
+		ret = -EFAULT;
+	else
+		ret = 0;
+out_free:
+	kfree(cd);
+	free_page((unsigned long)scucd_area);
+	return ret;
+}
+
+static int chsc_ioctl_info_sch_cu(void __user *user_cud)
+{
+	struct chsc_sch_cud *cud;
+	int ret, ccode;
+	struct {
+		struct chsc_header request;
+		u32 : 2;
+		u32 m : 1;
+		u32 : 5;
+		u32 fmt1 : 4;
+		u32 : 2;
+		u32 ssid : 2;
+		u32 first_sch : 16;
+		u32 : 8;
+		u32 cssid : 8;
+		u32 last_sch : 16;
+		u32 : 32;
+		struct chsc_header response;
+		u8 data[PAGE_SIZE - 20];
+	} __attribute__ ((packed)) *sscud_area;
+
+	sscud_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
+	if (!sscud_area)
+		return -ENOMEM;
+	cud = kzalloc(sizeof(*cud), GFP_KERNEL);
+	if (!cud) {
+		ret = -ENOMEM;
+		goto out_free;
+	}
+	if (copy_from_user(cud, user_cud, sizeof(*cud))) {
+		ret = -EFAULT;
+		goto out_free;
+	}
+	sscud_area->request.length = 0x0010;
+	sscud_area->request.code = 0x0006;
+	sscud_area->m = cud->schid.m;
+	sscud_area->fmt1 = cud->fmt;
+	sscud_area->ssid = cud->schid.ssid;
+	sscud_area->first_sch = cud->schid.sch_no;
+	sscud_area->cssid = cud->schid.cssid;
+	sscud_area->last_sch = cud->schid.sch_no;
+
+	ccode = chsc(sscud_area);
+	if (ccode != 0) {
+		ret = -EIO;
+		goto out_free;
+	}
+	if (sscud_area->response.code != 0x0001) {
+		ret = -EIO;
+		CHSC_MSG(0, "sscud: response code=%x\n",
+			 sscud_area->response.code);
+		goto out_free;
+	}
+	memcpy(&cud->scub, &sscud_area->response, sscud_area->response.length);
+	if (copy_to_user(user_cud, cud, sizeof(*cud)))
+		ret = -EFAULT;
+	else
+		ret = 0;
+out_free:
+	kfree(cud);
+	free_page((unsigned long)sscud_area);
+	return ret;
+}
+
+static int chsc_ioctl_conf_info(void __user *user_ci)
+{
+	struct chsc_conf_info *ci;
+	int ret, ccode;
+	struct {
+		struct chsc_header request;
+		u32 : 2;
+		u32 m : 1;
+		u32 : 1;
+		u32 fmt1 : 4;
+		u32 cssid : 8;
+		u32 : 6;
+		u32 ssid : 2;
+		u32 : 8;
+		u64 : 64;
+		struct chsc_header response;
+		u8 data[PAGE_SIZE - 20];
+	} __attribute__ ((packed)) *sci_area;
+
+	sci_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
+	if (!sci_area)
+		return -ENOMEM;
+	ci = kzalloc(sizeof(*ci), GFP_KERNEL);
+	if (!ci) {
+		ret = -ENOMEM;
+		goto out_free;
+	}
+	if (copy_from_user(ci, user_ci, sizeof(*ci))) {
+		ret = -EFAULT;
+		goto out_free;
+	}
+	sci_area->request.length = 0x0010;
+	sci_area->request.code = 0x0012;
+	sci_area->m = ci->id.m;
+	sci_area->fmt1 = ci->fmt;
+	sci_area->cssid = ci->id.cssid;
+	sci_area->ssid = ci->id.ssid;
+
+	ccode = chsc(sci_area);
+	if (ccode != 0) {
+		ret = -EIO;
+		goto out_free;
+	}
+	if (sci_area->response.code != 0x0001) {
+		ret = -EIO;
+		CHSC_MSG(0, "sci: response code=%x\n",
+			 sci_area->response.code);
+		goto out_free;
+	}
+	memcpy(&ci->scid, &sci_area->response, sci_area->response.length);
+	if (copy_to_user(user_ci, ci, sizeof(*ci)))
+		ret = -EFAULT;
+	else
+		ret = 0;
+out_free:
+	kfree(ci);
+	free_page((unsigned long)sci_area);
+	return ret;
+}
+
+static int chsc_ioctl_conf_comp_list(void __user *user_ccl)
+{
+	struct chsc_comp_list *ccl;
+	int ret, ccode;
+	struct {
+		struct chsc_header request;
+		u32 ctype : 8;
+		u32 : 4;
+		u32 fmt : 4;
+		u32 : 16;
+		u64 : 64;
+		u32 list_parm[2];
+		u64 : 64;
+		struct chsc_header response;
+		u8 data[PAGE_SIZE - 36];
+	} __attribute__ ((packed)) *sccl_area;
+	struct {
+		u32 m : 1;
+		u32 : 31;
+		u32 cssid : 8;
+		u32 : 16;
+		u32 chpid : 8;
+	} __attribute__ ((packed)) *chpid_parm;
+	struct {
+		u32 f_cssid : 8;
+		u32 l_cssid : 8;
+		u32 : 16;
+		u32 res;
+	} __attribute__ ((packed)) *cssids_parm;
+
+	sccl_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
+	if (!sccl_area)
+		return -ENOMEM;
+	ccl = kzalloc(sizeof(*ccl), GFP_KERNEL);
+	if (!ccl) {
+		ret = -ENOMEM;
+		goto out_free;
+	}
+	if (copy_from_user(ccl, user_ccl, sizeof(*ccl))) {
+		ret = -EFAULT;
+		goto out_free;
+	}
+	sccl_area->request.length = 0x0020;
+	sccl_area->request.code = 0x0030;
+	sccl_area->fmt = ccl->req.fmt;
+	sccl_area->ctype = ccl->req.ctype;
+	switch (sccl_area->ctype) {
+	case CCL_CU_ON_CHP:
+	case CCL_IOP_CHP:
+		chpid_parm = (void *)&sccl_area->list_parm;
+		chpid_parm->m = ccl->req.chpid.m;
+		chpid_parm->cssid = ccl->req.chpid.chp.cssid;
+		chpid_parm->chpid = ccl->req.chpid.chp.id;
+		break;
+	case CCL_CSS_IMG:
+	case CCL_CSS_IMG_CONF_CHAR:
+		cssids_parm = (void *)&sccl_area->list_parm;
+		cssids_parm->f_cssid = ccl->req.cssids.f_cssid;
+		cssids_parm->l_cssid = ccl->req.cssids.l_cssid;
+		break;
+	}
+	ccode = chsc(sccl_area);
+	if (ccode != 0) {
+		ret = -EIO;
+		goto out_free;
+	}
+	if (sccl_area->response.code != 0x0001) {
+		ret = -EIO;
+		CHSC_MSG(0, "sccl: response code=%x\n",
+			 sccl_area->response.code);
+		goto out_free;
+	}
+	memcpy(&ccl->sccl, &sccl_area->response, sccl_area->response.length);
+	if (copy_to_user(user_ccl, ccl, sizeof(*ccl)))
+		ret = -EFAULT;
+	else
+		ret = 0;
+out_free:
+	kfree(ccl);
+	free_page((unsigned long)sccl_area);
+	return ret;
+}
+
+static int chsc_ioctl_chpd(void __user *user_chpd)
+{
+	struct chsc_scpd *scpd_area;
+	struct chsc_cpd_info *chpd;
+	int ret;
+
+	chpd = kzalloc(sizeof(*chpd), GFP_KERNEL);
+	scpd_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
+	if (!scpd_area || !chpd) {
+		ret = -ENOMEM;
+		goto out_free;
+	}
+	if (copy_from_user(chpd, user_chpd, sizeof(*chpd))) {
+		ret = -EFAULT;
+		goto out_free;
+	}
+	ret = chsc_determine_channel_path_desc(chpd->chpid, chpd->fmt,
+					       chpd->rfmt, chpd->c, chpd->m,
+					       scpd_area);
+	if (ret)
+		goto out_free;
+	memcpy(&chpd->chpdb, &scpd_area->response, scpd_area->response.length);
+	if (copy_to_user(user_chpd, chpd, sizeof(*chpd)))
+		ret = -EFAULT;
+out_free:
+	kfree(chpd);
+	free_page((unsigned long)scpd_area);
+	return ret;
+}
+
+static int chsc_ioctl_dcal(void __user *user_dcal)
+{
+	struct chsc_dcal *dcal;
+	int ret, ccode;
+	struct {
+		struct chsc_header request;
+		u32 atype : 8;
+		u32 : 4;
+		u32 fmt : 4;
+		u32 : 16;
+		u32 res0[2];
+		u32 list_parm[2];
+		u32 res1[2];
+		struct chsc_header response;
+		u8 data[PAGE_SIZE - 36];
+	} __attribute__ ((packed)) *sdcal_area;
+
+	sdcal_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
+	if (!sdcal_area)
+		return -ENOMEM;
+	dcal = kzalloc(sizeof(*dcal), GFP_KERNEL);
+	if (!dcal) {
+		ret = -ENOMEM;
+		goto out_free;
+	}
+	if (copy_from_user(dcal, user_dcal, sizeof(*dcal))) {
+		ret = -EFAULT;
+		goto out_free;
+	}
+	sdcal_area->request.length = 0x0020;
+	sdcal_area->request.code = 0x0034;
+	sdcal_area->atype = dcal->req.atype;
+	sdcal_area->fmt = dcal->req.fmt;
+	memcpy(&sdcal_area->list_parm, &dcal->req.list_parm,
+	       sizeof(sdcal_area->list_parm));
+
+	ccode = chsc(sdcal_area);
+	if (ccode != 0) {
+		ret = -EIO;
+		goto out_free;
+	}
+	if (sdcal_area->response.code != 0x0001) {
+		ret = -EIO;
+		CHSC_MSG(0, "sdcal: response code=%x\n",
+			 sdcal_area->response.code);
+		goto out_free;
+	}
+	memcpy(&dcal->sdcal, &sdcal_area->response,
+	       sdcal_area->response.length);
+	if (copy_to_user(user_dcal, dcal, sizeof(*dcal)))
+		ret = -EFAULT;
+	else
+		ret = 0;
+out_free:
+	kfree(dcal);
+	free_page((unsigned long)sdcal_area);
+	return ret;
+}
+
+static long chsc_ioctl(struct file *filp, unsigned int cmd,
+		       unsigned long arg)
+{
+	void __user *argp;
+
+	CHSC_MSG(2, "chsc_ioctl called, cmd=%x\n", cmd);
+	if (is_compat_task())
+		argp = compat_ptr(arg);
+	else
+		argp = (void __user *)arg;
+	switch (cmd) {
+	case CHSC_START:
+		return chsc_ioctl_start(argp);
+	case CHSC_START_SYNC:
+		return chsc_ioctl_start_sync(argp);
+	case CHSC_INFO_CHANNEL_PATH:
+		return chsc_ioctl_info_channel_path(argp);
+	case CHSC_INFO_CU:
+		return chsc_ioctl_info_cu(argp);
+	case CHSC_INFO_SCH_CU:
+		return chsc_ioctl_info_sch_cu(argp);
+	case CHSC_INFO_CI:
+		return chsc_ioctl_conf_info(argp);
+	case CHSC_INFO_CCL:
+		return chsc_ioctl_conf_comp_list(argp);
+	case CHSC_INFO_CPD:
+		return chsc_ioctl_chpd(argp);
+	case CHSC_INFO_DCAL:
+		return chsc_ioctl_dcal(argp);
+	case CHSC_ON_CLOSE_SET:
+		return chsc_ioctl_on_close_set(argp);
+	case CHSC_ON_CLOSE_REMOVE:
+		return chsc_ioctl_on_close_remove();
+	default: /* unknown ioctl number */
+		return -ENOIOCTLCMD;
+	}
+}
+
+static atomic_t chsc_ready_for_use = ATOMIC_INIT(1);
+
+static int chsc_open(struct inode *inode, struct file *file)
+{
+	if (!atomic_dec_and_test(&chsc_ready_for_use)) {
+		atomic_inc(&chsc_ready_for_use);
+		return -EBUSY;
+	}
+	return nonseekable_open(inode, file);
+}
+
+static int chsc_release(struct inode *inode, struct file *filp)
+{
+	char dbf[13];
+	int ret;
+
+	mutex_lock(&on_close_mutex);
+	if (!on_close_chsc_area)
+		goto out_unlock;
+	init_completion(&on_close_request->completion);
+	CHSC_LOG(0, "on_close");
+	chsc_log_command(on_close_chsc_area);
+	spin_lock_irq(&chsc_lock);
+	ret = chsc_async(on_close_chsc_area, on_close_request);
+	spin_unlock_irq(&chsc_lock);
+	if (ret == -EINPROGRESS) {
+		wait_for_completion(&on_close_request->completion);
+		ret = chsc_examine_irb(on_close_request);
+	}
+	snprintf(dbf, sizeof(dbf), "relret:%d", ret);
+	CHSC_LOG(0, dbf);
+	free_page((unsigned long)on_close_chsc_area);
+	on_close_chsc_area = NULL;
+	kfree(on_close_request);
+	on_close_request = NULL;
+out_unlock:
+	mutex_unlock(&on_close_mutex);
+	atomic_inc(&chsc_ready_for_use);
+	return 0;
+}
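+
+/*
+ * Note: the on-close mechanism allows user space to register a single CHSC
+ * command via CHSC_ON_CLOSE_SET that is issued here when the device node is
+ * closed, e.g. to revert a configuration change even if the process exits
+ * unexpectedly; CHSC_ON_CLOSE_REMOVE unregisters it again.
+ */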
+
+static const struct file_operations chsc_fops = {
+	.owner = THIS_MODULE,
+	.open = chsc_open,
+	.release = chsc_release,
+	.unlocked_ioctl = chsc_ioctl,
+	.compat_ioctl = chsc_ioctl,
+	.llseek = no_llseek,
+};
+
+static struct miscdevice chsc_misc_device = {
+	.minor = MISC_DYNAMIC_MINOR,
+	.name = "chsc",
+	.fops = &chsc_fops,
+};
+
+static int __init chsc_misc_init(void)
+{
+	return misc_register(&chsc_misc_device);
+}
+
+static void chsc_misc_cleanup(void)
+{
+	misc_deregister(&chsc_misc_device);
+}
+
+static int __init chsc_sch_init(void)
+{
+	int ret;
+
+	ret = chsc_init_dbfs();
+	if (ret)
+		return ret;
+	isc_register(CHSC_SCH_ISC);
+	ret = chsc_init_sch_driver();
+	if (ret)
+		goto out_dbf;
+	ret = chsc_misc_init();
+	if (ret)
+		goto out_driver;
+	return ret;
+out_driver:
+	chsc_cleanup_sch_driver();
+out_dbf:
+	isc_unregister(CHSC_SCH_ISC);
+	chsc_remove_dbfs();
+	return ret;
+}
+
+static void __exit chsc_sch_exit(void)
+{
+	chsc_misc_cleanup();
+	chsc_cleanup_sch_driver();
+	isc_unregister(CHSC_SCH_ISC);
+	chsc_remove_dbfs();
+}
+
+module_init(chsc_sch_init);
+module_exit(chsc_sch_exit);
diff --git a/drivers/s390/cio/chsc_sch.h b/drivers/s390/cio/chsc_sch.h
new file mode 100644
index 0000000..ff5328b
--- /dev/null
+++ b/drivers/s390/cio/chsc_sch.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _CHSC_SCH_H
+#define _CHSC_SCH_H
+
+struct chsc_request {
+	struct completion completion;
+	struct irb irb;
+};
+
+struct chsc_private {
+	struct chsc_request *request;
+};
+
+#endif
diff --git a/drivers/s390/cio/cio.c b/drivers/s390/cio/cio.c
new file mode 100644
index 0000000..de744ca
--- /dev/null
+++ b/drivers/s390/cio/cio.c
@@ -0,0 +1,762 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ *   S/390 common I/O routines -- low level i/o calls
+ *
+ *    Copyright IBM Corp. 1999, 2008
+ *    Author(s): Ingo Adlung (adlung@de.ibm.com)
+ *		 Cornelia Huck (cornelia.huck@de.ibm.com)
+ *		 Arnd Bergmann (arndb@de.ibm.com)
+ *		 Martin Schwidefsky (schwidefsky@de.ibm.com)
+ */
+
+#define KMSG_COMPONENT "cio"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <linux/ftrace.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/device.h>
+#include <linux/kernel_stat.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <asm/cio.h>
+#include <asm/delay.h>
+#include <asm/irq.h>
+#include <asm/irq_regs.h>
+#include <asm/setup.h>
+#include <asm/ipl.h>
+#include <asm/chpid.h>
+#include <asm/airq.h>
+#include <asm/isc.h>
+#include <linux/sched/cputime.h>
+#include <asm/fcx.h>
+#include <asm/nmi.h>
+#include <asm/crw.h>
+#include "cio.h"
+#include "css.h"
+#include "chsc.h"
+#include "ioasm.h"
+#include "io_sch.h"
+#include "blacklist.h"
+#include "cio_debug.h"
+#include "chp.h"
+#include "trace.h"
+
+debug_info_t *cio_debug_msg_id;
+debug_info_t *cio_debug_trace_id;
+debug_info_t *cio_debug_crw_id;
+
+DEFINE_PER_CPU_ALIGNED(struct irb, cio_irb);
+EXPORT_PER_CPU_SYMBOL(cio_irb);
+
+/*
+ * Function: cio_debug_init
+ * Initializes three debug logs for common I/O:
+ * - cio_msg logs generic cio messages
+ * - cio_trace logs the calling of different functions
+ * - cio_crw logs machine check related cio messages
+ */
+static int __init cio_debug_init(void)
+{
+	cio_debug_msg_id = debug_register("cio_msg", 16, 1, 11 * sizeof(long));
+	if (!cio_debug_msg_id)
+		goto out_unregister;
+	debug_register_view(cio_debug_msg_id, &debug_sprintf_view);
+	debug_set_level(cio_debug_msg_id, 2);
+	cio_debug_trace_id = debug_register("cio_trace", 16, 1, 16);
+	if (!cio_debug_trace_id)
+		goto out_unregister;
+	debug_register_view(cio_debug_trace_id, &debug_hex_ascii_view);
+	debug_set_level(cio_debug_trace_id, 2);
+	cio_debug_crw_id = debug_register("cio_crw", 8, 1, 8 * sizeof(long));
+	if (!cio_debug_crw_id)
+		goto out_unregister;
+	debug_register_view(cio_debug_crw_id, &debug_sprintf_view);
+	debug_set_level(cio_debug_crw_id, 4);
+	return 0;
+
+out_unregister:
+	debug_unregister(cio_debug_msg_id);
+	debug_unregister(cio_debug_trace_id);
+	debug_unregister(cio_debug_crw_id);
+	return -1;
+}
+
+arch_initcall (cio_debug_init);
+
+int cio_set_options(struct subchannel *sch, int flags)
+{
+	struct io_subchannel_private *priv = to_io_private(sch);
+
+	priv->options.suspend = (flags & DOIO_ALLOW_SUSPEND) != 0;
+	priv->options.prefetch = (flags & DOIO_DENY_PREFETCH) != 0;
+	priv->options.inter = (flags & DOIO_SUPPRESS_INTER) != 0;
+	return 0;
+}
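+
+/*
+ * Usage sketch: the DOIO_* flags map onto the ORB options consumed by
+ * cio_start_key(); a driver that wants suspendable channel programs would
+ * call, e.g.:
+ *
+ *	cio_set_options(sch, DOIO_ALLOW_SUSPEND);
+ */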
+
+static int
+cio_start_handle_notoper(struct subchannel *sch, __u8 lpm)
+{
+	char dbf_text[15];
+
+	if (lpm != 0)
+		sch->lpm &= ~lpm;
+	else
+		sch->lpm = 0;
+
+	CIO_MSG_EVENT(2, "cio_start: 'not oper' status for "
+		      "subchannel 0.%x.%04x!\n", sch->schid.ssid,
+		      sch->schid.sch_no);
+
+	if (cio_update_schib(sch))
+		return -ENODEV;
+
+	sprintf(dbf_text, "no%s", dev_name(&sch->dev));
+	CIO_TRACE_EVENT(0, dbf_text);
+	CIO_HEX_EVENT(0, &sch->schib, sizeof (struct schib));
+
+	return (sch->lpm ? -EACCES : -ENODEV);
+}
+
+int
+cio_start_key (struct subchannel *sch,	/* subchannel structure */
+	       struct ccw1 * cpa,	/* logical channel prog addr */
+	       __u8 lpm,		/* logical path mask */
+	       __u8 key)                /* storage key */
+{
+	struct io_subchannel_private *priv = to_io_private(sch);
+	union orb *orb = &priv->orb;
+	int ccode;
+
+	CIO_TRACE_EVENT(5, "stIO");
+	CIO_TRACE_EVENT(5, dev_name(&sch->dev));
+
+	memset(orb, 0, sizeof(union orb));
+	/* sch is always under 2G. */
+	orb->cmd.intparm = (u32)(addr_t)sch;
+	orb->cmd.fmt = 1;
+
+	orb->cmd.pfch = priv->options.prefetch == 0;
+	orb->cmd.spnd = priv->options.suspend;
+	orb->cmd.ssic = priv->options.suspend && priv->options.inter;
+	orb->cmd.lpm = (lpm != 0) ? lpm : sch->lpm;
+	/*
+	 * for 64 bit we always support 64 bit IDAWs with 4k page size only
+	 */
+	orb->cmd.c64 = 1;
+	orb->cmd.i2k = 0;
+	orb->cmd.key = key >> 4;
+	/* issue "Start Subchannel" */
+	orb->cmd.cpa = (__u32) __pa(cpa);
+	ccode = ssch(sch->schid, orb);
+
+	/* process condition code */
+	CIO_HEX_EVENT(5, &ccode, sizeof(ccode));
+
+	switch (ccode) {
+	case 0:
+		/*
+		 * initialize device status information
+		 */
+		sch->schib.scsw.cmd.actl |= SCSW_ACTL_START_PEND;
+		return 0;
+	case 1:		/* status pending */
+	case 2:		/* busy */
+		return -EBUSY;
+	case 3:		/* device/path not operational */
+		return cio_start_handle_notoper(sch, lpm);
+	default:
+		return ccode;
+	}
+}
+EXPORT_SYMBOL_GPL(cio_start_key);
+
+int
+cio_start (struct subchannel *sch, struct ccw1 *cpa, __u8 lpm)
+{
+	return cio_start_key(sch, cpa, lpm, PAGE_DEFAULT_KEY);
+}
+EXPORT_SYMBOL_GPL(cio_start);
+
+/*
+ * resume suspended I/O operation
+ */
+int
+cio_resume (struct subchannel *sch)
+{
+	int ccode;
+
+	CIO_TRACE_EVENT(4, "resIO");
+	CIO_TRACE_EVENT(4, dev_name(&sch->dev));
+
+	ccode = rsch (sch->schid);
+
+	CIO_HEX_EVENT(4, &ccode, sizeof(ccode));
+
+	switch (ccode) {
+	case 0:
+		sch->schib.scsw.cmd.actl |= SCSW_ACTL_RESUME_PEND;
+		return 0;
+	case 1:
+		return -EBUSY;
+	case 2:
+		return -EINVAL;
+	default:
+		/*
+		 * Useless to wait for request completion,
+		 * as the device is no longer operational.
+		 */
+		return -ENODEV;
+	}
+}
+EXPORT_SYMBOL_GPL(cio_resume);
+
+/*
+ * halt I/O operation
+ */
+int
+cio_halt(struct subchannel *sch)
+{
+	int ccode;
+
+	if (!sch)
+		return -ENODEV;
+
+	CIO_TRACE_EVENT(2, "haltIO");
+	CIO_TRACE_EVENT(2, dev_name(&sch->dev));
+
+	/*
+	 * Issue "Halt subchannel" and process condition code
+	 */
+	ccode = hsch (sch->schid);
+
+	CIO_HEX_EVENT(2, &ccode, sizeof(ccode));
+
+	switch (ccode) {
+	case 0:
+		sch->schib.scsw.cmd.actl |= SCSW_ACTL_HALT_PEND;
+		return 0;
+	case 1:		/* status pending */
+	case 2:		/* busy */
+		return -EBUSY;
+	default:		/* device not operational */
+		return -ENODEV;
+	}
+}
+EXPORT_SYMBOL_GPL(cio_halt);
+
+/*
+ * Clear I/O operation
+ */
+int
+cio_clear(struct subchannel *sch)
+{
+	int ccode;
+
+	if (!sch)
+		return -ENODEV;
+
+	CIO_TRACE_EVENT(2, "clearIO");
+	CIO_TRACE_EVENT(2, dev_name(&sch->dev));
+
+	/*
+	 * Issue "Clear subchannel" and process condition code
+	 */
+	ccode = csch (sch->schid);
+
+	CIO_HEX_EVENT(2, &ccode, sizeof(ccode));
+
+	switch (ccode) {
+	case 0:
+		sch->schib.scsw.cmd.actl |= SCSW_ACTL_CLEAR_PEND;
+		return 0;
+	default:		/* device not operational */
+		return -ENODEV;
+	}
+}
+EXPORT_SYMBOL_GPL(cio_clear);
+
+/*
+ * Function: cio_cancel
+ * Issues a "Cancel Subchannel" on the specified subchannel
+ * Note: We don't need any fancy intparms and flags here
+ *	 since xsch is executed synchronously.
+ * Only for common I/O internal use for now.
+ */
+int
+cio_cancel (struct subchannel *sch)
+{
+	int ccode;
+
+	if (!sch)
+		return -ENODEV;
+
+	CIO_TRACE_EVENT(2, "cancelIO");
+	CIO_TRACE_EVENT(2, dev_name(&sch->dev));
+
+	ccode = xsch (sch->schid);
+
+	CIO_HEX_EVENT(2, &ccode, sizeof(ccode));
+
+	switch (ccode) {
+	case 0:		/* success */
+		/* Update information in scsw. */
+		if (cio_update_schib(sch))
+			return -ENODEV;
+		return 0;
+	case 1:		/* status pending */
+		return -EBUSY;
+	case 2:		/* not applicable */
+		return -EINVAL;
+	default:	/* not oper */
+		return -ENODEV;
+	}
+}
+EXPORT_SYMBOL_GPL(cio_cancel);
+
+/**
+ * cio_cancel_halt_clear - Cancel running I/O by performing cancel, halt
+ * and clear, in that order, if the subchannel is valid.
+ * @sch: subchannel on which to perform the cancel_halt_clear operation
+ * @iretry: number of retries remaining for the next operation
+ *
+ * This should be called repeatedly since halt/clear are asynchronous
+ * operations. We do one try with cio_cancel, three tries with cio_halt and
+ * 255 tries with cio_clear. The caller should initialize @iretry to 255
+ * for its first call and keep passing the same @iretry on subsequent calls
+ * until a return value other than -EBUSY is received.
+ *
+ * Returns 0 if device now idle, -ENODEV for device not operational,
+ * -EBUSY if an interrupt is expected (either from halt/clear or from a
+ * status pending), and -EIO if out of retries.
+ */
+int cio_cancel_halt_clear(struct subchannel *sch, int *iretry)
+{
+	int ret;
+
+	if (cio_update_schib(sch))
+		return -ENODEV;
+	if (!sch->schib.pmcw.ena)
+		/* Not operational -> done. */
+		return 0;
+	/* Stage 1: cancel io. */
+	if (!(scsw_actl(&sch->schib.scsw) & SCSW_ACTL_HALT_PEND) &&
+	    !(scsw_actl(&sch->schib.scsw) & SCSW_ACTL_CLEAR_PEND)) {
+		if (!scsw_is_tm(&sch->schib.scsw)) {
+			ret = cio_cancel(sch);
+			if (ret != -EINVAL)
+				return ret;
+		}
+		/*
+		 * Cancel io unsuccessful or not applicable (transport mode).
+		 * Continue with asynchronous instructions.
+		 */
+		*iretry = 3;	/* 3 halt retries. */
+	}
+	/* Stage 2: halt io. */
+	if (!(scsw_actl(&sch->schib.scsw) & SCSW_ACTL_CLEAR_PEND)) {
+		if (*iretry) {
+			*iretry -= 1;
+			ret = cio_halt(sch);
+			if (ret != -EBUSY)
+				return (ret == 0) ? -EBUSY : ret;
+		}
+		/* Halt io unsuccessful. */
+		*iretry = 255;	/* 255 clear retries. */
+	}
+	/* Stage 3: clear io. */
+	if (*iretry) {
+		*iretry -= 1;
+		ret = cio_clear(sch);
+		return (ret == 0) ? -EBUSY : ret;
+	}
+	/* Function was unsuccessful */
+	return -EIO;
+}
+EXPORT_SYMBOL_GPL(cio_cancel_halt_clear);
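+
+/*
+ * Usage sketch: the caller drives the cancel/halt/clear state machine from
+ * its own, typically interrupt- or timer-driven, retry loop:
+ *
+ *	int iretry = 255;
+ *
+ *	for (;;) {
+ *		ret = cio_cancel_halt_clear(sch, &iretry);
+ *		if (ret != -EBUSY)
+ *			break;
+ *		... wait for an interrupt or a timeout, then retry ...
+ *	}
+ */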
+
+static void cio_apply_config(struct subchannel *sch, struct schib *schib)
+{
+	schib->pmcw.intparm = sch->config.intparm;
+	schib->pmcw.mbi = sch->config.mbi;
+	schib->pmcw.isc = sch->config.isc;
+	schib->pmcw.ena = sch->config.ena;
+	schib->pmcw.mme = sch->config.mme;
+	schib->pmcw.mp = sch->config.mp;
+	schib->pmcw.csense = sch->config.csense;
+	schib->pmcw.mbfc = sch->config.mbfc;
+	if (sch->config.mbfc)
+		schib->mba = sch->config.mba;
+}
+
+static int cio_check_config(struct subchannel *sch, struct schib *schib)
+{
+	return (schib->pmcw.intparm == sch->config.intparm) &&
+		(schib->pmcw.mbi == sch->config.mbi) &&
+		(schib->pmcw.isc == sch->config.isc) &&
+		(schib->pmcw.ena == sch->config.ena) &&
+		(schib->pmcw.mme == sch->config.mme) &&
+		(schib->pmcw.mp == sch->config.mp) &&
+		(schib->pmcw.csense == sch->config.csense) &&
+		(schib->pmcw.mbfc == sch->config.mbfc) &&
+		(!sch->config.mbfc || (schib->mba == sch->config.mba));
+}
+
+/*
+ * cio_commit_config - apply configuration to the subchannel
+ */
+int cio_commit_config(struct subchannel *sch)
+{
+	int ccode, retry, ret = 0;
+	struct schib schib;
+	struct irb irb;
+
+	if (stsch(sch->schid, &schib) || !css_sch_is_valid(&schib))
+		return -ENODEV;
+
+	for (retry = 0; retry < 5; retry++) {
+		/* copy desired changes to local schib */
+		cio_apply_config(sch, &schib);
+		ccode = msch(sch->schid, &schib);
+		if (ccode < 0) /* -EIO if msch gets a program check. */
+			return ccode;
+		switch (ccode) {
+		case 0: /* successful */
+			if (stsch(sch->schid, &schib) ||
+			    !css_sch_is_valid(&schib))
+				return -ENODEV;
+			if (cio_check_config(sch, &schib)) {
+				/* commit changes from local schib */
+				memcpy(&sch->schib, &schib, sizeof(schib));
+				return 0;
+			}
+			ret = -EAGAIN;
+			break;
+		case 1: /* status pending */
+			ret = -EBUSY;
+			if (tsch(sch->schid, &irb))
+				return ret;
+			break;
+		case 2: /* busy */
+			udelay(100); /* allow for recovery */
+			ret = -EBUSY;
+			break;
+		case 3: /* not operational */
+			return -ENODEV;
+		}
+	}
+	return ret;
+}
+EXPORT_SYMBOL_GPL(cio_commit_config);
+
+/**
+ * cio_update_schib - Perform stsch and update schib if subchannel is valid.
+ * @sch: subchannel on which to perform stsch
+ * Return zero on success, -ENODEV otherwise.
+ */
+int cio_update_schib(struct subchannel *sch)
+{
+	struct schib schib;
+
+	if (stsch(sch->schid, &schib) || !css_sch_is_valid(&schib))
+		return -ENODEV;
+
+	memcpy(&sch->schib, &schib, sizeof(schib));
+	return 0;
+}
+EXPORT_SYMBOL_GPL(cio_update_schib);
+
+/**
+ * cio_enable_subchannel - enable a subchannel.
+ * @sch: subchannel to be enabled
+ * @intparm: interruption parameter to set
+ */
+int cio_enable_subchannel(struct subchannel *sch, u32 intparm)
+{
+	int ret;
+
+	CIO_TRACE_EVENT(2, "ensch");
+	CIO_TRACE_EVENT(2, dev_name(&sch->dev));
+
+	if (sch_is_pseudo_sch(sch))
+		return -EINVAL;
+	if (cio_update_schib(sch))
+		return -ENODEV;
+
+	sch->config.ena = 1;
+	sch->config.isc = sch->isc;
+	sch->config.intparm = intparm;
+
+	ret = cio_commit_config(sch);
+	if (ret == -EIO) {
+		/*
+		 * Got a program check in msch. Try without
+		 * the concurrent sense bit the next time.
+		 */
+		sch->config.csense = 0;
+		ret = cio_commit_config(sch);
+	}
+	CIO_HEX_EVENT(2, &ret, sizeof(ret));
+	return ret;
+}
+EXPORT_SYMBOL_GPL(cio_enable_subchannel);
+
+/**
+ * cio_disable_subchannel - disable a subchannel.
+ * @sch: subchannel to disable
+ */
+int cio_disable_subchannel(struct subchannel *sch)
+{
+	int ret;
+
+	CIO_TRACE_EVENT(2, "dissch");
+	CIO_TRACE_EVENT(2, dev_name(&sch->dev));
+
+	if (sch_is_pseudo_sch(sch))
+		return 0;
+	if (cio_update_schib(sch))
+		return -ENODEV;
+
+	sch->config.ena = 0;
+	ret = cio_commit_config(sch);
+
+	CIO_HEX_EVENT(2, &ret, sizeof(ret));
+	return ret;
+}
+EXPORT_SYMBOL_GPL(cio_disable_subchannel);
+
+/*
+ * do_cio_interrupt() handles all normal I/O device IRQ's
+ */
+static irqreturn_t do_cio_interrupt(int irq, void *dummy)
+{
+	struct tpi_info *tpi_info;
+	struct subchannel *sch;
+	struct irb *irb;
+
+	set_cpu_flag(CIF_NOHZ_DELAY);
+	tpi_info = (struct tpi_info *) &get_irq_regs()->int_code;
+	trace_s390_cio_interrupt(tpi_info);
+	irb = this_cpu_ptr(&cio_irb);
+	sch = (struct subchannel *)(unsigned long) tpi_info->intparm;
+	if (!sch) {
+		/* Clear pending interrupt condition. */
+		inc_irq_stat(IRQIO_CIO);
+		tsch(tpi_info->schid, irb);
+		return IRQ_HANDLED;
+	}
+	spin_lock(sch->lock);
+	/* Store interrupt response block to lowcore. */
+	if (tsch(tpi_info->schid, irb) == 0) {
+		/* Keep subchannel information word up to date. */
+		memcpy (&sch->schib.scsw, &irb->scsw, sizeof (irb->scsw));
+		/* Call interrupt handler if there is one. */
+		if (sch->driver && sch->driver->irq)
+			sch->driver->irq(sch);
+		else
+			inc_irq_stat(IRQIO_CIO);
+	} else
+		inc_irq_stat(IRQIO_CIO);
+	spin_unlock(sch->lock);
+
+	return IRQ_HANDLED;
+}
+
+static struct irqaction io_interrupt = {
+	.name	 = "IO",
+	.handler = do_cio_interrupt,
+};
+
+void __init init_cio_interrupts(void)
+{
+	irq_set_chip_and_handler(IO_INTERRUPT,
+				 &dummy_irq_chip, handle_percpu_irq);
+	setup_irq(IO_INTERRUPT, &io_interrupt);
+}
+
+#ifdef CONFIG_CCW_CONSOLE
+static struct subchannel *console_sch;
+static struct lock_class_key console_sch_key;
+
+/*
+ * Use cio_tsch to update the subchannel status and call the interrupt handler
+ * if status had been pending. Called with the subchannel's lock held.
+ */
+void cio_tsch(struct subchannel *sch)
+{
+	struct irb *irb;
+	int irq_context;
+
+	irb = this_cpu_ptr(&cio_irb);
+	/* Store interrupt response block to lowcore. */
+	if (tsch(sch->schid, irb) != 0)
+		/* Not status pending or not operational. */
+		return;
+	memcpy(&sch->schib.scsw, &irb->scsw, sizeof(union scsw));
+	/* Call interrupt handler with updated status. */
+	irq_context = in_interrupt();
+	if (!irq_context) {
+		local_bh_disable();
+		irq_enter();
+	}
+	kstat_incr_irq_this_cpu(IO_INTERRUPT);
+	if (sch->driver && sch->driver->irq)
+		sch->driver->irq(sch);
+	else
+		inc_irq_stat(IRQIO_CIO);
+	if (!irq_context) {
+		irq_exit();
+		_local_bh_enable();
+	}
+}
+
+static int cio_test_for_console(struct subchannel_id schid, void *data)
+{
+	struct schib schib;
+
+	if (stsch(schid, &schib) != 0)
+		return -ENXIO;
+	if ((schib.pmcw.st == SUBCHANNEL_TYPE_IO) && schib.pmcw.dnv &&
+	    (schib.pmcw.dev == console_devno)) {
+		console_irq = schid.sch_no;
+		return 1; /* found */
+	}
+	return 0;
+}
+
+static int cio_get_console_sch_no(void)
+{
+	struct subchannel_id schid;
+	struct schib schib;
+
+	init_subchannel_id(&schid);
+	if (console_irq != -1) {
+		/* VM provided us with the irq number of the console. */
+		schid.sch_no = console_irq;
+		if (stsch(schid, &schib) != 0 ||
+		    (schib.pmcw.st != SUBCHANNEL_TYPE_IO) || !schib.pmcw.dnv)
+			return -1;
+		console_devno = schib.pmcw.dev;
+	} else if (console_devno != -1) {
+		/* At least the console device number is known. */
+		for_each_subchannel(cio_test_for_console, NULL);
+	}
+	return console_irq;
+}
+
+struct subchannel *cio_probe_console(void)
+{
+	struct subchannel_id schid;
+	struct subchannel *sch;
+	struct schib schib;
+	int sch_no, ret;
+
+	sch_no = cio_get_console_sch_no();
+	if (sch_no == -1) {
+		pr_warn("No CCW console was found\n");
+		return ERR_PTR(-ENODEV);
+	}
+	init_subchannel_id(&schid);
+	schid.sch_no = sch_no;
+	ret = stsch(schid, &schib);
+	if (ret)
+		return ERR_PTR(-ENODEV);
+
+	sch = css_alloc_subchannel(schid, &schib);
+	if (IS_ERR(sch))
+		return sch;
+
+	lockdep_set_class(sch->lock, &console_sch_key);
+	isc_register(CONSOLE_ISC);
+	sch->config.isc = CONSOLE_ISC;
+	sch->config.intparm = (u32)(addr_t)sch;
+	ret = cio_commit_config(sch);
+	if (ret) {
+		isc_unregister(CONSOLE_ISC);
+		put_device(&sch->dev);
+		return ERR_PTR(ret);
+	}
+	console_sch = sch;
+	return sch;
+}
+
+int cio_is_console(struct subchannel_id schid)
+{
+	if (!console_sch)
+		return 0;
+	return schid_equal(&schid, &console_sch->schid);
+}
+
+void cio_register_early_subchannels(void)
+{
+	int ret;
+
+	if (!console_sch)
+		return;
+
+	ret = css_register_subchannel(console_sch);
+	if (ret)
+		put_device(&console_sch->dev);
+}
+#endif /* CONFIG_CCW_CONSOLE */
+
+/**
+ * cio_tm_start_key - perform start function
+ * @sch: subchannel on which to perform the start function
+ * @tcw: transport-command word to be started
+ * @lpm: mask of paths to use
+ * @key: storage key to use for storage access
+ *
+ * Start the tcw on the given subchannel. Return zero on success, non-zero
+ * otherwise.
+ */
+int cio_tm_start_key(struct subchannel *sch, struct tcw *tcw, u8 lpm, u8 key)
+{
+	int cc;
+	union orb *orb = &to_io_private(sch)->orb;
+
+	memset(orb, 0, sizeof(union orb));
+	orb->tm.intparm = (u32) (addr_t) sch;
+	orb->tm.key = key >> 4;
+	orb->tm.b = 1;
+	orb->tm.lpm = lpm ? lpm : sch->lpm;
+	orb->tm.tcw = (u32) (addr_t) tcw;
+	cc = ssch(sch->schid, orb);
+	switch (cc) {
+	case 0:
+		return 0;
+	case 1:
+	case 2:
+		return -EBUSY;
+	default:
+		return cio_start_handle_notoper(sch, lpm);
+	}
+}
+EXPORT_SYMBOL_GPL(cio_tm_start_key);
+
+/**
+ * cio_tm_intrg - perform interrogate function
+ * @sch: subchannel on which to perform the interrogate function
+ *
+ * If the specified subchannel is running in transport-mode, perform the
+ * interrogate function. Return zero on success, non-zero otherwise.
+ */
+int cio_tm_intrg(struct subchannel *sch)
+{
+	int cc;
+
+	if (!to_io_private(sch)->orb.tm.b)
+		return -EINVAL;
+	cc = xsch(sch->schid);
+	switch (cc) {
+	case 0:
+	case 2:
+		return 0;
+	case 1:
+		return -EBUSY;
+	default:
+		return -ENODEV;
+	}
+}
+EXPORT_SYMBOL_GPL(cio_tm_intrg);
diff --git a/drivers/s390/cio/cio.h b/drivers/s390/cio/cio.h
new file mode 100644
index 0000000..9811fd8
--- /dev/null
+++ b/drivers/s390/cio/cio.h
@@ -0,0 +1,149 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef S390_CIO_H
+#define S390_CIO_H
+
+#include <linux/mutex.h>
+#include <linux/device.h>
+#include <linux/mod_devicetable.h>
+#include <asm/chpid.h>
+#include <asm/cio.h>
+#include <asm/fcx.h>
+#include <asm/schid.h>
+#include "chsc.h"
+
+/*
+ * path management control word
+ */
+struct pmcw {
+	u32 intparm;		/* interruption parameter */
+	u32 qf	 : 1;		/* qdio facility */
+	u32 w	 : 1;
+	u32 isc  : 3;		/* interruption subclass */
+	u32 res5 : 3;		/* reserved zeros */
+	u32 ena  : 1;		/* enabled */
+	u32 lm	 : 2;		/* limit mode */
+	u32 mme  : 2;		/* measurement-mode enable */
+	u32 mp	 : 1;		/* multipath mode */
+	u32 tf	 : 1;		/* timing facility */
+	u32 dnv  : 1;		/* device number valid */
+	u32 dev  : 16;		/* device number */
+	u8  lpm;		/* logical path mask */
+	u8  pnom;		/* path not operational mask */
+	u8  lpum;		/* last path used mask */
+	u8  pim;		/* path installed mask */
+	u16 mbi;		/* measurement-block index */
+	u8  pom;		/* path operational mask */
+	u8  pam;		/* path available mask */
+	u8  chpid[8];		/* CHPID 0-7 (if available) */
+	u32 unused1 : 8;	/* reserved zeros */
+	u32 st	    : 3;	/* subchannel type */
+	u32 unused2 : 18;	/* reserved zeros */
+	u32 mbfc    : 1;	/* measurement block format control */
+	u32 xmwme   : 1;	/* extended measurement word mode enable */
+	u32 csense  : 1;	/* concurrent sense; can be enabled ...*/
+				/*  ... per MSCH, however, if facility */
+				/*  ... is not installed, this results */
+				/*  ... in an operand exception.       */
+} __attribute__ ((packed));
+
+/* I/O-Interruption Code as stored by TEST PENDING INTERRUPTION (TPI). */
+struct tpi_info {
+	struct subchannel_id schid;
+	u32 intparm;
+	u32 adapter_IO:1;
+	u32 :1;
+	u32 isc:3;
+	u32 :27;
+	u32 type:3;
+	u32 :12;
+} __packed __aligned(4);
+
+/* Target SCHIB configuration. */
+struct schib_config {
+	u64 mba;
+	u32 intparm;
+	u16 mbi;
+	u32 isc:3;
+	u32 ena:1;
+	u32 mme:2;
+	u32 mp:1;
+	u32 csense:1;
+	u32 mbfc:1;
+} __attribute__ ((packed));
+
+/*
+ * subchannel information block
+ */
+struct schib {
+	struct pmcw pmcw;	 /* path management control word */
+	union scsw scsw;	 /* subchannel status word */
+	__u64 mba;               /* measurement block address */
+	__u8 mda[4];		 /* model dependent area */
+} __attribute__ ((packed,aligned(4)));
+
+/*
+ * When rescheduled, todo's with higher values will overwrite those
+ * with lower values.
+ */
+enum sch_todo {
+	SCH_TODO_NOTHING,
+	SCH_TODO_EVAL,
+	SCH_TODO_UNREG,
+};
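+
+/*
+ * Example (illustrative): if SCH_TODO_EVAL is already pending when
+ * SCH_TODO_UNREG is scheduled, the higher value wins and only the
+ * unregistration is performed once the work item runs.
+ */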
+
+/* subchannel data structure used by I/O subroutines */
+struct subchannel {
+	struct subchannel_id schid;
+	spinlock_t *lock;	/* subchannel lock */
+	struct mutex reg_mutex;
+	enum {
+		SUBCHANNEL_TYPE_IO = 0,
+		SUBCHANNEL_TYPE_CHSC = 1,
+		SUBCHANNEL_TYPE_MSG = 2,
+		SUBCHANNEL_TYPE_ADM = 3,
+	} st;			/* subchannel type */
+	__u8 vpm;		/* verified path mask */
+	__u8 lpm;		/* logical path mask */
+	__u8 opm;               /* operational path mask */
+	struct schib schib;	/* subchannel information block */
+	int isc; /* desired interruption subclass */
+	struct chsc_ssd_info ssd_info;	/* subchannel description */
+	struct device dev;	/* entry in device tree */
+	struct css_driver *driver;
+	enum sch_todo todo;
+	struct work_struct todo_work;
+	struct schib_config config;
+} __attribute__ ((aligned(8)));
+
+DECLARE_PER_CPU(struct irb, cio_irb);
+
+#define to_subchannel(n) container_of(n, struct subchannel, dev)
+
+extern int cio_enable_subchannel(struct subchannel *, u32);
+extern int cio_disable_subchannel (struct subchannel *);
+extern int cio_cancel (struct subchannel *);
+extern int cio_clear (struct subchannel *);
+extern int cio_cancel_halt_clear(struct subchannel *, int *);
+extern int cio_resume (struct subchannel *);
+extern int cio_halt (struct subchannel *);
+extern int cio_start (struct subchannel *, struct ccw1 *, __u8);
+extern int cio_start_key (struct subchannel *, struct ccw1 *, __u8, __u8);
+extern int cio_set_options (struct subchannel *, int);
+extern int cio_update_schib(struct subchannel *sch);
+extern int cio_commit_config(struct subchannel *sch);
+
+int cio_tm_start_key(struct subchannel *sch, struct tcw *tcw, u8 lpm, u8 key);
+int cio_tm_intrg(struct subchannel *sch);
+
+/* Use with care. */
+#ifdef CONFIG_CCW_CONSOLE
+extern struct subchannel *cio_probe_console(void);
+extern int cio_is_console(struct subchannel_id);
+extern void cio_register_early_subchannels(void);
+extern void cio_tsch(struct subchannel *sch);
+#else
+#define cio_is_console(schid) 0
+static inline void cio_register_early_subchannels(void) {}
+#endif
+
+#endif
diff --git a/drivers/s390/cio/cio_debug.h b/drivers/s390/cio/cio_debug.h
new file mode 100644
index 0000000..7bdbe73
--- /dev/null
+++ b/drivers/s390/cio/cio_debug.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef CIO_DEBUG_H
+#define CIO_DEBUG_H
+
+#include <asm/debug.h>
+
+/* for use of debug feature */
+extern debug_info_t *cio_debug_msg_id;
+extern debug_info_t *cio_debug_trace_id;
+extern debug_info_t *cio_debug_crw_id;
+
+#define CIO_TRACE_EVENT(imp, txt) do {				\
+		debug_text_event(cio_debug_trace_id, imp, txt); \
+	} while (0)
+
+#define CIO_MSG_EVENT(imp, args...) do {				\
+		debug_sprintf_event(cio_debug_msg_id, imp , ##args);	\
+	} while (0)
+
+#define CIO_CRW_EVENT(imp, args...) do {				\
+		debug_sprintf_event(cio_debug_crw_id, imp , ##args);	\
+	} while (0)
+
+static inline void CIO_HEX_EVENT(int level, void *data, int length)
+{
+	debug_event(cio_debug_trace_id, level, data, length);
+}
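+
+/*
+ * Usage sketch (illustrative only): callers pass a severity level plus
+ * a printf-style format or a data area, e.g.
+ *
+ *	CIO_MSG_EVENT(2, "sch 0.%x.%04x gone\n", sch->schid.ssid,
+ *		      sch->schid.sch_no);
+ *	CIO_HEX_EVENT(4, &sch->schib, sizeof(struct schib));
+ */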
+
+#endif
diff --git a/drivers/s390/cio/cmf.c b/drivers/s390/cio/cmf.c
new file mode 100644
index 0000000..8af4948
--- /dev/null
+++ b/drivers/s390/cio/cmf.c
@@ -0,0 +1,1309 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Linux on zSeries Channel Measurement Facility support
+ *
+ * Copyright IBM Corp. 2000, 2006
+ *
+ * Authors: Arnd Bergmann <arndb@de.ibm.com>
+ *	    Cornelia Huck <cornelia.huck@de.ibm.com>
+ *
+ * original idea from Natarajan Krishnaswami <nkrishna@us.ibm.com>
+ */
+
+#define KMSG_COMPONENT "cio"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <linux/bootmem.h>
+#include <linux/device.h>
+#include <linux/init.h>
+#include <linux/list.h>
+#include <linux/export.h>
+#include <linux/moduleparam.h>
+#include <linux/slab.h>
+#include <linux/timex.h>	/* get_tod_clock() */
+
+#include <asm/ccwdev.h>
+#include <asm/cio.h>
+#include <asm/cmb.h>
+#include <asm/div64.h>
+
+#include "cio.h"
+#include "css.h"
+#include "device.h"
+#include "ioasm.h"
+#include "chsc.h"
+
+/*
+ * parameter to enable cmf during boot, possible uses are:
+ *  "s390cmf" -- enable cmf and allocate 2 MB of RAM so measuring can be
+ *               used on any subchannel
+ *  "s390cmf=<num>" -- enable cmf and allocate enough memory to measure
+ *                     <num> subchannels, where <num> is an integer
+ *                     between 1 and 65535, default is 1024
+ */
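+
+/*
+ * Worked example (illustrative): each basic measurement block is
+ * 32 bytes, so booting with "s390cmf" reserves 65536 * 32 bytes = 2 MB
+ * for all possible subchannels, while "s390cmf=2048" reserves
+ * 2048 * 32 bytes = 64 KB.
+ */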
+#define ARGSTRING "s390cmf"
+
+/* indices for READCMB */
+enum cmb_index {
+	avg_utilization = -1,
+ /* basic and extended format: */
+	cmb_ssch_rsch_count = 0,
+	cmb_sample_count,
+	cmb_device_connect_time,
+	cmb_function_pending_time,
+	cmb_device_disconnect_time,
+	cmb_control_unit_queuing_time,
+	cmb_device_active_only_time,
+ /* extended format only: */
+	cmb_device_busy_time,
+	cmb_initial_command_response_time,
+};
+
+/**
+ * enum cmb_format - types of supported measurement block formats
+ *
+ * @CMF_BASIC:      traditional channel measurement blocks supported
+ *		    by all machines that we run on
+ * @CMF_EXTENDED:   improved format that was introduced with the z990
+ *		    machine
+ * @CMF_AUTODETECT: default: use extended format when running on a machine
+ *		    supporting extended format, otherwise fall back to
+ *		    basic format
+ */
+enum cmb_format {
+	CMF_BASIC,
+	CMF_EXTENDED,
+	CMF_AUTODETECT = -1,
+};
+
+/*
+ * format - actual format for all measurement blocks
+ *
+ * The format module parameter can be set to a value of 0 (zero)
+ * or 1, indicating basic or extended format as described for
+ * enum cmb_format.
+ */
+static int format = CMF_AUTODETECT;
+module_param(format, bint, 0444);
+
+/**
+ * struct cmb_operations - functions to use depending on cmb_format
+ *
+ * Most of these functions operate on a struct ccw_device. There is only
+ * one instance of struct cmb_operations because the format of the measurement
+ * data is guaranteed to be the same for every ccw_device.
+ *
+ * @alloc:	allocate memory for a channel measurement block,
+ *		either with the help of a special pool or with kmalloc
+ * @free:	free memory allocated with @alloc
+ * @set:	enable or disable measurement
+ * @read:	read a measurement entry at an index
+ * @readall:	read a measurement block in a common format
+ * @reset:	clear the data in the associated measurement block and
+ *		reset its time stamp
+ */
+struct cmb_operations {
+	int  (*alloc)  (struct ccw_device *);
+	void (*free)   (struct ccw_device *);
+	int  (*set)    (struct ccw_device *, u32);
+	u64  (*read)   (struct ccw_device *, int);
+	int  (*readall)(struct ccw_device *, struct cmbdata *);
+	void (*reset)  (struct ccw_device *);
+/* private: */
+	struct attribute_group *attr_group;
+};
+static struct cmb_operations *cmbops;
+
+struct cmb_data {
+	void *hw_block;   /* Pointer to block updated by hardware */
+	void *last_block; /* Last changed block copied from hardware block */
+	int size;	  /* Size of hw_block and last_block */
+	unsigned long long last_update;  /* when last_block was updated */
+};
+
+/*
+ * Our user interface is designed in terms of nanoseconds,
+ * while the hardware measures total times in its own
+ * unit.
+ */
+static inline u64 time_to_nsec(u32 value)
+{
+	return ((u64)value) * 128000ull;
+}
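+
+/*
+ * Example (illustrative): the hardware counts in units of
+ * 128 microseconds, so a raw counter value of 100 corresponds to
+ * 100 * 128000 ns = 12.8 ms.
+ */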
+
+/*
+ * Users are usually interested in average times,
+ * not accumulated time.
+ * This also helps us with atomicity problems
+ * when reading single values.
+ */
+static inline u64 time_to_avg_nsec(u32 value, u32 count)
+{
+	u64 ret;
+
+	/* no samples yet, avoid division by 0 */
+	if (count == 0)
+		return 0;
+
+	/* value comes in units of 128 µsec */
+	ret = time_to_nsec(value);
+	do_div(ret, count);
+
+	return ret;
+}
+
+#define CMF_OFF 0
+#define CMF_ON	2
+
+/*
+ * Activate or deactivate the channel monitor. When area is NULL,
+ * the monitor is deactivated. The channel monitor needs to
+ * be active in order to measure subchannels, which also need
+ * to be enabled.
+ */
+static inline void cmf_activate(void *area, unsigned int onoff)
+{
+	register void * __gpr2 asm("2");
+	register long __gpr1 asm("1");
+
+	__gpr2 = area;
+	__gpr1 = onoff;
+	/* activate channel measurement */
+	asm("schm" : : "d" (__gpr2), "d" (__gpr1) );
+}
+
+static int set_schib(struct ccw_device *cdev, u32 mme, int mbfc,
+		     unsigned long address)
+{
+	struct subchannel *sch = to_subchannel(cdev->dev.parent);
+	int ret;
+
+	sch->config.mme = mme;
+	sch->config.mbfc = mbfc;
+	/* address can be either a block address or a block index */
+	if (mbfc)
+		sch->config.mba = address;
+	else
+		sch->config.mbi = address;
+
+	ret = cio_commit_config(sch);
+	if (!mme && ret == -ENODEV) {
+		/*
+		 * The task was to disable measurement block updates but
+		 * the subchannel is already gone. Report success.
+		 */
+		ret = 0;
+	}
+	return ret;
+}
+
+struct set_schib_struct {
+	u32 mme;
+	int mbfc;
+	unsigned long address;
+	wait_queue_head_t wait;
+	int ret;
+};
+
+#define CMF_PENDING 1
+#define SET_SCHIB_TIMEOUT (10 * HZ)
+
+static int set_schib_wait(struct ccw_device *cdev, u32 mme,
+			  int mbfc, unsigned long address)
+{
+	struct set_schib_struct set_data;
+	int ret = -ENODEV;
+
+	spin_lock_irq(cdev->ccwlock);
+	if (!cdev->private->cmb)
+		goto out;
+
+	ret = set_schib(cdev, mme, mbfc, address);
+	if (ret != -EBUSY)
+		goto out;
+
+	/* if the device is not online, don't even try again */
+	if (cdev->private->state != DEV_STATE_ONLINE)
+		goto out;
+
+	init_waitqueue_head(&set_data.wait);
+	set_data.mme = mme;
+	set_data.mbfc = mbfc;
+	set_data.address = address;
+	set_data.ret = CMF_PENDING;
+
+	cdev->private->state = DEV_STATE_CMFCHANGE;
+	cdev->private->cmb_wait = &set_data;
+	spin_unlock_irq(cdev->ccwlock);
+
+	ret = wait_event_interruptible_timeout(set_data.wait,
+					       set_data.ret != CMF_PENDING,
+					       SET_SCHIB_TIMEOUT);
+	spin_lock_irq(cdev->ccwlock);
+	if (ret <= 0) {
+		if (set_data.ret == CMF_PENDING) {
+			set_data.ret = (ret == 0) ? -ETIME : ret;
+			if (cdev->private->state == DEV_STATE_CMFCHANGE)
+				cdev->private->state = DEV_STATE_ONLINE;
+		}
+	}
+	cdev->private->cmb_wait = NULL;
+	ret = set_data.ret;
+out:
+	spin_unlock_irq(cdev->ccwlock);
+	return ret;
+}
+
+void retry_set_schib(struct ccw_device *cdev)
+{
+	struct set_schib_struct *set_data = cdev->private->cmb_wait;
+
+	if (!set_data)
+		return;
+
+	set_data->ret = set_schib(cdev, set_data->mme, set_data->mbfc,
+				  set_data->address);
+	wake_up(&set_data->wait);
+}
+
+static int cmf_copy_block(struct ccw_device *cdev)
+{
+	struct subchannel *sch = to_subchannel(cdev->dev.parent);
+	struct cmb_data *cmb_data;
+	void *hw_block;
+
+	if (cio_update_schib(sch))
+		return -ENODEV;
+
+	if (scsw_fctl(&sch->schib.scsw) & SCSW_FCTL_START_FUNC) {
+		/* Don't copy if a start function is in progress. */
+		if ((!(scsw_actl(&sch->schib.scsw) & SCSW_ACTL_SUSPENDED)) &&
+		    (scsw_actl(&sch->schib.scsw) &
+		     (SCSW_ACTL_DEVACT | SCSW_ACTL_SCHACT)) &&
+		    (!(scsw_stctl(&sch->schib.scsw) & SCSW_STCTL_SEC_STATUS)))
+			return -EBUSY;
+	}
+	cmb_data = cdev->private->cmb;
+	hw_block = cmb_data->hw_block;
+	memcpy(cmb_data->last_block, hw_block, cmb_data->size);
+	cmb_data->last_update = get_tod_clock();
+	return 0;
+}
+
+struct copy_block_struct {
+	wait_queue_head_t wait;
+	int ret;
+};
+
+static int cmf_cmb_copy_wait(struct ccw_device *cdev)
+{
+	struct copy_block_struct copy_block;
+	int ret = -ENODEV;
+
+	spin_lock_irq(cdev->ccwlock);
+	if (!cdev->private->cmb)
+		goto out;
+
+	ret = cmf_copy_block(cdev);
+	if (ret != -EBUSY)
+		goto out;
+
+	if (cdev->private->state != DEV_STATE_ONLINE)
+		goto out;
+
+	init_waitqueue_head(&copy_block.wait);
+	copy_block.ret = CMF_PENDING;
+
+	cdev->private->state = DEV_STATE_CMFUPDATE;
+	cdev->private->cmb_wait = &copy_block;
+	spin_unlock_irq(cdev->ccwlock);
+
+	ret = wait_event_interruptible(copy_block.wait,
+				       copy_block.ret != CMF_PENDING);
+	spin_lock_irq(cdev->ccwlock);
+	if (ret) {
+		if (copy_block.ret == CMF_PENDING) {
+			copy_block.ret = -ERESTARTSYS;
+			if (cdev->private->state == DEV_STATE_CMFUPDATE)
+				cdev->private->state = DEV_STATE_ONLINE;
+		}
+	}
+	cdev->private->cmb_wait = NULL;
+	ret = copy_block.ret;
+out:
+	spin_unlock_irq(cdev->ccwlock);
+	return ret;
+}
+
+void cmf_retry_copy_block(struct ccw_device *cdev)
+{
+	struct copy_block_struct *copy_block = cdev->private->cmb_wait;
+
+	if (!copy_block)
+		return;
+
+	copy_block->ret = cmf_copy_block(cdev);
+	wake_up(&copy_block->wait);
+}
+
+static void cmf_generic_reset(struct ccw_device *cdev)
+{
+	struct cmb_data *cmb_data;
+
+	spin_lock_irq(cdev->ccwlock);
+	cmb_data = cdev->private->cmb;
+	if (cmb_data) {
+		memset(cmb_data->last_block, 0, cmb_data->size);
+		/*
+		 * Need to reset hw block as well to make the hardware start
+		 * from 0 again.
+		 */
+		memset(cmb_data->hw_block, 0, cmb_data->size);
+		cmb_data->last_update = 0;
+	}
+	cdev->private->cmb_start_time = get_tod_clock();
+	spin_unlock_irq(cdev->ccwlock);
+}
+
+/**
+ * struct cmb_area - container for global cmb data
+ *
+ * @mem:	pointer to CMBs (only in basic measurement mode)
+ * @list:	contains a linked list of all subchannels
+ * @num_channels: number of channels to be measured
+ * @lock:	protect concurrent access to @mem and @list
+ */
+struct cmb_area {
+	struct cmb *mem;
+	struct list_head list;
+	int num_channels;
+	spinlock_t lock;
+};
+
+static struct cmb_area cmb_area = {
+	.lock = __SPIN_LOCK_UNLOCKED(cmb_area.lock),
+	.list = LIST_HEAD_INIT(cmb_area.list),
+	.num_channels  = 1024,
+};
+
+/* ****** old style CMB handling ********/
+
+/*
+ * Basic channel measurement blocks are allocated in one contiguous
+ * block of memory, which cannot be moved as long as any channel
+ * is active. Therefore, a maximum number of subchannels needs to
+ * be defined somewhere. This is a module parameter, defaulting to
+ * a reasonable value of 1024, or 32 KB of memory.
+ * Current kernels don't allow kmalloc with more than 128 KB, so the
+ * maximum is 4096.
+ */
+
+module_param_named(maxchannels, cmb_area.num_channels, uint, 0444);
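+
+/*
+ * Worked example (illustrative): with maxchannels=4096 the contiguous
+ * area occupies 4096 * sizeof(struct cmb) = 4096 * 32 bytes = 128 KB,
+ * the upper bound mentioned above.
+ */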
+
+/**
+ * struct cmb - basic channel measurement block
+ * @ssch_rsch_count: number of ssch and rsch
+ * @sample_count: number of samples
+ * @device_connect_time: time of device connect
+ * @function_pending_time: time of function pending
+ * @device_disconnect_time: time of device disconnect
+ * @control_unit_queuing_time: time of control unit queuing
+ * @device_active_only_time: time of device active only
+ * @reserved: unused in basic measurement mode
+ *
+ * The measurement block as used by the hardware. The fields are described
+ * further in z/Architecture Principles of Operation, chapter 17.
+ *
+ * The cmb area made up of these blocks must be a contiguous array and may
+ * not be reallocated or freed.
+ * Only one cmb area can be present in the system.
+ */
+struct cmb {
+	u16 ssch_rsch_count;
+	u16 sample_count;
+	u32 device_connect_time;
+	u32 function_pending_time;
+	u32 device_disconnect_time;
+	u32 control_unit_queuing_time;
+	u32 device_active_only_time;
+	u32 reserved[2];
+};
+
+/*
+ * Insert a single device into the cmb_area list.
+ * Called with cmb_area.lock held from alloc_cmb.
+ */
+static int alloc_cmb_single(struct ccw_device *cdev,
+			    struct cmb_data *cmb_data)
+{
+	struct cmb *cmb;
+	struct ccw_device_private *node;
+	int ret;
+
+	spin_lock_irq(cdev->ccwlock);
+	if (!list_empty(&cdev->private->cmb_list)) {
+		ret = -EBUSY;
+		goto out;
+	}
+
+	/*
+	 * Find first unused cmb in cmb_area.mem.
+	 * This is a little tricky: cmb_area.list
+	 * remains sorted by ->cmb->hw_block pointers.
+	 */
+	cmb = cmb_area.mem;
+	list_for_each_entry(node, &cmb_area.list, cmb_list) {
+		struct cmb_data *data;
+		data = node->cmb;
+		if ((struct cmb *)data->hw_block > cmb)
+			break;
+		cmb++;
+	}
+	if (cmb - cmb_area.mem >= cmb_area.num_channels) {
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	/* insert new cmb */
+	list_add_tail(&cdev->private->cmb_list, &node->cmb_list);
+	cmb_data->hw_block = cmb;
+	cdev->private->cmb = cmb_data;
+	ret = 0;
+out:
+	spin_unlock_irq(cdev->ccwlock);
+	return ret;
+}
+
+static int alloc_cmb(struct ccw_device *cdev)
+{
+	int ret;
+	struct cmb *mem;
+	ssize_t size;
+	struct cmb_data *cmb_data;
+
+	/* Allocate private cmb_data. */
+	cmb_data = kzalloc(sizeof(struct cmb_data), GFP_KERNEL);
+	if (!cmb_data)
+		return -ENOMEM;
+
+	cmb_data->last_block = kzalloc(sizeof(struct cmb), GFP_KERNEL);
+	if (!cmb_data->last_block) {
+		kfree(cmb_data);
+		return -ENOMEM;
+	}
+	cmb_data->size = sizeof(struct cmb);
+	spin_lock(&cmb_area.lock);
+
+	if (!cmb_area.mem) {
+		/* there is no user yet, so we need a new area */
+		size = sizeof(struct cmb) * cmb_area.num_channels;
+		WARN_ON(!list_empty(&cmb_area.list));
+
+		spin_unlock(&cmb_area.lock);
+		mem = (void *)__get_free_pages(GFP_KERNEL | GFP_DMA,
+				 get_order(size));
+		spin_lock(&cmb_area.lock);
+
+		if (cmb_area.mem) {
+			/* ok, another thread was faster */
+			free_pages((unsigned long)mem, get_order(size));
+		} else if (!mem) {
+			/* no luck */
+			ret = -ENOMEM;
+			goto out;
+		} else {
+			/* everything ok */
+			memset(mem, 0, size);
+			cmb_area.mem = mem;
+			cmf_activate(cmb_area.mem, CMF_ON);
+		}
+	}
+
+	/* do the actual allocation */
+	ret = alloc_cmb_single(cdev, cmb_data);
+out:
+	spin_unlock(&cmb_area.lock);
+	if (ret) {
+		kfree(cmb_data->last_block);
+		kfree(cmb_data);
+	}
+	return ret;
+}
+
+static void free_cmb(struct ccw_device *cdev)
+{
+	struct ccw_device_private *priv;
+	struct cmb_data *cmb_data;
+
+	spin_lock(&cmb_area.lock);
+	spin_lock_irq(cdev->ccwlock);
+
+	priv = cdev->private;
+	cmb_data = priv->cmb;
+	priv->cmb = NULL;
+	if (cmb_data)
+		kfree(cmb_data->last_block);
+	kfree(cmb_data);
+	list_del_init(&priv->cmb_list);
+
+	if (list_empty(&cmb_area.list)) {
+		ssize_t size;
+		size = sizeof(struct cmb) * cmb_area.num_channels;
+		cmf_activate(NULL, CMF_OFF);
+		free_pages((unsigned long)cmb_area.mem, get_order(size));
+		cmb_area.mem = NULL;
+	}
+	spin_unlock_irq(cdev->ccwlock);
+	spin_unlock(&cmb_area.lock);
+}
+
+static int set_cmb(struct ccw_device *cdev, u32 mme)
+{
+	u16 offset;
+	struct cmb_data *cmb_data;
+	unsigned long flags;
+
+	spin_lock_irqsave(cdev->ccwlock, flags);
+	if (!cdev->private->cmb) {
+		spin_unlock_irqrestore(cdev->ccwlock, flags);
+		return -EINVAL;
+	}
+	cmb_data = cdev->private->cmb;
+	offset = mme ? (struct cmb *)cmb_data->hw_block - cmb_area.mem : 0;
+	spin_unlock_irqrestore(cdev->ccwlock, flags);
+
+	return set_schib_wait(cdev, mme, 0, offset);
+}
+
+/* calculate utilization in 0.1 percent units */
+static u64 __cmb_utilization(u64 device_connect_time, u64 function_pending_time,
+			     u64 device_disconnect_time, u64 start_time)
+{
+	u64 utilization, elapsed_time;
+
+	utilization = time_to_nsec(device_connect_time +
+				   function_pending_time +
+				   device_disconnect_time);
+
+	elapsed_time = get_tod_clock() - start_time;
+	elapsed_time = tod_to_ns(elapsed_time);
+	elapsed_time /= 1000;
+
+	return elapsed_time ? (utilization / elapsed_time) : 0;
+}
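+
+/*
+ * Example (illustrative): the accumulated busy times are converted to
+ * nanoseconds while the elapsed time is scaled down to microseconds, so
+ * the quotient is the busy fraction multiplied by 1000. A device that
+ * was busy for 5% of the elapsed time thus yields 50, i.e. 5.0%.
+ */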
+
+static u64 read_cmb(struct ccw_device *cdev, int index)
+{
+	struct cmb_data *cmb_data;
+	unsigned long flags;
+	struct cmb *cmb;
+	u64 ret = 0;
+	u32 val;
+
+	spin_lock_irqsave(cdev->ccwlock, flags);
+	cmb_data = cdev->private->cmb;
+	if (!cmb_data)
+		goto out;
+
+	cmb = cmb_data->hw_block;
+	switch (index) {
+	case avg_utilization:
+		ret = __cmb_utilization(cmb->device_connect_time,
+					cmb->function_pending_time,
+					cmb->device_disconnect_time,
+					cdev->private->cmb_start_time);
+		goto out;
+	case cmb_ssch_rsch_count:
+		ret = cmb->ssch_rsch_count;
+		goto out;
+	case cmb_sample_count:
+		ret = cmb->sample_count;
+		goto out;
+	case cmb_device_connect_time:
+		val = cmb->device_connect_time;
+		break;
+	case cmb_function_pending_time:
+		val = cmb->function_pending_time;
+		break;
+	case cmb_device_disconnect_time:
+		val = cmb->device_disconnect_time;
+		break;
+	case cmb_control_unit_queuing_time:
+		val = cmb->control_unit_queuing_time;
+		break;
+	case cmb_device_active_only_time:
+		val = cmb->device_active_only_time;
+		break;
+	default:
+		goto out;
+	}
+	ret = time_to_avg_nsec(val, cmb->sample_count);
+out:
+	spin_unlock_irqrestore(cdev->ccwlock, flags);
+	return ret;
+}
+
+static int readall_cmb(struct ccw_device *cdev, struct cmbdata *data)
+{
+	struct cmb *cmb;
+	struct cmb_data *cmb_data;
+	u64 time;
+	unsigned long flags;
+	int ret;
+
+	ret = cmf_cmb_copy_wait(cdev);
+	if (ret < 0)
+		return ret;
+	spin_lock_irqsave(cdev->ccwlock, flags);
+	cmb_data = cdev->private->cmb;
+	if (!cmb_data) {
+		ret = -ENODEV;
+		goto out;
+	}
+	if (cmb_data->last_update == 0) {
+		ret = -EAGAIN;
+		goto out;
+	}
+	cmb = cmb_data->last_block;
+	time = cmb_data->last_update - cdev->private->cmb_start_time;
+
+	memset(data, 0, sizeof(struct cmbdata));
+
+	/* we only know values before device_busy_time */
+	data->size = offsetof(struct cmbdata, device_busy_time);
+
+	data->elapsed_time = tod_to_ns(time);
+
+	/* copy data to new structure */
+	data->ssch_rsch_count = cmb->ssch_rsch_count;
+	data->sample_count = cmb->sample_count;
+
+	/* time fields are converted to nanoseconds while copying */
+	data->device_connect_time = time_to_nsec(cmb->device_connect_time);
+	data->function_pending_time = time_to_nsec(cmb->function_pending_time);
+	data->device_disconnect_time =
+		time_to_nsec(cmb->device_disconnect_time);
+	data->control_unit_queuing_time
+		= time_to_nsec(cmb->control_unit_queuing_time);
+	data->device_active_only_time
+		= time_to_nsec(cmb->device_active_only_time);
+	ret = 0;
+out:
+	spin_unlock_irqrestore(cdev->ccwlock, flags);
+	return ret;
+}
+
+static void reset_cmb(struct ccw_device *cdev)
+{
+	cmf_generic_reset(cdev);
+}
+
+static int cmf_enabled(struct ccw_device *cdev)
+{
+	int enabled;
+
+	spin_lock_irq(cdev->ccwlock);
+	enabled = !!cdev->private->cmb;
+	spin_unlock_irq(cdev->ccwlock);
+
+	return enabled;
+}
+
+static struct attribute_group cmf_attr_group;
+
+static struct cmb_operations cmbops_basic = {
+	.alloc	= alloc_cmb,
+	.free	= free_cmb,
+	.set	= set_cmb,
+	.read	= read_cmb,
+	.readall    = readall_cmb,
+	.reset	    = reset_cmb,
+	.attr_group = &cmf_attr_group,
+};
+
+/* ******** extended cmb handling ********/
+
+/**
+ * struct cmbe - extended channel measurement block
+ * @ssch_rsch_count: number of ssch and rsch
+ * @sample_count: number of samples
+ * @device_connect_time: time of device connect
+ * @function_pending_time: time of function pending
+ * @device_disconnect_time: time of device disconnect
+ * @control_unit_queuing_time: time of control unit queuing
+ * @device_active_only_time: time of device active only
+ * @device_busy_time: time of device busy
+ * @initial_command_response_time: initial command response time
+ * @reserved: unused
+ *
+ * The measurement block as used by the hardware. May be in any 64 bit physical
+ * location.
+ * The fields are described further in z/Architecture Principles of Operation,
+ * third edition, chapter 17.
+ */
+struct cmbe {
+	u32 ssch_rsch_count;
+	u32 sample_count;
+	u32 device_connect_time;
+	u32 function_pending_time;
+	u32 device_disconnect_time;
+	u32 control_unit_queuing_time;
+	u32 device_active_only_time;
+	u32 device_busy_time;
+	u32 initial_command_response_time;
+	u32 reserved[7];
+} __packed __aligned(64);
+
+static struct kmem_cache *cmbe_cache;
+
+static int alloc_cmbe(struct ccw_device *cdev)
+{
+	struct cmb_data *cmb_data;
+	struct cmbe *cmbe;
+	int ret = -ENOMEM;
+
+	cmbe = kmem_cache_zalloc(cmbe_cache, GFP_KERNEL);
+	if (!cmbe)
+		return ret;
+
+	cmb_data = kzalloc(sizeof(*cmb_data), GFP_KERNEL);
+	if (!cmb_data)
+		goto out_free;
+
+	cmb_data->last_block = kzalloc(sizeof(struct cmbe), GFP_KERNEL);
+	if (!cmb_data->last_block)
+		goto out_free;
+
+	cmb_data->size = sizeof(*cmbe);
+	cmb_data->hw_block = cmbe;
+
+	spin_lock(&cmb_area.lock);
+	spin_lock_irq(cdev->ccwlock);
+	if (cdev->private->cmb)
+		goto out_unlock;
+
+	cdev->private->cmb = cmb_data;
+
+	/* activate global measurement if this is the first channel */
+	if (list_empty(&cmb_area.list))
+		cmf_activate(NULL, CMF_ON);
+	list_add_tail(&cdev->private->cmb_list, &cmb_area.list);
+
+	spin_unlock_irq(cdev->ccwlock);
+	spin_unlock(&cmb_area.lock);
+	return 0;
+
+out_unlock:
+	spin_unlock_irq(cdev->ccwlock);
+	spin_unlock(&cmb_area.lock);
+	ret = -EBUSY;
+out_free:
+	if (cmb_data)
+		kfree(cmb_data->last_block);
+	kfree(cmb_data);
+	kmem_cache_free(cmbe_cache, cmbe);
+
+	return ret;
+}
+
+static void free_cmbe(struct ccw_device *cdev)
+{
+	struct cmb_data *cmb_data;
+
+	spin_lock(&cmb_area.lock);
+	spin_lock_irq(cdev->ccwlock);
+	cmb_data = cdev->private->cmb;
+	cdev->private->cmb = NULL;
+	if (cmb_data) {
+		kfree(cmb_data->last_block);
+		kmem_cache_free(cmbe_cache, cmb_data->hw_block);
+	}
+	kfree(cmb_data);
+
+	/* deactivate global measurement if this is the last channel */
+	list_del_init(&cdev->private->cmb_list);
+	if (list_empty(&cmb_area.list))
+		cmf_activate(NULL, CMF_OFF);
+	spin_unlock_irq(cdev->ccwlock);
+	spin_unlock(&cmb_area.lock);
+}
+
+static int set_cmbe(struct ccw_device *cdev, u32 mme)
+{
+	unsigned long mba;
+	struct cmb_data *cmb_data;
+	unsigned long flags;
+
+	spin_lock_irqsave(cdev->ccwlock, flags);
+	if (!cdev->private->cmb) {
+		spin_unlock_irqrestore(cdev->ccwlock, flags);
+		return -EINVAL;
+	}
+	cmb_data = cdev->private->cmb;
+	mba = mme ? (unsigned long) cmb_data->hw_block : 0;
+	spin_unlock_irqrestore(cdev->ccwlock, flags);
+
+	return set_schib_wait(cdev, mme, 1, mba);
+}
+
+static u64 read_cmbe(struct ccw_device *cdev, int index)
+{
+	struct cmb_data *cmb_data;
+	unsigned long flags;
+	struct cmbe *cmb;
+	u64 ret = 0;
+	u32 val;
+
+	spin_lock_irqsave(cdev->ccwlock, flags);
+	cmb_data = cdev->private->cmb;
+	if (!cmb_data)
+		goto out;
+
+	cmb = cmb_data->hw_block;
+	switch (index) {
+	case avg_utilization:
+		ret = __cmb_utilization(cmb->device_connect_time,
+					cmb->function_pending_time,
+					cmb->device_disconnect_time,
+					cdev->private->cmb_start_time);
+		goto out;
+	case cmb_ssch_rsch_count:
+		ret = cmb->ssch_rsch_count;
+		goto out;
+	case cmb_sample_count:
+		ret = cmb->sample_count;
+		goto out;
+	case cmb_device_connect_time:
+		val = cmb->device_connect_time;
+		break;
+	case cmb_function_pending_time:
+		val = cmb->function_pending_time;
+		break;
+	case cmb_device_disconnect_time:
+		val = cmb->device_disconnect_time;
+		break;
+	case cmb_control_unit_queuing_time:
+		val = cmb->control_unit_queuing_time;
+		break;
+	case cmb_device_active_only_time:
+		val = cmb->device_active_only_time;
+		break;
+	case cmb_device_busy_time:
+		val = cmb->device_busy_time;
+		break;
+	case cmb_initial_command_response_time:
+		val = cmb->initial_command_response_time;
+		break;
+	default:
+		goto out;
+	}
+	ret = time_to_avg_nsec(val, cmb->sample_count);
+out:
+	spin_unlock_irqrestore(cdev->ccwlock, flags);
+	return ret;
+}
+
+static int readall_cmbe(struct ccw_device *cdev, struct cmbdata *data)
+{
+	struct cmbe *cmb;
+	struct cmb_data *cmb_data;
+	u64 time;
+	unsigned long flags;
+	int ret;
+
+	ret = cmf_cmb_copy_wait(cdev);
+	if (ret < 0)
+		return ret;
+	spin_lock_irqsave(cdev->ccwlock, flags);
+	cmb_data = cdev->private->cmb;
+	if (!cmb_data) {
+		ret = -ENODEV;
+		goto out;
+	}
+	if (cmb_data->last_update == 0) {
+		ret = -EAGAIN;
+		goto out;
+	}
+	time = cmb_data->last_update - cdev->private->cmb_start_time;
+
+	memset(data, 0, sizeof(struct cmbdata));
+
+	/* we only know values before device_busy_time */
+	data->size = offsetof(struct cmbdata, device_busy_time);
+
+	data->elapsed_time = tod_to_ns(time);
+
+	cmb = cmb_data->last_block;
+	/* copy data to new structure */
+	data->ssch_rsch_count = cmb->ssch_rsch_count;
+	data->sample_count = cmb->sample_count;
+
+	/* time fields are converted to nanoseconds while copying */
+	data->device_connect_time = time_to_nsec(cmb->device_connect_time);
+	data->function_pending_time = time_to_nsec(cmb->function_pending_time);
+	data->device_disconnect_time =
+		time_to_nsec(cmb->device_disconnect_time);
+	data->control_unit_queuing_time
+		= time_to_nsec(cmb->control_unit_queuing_time);
+	data->device_active_only_time
+		= time_to_nsec(cmb->device_active_only_time);
+	data->device_busy_time = time_to_nsec(cmb->device_busy_time);
+	data->initial_command_response_time
+		= time_to_nsec(cmb->initial_command_response_time);
+
+	ret = 0;
+out:
+	spin_unlock_irqrestore(cdev->ccwlock, flags);
+	return ret;
+}
+
+static void reset_cmbe(struct ccw_device *cdev)
+{
+	cmf_generic_reset(cdev);
+}
+
+static struct attribute_group cmf_attr_group_ext;
+
+static struct cmb_operations cmbops_extended = {
+	.alloc	    = alloc_cmbe,
+	.free	    = free_cmbe,
+	.set	    = set_cmbe,
+	.read	    = read_cmbe,
+	.readall    = readall_cmbe,
+	.reset	    = reset_cmbe,
+	.attr_group = &cmf_attr_group_ext,
+};
+
+static ssize_t cmb_show_attr(struct device *dev, char *buf, enum cmb_index idx)
+{
+	return sprintf(buf, "%lld\n",
+		(unsigned long long) cmf_read(to_ccwdev(dev), idx));
+}
+
+static ssize_t cmb_show_avg_sample_interval(struct device *dev,
+					    struct device_attribute *attr,
+					    char *buf)
+{
+	struct ccw_device *cdev = to_ccwdev(dev);
+	unsigned long count;
+	long interval;
+
+	count = cmf_read(cdev, cmb_sample_count);
+	spin_lock_irq(cdev->ccwlock);
+	if (count) {
+		interval = get_tod_clock() - cdev->private->cmb_start_time;
+		interval = tod_to_ns(interval);
+		interval /= count;
+	} else
+		interval = -1;
+	spin_unlock_irq(cdev->ccwlock);
+	return sprintf(buf, "%ld\n", interval);
+}
+
+static ssize_t cmb_show_avg_utilization(struct device *dev,
+					struct device_attribute *attr,
+					char *buf)
+{
+	unsigned long u = cmf_read(to_ccwdev(dev), avg_utilization);
+
+	return sprintf(buf, "%02lu.%01lu%%\n", u / 10, u % 10);
+}
+
+#define cmf_attr(name) \
+static ssize_t show_##name(struct device *dev, \
+			   struct device_attribute *attr, char *buf)	\
+{ return cmb_show_attr((dev), buf, cmb_##name); } \
+static DEVICE_ATTR(name, 0444, show_##name, NULL);
+
+#define cmf_attr_avg(name) \
+static ssize_t show_avg_##name(struct device *dev, \
+			       struct device_attribute *attr, char *buf) \
+{ return cmb_show_attr((dev), buf, cmb_##name); } \
+static DEVICE_ATTR(avg_##name, 0444, show_avg_##name, NULL);
+
+cmf_attr(ssch_rsch_count);
+cmf_attr(sample_count);
+cmf_attr_avg(device_connect_time);
+cmf_attr_avg(function_pending_time);
+cmf_attr_avg(device_disconnect_time);
+cmf_attr_avg(control_unit_queuing_time);
+cmf_attr_avg(device_active_only_time);
+cmf_attr_avg(device_busy_time);
+cmf_attr_avg(initial_command_response_time);
+
+static DEVICE_ATTR(avg_sample_interval, 0444, cmb_show_avg_sample_interval,
+		   NULL);
+static DEVICE_ATTR(avg_utilization, 0444, cmb_show_avg_utilization, NULL);
+
+static struct attribute *cmf_attributes[] = {
+	&dev_attr_avg_sample_interval.attr,
+	&dev_attr_avg_utilization.attr,
+	&dev_attr_ssch_rsch_count.attr,
+	&dev_attr_sample_count.attr,
+	&dev_attr_avg_device_connect_time.attr,
+	&dev_attr_avg_function_pending_time.attr,
+	&dev_attr_avg_device_disconnect_time.attr,
+	&dev_attr_avg_control_unit_queuing_time.attr,
+	&dev_attr_avg_device_active_only_time.attr,
+	NULL,
+};
+
+static struct attribute_group cmf_attr_group = {
+	.name  = "cmf",
+	.attrs = cmf_attributes,
+};
+
+static struct attribute *cmf_attributes_ext[] = {
+	&dev_attr_avg_sample_interval.attr,
+	&dev_attr_avg_utilization.attr,
+	&dev_attr_ssch_rsch_count.attr,
+	&dev_attr_sample_count.attr,
+	&dev_attr_avg_device_connect_time.attr,
+	&dev_attr_avg_function_pending_time.attr,
+	&dev_attr_avg_device_disconnect_time.attr,
+	&dev_attr_avg_control_unit_queuing_time.attr,
+	&dev_attr_avg_device_active_only_time.attr,
+	&dev_attr_avg_device_busy_time.attr,
+	&dev_attr_avg_initial_command_response_time.attr,
+	NULL,
+};
+
+static struct attribute_group cmf_attr_group_ext = {
+	.name  = "cmf",
+	.attrs = cmf_attributes_ext,
+};
+
+static ssize_t cmb_enable_show(struct device *dev,
+			       struct device_attribute *attr,
+			       char *buf)
+{
+	struct ccw_device *cdev = to_ccwdev(dev);
+
+	return sprintf(buf, "%d\n", cmf_enabled(cdev));
+}
+
+static ssize_t cmb_enable_store(struct device *dev,
+				struct device_attribute *attr, const char *buf,
+				size_t c)
+{
+	struct ccw_device *cdev = to_ccwdev(dev);
+	unsigned long val;
+	int ret;
+
+	ret = kstrtoul(buf, 16, &val);
+	if (ret)
+		return ret;
+
+	switch (val) {
+	case 0:
+		ret = disable_cmf(cdev);
+		break;
+	case 1:
+		ret = enable_cmf(cdev);
+		break;
+	default:
+		ret = -EINVAL;
+	}
+
+	return ret ? ret : c;
+}
+DEVICE_ATTR_RW(cmb_enable);
+
+int ccw_set_cmf(struct ccw_device *cdev, int enable)
+{
+	return cmbops->set(cdev, enable ? 2 : 0);
+}
+
+/**
+ * enable_cmf() - switch on the channel measurement for a specific device
+ *  @cdev:	The ccw device to be enabled
+ *
+ *  Enable channel measurements for @cdev. If this is called on a device
+ *  for which channel measurement is already enabled a reset of the
+ *  measurement data is triggered.
+ *  Returns: %0 for success or a negative error value.
+ *  Context:
+ *    non-atomic
+ */
+int enable_cmf(struct ccw_device *cdev)
+{
+	int ret = 0;
+
+	device_lock(&cdev->dev);
+	if (cmf_enabled(cdev)) {
+		cmbops->reset(cdev);
+		goto out_unlock;
+	}
+	get_device(&cdev->dev);
+	ret = cmbops->alloc(cdev);
+	if (ret)
+		goto out;
+	cmbops->reset(cdev);
+	ret = sysfs_create_group(&cdev->dev.kobj, cmbops->attr_group);
+	if (ret) {
+		cmbops->free(cdev);
+		goto out;
+	}
+	ret = cmbops->set(cdev, 2);
+	if (ret) {
+		sysfs_remove_group(&cdev->dev.kobj, cmbops->attr_group);
+		cmbops->free(cdev);
+	}
+out:
+	if (ret)
+		put_device(&cdev->dev);
+out_unlock:
+	device_unlock(&cdev->dev);
+	return ret;
+}
+
+/**
+ * __disable_cmf() - switch off the channel measurement for a specific device
+ *  @cdev:	The ccw device to be disabled
+ *
+ *  Returns: %0 for success or a negative error value.
+ *
+ *  Context:
+ *    non-atomic, device_lock() held.
+ */
+int __disable_cmf(struct ccw_device *cdev)
+{
+	int ret;
+
+	ret = cmbops->set(cdev, 0);
+	if (ret)
+		return ret;
+
+	sysfs_remove_group(&cdev->dev.kobj, cmbops->attr_group);
+	cmbops->free(cdev);
+	put_device(&cdev->dev);
+
+	return ret;
+}
+
+/**
+ * disable_cmf() - switch off the channel measurement for a specific device
+ *  @cdev:	The ccw device to be disabled
+ *
+ *  Returns: %0 for success or a negative error value.
+ *
+ *  Context:
+ *    non-atomic
+ */
+int disable_cmf(struct ccw_device *cdev)
+{
+	int ret;
+
+	device_lock(&cdev->dev);
+	ret = __disable_cmf(cdev);
+	device_unlock(&cdev->dev);
+
+	return ret;
+}
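+
+/*
+ * Usage sketch (illustrative, error handling elided): a driver wanting
+ * measurement data would typically do
+ *
+ *	if (!enable_cmf(cdev)) {
+ *		u64 util = cmf_read(cdev, avg_utilization);
+ *		...
+ *		disable_cmf(cdev);
+ *	}
+ */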
+
+/**
+ * cmf_read() - read one value from the current channel measurement block
+ * @cdev:	the channel to be read
+ * @index:	the index of the value to be read
+ *
+ * Returns: The value read or %0 if the value cannot be read.
+ *
+ *  Context:
+ *    any
+ */
+u64 cmf_read(struct ccw_device *cdev, int index)
+{
+	return cmbops->read(cdev, index);
+}
+
+/**
+ * cmf_readall() - read the current channel measurement block
+ * @cdev:	the channel to be read
+ * @data:	a pointer to a data block that will be filled
+ *
+ * Returns: %0 on success, a negative error value otherwise.
+ *
+ *  Context:
+ *    any
+ */
+int cmf_readall(struct ccw_device *cdev, struct cmbdata *data)
+{
+	return cmbops->readall(cdev, data);
+}
+
+/* Reenable cmf when a disconnected device becomes available again. */
+int cmf_reenable(struct ccw_device *cdev)
+{
+	cmbops->reset(cdev);
+	return cmbops->set(cdev, 2);
+}
+
+/**
+ * cmf_reactivate() - reactivate measurement block updates
+ *
+ * Use this during resume from hibernate.
+ */
+void cmf_reactivate(void)
+{
+	spin_lock(&cmb_area.lock);
+	if (!list_empty(&cmb_area.list))
+		cmf_activate(cmb_area.mem, CMF_ON);
+	spin_unlock(&cmb_area.lock);
+}
+
+static int __init init_cmbe(void)
+{
+	cmbe_cache = kmem_cache_create("cmbe_cache", sizeof(struct cmbe),
+				       __alignof__(struct cmbe), 0, NULL);
+
+	return cmbe_cache ? 0 : -ENOMEM;
+}
+
+static int __init init_cmf(void)
+{
+	char *format_string;
+	char *detect_string;
+	int ret;
+
+	/*
+	 * If the user did not give a parameter, see if we are running on a
+	 * machine supporting extended measurement blocks, otherwise fall back
+	 * to basic mode.
+	 */
+	if (format == CMF_AUTODETECT) {
+		if (!css_general_characteristics.ext_mb) {
+			format = CMF_BASIC;
+		} else {
+			format = CMF_EXTENDED;
+		}
+		detect_string = "autodetected";
+	} else {
+		detect_string = "parameter";
+	}
+
+	switch (format) {
+	case CMF_BASIC:
+		format_string = "basic";
+		cmbops = &cmbops_basic;
+		break;
+	case CMF_EXTENDED:
+		format_string = "extended";
+		cmbops = &cmbops_extended;
+
+		ret = init_cmbe();
+		if (ret)
+			return ret;
+		break;
+	default:
+		return -EINVAL;
+	}
+	pr_info("Channel measurement facility initialized using format "
+		"%s (mode %s)\n", format_string, detect_string);
+	return 0;
+}
+device_initcall(init_cmf);
+
+EXPORT_SYMBOL_GPL(enable_cmf);
+EXPORT_SYMBOL_GPL(disable_cmf);
+EXPORT_SYMBOL_GPL(cmf_read);
+EXPORT_SYMBOL_GPL(cmf_readall);
diff --git a/drivers/s390/cio/crw.c b/drivers/s390/cio/crw.c
new file mode 100644
index 0000000..fc285ca
--- /dev/null
+++ b/drivers/s390/cio/crw.c
@@ -0,0 +1,163 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ *   Channel report handling code
+ *
+ *    Copyright IBM Corp. 2000, 2009
+ *    Author(s): Ingo Adlung <adlung@de.ibm.com>,
+ *		 Martin Schwidefsky <schwidefsky@de.ibm.com>,
+ *		 Cornelia Huck <cornelia.huck@de.ibm.com>,
+ *		 Heiko Carstens <heiko.carstens@de.ibm.com>,
+ */
+
+#include <linux/mutex.h>
+#include <linux/kthread.h>
+#include <linux/init.h>
+#include <linux/wait.h>
+#include <asm/crw.h>
+#include <asm/ctl_reg.h>
+#include "ioasm.h"
+
+static DEFINE_MUTEX(crw_handler_mutex);
+static crw_handler_t crw_handlers[NR_RSCS];
+static atomic_t crw_nr_req = ATOMIC_INIT(0);
+static DECLARE_WAIT_QUEUE_HEAD(crw_handler_wait_q);
+
+/**
+ * crw_register_handler() - register a channel report word handler
+ * @rsc: reporting source code to handle
+ * @handler: handler to be registered
+ *
+ * Returns %0 on success and a negative error value otherwise.
+ */
+int crw_register_handler(int rsc, crw_handler_t handler)
+{
+	int rc = 0;
+
+	if ((rsc < 0) || (rsc >= NR_RSCS))
+		return -EINVAL;
+	mutex_lock(&crw_handler_mutex);
+	if (crw_handlers[rsc])
+		rc = -EBUSY;
+	else
+		crw_handlers[rsc] = handler;
+	mutex_unlock(&crw_handler_mutex);
+	return rc;
+}
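+
+/*
+ * Usage sketch (illustrative; my_crw_handler is hypothetical and
+ * CRW_RSC_CPATH is assumed to be defined in asm/crw.h): a subsystem
+ * interested in channel-path reports might do
+ *
+ *	static void my_crw_handler(struct crw *crw0, struct crw *crw1,
+ *				   int overflow)
+ *	{
+ *		if (overflow)
+ *			return;
+ *		...
+ *	}
+ *
+ *	crw_register_handler(CRW_RSC_CPATH, my_crw_handler);
+ */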
+
+/**
+ * crw_unregister_handler() - unregister a channel report word handler
+ * @rsc: reporting source code to handle
+ */
+void crw_unregister_handler(int rsc)
+{
+	if ((rsc < 0) || (rsc >= NR_RSCS))
+		return;
+	mutex_lock(&crw_handler_mutex);
+	crw_handlers[rsc] = NULL;
+	mutex_unlock(&crw_handler_mutex);
+}
+
+/*
+ * Retrieve CRWs and call function to handle event.
+ */
+static int crw_collect_info(void *unused)
+{
+	struct crw crw[2];
+	int ccode, signal;
+	unsigned int chain;
+
+repeat:
+	signal = wait_event_interruptible(crw_handler_wait_q,
+					  atomic_read(&crw_nr_req) > 0);
+	if (unlikely(signal))
+		atomic_inc(&crw_nr_req);
+	chain = 0;
+	while (1) {
+		crw_handler_t handler;
+
+		if (unlikely(chain > 1)) {
+			struct crw tmp_crw;
+
+			printk(KERN_WARNING "%s: Code does not support more "
+			       "than two chained crws; please report to "
+			       "linux390@de.ibm.com!\n", __func__);
+			ccode = stcrw(&tmp_crw);
+			printk(KERN_WARNING "%s: crw reports slct=%d, oflw=%d, "
+			       "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n",
+			       __func__, tmp_crw.slct, tmp_crw.oflw,
+			       tmp_crw.chn, tmp_crw.rsc, tmp_crw.anc,
+			       tmp_crw.erc, tmp_crw.rsid);
+			printk(KERN_WARNING "%s: This was crw number %x in the "
+			       "chain\n", __func__, chain);
+			if (ccode != 0)
+				break;
+			chain = tmp_crw.chn ? chain + 1 : 0;
+			continue;
+		}
+		ccode = stcrw(&crw[chain]);
+		if (ccode != 0)
+			break;
+		printk(KERN_DEBUG "crw_info : CRW reports slct=%d, oflw=%d, "
+		       "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n",
+		       crw[chain].slct, crw[chain].oflw, crw[chain].chn,
+		       crw[chain].rsc, crw[chain].anc, crw[chain].erc,
+		       crw[chain].rsid);
+		/* Check for overflows. */
+		if (crw[chain].oflw) {
+			int i;
+
+			pr_debug("%s: crw overflow detected!\n", __func__);
+			mutex_lock(&crw_handler_mutex);
+			for (i = 0; i < NR_RSCS; i++) {
+				if (crw_handlers[i])
+					crw_handlers[i](NULL, NULL, 1);
+			}
+			mutex_unlock(&crw_handler_mutex);
+			chain = 0;
+			continue;
+		}
+		if (crw[0].chn && !chain) {
+			chain++;
+			continue;
+		}
+		mutex_lock(&crw_handler_mutex);
+		handler = crw_handlers[crw[chain].rsc];
+		if (handler)
+			handler(&crw[0], chain ? &crw[1] : NULL, 0);
+		mutex_unlock(&crw_handler_mutex);
+		/* chain is always 0 or 1 here. */
+		chain = crw[chain].chn ? chain + 1 : 0;
+	}
+	if (atomic_dec_and_test(&crw_nr_req))
+		wake_up(&crw_handler_wait_q);
+	goto repeat;
+	return 0;
+}
+
+void crw_handle_channel_report(void)
+{
+	atomic_inc(&crw_nr_req);
+	wake_up(&crw_handler_wait_q);
+}
+
+void crw_wait_for_channel_report(void)
+{
+	crw_handle_channel_report();
+	wait_event(crw_handler_wait_q, atomic_read(&crw_nr_req) == 0);
+}
+
+/*
+ * Machine checks for the channel subsystem must be enabled
+ * after the channel subsystem is initialized.
+ */
+static int __init crw_machine_check_init(void)
+{
+	struct task_struct *task;
+
+	task = kthread_run(crw_collect_info, NULL, "kmcheck");
+	if (IS_ERR(task))
+		return PTR_ERR(task);
+	ctl_set_bit(14, 28);	/* enable channel report MCH */
+	return 0;
+}
+device_initcall(crw_machine_check_init);
diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c
new file mode 100644
index 0000000..aea5029
--- /dev/null
+++ b/drivers/s390/cio/css.c
@@ -0,0 +1,1380 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * driver for channel subsystem
+ *
+ * Copyright IBM Corp. 2002, 2010
+ *
+ * Author(s): Arnd Bergmann (arndb@de.ibm.com)
+ *	      Cornelia Huck (cornelia.huck@de.ibm.com)
+ */
+
+#define KMSG_COMPONENT "cio"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <linux/export.h>
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/list.h>
+#include <linux/reboot.h>
+#include <linux/suspend.h>
+#include <linux/proc_fs.h>
+#include <asm/isc.h>
+#include <asm/crw.h>
+
+#include "css.h"
+#include "cio.h"
+#include "blacklist.h"
+#include "cio_debug.h"
+#include "ioasm.h"
+#include "chsc.h"
+#include "device.h"
+#include "idset.h"
+#include "chp.h"
+
+int css_init_done = 0;
+int max_ssid;
+
+#define MAX_CSS_IDX 0
+struct channel_subsystem *channel_subsystems[MAX_CSS_IDX + 1];
+static struct bus_type css_bus_type;
+
+int
+for_each_subchannel(int(*fn)(struct subchannel_id, void *), void *data)
+{
+	struct subchannel_id schid;
+	int ret;
+
+	init_subchannel_id(&schid);
+	do {
+		do {
+			ret = fn(schid, data);
+			if (ret)
+				break;
+		} while (schid.sch_no++ < __MAX_SUBCHANNEL);
+		schid.sch_no = 0;
+	} while (schid.ssid++ < max_ssid);
+	return ret;
+}
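+
+/*
+ * Note (illustrative): a callback returning a non-zero value terminates
+ * the iteration; cio_test_for_console() in cio.c uses this by returning
+ * 1 once the console subchannel has been found.
+ */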
+
+struct cb_data {
+	void *data;
+	struct idset *set;
+	int (*fn_known_sch)(struct subchannel *, void *);
+	int (*fn_unknown_sch)(struct subchannel_id, void *);
+};
+
+static int call_fn_known_sch(struct device *dev, void *data)
+{
+	struct subchannel *sch = to_subchannel(dev);
+	struct cb_data *cb = data;
+	int rc = 0;
+
+	if (cb->set)
+		idset_sch_del(cb->set, sch->schid);
+	if (cb->fn_known_sch)
+		rc = cb->fn_known_sch(sch, cb->data);
+	return rc;
+}
+
+static int call_fn_unknown_sch(struct subchannel_id schid, void *data)
+{
+	struct cb_data *cb = data;
+	int rc = 0;
+
+	if (idset_sch_contains(cb->set, schid))
+		rc = cb->fn_unknown_sch(schid, cb->data);
+	return rc;
+}
+
+static int call_fn_all_sch(struct subchannel_id schid, void *data)
+{
+	struct cb_data *cb = data;
+	struct subchannel *sch;
+	int rc = 0;
+
+	sch = get_subchannel_by_schid(schid);
+	if (sch) {
+		if (cb->fn_known_sch)
+			rc = cb->fn_known_sch(sch, cb->data);
+		put_device(&sch->dev);
+	} else {
+		if (cb->fn_unknown_sch)
+			rc = cb->fn_unknown_sch(schid, cb->data);
+	}
+
+	return rc;
+}
+
+int for_each_subchannel_staged(int (*fn_known)(struct subchannel *, void *),
+			       int (*fn_unknown)(struct subchannel_id,
+			       void *), void *data)
+{
+	struct cb_data cb;
+	int rc;
+
+	cb.data = data;
+	cb.fn_known_sch = fn_known;
+	cb.fn_unknown_sch = fn_unknown;
+
+	if (fn_known && !fn_unknown) {
+		/* Skip idset allocation in case of known-only loop. */
+		cb.set = NULL;
+		return bus_for_each_dev(&css_bus_type, NULL, &cb,
+					call_fn_known_sch);
+	}
+
+	cb.set = idset_sch_new();
+	if (!cb.set)
+		/* fall back to brute force scanning in case of oom */
+		return for_each_subchannel(call_fn_all_sch, &cb);
+
+	idset_fill(cb.set);
+
+	/* Process registered subchannels. */
+	rc = bus_for_each_dev(&css_bus_type, NULL, &cb, call_fn_known_sch);
+	if (rc)
+		goto out;
+	/* Process unregistered subchannels. */
+	if (fn_unknown)
+		rc = for_each_subchannel(call_fn_unknown_sch, &cb);
+out:
+	idset_free(cb.set);
+
+	return rc;
+}
+
+static void css_sch_todo(struct work_struct *work);
+
+static int css_sch_create_locks(struct subchannel *sch)
+{
+	sch->lock = kmalloc(sizeof(*sch->lock), GFP_KERNEL);
+	if (!sch->lock)
+		return -ENOMEM;
+
+	spin_lock_init(sch->lock);
+	mutex_init(&sch->reg_mutex);
+
+	return 0;
+}
+
+static void css_subchannel_release(struct device *dev)
+{
+	struct subchannel *sch = to_subchannel(dev);
+
+	sch->config.intparm = 0;
+	cio_commit_config(sch);
+	kfree(sch->lock);
+	kfree(sch);
+}
+
+static int css_validate_subchannel(struct subchannel_id schid,
+				   struct schib *schib)
+{
+	int err;
+
+	switch (schib->pmcw.st) {
+	case SUBCHANNEL_TYPE_IO:
+	case SUBCHANNEL_TYPE_MSG:
+		if (!css_sch_is_valid(schib))
+			err = -ENODEV;
+		else if (is_blacklisted(schid.ssid, schib->pmcw.dev)) {
+			CIO_MSG_EVENT(6, "Blacklisted device detected "
+				      "at devno %04X, subchannel set %x\n",
+				      schib->pmcw.dev, schid.ssid);
+			err = -ENODEV;
+		} else
+			err = 0;
+		break;
+	default:
+		err = 0;
+	}
+	if (err)
+		goto out;
+
+	CIO_MSG_EVENT(4, "Subchannel 0.%x.%04x reports subchannel type %04X\n",
+		      schid.ssid, schid.sch_no, schib->pmcw.st);
+out:
+	return err;
+}
+
+struct subchannel *css_alloc_subchannel(struct subchannel_id schid,
+					struct schib *schib)
+{
+	struct subchannel *sch;
+	int ret;
+
+	ret = css_validate_subchannel(schid, schib);
+	if (ret < 0)
+		return ERR_PTR(ret);
+
+	sch = kzalloc(sizeof(*sch), GFP_KERNEL | GFP_DMA);
+	if (!sch)
+		return ERR_PTR(-ENOMEM);
+
+	sch->schid = schid;
+	sch->schib = *schib;
+	sch->st = schib->pmcw.st;
+
+	ret = css_sch_create_locks(sch);
+	if (ret)
+		goto err;
+
+	INIT_WORK(&sch->todo_work, css_sch_todo);
+	sch->dev.release = &css_subchannel_release;
+	device_initialize(&sch->dev);
+	return sch;
+
+err:
+	kfree(sch);
+	return ERR_PTR(ret);
+}
+
+static int css_sch_device_register(struct subchannel *sch)
+{
+	int ret;
+
+	mutex_lock(&sch->reg_mutex);
+	dev_set_name(&sch->dev, "0.%x.%04x", sch->schid.ssid,
+		     sch->schid.sch_no);
+	ret = device_add(&sch->dev);
+	mutex_unlock(&sch->reg_mutex);
+	return ret;
+}
+
+/**
+ * css_sch_device_unregister - unregister a subchannel
+ * @sch: subchannel to be unregistered
+ */
+void css_sch_device_unregister(struct subchannel *sch)
+{
+	mutex_lock(&sch->reg_mutex);
+	if (device_is_registered(&sch->dev))
+		device_unregister(&sch->dev);
+	mutex_unlock(&sch->reg_mutex);
+}
+EXPORT_SYMBOL_GPL(css_sch_device_unregister);
+
+static void ssd_from_pmcw(struct chsc_ssd_info *ssd, struct pmcw *pmcw)
+{
+	int i;
+	int mask;
+
+	memset(ssd, 0, sizeof(struct chsc_ssd_info));
+	ssd->path_mask = pmcw->pim;
+	for (i = 0; i < 8; i++) {
+		mask = 0x80 >> i;
+		if (pmcw->pim & mask) {
+			chp_id_init(&ssd->chpid[i]);
+			ssd->chpid[i].id = pmcw->chpid[i];
+		}
+	}
+}
+
+static void ssd_register_chpids(struct chsc_ssd_info *ssd)
+{
+	int i;
+	int mask;
+
+	for (i = 0; i < 8; i++) {
+		mask = 0x80 >> i;
+		if (ssd->path_mask & mask)
+			chp_new(ssd->chpid[i]);
+	}
+}
+
+void css_update_ssd_info(struct subchannel *sch)
+{
+	int ret;
+
+	ret = chsc_get_ssd_info(sch->schid, &sch->ssd_info);
+	if (ret)
+		ssd_from_pmcw(&sch->ssd_info, &sch->schib.pmcw);
+
+	ssd_register_chpids(&sch->ssd_info);
+}
+
+static ssize_t type_show(struct device *dev, struct device_attribute *attr,
+			 char *buf)
+{
+	struct subchannel *sch = to_subchannel(dev);
+
+	return sprintf(buf, "%01x\n", sch->st);
+}
+
+static DEVICE_ATTR_RO(type);
+
+static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
+			     char *buf)
+{
+	struct subchannel *sch = to_subchannel(dev);
+
+	return sprintf(buf, "css:t%01X\n", sch->st);
+}
+
+static DEVICE_ATTR_RO(modalias);
+
+static struct attribute *subch_attrs[] = {
+	&dev_attr_type.attr,
+	&dev_attr_modalias.attr,
+	NULL,
+};
+
+static struct attribute_group subch_attr_group = {
+	.attrs = subch_attrs,
+};
+
+static const struct attribute_group *default_subch_attr_groups[] = {
+	&subch_attr_group,
+	NULL,
+};
+
+static ssize_t chpids_show(struct device *dev,
+			   struct device_attribute *attr,
+			   char *buf)
+{
+	struct subchannel *sch = to_subchannel(dev);
+	struct chsc_ssd_info *ssd = &sch->ssd_info;
+	ssize_t ret = 0;
+	int mask;
+	int chp;
+
+	for (chp = 0; chp < 8; chp++) {
+		mask = 0x80 >> chp;
+		if (ssd->path_mask & mask)
+			ret += sprintf(buf + ret, "%02x ", ssd->chpid[chp].id);
+		else
+			ret += sprintf(buf + ret, "00 ");
+	}
+	ret += sprintf(buf + ret, "\n");
+	return ret;
+}
+static DEVICE_ATTR_RO(chpids);
+
+static ssize_t pimpampom_show(struct device *dev,
+			      struct device_attribute *attr,
+			      char *buf)
+{
+	struct subchannel *sch = to_subchannel(dev);
+	struct pmcw *pmcw = &sch->schib.pmcw;
+
+	return sprintf(buf, "%02x %02x %02x\n",
+		       pmcw->pim, pmcw->pam, pmcw->pom);
+}
+static DEVICE_ATTR_RO(pimpampom);
+
+static struct attribute *io_subchannel_type_attrs[] = {
+	&dev_attr_chpids.attr,
+	&dev_attr_pimpampom.attr,
+	NULL,
+};
+ATTRIBUTE_GROUPS(io_subchannel_type);
+
+static const struct device_type io_subchannel_type = {
+	.groups = io_subchannel_type_groups,
+};
+
+int css_register_subchannel(struct subchannel *sch)
+{
+	int ret;
+
+	/* Initialize the subchannel structure */
+	sch->dev.parent = &channel_subsystems[0]->device;
+	sch->dev.bus = &css_bus_type;
+	sch->dev.groups = default_subch_attr_groups;
+
+	if (sch->st == SUBCHANNEL_TYPE_IO)
+		sch->dev.type = &io_subchannel_type;
+
+	/*
+	 * We don't want to generate uevents for I/O subchannels that don't
+	 * have a working ccw device behind them since they will be
+	 * unregistered before they can be used anyway, so we delay the add
+	 * uevent until after device recognition has succeeded.
+	 * Note that we suppress the uevent for all subchannel types;
+	 * the subchannel driver can decide itself when it wants to inform
+	 * userspace of its existence.
+	 */
+	dev_set_uevent_suppress(&sch->dev, 1);
+	css_update_ssd_info(sch);
+	/* make it known to the system */
+	ret = css_sch_device_register(sch);
+	if (ret) {
+		CIO_MSG_EVENT(0, "Could not register sch 0.%x.%04x: %d\n",
+			      sch->schid.ssid, sch->schid.sch_no, ret);
+		return ret;
+	}
+	if (!sch->driver) {
+		/*
+		 * No driver matched. Generate the uevent now so that
+		 * a fitting driver module may be loaded based on the
+		 * modalias.
+		 */
+		dev_set_uevent_suppress(&sch->dev, 0);
+		kobject_uevent(&sch->dev.kobj, KOBJ_ADD);
+	}
+	return ret;
+}
+
+static int css_probe_device(struct subchannel_id schid, struct schib *schib)
+{
+	struct subchannel *sch;
+	int ret;
+
+	sch = css_alloc_subchannel(schid, schib);
+	if (IS_ERR(sch))
+		return PTR_ERR(sch);
+
+	ret = css_register_subchannel(sch);
+	if (ret)
+		put_device(&sch->dev);
+
+	return ret;
+}
+
+static int
+check_subchannel(struct device *dev, void *data)
+{
+	struct subchannel *sch;
+	struct subchannel_id *schid = data;
+
+	sch = to_subchannel(dev);
+	return schid_equal(&sch->schid, schid);
+}
+
+struct subchannel *
+get_subchannel_by_schid(struct subchannel_id schid)
+{
+	struct device *dev;
+
+	dev = bus_find_device(&css_bus_type, NULL,
+			      &schid, check_subchannel);
+
+	return dev ? to_subchannel(dev) : NULL;
+}
+
+/**
+ * css_sch_is_valid() - check if a subchannel is valid
+ * @schib: subchannel information block for the subchannel
+ */
+int css_sch_is_valid(struct schib *schib)
+{
+	if ((schib->pmcw.st == SUBCHANNEL_TYPE_IO) && !schib->pmcw.dnv)
+		return 0;
+	if ((schib->pmcw.st == SUBCHANNEL_TYPE_MSG) && !schib->pmcw.w)
+		return 0;
+	return 1;
+}
+EXPORT_SYMBOL_GPL(css_sch_is_valid);
+
+static int css_evaluate_new_subchannel(struct subchannel_id schid, int slow)
+{
+	struct schib schib;
+	int ccode;
+
+	if (!slow) {
+		/* Will be done on the slow path. */
+		return -EAGAIN;
+	}
+	/*
+	 * The first subchannel that is not-operational (ccode==3)
+	 * indicates that there aren't any more devices available.
+	 * If stsch gets an exception, it means the current subchannel set
+	 * is not valid.
+	 */
+	ccode = stsch(schid, &schib);
+	if (ccode)
+		return (ccode == 3) ? -ENXIO : ccode;
+
+	return css_probe_device(schid, &schib);
+}
+
+static int css_evaluate_known_subchannel(struct subchannel *sch, int slow)
+{
+	int ret = 0;
+
+	if (sch->driver) {
+		if (sch->driver->sch_event)
+			ret = sch->driver->sch_event(sch, slow);
+		else
+			dev_dbg(&sch->dev,
+				"Got subchannel machine check but "
+				"no sch_event handler provided.\n");
+	}
+	if (ret != 0 && ret != -EAGAIN) {
+		CIO_MSG_EVENT(2, "eval: sch 0.%x.%04x, rc=%d\n",
+			      sch->schid.ssid, sch->schid.sch_no, ret);
+	}
+	return ret;
+}
+
+static void css_evaluate_subchannel(struct subchannel_id schid, int slow)
+{
+	struct subchannel *sch;
+	int ret;
+
+	sch = get_subchannel_by_schid(schid);
+	if (sch) {
+		ret = css_evaluate_known_subchannel(sch, slow);
+		put_device(&sch->dev);
+	} else
+		ret = css_evaluate_new_subchannel(schid, slow);
+	if (ret == -EAGAIN)
+		css_schedule_eval(schid);
+}
+
+/**
+ * css_sched_sch_todo - schedule a subchannel operation
+ * @sch: subchannel
+ * @todo: todo value identifying the operation to be performed
+ *
+ * Schedule the operation identified by @todo to be performed on the slow path
+ * workqueue. Do nothing if another operation with higher priority is already
+ * scheduled. Needs to be called with subchannel lock held.
+ */
+void css_sched_sch_todo(struct subchannel *sch, enum sch_todo todo)
+{
+	CIO_MSG_EVENT(4, "sch_todo: sched sch=0.%x.%04x todo=%d\n",
+		      sch->schid.ssid, sch->schid.sch_no, todo);
+	if (sch->todo >= todo)
+		return;
+	/* Get workqueue ref. */
+	if (!get_device(&sch->dev))
+		return;
+	sch->todo = todo;
+	if (!queue_work(cio_work_q, &sch->todo_work)) {
+		/* Already queued, release workqueue ref. */
+		put_device(&sch->dev);
+	}
+}
+EXPORT_SYMBOL_GPL(css_sched_sch_todo);
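+
+/*
+ * Typical use, with the subchannel lock held as required (see
+ * io_subchannel_probe() in device.c for a real caller):
+ *
+ *	spin_lock_irq(sch->lock);
+ *	css_sched_sch_todo(sch, SCH_TODO_UNREG);
+ *	spin_unlock_irq(sch->lock);
+ */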
+
+static void css_sch_todo(struct work_struct *work)
+{
+	struct subchannel *sch;
+	enum sch_todo todo;
+	int ret;
+
+	sch = container_of(work, struct subchannel, todo_work);
+	/* Find out todo. */
+	spin_lock_irq(sch->lock);
+	todo = sch->todo;
+	CIO_MSG_EVENT(4, "sch_todo: sch=0.%x.%04x, todo=%d\n", sch->schid.ssid,
+		      sch->schid.sch_no, todo);
+	sch->todo = SCH_TODO_NOTHING;
+	spin_unlock_irq(sch->lock);
+	/* Perform todo. */
+	switch (todo) {
+	case SCH_TODO_NOTHING:
+		break;
+	case SCH_TODO_EVAL:
+		ret = css_evaluate_known_subchannel(sch, 1);
+		if (ret == -EAGAIN) {
+			spin_lock_irq(sch->lock);
+			css_sched_sch_todo(sch, todo);
+			spin_unlock_irq(sch->lock);
+		}
+		break;
+	case SCH_TODO_UNREG:
+		css_sch_device_unregister(sch);
+		break;
+	}
+	/* Release workqueue ref. */
+	put_device(&sch->dev);
+}
+
+static struct idset *slow_subchannel_set;
+static spinlock_t slow_subchannel_lock;
+static wait_queue_head_t css_eval_wq;
+static atomic_t css_eval_scheduled;
+
+static int __init slow_subchannel_init(void)
+{
+	spin_lock_init(&slow_subchannel_lock);
+	atomic_set(&css_eval_scheduled, 0);
+	init_waitqueue_head(&css_eval_wq);
+	slow_subchannel_set = idset_sch_new();
+	if (!slow_subchannel_set) {
+		CIO_MSG_EVENT(0, "could not allocate slow subchannel set\n");
+		return -ENOMEM;
+	}
+	return 0;
+}
+
+static int slow_eval_known_fn(struct subchannel *sch, void *data)
+{
+	int eval;
+	int rc;
+
+	spin_lock_irq(&slow_subchannel_lock);
+	eval = idset_sch_contains(slow_subchannel_set, sch->schid);
+	idset_sch_del(slow_subchannel_set, sch->schid);
+	spin_unlock_irq(&slow_subchannel_lock);
+	if (eval) {
+		rc = css_evaluate_known_subchannel(sch, 1);
+		if (rc == -EAGAIN)
+			css_schedule_eval(sch->schid);
+	}
+	return 0;
+}
+
+static int slow_eval_unknown_fn(struct subchannel_id schid, void *data)
+{
+	int eval;
+	int rc = 0;
+
+	spin_lock_irq(&slow_subchannel_lock);
+	eval = idset_sch_contains(slow_subchannel_set, schid);
+	idset_sch_del(slow_subchannel_set, schid);
+	spin_unlock_irq(&slow_subchannel_lock);
+	if (eval) {
+		rc = css_evaluate_new_subchannel(schid, 1);
+		switch (rc) {
+		case -EAGAIN:
+			css_schedule_eval(schid);
+			rc = 0;
+			break;
+		case -ENXIO:
+		case -ENOMEM:
+		case -EIO:
+			/* These should abort looping */
+			spin_lock_irq(&slow_subchannel_lock);
+			idset_sch_del_subseq(slow_subchannel_set, schid);
+			spin_unlock_irq(&slow_subchannel_lock);
+			break;
+		default:
+			rc = 0;
+		}
+		/*
+		 * Allow scheduling here since the containing loop might
+		 * take a while.
+		 */
+		cond_resched();
+	}
+	return rc;
+}
+
+static void css_slow_path_func(struct work_struct *unused)
+{
+	unsigned long flags;
+
+	CIO_TRACE_EVENT(4, "slowpath");
+	for_each_subchannel_staged(slow_eval_known_fn, slow_eval_unknown_fn,
+				   NULL);
+	spin_lock_irqsave(&slow_subchannel_lock, flags);
+	if (idset_is_empty(slow_subchannel_set)) {
+		atomic_set(&css_eval_scheduled, 0);
+		wake_up(&css_eval_wq);
+	}
+	spin_unlock_irqrestore(&slow_subchannel_lock, flags);
+}
+
+static DECLARE_DELAYED_WORK(slow_path_work, css_slow_path_func);
+struct workqueue_struct *cio_work_q;
+
+void css_schedule_eval(struct subchannel_id schid)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&slow_subchannel_lock, flags);
+	idset_sch_add(slow_subchannel_set, schid);
+	atomic_set(&css_eval_scheduled, 1);
+	queue_delayed_work(cio_work_q, &slow_path_work, 0);
+	spin_unlock_irqrestore(&slow_subchannel_lock, flags);
+}
+
+void css_schedule_eval_all(void)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&slow_subchannel_lock, flags);
+	idset_fill(slow_subchannel_set);
+	atomic_set(&css_eval_scheduled, 1);
+	queue_delayed_work(cio_work_q, &slow_path_work, 0);
+	spin_unlock_irqrestore(&slow_subchannel_lock, flags);
+}
+
+static int __unset_registered(struct device *dev, void *data)
+{
+	struct idset *set = data;
+	struct subchannel *sch = to_subchannel(dev);
+
+	idset_sch_del(set, sch->schid);
+	return 0;
+}
+
+void css_schedule_eval_all_unreg(unsigned long delay)
+{
+	unsigned long flags;
+	struct idset *unreg_set;
+
+	/* Find unregistered subchannels. */
+	unreg_set = idset_sch_new();
+	if (!unreg_set) {
+		/* Fallback. */
+		css_schedule_eval_all();
+		return;
+	}
+	idset_fill(unreg_set);
+	bus_for_each_dev(&css_bus_type, NULL, unreg_set, __unset_registered);
+	/* Apply to slow_subchannel_set. */
+	spin_lock_irqsave(&slow_subchannel_lock, flags);
+	idset_add_set(slow_subchannel_set, unreg_set);
+	atomic_set(&css_eval_scheduled, 1);
+	queue_delayed_work(cio_work_q, &slow_path_work, delay);
+	spin_unlock_irqrestore(&slow_subchannel_lock, flags);
+	idset_free(unreg_set);
+}
+
+void css_wait_for_slow_path(void)
+{
+	flush_workqueue(cio_work_q);
+}
+
+/* Schedule reprobing of all unregistered subchannels. */
+void css_schedule_reprobe(void)
+{
+	/* Schedule with a delay to allow merging of subsequent calls. */
+	css_schedule_eval_all_unreg(1 * HZ);
+}
+EXPORT_SYMBOL_GPL(css_schedule_reprobe);
+
+/*
+ * Called from the machine check handler for subchannel report words.
+ */
+static void css_process_crw(struct crw *crw0, struct crw *crw1, int overflow)
+{
+	struct subchannel_id mchk_schid;
+	struct subchannel *sch;
+
+	if (overflow) {
+		css_schedule_eval_all();
+		return;
+	}
+	CIO_CRW_EVENT(2, "CRW0 reports slct=%d, oflw=%d, "
+		      "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n",
+		      crw0->slct, crw0->oflw, crw0->chn, crw0->rsc, crw0->anc,
+		      crw0->erc, crw0->rsid);
+	if (crw1)
+		CIO_CRW_EVENT(2, "CRW1 reports slct=%d, oflw=%d, "
+			      "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n",
+			      crw1->slct, crw1->oflw, crw1->chn, crw1->rsc,
+			      crw1->anc, crw1->erc, crw1->rsid);
+	init_subchannel_id(&mchk_schid);
+	mchk_schid.sch_no = crw0->rsid;
+	if (crw1)
+		mchk_schid.ssid = (crw1->rsid >> 4) & 3;
+
+	if (crw0->erc == CRW_ERC_PMOD) {
+		sch = get_subchannel_by_schid(mchk_schid);
+		if (sch) {
+			css_update_ssd_info(sch);
+			put_device(&sch->dev);
+		}
+	}
+	/*
+	 * Since we are always presented with IPI in the CRW, we have to
+	 * use stsch() to find out if the subchannel in question has come
+	 * or gone.
+	 */
+	css_evaluate_subchannel(mchk_schid, 0);
+}
+
+static void __init
+css_generate_pgid(struct channel_subsystem *css, u32 tod_high)
+{
+	struct cpuid cpu_id;
+
+	if (css_general_characteristics.mcss) {
+		css->global_pgid.pgid_high.ext_cssid.version = 0x80;
+		css->global_pgid.pgid_high.ext_cssid.cssid =
+			(css->cssid < 0) ? 0 : css->cssid;
+	} else {
+		css->global_pgid.pgid_high.cpu_addr = stap();
+	}
+	get_cpu_id(&cpu_id);
+	css->global_pgid.cpu_id = cpu_id.ident;
+	css->global_pgid.cpu_model = cpu_id.machine;
+	css->global_pgid.tod_high = tod_high;
+}
+
+static void channel_subsystem_release(struct device *dev)
+{
+	struct channel_subsystem *css = to_css(dev);
+
+	mutex_destroy(&css->mutex);
+	kfree(css);
+}
+
+static ssize_t real_cssid_show(struct device *dev, struct device_attribute *a,
+			       char *buf)
+{
+	struct channel_subsystem *css = to_css(dev);
+
+	if (css->cssid < 0)
+		return -EINVAL;
+
+	return sprintf(buf, "%x\n", css->cssid);
+}
+static DEVICE_ATTR_RO(real_cssid);
+
+static ssize_t cm_enable_show(struct device *dev, struct device_attribute *a,
+			      char *buf)
+{
+	struct channel_subsystem *css = to_css(dev);
+	int ret;
+
+	mutex_lock(&css->mutex);
+	ret = sprintf(buf, "%x\n", css->cm_enabled);
+	mutex_unlock(&css->mutex);
+	return ret;
+}
+
+static ssize_t cm_enable_store(struct device *dev, struct device_attribute *a,
+			       const char *buf, size_t count)
+{
+	struct channel_subsystem *css = to_css(dev);
+	unsigned long val;
+	int ret;
+
+	ret = kstrtoul(buf, 16, &val);
+	if (ret)
+		return ret;
+	mutex_lock(&css->mutex);
+	switch (val) {
+	case 0:
+		ret = css->cm_enabled ? chsc_secm(css, 0) : 0;
+		break;
+	case 1:
+		ret = css->cm_enabled ? 0 : chsc_secm(css, 1);
+		break;
+	default:
+		ret = -EINVAL;
+	}
+	mutex_unlock(&css->mutex);
+	return ret < 0 ? ret : count;
+}
+static DEVICE_ATTR_RW(cm_enable);
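+
+/*
+ * Channel measurement is toggled from user space via this attribute,
+ * e.g. (assuming the usual sysfs location of the css device):
+ *
+ *	echo 1 > /sys/devices/css0/cm_enable
+ */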
+
+static umode_t cm_enable_mode(struct kobject *kobj, struct attribute *attr,
+			      int index)
+{
+	return css_chsc_characteristics.secm ? attr->mode : 0;
+}
+
+static struct attribute *cssdev_attrs[] = {
+	&dev_attr_real_cssid.attr,
+	NULL,
+};
+
+static struct attribute_group cssdev_attr_group = {
+	.attrs = cssdev_attrs,
+};
+
+static struct attribute *cssdev_cm_attrs[] = {
+	&dev_attr_cm_enable.attr,
+	NULL,
+};
+
+static struct attribute_group cssdev_cm_attr_group = {
+	.attrs = cssdev_cm_attrs,
+	.is_visible = cm_enable_mode,
+};
+
+static const struct attribute_group *cssdev_attr_groups[] = {
+	&cssdev_attr_group,
+	&cssdev_cm_attr_group,
+	NULL,
+};
+
+static int __init setup_css(int nr)
+{
+	struct channel_subsystem *css;
+	int ret;
+
+	css = kzalloc(sizeof(*css), GFP_KERNEL);
+	if (!css)
+		return -ENOMEM;
+
+	channel_subsystems[nr] = css;
+	dev_set_name(&css->device, "css%x", nr);
+	css->device.groups = cssdev_attr_groups;
+	css->device.release = channel_subsystem_release;
+
+	mutex_init(&css->mutex);
+	css->cssid = chsc_get_cssid(nr);
+	css_generate_pgid(css, (u32) (get_tod_clock() >> 32));
+
+	ret = device_register(&css->device);
+	if (ret) {
+		put_device(&css->device);
+		goto out_err;
+	}
+
+	css->pseudo_subchannel = kzalloc(sizeof(*css->pseudo_subchannel),
+					 GFP_KERNEL);
+	if (!css->pseudo_subchannel) {
+		device_unregister(&css->device);
+		ret = -ENOMEM;
+		goto out_err;
+	}
+
+	css->pseudo_subchannel->dev.parent = &css->device;
+	css->pseudo_subchannel->dev.release = css_subchannel_release;
+	mutex_init(&css->pseudo_subchannel->reg_mutex);
+	ret = css_sch_create_locks(css->pseudo_subchannel);
+	if (ret) {
+		kfree(css->pseudo_subchannel);
+		device_unregister(&css->device);
+		goto out_err;
+	}
+
+	dev_set_name(&css->pseudo_subchannel->dev, "defunct");
+	ret = device_register(&css->pseudo_subchannel->dev);
+	if (ret) {
+		put_device(&css->pseudo_subchannel->dev);
+		device_unregister(&css->device);
+		goto out_err;
+	}
+
+	return ret;
+out_err:
+	channel_subsystems[nr] = NULL;
+	return ret;
+}
+
+static int css_reboot_event(struct notifier_block *this,
+			    unsigned long event,
+			    void *ptr)
+{
+	struct channel_subsystem *css;
+	int ret;
+
+	ret = NOTIFY_DONE;
+	for_each_css(css) {
+		mutex_lock(&css->mutex);
+		if (css->cm_enabled)
+			if (chsc_secm(css, 0))
+				ret = NOTIFY_BAD;
+		mutex_unlock(&css->mutex);
+	}
+
+	return ret;
+}
+
+static struct notifier_block css_reboot_notifier = {
+	.notifier_call = css_reboot_event,
+};
+
+/*
+ * Since the css devices are not on a bus, have no class and no special
+ * device type, we cannot stop/restart channel path measurements via the
+ * normal suspend/resume callbacks, but have to use notifiers.
+ */
+static int css_power_event(struct notifier_block *this, unsigned long event,
+			   void *ptr)
+{
+	struct channel_subsystem *css;
+	int ret;
+
+	switch (event) {
+	case PM_HIBERNATION_PREPARE:
+	case PM_SUSPEND_PREPARE:
+		ret = NOTIFY_DONE;
+		for_each_css(css) {
+			mutex_lock(&css->mutex);
+			if (!css->cm_enabled) {
+				mutex_unlock(&css->mutex);
+				continue;
+			}
+			ret = __chsc_do_secm(css, 0);
+			ret = notifier_from_errno(ret);
+			mutex_unlock(&css->mutex);
+		}
+		break;
+	case PM_POST_HIBERNATION:
+	case PM_POST_SUSPEND:
+		ret = NOTIFY_DONE;
+		for_each_css(css) {
+			mutex_lock(&css->mutex);
+			if (!css->cm_enabled) {
+				mutex_unlock(&css->mutex);
+				continue;
+			}
+			ret = __chsc_do_secm(css, 1);
+			ret = notifier_from_errno(ret);
+			mutex_unlock(&css->mutex);
+		}
+		/* search for subchannels, which appeared during hibernation */
+		css_schedule_reprobe();
+		break;
+	default:
+		ret = NOTIFY_DONE;
+	}
+	return ret;
+}
+
+static struct notifier_block css_power_notifier = {
+	.notifier_call = css_power_event,
+};
+
+/*
+ * Now that the driver core is running, we can set up our channel subsystem.
+ * The struct subchannel instances are created during probing.
+ */
+static int __init css_bus_init(void)
+{
+	int ret, i;
+
+	ret = chsc_init();
+	if (ret)
+		return ret;
+
+	chsc_determine_css_characteristics();
+	/* Try to enable MSS. */
+	ret = chsc_enable_facility(CHSC_SDA_OC_MSS);
+	if (ret)
+		max_ssid = 0;
+	else /* Success. */
+		max_ssid = __MAX_SSID;
+
+	ret = slow_subchannel_init();
+	if (ret)
+		goto out;
+
+	ret = crw_register_handler(CRW_RSC_SCH, css_process_crw);
+	if (ret)
+		goto out;
+
+	ret = bus_register(&css_bus_type);
+	if (ret)
+		goto out;
+
+	/* Setup css structure. */
+	for (i = 0; i <= MAX_CSS_IDX; i++) {
+		ret = setup_css(i);
+		if (ret)
+			goto out_unregister;
+	}
+	ret = register_reboot_notifier(&css_reboot_notifier);
+	if (ret)
+		goto out_unregister;
+	ret = register_pm_notifier(&css_power_notifier);
+	if (ret) {
+		unregister_reboot_notifier(&css_reboot_notifier);
+		goto out_unregister;
+	}
+	css_init_done = 1;
+
+	/* Enable default isc for I/O subchannels. */
+	isc_register(IO_SCH_ISC);
+
+	return 0;
+out_unregister:
+	while (i-- > 0) {
+		struct channel_subsystem *css = channel_subsystems[i];
+		device_unregister(&css->pseudo_subchannel->dev);
+		device_unregister(&css->device);
+	}
+	bus_unregister(&css_bus_type);
+out:
+	crw_unregister_handler(CRW_RSC_SCH);
+	idset_free(slow_subchannel_set);
+	chsc_init_cleanup();
+	pr_alert("The CSS device driver initialization failed with "
+		 "errno=%d\n", ret);
+	return ret;
+}
+
+static void __init css_bus_cleanup(void)
+{
+	struct channel_subsystem *css;
+
+	for_each_css(css) {
+		device_unregister(&css->pseudo_subchannel->dev);
+		device_unregister(&css->device);
+	}
+	bus_unregister(&css_bus_type);
+	crw_unregister_handler(CRW_RSC_SCH);
+	idset_free(slow_subchannel_set);
+	chsc_init_cleanup();
+	isc_unregister(IO_SCH_ISC);
+}
+
+static int __init channel_subsystem_init(void)
+{
+	int ret;
+
+	ret = css_bus_init();
+	if (ret)
+		return ret;
+	cio_work_q = create_singlethread_workqueue("cio");
+	if (!cio_work_q) {
+		ret = -ENOMEM;
+		goto out_bus;
+	}
+	ret = io_subchannel_init();
+	if (ret)
+		goto out_wq;
+
+	/* Register subchannels which are already in use. */
+	cio_register_early_subchannels();
+	/* Start initial subchannel evaluation. */
+	css_schedule_eval_all();
+
+	return ret;
+out_wq:
+	destroy_workqueue(cio_work_q);
+out_bus:
+	css_bus_cleanup();
+	return ret;
+}
+subsys_initcall(channel_subsystem_init);
+
+static int css_settle(struct device_driver *drv, void *unused)
+{
+	struct css_driver *cssdrv = to_cssdriver(drv);
+
+	if (cssdrv->settle)
+		return cssdrv->settle();
+	return 0;
+}
+
+int css_complete_work(void)
+{
+	int ret;
+
+	/* Wait for the evaluation of subchannels to finish. */
+	ret = wait_event_interruptible(css_eval_wq,
+				       atomic_read(&css_eval_scheduled) == 0);
+	if (ret)
+		return -EINTR;
+	flush_workqueue(cio_work_q);
+	/* Wait for the subchannel type specific initialization to finish */
+	return bus_for_each_drv(&css_bus_type, NULL, NULL, css_settle);
+}
+
+/*
+ * Wait for the initialization of devices to finish, to make sure we are
+ * done with our setup if the search for the root device starts.
+ */
+static int __init channel_subsystem_init_sync(void)
+{
+	css_complete_work();
+	return 0;
+}
+subsys_initcall_sync(channel_subsystem_init_sync);
+
+void channel_subsystem_reinit(void)
+{
+	struct channel_path *chp;
+	struct chp_id chpid;
+
+	chsc_enable_facility(CHSC_SDA_OC_MSS);
+	chp_id_for_each(&chpid) {
+		chp = chpid_to_chp(chpid);
+		if (chp)
+			chp_update_desc(chp);
+	}
+	cmf_reactivate();
+}
+
+#ifdef CONFIG_PROC_FS
+static ssize_t cio_settle_write(struct file *file, const char __user *buf,
+				size_t count, loff_t *ppos)
+{
+	int ret;
+
+	/* Handle pending CRW's. */
+	crw_wait_for_channel_report();
+	ret = css_complete_work();
+
+	return ret ? ret : count;
+}
+
+static const struct file_operations cio_settle_proc_fops = {
+	.open = nonseekable_open,
+	.write = cio_settle_write,
+	.llseek = no_llseek,
+};
+
+static int __init cio_settle_init(void)
+{
+	struct proc_dir_entry *entry;
+
+	entry = proc_create("cio_settle", S_IWUSR, NULL,
+			    &cio_settle_proc_fops);
+	if (!entry)
+		return -ENOMEM;
+	return 0;
+}
+device_initcall(cio_settle_init);
+#endif /*CONFIG_PROC_FS*/
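+
+/*
+ * Any write to /proc/cio_settle blocks until all pending channel report
+ * words have been processed and subchannel evaluation has settled, e.g.:
+ *
+ *	echo 1 > /proc/cio_settle
+ */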
+
+int sch_is_pseudo_sch(struct subchannel *sch)
+{
+	return sch == to_css(sch->dev.parent)->pseudo_subchannel;
+}
+
+static int css_bus_match(struct device *dev, struct device_driver *drv)
+{
+	struct subchannel *sch = to_subchannel(dev);
+	struct css_driver *driver = to_cssdriver(drv);
+	struct css_device_id *id;
+
+	for (id = driver->subchannel_type; id->match_flags; id++) {
+		if (sch->st == id->type)
+			return 1;
+	}
+
+	return 0;
+}
+
+static int css_probe(struct device *dev)
+{
+	struct subchannel *sch;
+	int ret;
+
+	sch = to_subchannel(dev);
+	sch->driver = to_cssdriver(dev->driver);
+	ret = sch->driver->probe ? sch->driver->probe(sch) : 0;
+	if (ret)
+		sch->driver = NULL;
+	return ret;
+}
+
+static int css_remove(struct device *dev)
+{
+	struct subchannel *sch;
+	int ret;
+
+	sch = to_subchannel(dev);
+	ret = sch->driver->remove ? sch->driver->remove(sch) : 0;
+	sch->driver = NULL;
+	return ret;
+}
+
+static void css_shutdown(struct device *dev)
+{
+	struct subchannel *sch;
+
+	sch = to_subchannel(dev);
+	if (sch->driver && sch->driver->shutdown)
+		sch->driver->shutdown(sch);
+}
+
+static int css_uevent(struct device *dev, struct kobj_uevent_env *env)
+{
+	struct subchannel *sch = to_subchannel(dev);
+	int ret;
+
+	ret = add_uevent_var(env, "ST=%01X", sch->st);
+	if (ret)
+		return ret;
+	ret = add_uevent_var(env, "MODALIAS=css:t%01X", sch->st);
+	return ret;
+}
+
+static int css_pm_prepare(struct device *dev)
+{
+	struct subchannel *sch = to_subchannel(dev);
+	struct css_driver *drv;
+
+	if (mutex_is_locked(&sch->reg_mutex))
+		return -EAGAIN;
+	if (!sch->dev.driver)
+		return 0;
+	drv = to_cssdriver(sch->dev.driver);
+	/* Notify drivers that they may not register children. */
+	return drv->prepare ? drv->prepare(sch) : 0;
+}
+
+static void css_pm_complete(struct device *dev)
+{
+	struct subchannel *sch = to_subchannel(dev);
+	struct css_driver *drv;
+
+	if (!sch->dev.driver)
+		return;
+	drv = to_cssdriver(sch->dev.driver);
+	if (drv->complete)
+		drv->complete(sch);
+}
+
+static int css_pm_freeze(struct device *dev)
+{
+	struct subchannel *sch = to_subchannel(dev);
+	struct css_driver *drv;
+
+	if (!sch->dev.driver)
+		return 0;
+	drv = to_cssdriver(sch->dev.driver);
+	return drv->freeze ? drv->freeze(sch) : 0;
+}
+
+static int css_pm_thaw(struct device *dev)
+{
+	struct subchannel *sch = to_subchannel(dev);
+	struct css_driver *drv;
+
+	if (!sch->dev.driver)
+		return 0;
+	drv = to_cssdriver(sch->dev.driver);
+	return drv->thaw ? drv->thaw(sch) : 0;
+}
+
+static int css_pm_restore(struct device *dev)
+{
+	struct subchannel *sch = to_subchannel(dev);
+	struct css_driver *drv;
+
+	css_update_ssd_info(sch);
+	if (!sch->dev.driver)
+		return 0;
+	drv = to_cssdriver(sch->dev.driver);
+	return drv->restore ? drv->restore(sch) : 0;
+}
+
+static const struct dev_pm_ops css_pm_ops = {
+	.prepare = css_pm_prepare,
+	.complete = css_pm_complete,
+	.freeze = css_pm_freeze,
+	.thaw = css_pm_thaw,
+	.restore = css_pm_restore,
+};
+
+static struct bus_type css_bus_type = {
+	.name     = "css",
+	.match    = css_bus_match,
+	.probe    = css_probe,
+	.remove   = css_remove,
+	.shutdown = css_shutdown,
+	.uevent   = css_uevent,
+	.pm = &css_pm_ops,
+};
+
+/**
+ * css_driver_register - register a css driver
+ * @cdrv: css driver to register
+ *
+ * This is mainly a wrapper around driver_register() that sets bus_type
+ * in the embedded struct device_driver correctly.
+ */
+int css_driver_register(struct css_driver *cdrv)
+{
+	cdrv->drv.bus = &css_bus_type;
+	return driver_register(&cdrv->drv);
+}
+EXPORT_SYMBOL_GPL(css_driver_register);
+
+/**
+ * css_driver_unregister - unregister a css driver
+ * @cdrv: css driver to unregister
+ *
+ * This is a wrapper around driver_unregister.
+ */
+void css_driver_unregister(struct css_driver *cdrv)
+{
+	driver_unregister(&cdrv->drv);
+}
+EXPORT_SYMBOL_GPL(css_driver_unregister);
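+
+/*
+ * A subchannel driver registers itself roughly like this (sketch; see
+ * io_subchannel_driver in device.c for a real instance):
+ *
+ *	static struct css_driver foo_driver = {
+ *		.drv = {
+ *			.owner = THIS_MODULE,
+ *			.name  = "foo_subchannel",
+ *		},
+ *		.subchannel_type = foo_subchannel_ids,
+ *		.probe  = foo_probe,
+ *		.remove = foo_remove,
+ *	};
+ *
+ *	ret = css_driver_register(&foo_driver);
+ */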
diff --git a/drivers/s390/cio/css.h b/drivers/s390/cio/css.h
new file mode 100644
index 0000000..8d83290
--- /dev/null
+++ b/drivers/s390/cio/css.h
@@ -0,0 +1,157 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _CSS_H
+#define _CSS_H
+
+#include <linux/mutex.h>
+#include <linux/wait.h>
+#include <linux/workqueue.h>
+#include <linux/device.h>
+#include <linux/types.h>
+
+#include <asm/cio.h>
+#include <asm/chpid.h>
+#include <asm/schid.h>
+
+#include "cio.h"
+
+/*
+ * path grouping stuff
+ */
+#define SPID_FUNC_SINGLE_PATH	   0x00
+#define SPID_FUNC_MULTI_PATH	   0x80
+#define SPID_FUNC_ESTABLISH	   0x00
+#define SPID_FUNC_RESIGN	   0x40
+#define SPID_FUNC_DISBAND	   0x20
+
+#define SNID_STATE1_RESET	   0
+#define SNID_STATE1_UNGROUPED	   2
+#define SNID_STATE1_GROUPED	   3
+
+#define SNID_STATE2_NOT_RESVD	   0
+#define SNID_STATE2_RESVD_ELSE	   2
+#define SNID_STATE2_RESVD_SELF	   3
+
+#define SNID_STATE3_MULTI_PATH	   1
+#define SNID_STATE3_SINGLE_PATH	   0
+
+struct path_state {
+	__u8  state1 : 2;	/* path state value 1 */
+	__u8  state2 : 2;	/* path state value 2 */
+	__u8  state3 : 1;	/* path state value 3 */
+	__u8  resvd  : 3;	/* reserved */
+} __attribute__ ((packed));
+
+struct extended_cssid {
+	u8 version;
+	u8 cssid;
+} __attribute__ ((packed));
+
+struct pgid {
+	union {
+		__u8 fc;   	/* SPID function code */
+		struct path_state ps;	/* SNID path state */
+	} __attribute__ ((packed)) inf;
+	union {
+		__u32 cpu_addr	: 16;	/* CPU address */
+		struct extended_cssid ext_cssid;
+	} __attribute__ ((packed)) pgid_high;
+	__u32 cpu_id	: 24;	/* CPU identification */
+	__u32 cpu_model : 16;	/* CPU model */
+	__u32 tod_high;		/* high word TOD clock */
+} __attribute__ ((packed));
+
+struct subchannel;
+struct chp_link;
+/**
+ * struct css_driver - device driver for subchannels
+ * @subchannel_type: subchannel type supported by this driver
+ * @drv: embedded device driver structure
+ * @irq: called on interrupts
+ * @chp_event: called for events affecting a channel path
+ * @sch_event: called for events affecting the subchannel
+ * @probe: function called on probe
+ * @remove: function called on remove
+ * @shutdown: called at device shutdown
+ * @prepare: prepare for pm state transition
+ * @complete: undo work done in @prepare
+ * @freeze: callback for freezing during hibernation snapshotting
+ * @thaw: undo work done in @freeze
+ * @restore: callback for restoring after hibernation
+ * @settle: wait for asynchronous work to finish
+ */
+struct css_driver {
+	struct css_device_id *subchannel_type;
+	struct device_driver drv;
+	void (*irq)(struct subchannel *);
+	int (*chp_event)(struct subchannel *, struct chp_link *, int);
+	int (*sch_event)(struct subchannel *, int);
+	int (*probe)(struct subchannel *);
+	int (*remove)(struct subchannel *);
+	void (*shutdown)(struct subchannel *);
+	int (*prepare)(struct subchannel *);
+	void (*complete)(struct subchannel *);
+	int (*freeze)(struct subchannel *);
+	int (*thaw)(struct subchannel *);
+	int (*restore)(struct subchannel *);
+	int (*settle)(void);
+};
+
+#define to_cssdriver(n) container_of(n, struct css_driver, drv)
+
+extern int css_driver_register(struct css_driver *);
+extern void css_driver_unregister(struct css_driver *);
+
+extern void css_sch_device_unregister(struct subchannel *);
+extern int css_register_subchannel(struct subchannel *);
+extern struct subchannel *css_alloc_subchannel(struct subchannel_id,
+					       struct schib *schib);
+extern struct subchannel *get_subchannel_by_schid(struct subchannel_id);
+extern int css_init_done;
+extern int max_ssid;
+int for_each_subchannel_staged(int (*fn_known)(struct subchannel *, void *),
+			       int (*fn_unknown)(struct subchannel_id, void *),
+			       void *data);
+extern int for_each_subchannel(int(*fn)(struct subchannel_id, void *), void *);
+void css_update_ssd_info(struct subchannel *sch);
+
+struct channel_subsystem {
+	int cssid;
+	struct channel_path *chps[__MAX_CHPID + 1];
+	struct device device;
+	struct pgid global_pgid;
+	struct mutex mutex;
+	/* channel measurement related */
+	int cm_enabled;
+	void *cub_addr1;
+	void *cub_addr2;
+	/* for orphaned ccw devices */
+	struct subchannel *pseudo_subchannel;
+};
+#define to_css(dev) container_of(dev, struct channel_subsystem, device)
+
+extern struct channel_subsystem *channel_subsystems[];
+
+/* Dummy helper which needs to change once we support more than one css. */
+static inline struct channel_subsystem *css_by_id(u8 cssid)
+{
+	return channel_subsystems[0];
+}
+
+/* Dummy iterator which needs to change once we support more than one css. */
+#define for_each_css(css)						\
+	for ((css) = channel_subsystems[0]; (css); (css) = NULL)
+
+/* Helper functions to build lists for the slow path. */
+void css_schedule_eval(struct subchannel_id schid);
+void css_schedule_eval_all(void);
+void css_schedule_eval_all_unreg(unsigned long delay);
+int css_complete_work(void);
+
+int sch_is_pseudo_sch(struct subchannel *);
+struct schib;
+int css_sch_is_valid(struct schib *);
+
+extern struct workqueue_struct *cio_work_q;
+void css_wait_for_slow_path(void);
+void css_sched_sch_todo(struct subchannel *sch, enum sch_todo todo);
+#endif
diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c
new file mode 100644
index 0000000..1540229
--- /dev/null
+++ b/drivers/s390/cio/device.c
@@ -0,0 +1,2122 @@
+// SPDX-License-Identifier: GPL-1.0+
+/*
+ *  bus driver for ccw devices
+ *
+ *    Copyright IBM Corp. 2002, 2008
+ *    Author(s): Arnd Bergmann (arndb@de.ibm.com)
+ *		 Cornelia Huck (cornelia.huck@de.ibm.com)
+ *		 Martin Schwidefsky (schwidefsky@de.ibm.com)
+ */
+
+#define KMSG_COMPONENT "cio"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <linux/export.h>
+#include <linux/init.h>
+#include <linux/spinlock.h>
+#include <linux/errno.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/list.h>
+#include <linux/device.h>
+#include <linux/workqueue.h>
+#include <linux/delay.h>
+#include <linux/timer.h>
+#include <linux/kernel_stat.h>
+#include <linux/sched/signal.h>
+
+#include <asm/ccwdev.h>
+#include <asm/cio.h>
+#include <asm/param.h>		/* HZ */
+#include <asm/cmb.h>
+#include <asm/isc.h>
+
+#include "chp.h"
+#include "cio.h"
+#include "cio_debug.h"
+#include "css.h"
+#include "device.h"
+#include "ioasm.h"
+#include "io_sch.h"
+#include "blacklist.h"
+#include "chsc.h"
+
+static struct timer_list recovery_timer;
+static DEFINE_SPINLOCK(recovery_lock);
+static int recovery_phase;
+static const unsigned long recovery_delay[] = { 3, 30, 300 };
+
+static atomic_t ccw_device_init_count = ATOMIC_INIT(0);
+static DECLARE_WAIT_QUEUE_HEAD(ccw_device_init_wq);
+static struct bus_type ccw_bus_type;
+
+/******************* bus type handling ***********************/
+
+/*
+ * The Linux driver model distinguishes between a bus type and the bus
+ * itself. Of course we only have one channel subsystem driver and one
+ * channel subsystem per machine, but we still use the abstraction.
+ * T.R. says it's a good idea.
+ */
+static int ccw_bus_match(struct device *dev, struct device_driver *drv)
+{
+	struct ccw_device *cdev = to_ccwdev(dev);
+	struct ccw_driver *cdrv = to_ccwdrv(drv);
+	const struct ccw_device_id *ids = cdrv->ids, *found;
+
+	if (!ids)
+		return 0;
+
+	found = ccw_device_id_match(ids, &cdev->id);
+	if (!found)
+		return 0;
+
+	cdev->id.driver_info = found->driver_info;
+
+	return 1;
+}
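+
+/*
+ * The ids matched above come from the driver's ccw_device_id table,
+ * e.g. (sketch; 3990/3390 are the usual ECKD DASD types):
+ *
+ *	static struct ccw_device_id foo_ids[] = {
+ *		{ CCW_DEVICE_DEVTYPE(0x3990, 0, 0x3390, 0) },
+ *		{ },
+ *	};
+ *	MODULE_DEVICE_TABLE(ccw, foo_ids);
+ */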
+
+/* Store modalias string delimited by prefix/suffix string into buffer with
+ * specified size. Return length of resulting string (excluding trailing '\0')
+ * even if string doesn't fit buffer (snprintf semantics). */
+static int snprint_alias(char *buf, size_t size,
+			 struct ccw_device_id *id, const char *suffix)
+{
+	int len;
+
+	len = snprintf(buf, size, "ccw:t%04Xm%02X", id->cu_type, id->cu_model);
+	if (len > size)
+		return len;
+	buf += len;
+	size -= len;
+
+	if (id->dev_type != 0)
+		len += snprintf(buf, size, "dt%04Xdm%02X%s", id->dev_type,
+				id->dev_model, suffix);
+	else
+		len += snprintf(buf, size, "dtdm%s", suffix);
+
+	return len;
+}
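+
+/*
+ * For example, a device with cu_type 0x3990, cu_model 0x00, dev_type
+ * 0x3390 and dev_model 0x0C yields "ccw:t3990m00dt3390dm0C"; with a zero
+ * dev_type the result is "ccw:t3990m00dtdm".
+ */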
+
+/* Set up environment variables for ccw device uevent. Return 0 on success,
+ * non-zero otherwise. */
+static int ccw_uevent(struct device *dev, struct kobj_uevent_env *env)
+{
+	struct ccw_device *cdev = to_ccwdev(dev);
+	struct ccw_device_id *id = &(cdev->id);
+	int ret;
+	char modalias_buf[30];
+
+	/* CU_TYPE= */
+	ret = add_uevent_var(env, "CU_TYPE=%04X", id->cu_type);
+	if (ret)
+		return ret;
+
+	/* CU_MODEL= */
+	ret = add_uevent_var(env, "CU_MODEL=%02X", id->cu_model);
+	if (ret)
+		return ret;
+
+	/* The next two can be zero, that's ok for us */
+	/* DEV_TYPE= */
+	ret = add_uevent_var(env, "DEV_TYPE=%04X", id->dev_type);
+	if (ret)
+		return ret;
+
+	/* DEV_MODEL= */
+	ret = add_uevent_var(env, "DEV_MODEL=%02X", id->dev_model);
+	if (ret)
+		return ret;
+
+	/* MODALIAS=  */
+	snprint_alias(modalias_buf, sizeof(modalias_buf), id, "");
+	ret = add_uevent_var(env, "MODALIAS=%s", modalias_buf);
+	return ret;
+}
+
+static void io_subchannel_irq(struct subchannel *);
+static int io_subchannel_probe(struct subchannel *);
+static int io_subchannel_remove(struct subchannel *);
+static void io_subchannel_shutdown(struct subchannel *);
+static int io_subchannel_sch_event(struct subchannel *, int);
+static int io_subchannel_chp_event(struct subchannel *, struct chp_link *,
+				   int);
+static void recovery_func(struct timer_list *unused);
+
+static struct css_device_id io_subchannel_ids[] = {
+	{ .match_flags = 0x1, .type = SUBCHANNEL_TYPE_IO, },
+	{ /* end of list */ },
+};
+
+static int io_subchannel_prepare(struct subchannel *sch)
+{
+	struct ccw_device *cdev;
+	/*
+	 * Don't allow suspend while a ccw device registration
+	 * is still outstanding.
+	 */
+	cdev = sch_get_cdev(sch);
+	if (cdev && !device_is_registered(&cdev->dev))
+		return -EAGAIN;
+	return 0;
+}
+
+static int io_subchannel_settle(void)
+{
+	int ret;
+
+	ret = wait_event_interruptible(ccw_device_init_wq,
+				atomic_read(&ccw_device_init_count) == 0);
+	if (ret)
+		return -EINTR;
+	flush_workqueue(cio_work_q);
+	return 0;
+}
+
+static struct css_driver io_subchannel_driver = {
+	.drv = {
+		.owner = THIS_MODULE,
+		.name = "io_subchannel",
+	},
+	.subchannel_type = io_subchannel_ids,
+	.irq = io_subchannel_irq,
+	.sch_event = io_subchannel_sch_event,
+	.chp_event = io_subchannel_chp_event,
+	.probe = io_subchannel_probe,
+	.remove = io_subchannel_remove,
+	.shutdown = io_subchannel_shutdown,
+	.prepare = io_subchannel_prepare,
+	.settle = io_subchannel_settle,
+};
+
+int __init io_subchannel_init(void)
+{
+	int ret;
+
+	timer_setup(&recovery_timer, recovery_func, 0);
+	ret = bus_register(&ccw_bus_type);
+	if (ret)
+		return ret;
+	ret = css_driver_register(&io_subchannel_driver);
+	if (ret)
+		bus_unregister(&ccw_bus_type);
+
+	return ret;
+}
+
+/************************ device handling **************************/
+
+static ssize_t devtype_show(struct device *dev, struct device_attribute *attr,
+			    char *buf)
+{
+	struct ccw_device *cdev = to_ccwdev(dev);
+	struct ccw_device_id *id = &(cdev->id);
+
+	if (id->dev_type != 0)
+		return sprintf(buf, "%04x/%02x\n",
+				id->dev_type, id->dev_model);
+	else
+		return sprintf(buf, "n/a\n");
+}
+
+static ssize_t cutype_show(struct device *dev, struct device_attribute *attr,
+			   char *buf)
+{
+	struct ccw_device *cdev = to_ccwdev(dev);
+	struct ccw_device_id *id = &(cdev->id);
+
+	return sprintf(buf, "%04x/%02x\n",
+		       id->cu_type, id->cu_model);
+}
+
+static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
+			     char *buf)
+{
+	struct ccw_device *cdev = to_ccwdev(dev);
+	struct ccw_device_id *id = &(cdev->id);
+	int len;
+
+	len = snprint_alias(buf, PAGE_SIZE, id, "\n");
+
+	return len > PAGE_SIZE ? PAGE_SIZE : len;
+}
+
+static ssize_t online_show(struct device *dev, struct device_attribute *attr,
+			   char *buf)
+{
+	struct ccw_device *cdev = to_ccwdev(dev);
+
+	return sprintf(buf, cdev->online ? "1\n" : "0\n");
+}
+
+int ccw_device_is_orphan(struct ccw_device *cdev)
+{
+	return sch_is_pseudo_sch(to_subchannel(cdev->dev.parent));
+}
+
+static void ccw_device_unregister(struct ccw_device *cdev)
+{
+	if (device_is_registered(&cdev->dev)) {
+		/* Undo device_add(). */
+		device_del(&cdev->dev);
+	}
+	if (cdev->private->flags.initialized) {
+		cdev->private->flags.initialized = 0;
+		/* Release reference from device_initialize(). */
+		put_device(&cdev->dev);
+	}
+}
+
+static void io_subchannel_quiesce(struct subchannel *);
+
+/**
+ * ccw_device_set_offline() - disable a ccw device for I/O
+ * @cdev: target ccw device
+ *
+ * This function calls the driver's set_offline() function for @cdev, if
+ * given, and then disables @cdev.
+ * Returns:
+ *   %0 on success and a negative error value on failure.
+ * Context:
+ *  enabled, ccw device lock not held
+ */
+int ccw_device_set_offline(struct ccw_device *cdev)
+{
+	struct subchannel *sch;
+	int ret, state;
+
+	if (!cdev)
+		return -ENODEV;
+	if (!cdev->online || !cdev->drv)
+		return -EINVAL;
+
+	if (cdev->drv->set_offline) {
+		ret = cdev->drv->set_offline(cdev);
+		if (ret != 0)
+			return ret;
+	}
+	spin_lock_irq(cdev->ccwlock);
+	sch = to_subchannel(cdev->dev.parent);
+	cdev->online = 0;
+	/* Wait until a final state or DISCONNECTED is reached */
+	while (!dev_fsm_final_state(cdev) &&
+	       cdev->private->state != DEV_STATE_DISCONNECTED) {
+		spin_unlock_irq(cdev->ccwlock);
+		wait_event(cdev->private->wait_q, (dev_fsm_final_state(cdev) ||
+			   cdev->private->state == DEV_STATE_DISCONNECTED));
+		spin_lock_irq(cdev->ccwlock);
+	}
+	do {
+		ret = ccw_device_offline(cdev);
+		if (!ret)
+			break;
+		CIO_MSG_EVENT(0, "ccw_device_offline returned %d, device "
+			      "0.%x.%04x\n", ret, cdev->private->dev_id.ssid,
+			      cdev->private->dev_id.devno);
+		if (ret != -EBUSY)
+			goto error;
+		state = cdev->private->state;
+		spin_unlock_irq(cdev->ccwlock);
+		io_subchannel_quiesce(sch);
+		spin_lock_irq(cdev->ccwlock);
+		cdev->private->state = state;
+	} while (ret == -EBUSY);
+	spin_unlock_irq(cdev->ccwlock);
+	wait_event(cdev->private->wait_q, (dev_fsm_final_state(cdev) ||
+		   cdev->private->state == DEV_STATE_DISCONNECTED));
+	/* Inform the user if set offline failed. */
+	if (cdev->private->state == DEV_STATE_BOXED) {
+		pr_warn("%s: The device entered boxed state while being set offline\n",
+			dev_name(&cdev->dev));
+	} else if (cdev->private->state == DEV_STATE_NOT_OPER) {
+		pr_warn("%s: The device stopped operating while being set offline\n",
+			dev_name(&cdev->dev));
+	}
+	/* Give up reference from ccw_device_set_online(). */
+	put_device(&cdev->dev);
+	return 0;
+
+error:
+	cdev->private->state = DEV_STATE_OFFLINE;
+	dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
+	spin_unlock_irq(cdev->ccwlock);
+	/* Give up reference from ccw_device_set_online(). */
+	put_device(&cdev->dev);
+	return -ENODEV;
+}
+
+/**
+ * ccw_device_set_online() - enable a ccw device for I/O
+ * @cdev: target ccw device
+ *
+ * This function first enables @cdev and then calls the driver's set_online()
+ * function for @cdev, if given. If set_online() returns an error, @cdev is
+ * disabled again.
+ * Returns:
+ *   %0 on success and a negative error value on failure.
+ * Context:
+ *  enabled, ccw device lock not held
+ */
+int ccw_device_set_online(struct ccw_device *cdev)
+{
+	int ret;
+	int ret2;
+
+	if (!cdev)
+		return -ENODEV;
+	if (cdev->online || !cdev->drv)
+		return -EINVAL;
+	/* Hold on to an extra reference while device is online. */
+	if (!get_device(&cdev->dev))
+		return -ENODEV;
+
+	spin_lock_irq(cdev->ccwlock);
+	ret = ccw_device_online(cdev);
+	spin_unlock_irq(cdev->ccwlock);
+	if (ret == 0)
+		wait_event(cdev->private->wait_q, dev_fsm_final_state(cdev));
+	else {
+		CIO_MSG_EVENT(0, "ccw_device_online returned %d, "
+			      "device 0.%x.%04x\n",
+			      ret, cdev->private->dev_id.ssid,
+			      cdev->private->dev_id.devno);
+		/* Give up online reference since onlining failed. */
+		put_device(&cdev->dev);
+		return ret;
+	}
+	spin_lock_irq(cdev->ccwlock);
+	/* Check if online processing was successful */
+	if ((cdev->private->state != DEV_STATE_ONLINE) &&
+	    (cdev->private->state != DEV_STATE_W4SENSE)) {
+		spin_unlock_irq(cdev->ccwlock);
+		/* Inform the user that set online failed. */
+		if (cdev->private->state == DEV_STATE_BOXED) {
+			pr_warn("%s: Setting the device online failed because it is boxed\n",
+				dev_name(&cdev->dev));
+		} else if (cdev->private->state == DEV_STATE_NOT_OPER) {
+			pr_warn("%s: Setting the device online failed because it is not operational\n",
+				dev_name(&cdev->dev));
+		}
+		/* Give up online reference since onlining failed. */
+		put_device(&cdev->dev);
+		return -ENODEV;
+	}
+	spin_unlock_irq(cdev->ccwlock);
+	if (cdev->drv->set_online)
+		ret = cdev->drv->set_online(cdev);
+	if (ret)
+		goto rollback;
+
+	spin_lock_irq(cdev->ccwlock);
+	cdev->online = 1;
+	spin_unlock_irq(cdev->ccwlock);
+	return 0;
+
+rollback:
+	spin_lock_irq(cdev->ccwlock);
+	/* Wait until a final state or DISCONNECTED is reached */
+	while (!dev_fsm_final_state(cdev) &&
+	       cdev->private->state != DEV_STATE_DISCONNECTED) {
+		spin_unlock_irq(cdev->ccwlock);
+		wait_event(cdev->private->wait_q, (dev_fsm_final_state(cdev) ||
+			   cdev->private->state == DEV_STATE_DISCONNECTED));
+		spin_lock_irq(cdev->ccwlock);
+	}
+	ret2 = ccw_device_offline(cdev);
+	if (ret2)
+		goto error;
+	spin_unlock_irq(cdev->ccwlock);
+	wait_event(cdev->private->wait_q, (dev_fsm_final_state(cdev) ||
+		   cdev->private->state == DEV_STATE_DISCONNECTED));
+	/* Give up online reference since onlining failed. */
+	put_device(&cdev->dev);
+	return ret;
+
+error:
+	CIO_MSG_EVENT(0, "rollback ccw_device_offline returned %d, "
+		      "device 0.%x.%04x\n",
+		      ret2, cdev->private->dev_id.ssid,
+		      cdev->private->dev_id.devno);
+	cdev->private->state = DEV_STATE_OFFLINE;
+	spin_unlock_irq(cdev->ccwlock);
+	/* Give up online reference since onlining failed. */
+	put_device(&cdev->dev);
+	return ret;
+}
+
+static int online_store_handle_offline(struct ccw_device *cdev)
+{
+	if (cdev->private->state == DEV_STATE_DISCONNECTED) {
+		spin_lock_irq(cdev->ccwlock);
+		ccw_device_sched_todo(cdev, CDEV_TODO_UNREG_EVAL);
+		spin_unlock_irq(cdev->ccwlock);
+		return 0;
+	}
+	if (cdev->drv && cdev->drv->set_offline)
+		return ccw_device_set_offline(cdev);
+	return -EINVAL;
+}
+
+static int online_store_recog_and_online(struct ccw_device *cdev)
+{
+	/* Do device recognition, if needed. */
+	if (cdev->private->state == DEV_STATE_BOXED) {
+		spin_lock_irq(cdev->ccwlock);
+		ccw_device_recognition(cdev);
+		spin_unlock_irq(cdev->ccwlock);
+		wait_event(cdev->private->wait_q,
+			   cdev->private->flags.recog_done);
+		if (cdev->private->state != DEV_STATE_OFFLINE)
+			/* recognition failed */
+			return -EAGAIN;
+	}
+	if (cdev->drv && cdev->drv->set_online)
+		return ccw_device_set_online(cdev);
+	return -EINVAL;
+}
+
+static int online_store_handle_online(struct ccw_device *cdev, int force)
+{
+	int ret;
+
+	ret = online_store_recog_and_online(cdev);
+	if (ret && !force)
+		return ret;
+	if (force && cdev->private->state == DEV_STATE_BOXED) {
+		ret = ccw_device_stlck(cdev);
+		if (ret)
+			return ret;
+		if (cdev->id.cu_type == 0)
+			cdev->private->state = DEV_STATE_NOT_OPER;
+		ret = online_store_recog_and_online(cdev);
+		if (ret)
+			return ret;
+	}
+	return 0;
+}
+
+static ssize_t online_store(struct device *dev, struct device_attribute *attr,
+			    const char *buf, size_t count)
+{
+	struct ccw_device *cdev = to_ccwdev(dev);
+	int force, ret;
+	unsigned long i;
+
+	/* Prevent conflict between multiple on-/offline processing requests. */
+	if (atomic_cmpxchg(&cdev->private->onoff, 0, 1) != 0)
+		return -EAGAIN;
+	/* Prevent conflict between internal I/Os and on-/offline processing. */
+	if (!dev_fsm_final_state(cdev) &&
+	    cdev->private->state != DEV_STATE_DISCONNECTED) {
+		ret = -EAGAIN;
+		goto out;
+	}
+	/* Prevent conflict between pending work and on-/offline processing.*/
+	if (work_pending(&cdev->private->todo_work)) {
+		ret = -EAGAIN;
+		goto out;
+	}
+	if (!strncmp(buf, "force\n", count)) {
+		force = 1;
+		i = 1;
+		ret = 0;
+	} else {
+		force = 0;
+		ret = kstrtoul(buf, 16, &i);
+	}
+	if (ret)
+		goto out;
+
+	device_lock(dev);
+	switch (i) {
+	case 0:
+		ret = online_store_handle_offline(cdev);
+		break;
+	case 1:
+		ret = online_store_handle_online(cdev, force);
+		break;
+	default:
+		ret = -EINVAL;
+	}
+	device_unlock(dev);
+
+out:
+	atomic_set(&cdev->private->onoff, 0);
+	return (ret < 0) ? ret : count;
+}
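+
+/*
+ * From user space this attribute is driven e.g. via (0.0.1234 being a
+ * placeholder bus id):
+ *
+ *	echo 1 > /sys/bus/ccw/devices/0.0.1234/online
+ *	echo force > /sys/bus/ccw/devices/0.0.1234/online
+ *
+ * where "force" additionally tries to break the reserve of a boxed
+ * device via ccw_device_stlck().
+ */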
+
+static ssize_t available_show(struct device *dev,
+			      struct device_attribute *attr, char *buf)
+{
+	struct ccw_device *cdev = to_ccwdev(dev);
+	struct subchannel *sch;
+
+	if (ccw_device_is_orphan(cdev))
+		return sprintf(buf, "no device\n");
+	switch (cdev->private->state) {
+	case DEV_STATE_BOXED:
+		return sprintf(buf, "boxed\n");
+	case DEV_STATE_DISCONNECTED:
+	case DEV_STATE_DISCONNECTED_SENSE_ID:
+	case DEV_STATE_NOT_OPER:
+		sch = to_subchannel(dev->parent);
+		if (!sch->lpm)
+			return sprintf(buf, "no path\n");
+		else
+			return sprintf(buf, "no device\n");
+	default:
+		/* All other states considered fine. */
+		return sprintf(buf, "good\n");
+	}
+}
+
+static ssize_t
+initiate_logging(struct device *dev, struct device_attribute *attr,
+		 const char *buf, size_t count)
+{
+	struct subchannel *sch = to_subchannel(dev);
+	int rc;
+
+	rc = chsc_siosl(sch->schid);
+	if (rc < 0) {
+		pr_warn("Logging for subchannel 0.%x.%04x failed with errno=%d\n",
+			sch->schid.ssid, sch->schid.sch_no, rc);
+		return rc;
+	}
+	pr_notice("Logging for subchannel 0.%x.%04x was triggered\n",
+		  sch->schid.ssid, sch->schid.sch_no);
+	return count;
+}
+
+static ssize_t vpm_show(struct device *dev, struct device_attribute *attr,
+			char *buf)
+{
+	struct subchannel *sch = to_subchannel(dev);
+
+	return sprintf(buf, "%02x\n", sch->vpm);
+}
+
+static DEVICE_ATTR_RO(devtype);
+static DEVICE_ATTR_RO(cutype);
+static DEVICE_ATTR_RO(modalias);
+static DEVICE_ATTR_RW(online);
+static DEVICE_ATTR(availability, 0444, available_show, NULL);
+static DEVICE_ATTR(logging, 0200, NULL, initiate_logging);
+static DEVICE_ATTR_RO(vpm);
+
+static struct attribute *io_subchannel_attrs[] = {
+	&dev_attr_logging.attr,
+	&dev_attr_vpm.attr,
+	NULL,
+};
+
+static const struct attribute_group io_subchannel_attr_group = {
+	.attrs = io_subchannel_attrs,
+};
+
+static struct attribute *ccwdev_attrs[] = {
+	&dev_attr_devtype.attr,
+	&dev_attr_cutype.attr,
+	&dev_attr_modalias.attr,
+	&dev_attr_online.attr,
+	&dev_attr_cmb_enable.attr,
+	&dev_attr_availability.attr,
+	NULL,
+};
+
+static const struct attribute_group ccwdev_attr_group = {
+	.attrs = ccwdev_attrs,
+};
+
+static const struct attribute_group *ccwdev_attr_groups[] = {
+	&ccwdev_attr_group,
+	NULL,
+};
+
+static int ccw_device_add(struct ccw_device *cdev)
+{
+	struct device *dev = &cdev->dev;
+
+	dev->bus = &ccw_bus_type;
+	return device_add(dev);
+}
+
+static int match_dev_id(struct device *dev, void *data)
+{
+	struct ccw_device *cdev = to_ccwdev(dev);
+	struct ccw_dev_id *dev_id = data;
+
+	return ccw_dev_id_is_equal(&cdev->private->dev_id, dev_id);
+}
+
+/**
+ * get_ccwdev_by_dev_id() - obtain device from a ccw device id
+ * @dev_id: id of the device to be searched
+ *
+ * This function searches all devices attached to the ccw bus for a device
+ * matching @dev_id.
+ * Returns:
+ *  If a device is found, its reference count is increased and the device
+ *  is returned; else %NULL is returned.
+ */
+struct ccw_device *get_ccwdev_by_dev_id(struct ccw_dev_id *dev_id)
+{
+	struct device *dev;
+
+	dev = bus_find_device(&ccw_bus_type, NULL, dev_id, match_dev_id);
+
+	return dev ? to_ccwdev(dev) : NULL;
+}
+EXPORT_SYMBOL_GPL(get_ccwdev_by_dev_id);
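+
+/*
+ * As with get_subchannel_by_schid(), the returned device carries an
+ * extra reference which the caller must drop, roughly:
+ *
+ *	struct ccw_dev_id dev_id = { .ssid = 0, .devno = 0x1234 };
+ *	struct ccw_device *cdev = get_ccwdev_by_dev_id(&dev_id);
+ *
+ *	if (cdev) {
+ *		... use cdev ...
+ *		put_device(&cdev->dev);
+ *	}
+ */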
+
+static void ccw_device_do_unbind_bind(struct ccw_device *cdev)
+{
+	int ret;
+
+	if (device_is_registered(&cdev->dev)) {
+		device_release_driver(&cdev->dev);
+		ret = device_attach(&cdev->dev);
+		WARN_ON(ret == -ENODEV);
+	}
+}
+
+static void ccw_device_release(struct device *dev)
+{
+	struct ccw_device *cdev;
+
+	cdev = to_ccwdev(dev);
+	/* Release reference of parent subchannel. */
+	put_device(cdev->dev.parent);
+	kfree(cdev->private);
+	kfree(cdev);
+}
+
+static struct ccw_device *io_subchannel_allocate_dev(struct subchannel *sch)
+{
+	struct ccw_device *cdev;
+
+	cdev = kzalloc(sizeof(*cdev), GFP_KERNEL);
+	if (cdev) {
+		cdev->private = kzalloc(sizeof(struct ccw_device_private),
+					GFP_KERNEL | GFP_DMA);
+		if (cdev->private)
+			return cdev;
+	}
+	kfree(cdev);
+	return ERR_PTR(-ENOMEM);
+}
+
+static void ccw_device_todo(struct work_struct *work);
+
+static int io_subchannel_initialize_dev(struct subchannel *sch,
+					struct ccw_device *cdev)
+{
+	struct ccw_device_private *priv = cdev->private;
+	int ret;
+
+	priv->cdev = cdev;
+	priv->int_class = IRQIO_CIO;
+	priv->state = DEV_STATE_NOT_OPER;
+	priv->dev_id.devno = sch->schib.pmcw.dev;
+	priv->dev_id.ssid = sch->schid.ssid;
+
+	INIT_WORK(&priv->todo_work, ccw_device_todo);
+	INIT_LIST_HEAD(&priv->cmb_list);
+	init_waitqueue_head(&priv->wait_q);
+	timer_setup(&priv->timer, ccw_device_timeout, 0);
+
+	atomic_set(&priv->onoff, 0);
+	cdev->ccwlock = sch->lock;
+	cdev->dev.parent = &sch->dev;
+	cdev->dev.release = ccw_device_release;
+	cdev->dev.groups = ccwdev_attr_groups;
+	/* Do first half of device_register. */
+	device_initialize(&cdev->dev);
+	ret = dev_set_name(&cdev->dev, "0.%x.%04x", cdev->private->dev_id.ssid,
+			   cdev->private->dev_id.devno);
+	if (ret)
+		goto out_put;
+	if (!get_device(&sch->dev)) {
+		ret = -ENODEV;
+		goto out_put;
+	}
+	priv->flags.initialized = 1;
+	spin_lock_irq(sch->lock);
+	sch_set_cdev(sch, cdev);
+	spin_unlock_irq(sch->lock);
+	return 0;
+
+out_put:
+	/* Release reference from device_initialize(). */
+	put_device(&cdev->dev);
+	return ret;
+}
+
+static struct ccw_device *io_subchannel_create_ccwdev(struct subchannel *sch)
+{
+	struct ccw_device *cdev;
+	int ret;
+
+	cdev = io_subchannel_allocate_dev(sch);
+	if (!IS_ERR(cdev)) {
+		ret = io_subchannel_initialize_dev(sch, cdev);
+		if (ret)
+			cdev = ERR_PTR(ret);
+	}
+	return cdev;
+}
+
+static void io_subchannel_recog(struct ccw_device *, struct subchannel *);
+
+static void sch_create_and_recog_new_device(struct subchannel *sch)
+{
+	struct ccw_device *cdev;
+
+	/* Need to allocate a new ccw device. */
+	cdev = io_subchannel_create_ccwdev(sch);
+	if (IS_ERR(cdev)) {
+		/* OK, we did everything we could... */
+		css_sch_device_unregister(sch);
+		return;
+	}
+	/* Start recognition for the new ccw device. */
+	io_subchannel_recog(cdev, sch);
+}
+
+/*
+ * Register recognized device.
+ */
+static void io_subchannel_register(struct ccw_device *cdev)
+{
+	struct subchannel *sch;
+	int ret, adjust_init_count = 1;
+	unsigned long flags;
+
+	sch = to_subchannel(cdev->dev.parent);
+	/*
+	 * Check if subchannel is still registered. It may have become
+	 * unregistered if a machine check hit us after finishing
+	 * device recognition but before the register work could be
+	 * queued.
+	 */
+	if (!device_is_registered(&sch->dev))
+		goto out_err;
+	css_update_ssd_info(sch);
+	/*
+	 * io_subchannel_register() will also be called after device
+	 * recognition has been done for a boxed device (which will already
+	 * be registered). We need to reprobe since we may now have sense id
+	 * information.
+	 */
+	if (device_is_registered(&cdev->dev)) {
+		if (!cdev->drv) {
+			ret = device_reprobe(&cdev->dev);
+			if (ret)
+				/* We can't do much here. */
+				CIO_MSG_EVENT(0, "device_reprobe() returned"
+					      " %d for 0.%x.%04x\n", ret,
+					      cdev->private->dev_id.ssid,
+					      cdev->private->dev_id.devno);
+		}
+		adjust_init_count = 0;
+		goto out;
+	}
+	/*
+	 * Now we know this subchannel will stay, we can throw
+	 * our delayed uevent.
+	 */
+	dev_set_uevent_suppress(&sch->dev, 0);
+	kobject_uevent(&sch->dev.kobj, KOBJ_ADD);
+	/* make it known to the system */
+	ret = ccw_device_add(cdev);
+	if (ret) {
+		CIO_MSG_EVENT(0, "Could not register ccw dev 0.%x.%04x: %d\n",
+			      cdev->private->dev_id.ssid,
+			      cdev->private->dev_id.devno, ret);
+		spin_lock_irqsave(sch->lock, flags);
+		sch_set_cdev(sch, NULL);
+		spin_unlock_irqrestore(sch->lock, flags);
+		/* Release initial device reference. */
+		put_device(&cdev->dev);
+		goto out_err;
+	}
+out:
+	cdev->private->flags.recog_done = 1;
+	wake_up(&cdev->private->wait_q);
+out_err:
+	if (adjust_init_count && atomic_dec_and_test(&ccw_device_init_count))
+		wake_up(&ccw_device_init_wq);
+}
+
+static void ccw_device_call_sch_unregister(struct ccw_device *cdev)
+{
+	struct subchannel *sch;
+
+	/* Get subchannel reference for local processing. */
+	if (!get_device(cdev->dev.parent))
+		return;
+	sch = to_subchannel(cdev->dev.parent);
+	css_sch_device_unregister(sch);
+	/* Release subchannel reference for local processing. */
+	put_device(&sch->dev);
+}
+
+/*
+ * subchannel recognition done. Called from the state machine.
+ */
+void io_subchannel_recog_done(struct ccw_device *cdev)
+{
+	if (css_init_done == 0) {
+		cdev->private->flags.recog_done = 1;
+		return;
+	}
+	switch (cdev->private->state) {
+	case DEV_STATE_BOXED:
+		/* Device did not respond in time. */
+	case DEV_STATE_NOT_OPER:
+		cdev->private->flags.recog_done = 1;
+		/* Remove device found not operational. */
+		ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
+		if (atomic_dec_and_test(&ccw_device_init_count))
+			wake_up(&ccw_device_init_wq);
+		break;
+	case DEV_STATE_OFFLINE:
+		/*
+		 * We can't register the device in interrupt context so
+		 * we schedule a work item.
+		 */
+		ccw_device_sched_todo(cdev, CDEV_TODO_REGISTER);
+		break;
+	}
+}
+
+static void io_subchannel_recog(struct ccw_device *cdev, struct subchannel *sch)
+{
+	/* Increase counter of devices currently in recognition. */
+	atomic_inc(&ccw_device_init_count);
+
+	/* Start async. device sensing. */
+	spin_lock_irq(sch->lock);
+	ccw_device_recognition(cdev);
+	spin_unlock_irq(sch->lock);
+}
+
+static int ccw_device_move_to_sch(struct ccw_device *cdev,
+				  struct subchannel *sch)
+{
+	struct subchannel *old_sch;
+	int rc, old_enabled = 0;
+
+	old_sch = to_subchannel(cdev->dev.parent);
+	/* Obtain child reference for new parent. */
+	if (!get_device(&sch->dev))
+		return -ENODEV;
+
+	if (!sch_is_pseudo_sch(old_sch)) {
+		spin_lock_irq(old_sch->lock);
+		old_enabled = old_sch->schib.pmcw.ena;
+		rc = 0;
+		if (old_enabled)
+			rc = cio_disable_subchannel(old_sch);
+		spin_unlock_irq(old_sch->lock);
+		if (rc == -EBUSY) {
+			/* Release child reference for new parent. */
+			put_device(&sch->dev);
+			return rc;
+		}
+	}
+
+	mutex_lock(&sch->reg_mutex);
+	rc = device_move(&cdev->dev, &sch->dev, DPM_ORDER_PARENT_BEFORE_DEV);
+	mutex_unlock(&sch->reg_mutex);
+	if (rc) {
+		CIO_MSG_EVENT(0, "device_move(0.%x.%04x,0.%x.%04x)=%d\n",
+			      cdev->private->dev_id.ssid,
+			      cdev->private->dev_id.devno, sch->schid.ssid,
+			      sch->schib.pmcw.dev, rc);
+		if (old_enabled) {
+			/* Try to reenable the old subchannel. */
+			spin_lock_irq(old_sch->lock);
+			cio_enable_subchannel(old_sch, (u32)(addr_t)old_sch);
+			spin_unlock_irq(old_sch->lock);
+		}
+		/* Release child reference for new parent. */
+		put_device(&sch->dev);
+		return rc;
+	}
+	/* Clean up old subchannel. */
+	if (!sch_is_pseudo_sch(old_sch)) {
+		spin_lock_irq(old_sch->lock);
+		sch_set_cdev(old_sch, NULL);
+		spin_unlock_irq(old_sch->lock);
+		css_schedule_eval(old_sch->schid);
+	}
+	/* Release child reference for old parent. */
+	put_device(&old_sch->dev);
+	/* Initialize new subchannel. */
+	spin_lock_irq(sch->lock);
+	cdev->ccwlock = sch->lock;
+	if (!sch_is_pseudo_sch(sch))
+		sch_set_cdev(sch, cdev);
+	spin_unlock_irq(sch->lock);
+	if (!sch_is_pseudo_sch(sch))
+		css_update_ssd_info(sch);
+	return 0;
+}
+
+static int ccw_device_move_to_orph(struct ccw_device *cdev)
+{
+	struct subchannel *sch = to_subchannel(cdev->dev.parent);
+	struct channel_subsystem *css = to_css(sch->dev.parent);
+
+	return ccw_device_move_to_sch(cdev, css->pseudo_subchannel);
+}
+
+static void io_subchannel_irq(struct subchannel *sch)
+{
+	struct ccw_device *cdev;
+
+	cdev = sch_get_cdev(sch);
+
+	CIO_TRACE_EVENT(6, "IRQ");
+	CIO_TRACE_EVENT(6, dev_name(&sch->dev));
+	if (cdev)
+		dev_fsm_event(cdev, DEV_EVENT_INTERRUPT);
+	else
+		inc_irq_stat(IRQIO_CIO);
+}
+
+void io_subchannel_init_config(struct subchannel *sch)
+{
+	memset(&sch->config, 0, sizeof(sch->config));
+	sch->config.csense = 1;
+}
+
+static void io_subchannel_init_fields(struct subchannel *sch)
+{
+	if (cio_is_console(sch->schid))
+		sch->opm = 0xff;
+	else
+		sch->opm = chp_get_sch_opm(sch);
+	sch->lpm = sch->schib.pmcw.pam & sch->opm;
+	sch->isc = cio_is_console(sch->schid) ? CONSOLE_ISC : IO_SCH_ISC;
+
+	CIO_MSG_EVENT(6, "Detected device %04x on subchannel 0.%x.%04X"
+		      " - PIM = %02X, PAM = %02X, POM = %02X\n",
+		      sch->schib.pmcw.dev, sch->schid.ssid,
+		      sch->schid.sch_no, sch->schib.pmcw.pim,
+		      sch->schib.pmcw.pam, sch->schib.pmcw.pom);
+
+	io_subchannel_init_config(sch);
+}
+
+/*
+ * Note: We always return 0 so that we bind to the device even on error.
+ * This is needed so that our remove function is called on unregister.
+ */
+static int io_subchannel_probe(struct subchannel *sch)
+{
+	struct io_subchannel_private *io_priv;
+	struct ccw_device *cdev;
+	int rc;
+
+	if (cio_is_console(sch->schid)) {
+		rc = sysfs_create_group(&sch->dev.kobj,
+					&io_subchannel_attr_group);
+		if (rc)
+			CIO_MSG_EVENT(0, "Failed to create io subchannel "
+				      "attributes for subchannel "
+				      "0.%x.%04x (rc=%d)\n",
+				      sch->schid.ssid, sch->schid.sch_no, rc);
+		/*
+		 * The console subchannel already has an associated ccw_device.
+		 * Throw the delayed uevent for the subchannel, register
+		 * the ccw_device and exit.
+		 */
+		dev_set_uevent_suppress(&sch->dev, 0);
+		kobject_uevent(&sch->dev.kobj, KOBJ_ADD);
+		cdev = sch_get_cdev(sch);
+		rc = ccw_device_add(cdev);
+		if (rc) {
+			/* Release online reference. */
+			put_device(&cdev->dev);
+			goto out_schedule;
+		}
+		if (atomic_dec_and_test(&ccw_device_init_count))
+			wake_up(&ccw_device_init_wq);
+		return 0;
+	}
+	io_subchannel_init_fields(sch);
+	rc = cio_commit_config(sch);
+	if (rc)
+		goto out_schedule;
+	rc = sysfs_create_group(&sch->dev.kobj,
+				&io_subchannel_attr_group);
+	if (rc)
+		goto out_schedule;
+	/* Allocate I/O subchannel private data. */
+	io_priv = kzalloc(sizeof(*io_priv), GFP_KERNEL | GFP_DMA);
+	if (!io_priv)
+		goto out_schedule;
+
+	set_io_private(sch, io_priv);
+	css_schedule_eval(sch->schid);
+	return 0;
+
+out_schedule:
+	spin_lock_irq(sch->lock);
+	css_sched_sch_todo(sch, SCH_TODO_UNREG);
+	spin_unlock_irq(sch->lock);
+	return 0;
+}
+
+static int io_subchannel_remove(struct subchannel *sch)
+{
+	struct io_subchannel_private *io_priv = to_io_private(sch);
+	struct ccw_device *cdev;
+
+	cdev = sch_get_cdev(sch);
+	if (!cdev)
+		goto out_free;
+
+	ccw_device_unregister(cdev);
+	spin_lock_irq(sch->lock);
+	sch_set_cdev(sch, NULL);
+	set_io_private(sch, NULL);
+	spin_unlock_irq(sch->lock);
+out_free:
+	kfree(io_priv);
+	sysfs_remove_group(&sch->dev.kobj, &io_subchannel_attr_group);
+	return 0;
+}
+
+static void io_subchannel_verify(struct subchannel *sch)
+{
+	struct ccw_device *cdev;
+
+	cdev = sch_get_cdev(sch);
+	if (cdev)
+		dev_fsm_event(cdev, DEV_EVENT_VERIFY);
+}
+
+static void io_subchannel_terminate_path(struct subchannel *sch, u8 mask)
+{
+	struct ccw_device *cdev;
+
+	cdev = sch_get_cdev(sch);
+	if (!cdev)
+		return;
+	if (cio_update_schib(sch))
+		goto err;
+	/* Check for I/O on path. */
+	if (scsw_actl(&sch->schib.scsw) == 0 || sch->schib.pmcw.lpum != mask)
+		goto out;
+	if (cdev->private->state == DEV_STATE_ONLINE) {
+		ccw_device_kill_io(cdev);
+		goto out;
+	}
+	if (cio_clear(sch))
+		goto err;
+out:
+	/* Trigger path verification. */
+	dev_fsm_event(cdev, DEV_EVENT_VERIFY);
+	return;
+
+err:
+	dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
+}
+
+static int io_subchannel_chp_event(struct subchannel *sch,
+				   struct chp_link *link, int event)
+{
+	struct ccw_device *cdev = sch_get_cdev(sch);
+	int mask;
+
+	mask = chp_ssd_get_mask(&sch->ssd_info, link);
+	if (!mask)
+		return 0;
+	switch (event) {
+	case CHP_VARY_OFF:
+		sch->opm &= ~mask;
+		sch->lpm &= ~mask;
+		if (cdev)
+			cdev->private->path_gone_mask |= mask;
+		io_subchannel_terminate_path(sch, mask);
+		break;
+	case CHP_VARY_ON:
+		sch->opm |= mask;
+		sch->lpm |= mask;
+		if (cdev)
+			cdev->private->path_new_mask |= mask;
+		io_subchannel_verify(sch);
+		break;
+	case CHP_OFFLINE:
+		if (cio_update_schib(sch))
+			return -ENODEV;
+		if (cdev)
+			cdev->private->path_gone_mask |= mask;
+		io_subchannel_terminate_path(sch, mask);
+		break;
+	case CHP_ONLINE:
+		if (cio_update_schib(sch))
+			return -ENODEV;
+		sch->lpm |= mask & sch->opm;
+		if (cdev)
+			cdev->private->path_new_mask |= mask;
+		io_subchannel_verify(sch);
+		break;
+	}
+	return 0;
+}
+
+static void io_subchannel_quiesce(struct subchannel *sch)
+{
+	struct ccw_device *cdev;
+	int ret;
+
+	spin_lock_irq(sch->lock);
+	cdev = sch_get_cdev(sch);
+	if (cio_is_console(sch->schid))
+		goto out_unlock;
+	if (!sch->schib.pmcw.ena)
+		goto out_unlock;
+	ret = cio_disable_subchannel(sch);
+	if (ret != -EBUSY)
+		goto out_unlock;
+	if (cdev->handler)
+		cdev->handler(cdev, cdev->private->intparm, ERR_PTR(-EIO));
+	while (ret == -EBUSY) {
+		cdev->private->state = DEV_STATE_QUIESCE;
+		cdev->private->iretry = 255;
+		ret = ccw_device_cancel_halt_clear(cdev);
+		if (ret == -EBUSY) {
+			ccw_device_set_timeout(cdev, HZ/10);
+			spin_unlock_irq(sch->lock);
+			wait_event(cdev->private->wait_q,
+				   cdev->private->state != DEV_STATE_QUIESCE);
+			spin_lock_irq(sch->lock);
+		}
+		ret = cio_disable_subchannel(sch);
+	}
+out_unlock:
+	spin_unlock_irq(sch->lock);
+}
+
+static void io_subchannel_shutdown(struct subchannel *sch)
+{
+	io_subchannel_quiesce(sch);
+}
+
+static int device_is_disconnected(struct ccw_device *cdev)
+{
+	if (!cdev)
+		return 0;
+	return (cdev->private->state == DEV_STATE_DISCONNECTED ||
+		cdev->private->state == DEV_STATE_DISCONNECTED_SENSE_ID);
+}
+
+static int recovery_check(struct device *dev, void *data)
+{
+	struct ccw_device *cdev = to_ccwdev(dev);
+	struct subchannel *sch;
+	int *redo = data;
+
+	spin_lock_irq(cdev->ccwlock);
+	switch (cdev->private->state) {
+	case DEV_STATE_ONLINE:
+		sch = to_subchannel(cdev->dev.parent);
+		if ((sch->schib.pmcw.pam & sch->opm) == sch->vpm)
+			break;
+		/* fall through */
+	case DEV_STATE_DISCONNECTED:
+		CIO_MSG_EVENT(3, "recovery: trigger 0.%x.%04x\n",
+			      cdev->private->dev_id.ssid,
+			      cdev->private->dev_id.devno);
+		dev_fsm_event(cdev, DEV_EVENT_VERIFY);
+		*redo = 1;
+		break;
+	case DEV_STATE_DISCONNECTED_SENSE_ID:
+		*redo = 1;
+		break;
+	}
+	spin_unlock_irq(cdev->ccwlock);
+
+	return 0;
+}
+
+static void recovery_work_func(struct work_struct *unused)
+{
+	int redo = 0;
+
+	bus_for_each_dev(&ccw_bus_type, NULL, &redo, recovery_check);
+	if (redo) {
+		spin_lock_irq(&recovery_lock);
+		if (!timer_pending(&recovery_timer)) {
+			if (recovery_phase < ARRAY_SIZE(recovery_delay) - 1)
+				recovery_phase++;
+			mod_timer(&recovery_timer, jiffies +
+				  recovery_delay[recovery_phase] * HZ);
+		}
+		spin_unlock_irq(&recovery_lock);
+	} else
+		CIO_MSG_EVENT(3, "recovery: end\n");
+}
+
+static DECLARE_WORK(recovery_work, recovery_work_func);
+
+static void recovery_func(struct timer_list *unused)
+{
+	/*
+	 * We can't do our recovery in softirq context and it's not
+	 * performance critical, so we schedule it.
+	 */
+	schedule_work(&recovery_work);
+}
+
+void ccw_device_schedule_recovery(void)
+{
+	unsigned long flags;
+
+	CIO_MSG_EVENT(3, "recovery: schedule\n");
+	spin_lock_irqsave(&recovery_lock, flags);
+	if (!timer_pending(&recovery_timer) || (recovery_phase != 0)) {
+		recovery_phase = 0;
+		mod_timer(&recovery_timer, jiffies + recovery_delay[0] * HZ);
+	}
+	spin_unlock_irqrestore(&recovery_lock, flags);
+}
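+
+/*
+ * Illustration (a sketch, assuming the recovery_delay[] table defined
+ * earlier in this file holds { 3, 30, 300 } seconds): the first scheduled
+ * run fires after 3s; every recovery_work_func() pass that still finds
+ * devices to retrigger advances recovery_phase, so further passes follow
+ * after 30s and then every 300s until recovery_check() finds nothing
+ * left to do.
+ */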
+
+static int purge_fn(struct device *dev, void *data)
+{
+	struct ccw_device *cdev = to_ccwdev(dev);
+	struct ccw_dev_id *id = &cdev->private->dev_id;
+
+	spin_lock_irq(cdev->ccwlock);
+	if (is_blacklisted(id->ssid, id->devno) &&
+	    (cdev->private->state == DEV_STATE_OFFLINE) &&
+	    (atomic_cmpxchg(&cdev->private->onoff, 0, 1) == 0)) {
+		CIO_MSG_EVENT(3, "ccw: purging 0.%x.%04x\n", id->ssid,
+			      id->devno);
+		ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
+		atomic_set(&cdev->private->onoff, 0);
+	}
+	spin_unlock_irq(cdev->ccwlock);
+	/* Abort loop in case of pending signal. */
+	if (signal_pending(current))
+		return -EINTR;
+
+	return 0;
+}
+
+/**
+ * ccw_purge_blacklisted - purge unused, blacklisted devices
+ *
+ * Unregister all ccw devices that are offline and on the blacklist.
+ */
+int ccw_purge_blacklisted(void)
+{
+	CIO_MSG_EVENT(2, "ccw: purging blacklisted devices\n");
+	bus_for_each_dev(&ccw_bus_type, NULL, NULL, purge_fn);
+	return 0;
+}
+
+void ccw_device_set_disconnected(struct ccw_device *cdev)
+{
+	if (!cdev)
+		return;
+	ccw_device_set_timeout(cdev, 0);
+	cdev->private->flags.fake_irb = 0;
+	cdev->private->state = DEV_STATE_DISCONNECTED;
+	if (cdev->online)
+		ccw_device_schedule_recovery();
+}
+
+void ccw_device_set_notoper(struct ccw_device *cdev)
+{
+	struct subchannel *sch = to_subchannel(cdev->dev.parent);
+
+	CIO_TRACE_EVENT(2, "notoper");
+	CIO_TRACE_EVENT(2, dev_name(&sch->dev));
+	ccw_device_set_timeout(cdev, 0);
+	cio_disable_subchannel(sch);
+	cdev->private->state = DEV_STATE_NOT_OPER;
+}
+
+enum io_sch_action {
+	IO_SCH_UNREG,
+	IO_SCH_ORPH_UNREG,
+	IO_SCH_ATTACH,
+	IO_SCH_UNREG_ATTACH,
+	IO_SCH_ORPH_ATTACH,
+	IO_SCH_REPROBE,
+	IO_SCH_VERIFY,
+	IO_SCH_DISC,
+	IO_SCH_NOP,
+};
+
+static enum io_sch_action sch_get_action(struct subchannel *sch)
+{
+	struct ccw_device *cdev;
+
+	cdev = sch_get_cdev(sch);
+	if (cio_update_schib(sch)) {
+		/* Not operational. */
+		if (!cdev)
+			return IO_SCH_UNREG;
+		if (ccw_device_notify(cdev, CIO_GONE) != NOTIFY_OK)
+			return IO_SCH_UNREG;
+		return IO_SCH_ORPH_UNREG;
+	}
+	/* Operational. */
+	if (!cdev)
+		return IO_SCH_ATTACH;
+	if (sch->schib.pmcw.dev != cdev->private->dev_id.devno) {
+		if (ccw_device_notify(cdev, CIO_GONE) != NOTIFY_OK)
+			return IO_SCH_UNREG_ATTACH;
+		return IO_SCH_ORPH_ATTACH;
+	}
+	if ((sch->schib.pmcw.pam & sch->opm) == 0) {
+		if (ccw_device_notify(cdev, CIO_NO_PATH) != NOTIFY_OK)
+			return IO_SCH_UNREG;
+		return IO_SCH_DISC;
+	}
+	if (device_is_disconnected(cdev))
+		return IO_SCH_REPROBE;
+	if (cdev->online && !cdev->private->flags.resuming)
+		return IO_SCH_VERIFY;
+	if (cdev->private->state == DEV_STATE_NOT_OPER)
+		return IO_SCH_UNREG_ATTACH;
+	return IO_SCH_NOP;
+}
+
+/**
+ * io_subchannel_sch_event - process subchannel event
+ * @sch: subchannel
+ * @process: non-zero if function is called in process context
+ *
+ * An unspecified event occurred for this subchannel. Adjust data according
+ * to the current operational state of the subchannel and device. Return
+ * zero when the event has been handled sufficiently or -EAGAIN when this
+ * function should be called again in process context.
+ */
+static int io_subchannel_sch_event(struct subchannel *sch, int process)
+{
+	unsigned long flags;
+	struct ccw_device *cdev;
+	struct ccw_dev_id dev_id;
+	enum io_sch_action action;
+	int rc = -EAGAIN;
+
+	spin_lock_irqsave(sch->lock, flags);
+	if (!device_is_registered(&sch->dev))
+		goto out_unlock;
+	if (work_pending(&sch->todo_work))
+		goto out_unlock;
+	cdev = sch_get_cdev(sch);
+	if (cdev && work_pending(&cdev->private->todo_work))
+		goto out_unlock;
+	action = sch_get_action(sch);
+	CIO_MSG_EVENT(2, "event: sch 0.%x.%04x, process=%d, action=%d\n",
+		      sch->schid.ssid, sch->schid.sch_no, process,
+		      action);
+	/* Perform immediate actions while holding the lock. */
+	switch (action) {
+	case IO_SCH_REPROBE:
+		/* Trigger device recognition. */
+		ccw_device_trigger_reprobe(cdev);
+		rc = 0;
+		goto out_unlock;
+	case IO_SCH_VERIFY:
+		/* Trigger path verification. */
+		io_subchannel_verify(sch);
+		rc = 0;
+		goto out_unlock;
+	case IO_SCH_DISC:
+		ccw_device_set_disconnected(cdev);
+		rc = 0;
+		goto out_unlock;
+	case IO_SCH_ORPH_UNREG:
+	case IO_SCH_ORPH_ATTACH:
+		ccw_device_set_disconnected(cdev);
+		break;
+	case IO_SCH_UNREG_ATTACH:
+	case IO_SCH_UNREG:
+		if (!cdev)
+			break;
+		if (cdev->private->state == DEV_STATE_SENSE_ID) {
+			/*
+			 * Note: delayed work triggered by this event
+			 * and repeated calls to sch_event are synchronized
+			 * by the above check for work_pending(cdev).
+			 */
+			dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
+		} else
+			ccw_device_set_notoper(cdev);
+		break;
+	case IO_SCH_NOP:
+		rc = 0;
+		goto out_unlock;
+	default:
+		break;
+	}
+	spin_unlock_irqrestore(sch->lock, flags);
+	/* All other actions require process context. */
+	if (!process)
+		goto out;
+	/* Handle attached ccw device. */
+	switch (action) {
+	case IO_SCH_ORPH_UNREG:
+	case IO_SCH_ORPH_ATTACH:
+		/* Move ccw device to orphanage. */
+		rc = ccw_device_move_to_orph(cdev);
+		if (rc)
+			goto out;
+		break;
+	case IO_SCH_UNREG_ATTACH:
+		spin_lock_irqsave(sch->lock, flags);
+		if (cdev->private->flags.resuming) {
+			/* Device will be handled later. */
+			rc = 0;
+			goto out_unlock;
+		}
+		sch_set_cdev(sch, NULL);
+		spin_unlock_irqrestore(sch->lock, flags);
+		/* Unregister ccw device. */
+		ccw_device_unregister(cdev);
+		break;
+	default:
+		break;
+	}
+	/* Handle subchannel. */
+	switch (action) {
+	case IO_SCH_ORPH_UNREG:
+	case IO_SCH_UNREG:
+		if (!cdev || !cdev->private->flags.resuming)
+			css_sch_device_unregister(sch);
+		break;
+	case IO_SCH_ORPH_ATTACH:
+	case IO_SCH_UNREG_ATTACH:
+	case IO_SCH_ATTACH:
+		dev_id.ssid = sch->schid.ssid;
+		dev_id.devno = sch->schib.pmcw.dev;
+		cdev = get_ccwdev_by_dev_id(&dev_id);
+		if (!cdev) {
+			sch_create_and_recog_new_device(sch);
+			break;
+		}
+		rc = ccw_device_move_to_sch(cdev, sch);
+		if (rc) {
+			/* Release reference from get_ccwdev_by_dev_id() */
+			put_device(&cdev->dev);
+			goto out;
+		}
+		spin_lock_irqsave(sch->lock, flags);
+		ccw_device_trigger_reprobe(cdev);
+		spin_unlock_irqrestore(sch->lock, flags);
+		/* Release reference from get_ccwdev_by_dev_id() */
+		put_device(&cdev->dev);
+		break;
+	default:
+		break;
+	}
+	return 0;
+
+out_unlock:
+	spin_unlock_irqrestore(sch->lock, flags);
+out:
+	return rc;
+}
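+
+/*
+ * Calling convention (a sketch): a first call with process == 0 handles
+ * the quick cases under the subchannel lock; when it returns -EAGAIN, the
+ * css slow-path evaluation is expected to repeat the call with
+ * process == 1 so that the register/unregister and move operations above
+ * can run in process context.
+ */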
+
+static void ccw_device_set_int_class(struct ccw_device *cdev)
+{
+	struct ccw_driver *cdrv = cdev->drv;
+
+	/* Note: we interpret class 0 in this context as an uninitialized
+	 * field since it translates to a non-I/O interrupt class. */
+	if (cdrv->int_class != 0)
+		cdev->private->int_class = cdrv->int_class;
+	else
+		cdev->private->int_class = IRQIO_CIO;
+}
+
+#ifdef CONFIG_CCW_CONSOLE
+int __init ccw_device_enable_console(struct ccw_device *cdev)
+{
+	struct subchannel *sch = to_subchannel(cdev->dev.parent);
+	int rc;
+
+	if (!cdev->drv || !cdev->handler)
+		return -EINVAL;
+
+	io_subchannel_init_fields(sch);
+	rc = cio_commit_config(sch);
+	if (rc)
+		return rc;
+	sch->driver = &io_subchannel_driver;
+	io_subchannel_recog(cdev, sch);
+	/* Now wait for the asynchronous recognition to finish. */
+	spin_lock_irq(cdev->ccwlock);
+	while (!dev_fsm_final_state(cdev))
+		ccw_device_wait_idle(cdev);
+
+	/* Hold on to an extra reference while device is online. */
+	get_device(&cdev->dev);
+	rc = ccw_device_online(cdev);
+	if (rc)
+		goto out_unlock;
+
+	while (!dev_fsm_final_state(cdev))
+		ccw_device_wait_idle(cdev);
+
+	if (cdev->private->state == DEV_STATE_ONLINE)
+		cdev->online = 1;
+	else
+		rc = -EIO;
+out_unlock:
+	spin_unlock_irq(cdev->ccwlock);
+	if (rc) /* Give up online reference since onlining failed. */
+		put_device(&cdev->dev);
+	return rc;
+}
+
+struct ccw_device * __init ccw_device_create_console(struct ccw_driver *drv)
+{
+	struct io_subchannel_private *io_priv;
+	struct ccw_device *cdev;
+	struct subchannel *sch;
+
+	sch = cio_probe_console();
+	if (IS_ERR(sch))
+		return ERR_CAST(sch);
+
+	io_priv = kzalloc(sizeof(*io_priv), GFP_KERNEL | GFP_DMA);
+	if (!io_priv) {
+		put_device(&sch->dev);
+		return ERR_PTR(-ENOMEM);
+	}
+	set_io_private(sch, io_priv);
+	cdev = io_subchannel_create_ccwdev(sch);
+	if (IS_ERR(cdev)) {
+		put_device(&sch->dev);
+		kfree(io_priv);
+		return cdev;
+	}
+	cdev->drv = drv;
+	ccw_device_set_int_class(cdev);
+	return cdev;
+}
+
+void __init ccw_device_destroy_console(struct ccw_device *cdev)
+{
+	struct subchannel *sch = to_subchannel(cdev->dev.parent);
+	struct io_subchannel_private *io_priv = to_io_private(sch);
+
+	set_io_private(sch, NULL);
+	put_device(&sch->dev);
+	put_device(&cdev->dev);
+	kfree(io_priv);
+}
+
+/**
+ * ccw_device_wait_idle() - busy wait for device to become idle
+ * @cdev: ccw device
+ *
+ * Poll until activity control is zero, that is, no function or data
+ * transfer is pending/active.
+ * Called with the device lock held.
+ */
+void ccw_device_wait_idle(struct ccw_device *cdev)
+{
+	struct subchannel *sch = to_subchannel(cdev->dev.parent);
+
+	while (1) {
+		cio_tsch(sch);
+		if (sch->schib.scsw.cmd.actl == 0)
+			break;
+		udelay_simple(100);
+	}
+}
+
+static int ccw_device_pm_restore(struct device *dev);
+
+int ccw_device_force_console(struct ccw_device *cdev)
+{
+	return ccw_device_pm_restore(&cdev->dev);
+}
+EXPORT_SYMBOL_GPL(ccw_device_force_console);
+#endif
+
+/*
+ * get ccw_device matching the busid, but only if owned by cdrv
+ */
+static int
+__ccwdev_check_busid(struct device *dev, void *id)
+{
+	char *bus_id;
+
+	bus_id = id;
+
+	return (strcmp(bus_id, dev_name(dev)) == 0);
+}
+
+
+/**
+ * get_ccwdev_by_busid() - obtain device from a bus id
+ * @cdrv: driver the device is owned by
+ * @bus_id: bus id of the device to be searched
+ *
+ * This function searches all devices owned by @cdrv for a device with a bus
+ * id matching @bus_id.
+ * Returns:
+ *  If a match is found, the reference count of the found device is
+ *  increased and the device is returned; else %NULL is returned.
+ */
+struct ccw_device *get_ccwdev_by_busid(struct ccw_driver *cdrv,
+				       const char *bus_id)
+{
+	struct device *dev;
+
+	dev = driver_find_device(&cdrv->driver, NULL, (void *)bus_id,
+				 __ccwdev_check_busid);
+
+	return dev ? to_ccwdev(dev) : NULL;
+}
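+
+/*
+ * Usage sketch (hypothetical caller; "mydrv" and the bus id are made up):
+ *
+ *	cdev = get_ccwdev_by_busid(&mydrv, "0.0.1234");
+ *	if (cdev) {
+ *		...
+ *		put_device(&cdev->dev);
+ *	}
+ *
+ * where put_device() drops the reference taken by the successful search.
+ */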
+
+/************************** device driver handling ************************/
+
+/* This is the implementation of the ccw_driver class. The probe, remove
+ * and release methods are initially very similar to the device_driver
+ * implementations, with the difference that they have ccw_device
+ * arguments.
+ *
+ * A ccw driver also contains the information that is needed for
+ * device matching.
+ */
+static int
+ccw_device_probe (struct device *dev)
+{
+	struct ccw_device *cdev = to_ccwdev(dev);
+	struct ccw_driver *cdrv = to_ccwdrv(dev->driver);
+	int ret;
+
+	cdev->drv = cdrv; /* to let the driver call _set_online */
+	ccw_device_set_int_class(cdev);
+	ret = cdrv->probe ? cdrv->probe(cdev) : -ENODEV;
+	if (ret) {
+		cdev->drv = NULL;
+		cdev->private->int_class = IRQIO_CIO;
+		return ret;
+	}
+
+	return 0;
+}
+
+static int ccw_device_remove(struct device *dev)
+{
+	struct ccw_device *cdev = to_ccwdev(dev);
+	struct ccw_driver *cdrv = cdev->drv;
+	struct subchannel *sch;
+	int ret;
+
+	if (cdrv->remove)
+		cdrv->remove(cdev);
+
+	spin_lock_irq(cdev->ccwlock);
+	if (cdev->online) {
+		cdev->online = 0;
+		ret = ccw_device_offline(cdev);
+		spin_unlock_irq(cdev->ccwlock);
+		if (ret == 0)
+			wait_event(cdev->private->wait_q,
+				   dev_fsm_final_state(cdev));
+		else
+			CIO_MSG_EVENT(0, "ccw_device_offline returned %d, "
+				      "device 0.%x.%04x\n",
+				      ret, cdev->private->dev_id.ssid,
+				      cdev->private->dev_id.devno);
+		/* Give up reference obtained in ccw_device_set_online(). */
+		put_device(&cdev->dev);
+		spin_lock_irq(cdev->ccwlock);
+	}
+	ccw_device_set_timeout(cdev, 0);
+	cdev->drv = NULL;
+	cdev->private->int_class = IRQIO_CIO;
+	sch = to_subchannel(cdev->dev.parent);
+	spin_unlock_irq(cdev->ccwlock);
+	io_subchannel_quiesce(sch);
+	__disable_cmf(cdev);
+
+	return 0;
+}
+
+static void ccw_device_shutdown(struct device *dev)
+{
+	struct ccw_device *cdev;
+
+	cdev = to_ccwdev(dev);
+	if (cdev->drv && cdev->drv->shutdown)
+		cdev->drv->shutdown(cdev);
+	__disable_cmf(cdev);
+}
+
+static int ccw_device_pm_prepare(struct device *dev)
+{
+	struct ccw_device *cdev = to_ccwdev(dev);
+
+	if (work_pending(&cdev->private->todo_work))
+		return -EAGAIN;
+	/* Fail while device is being set online/offline. */
+	if (atomic_read(&cdev->private->onoff))
+		return -EAGAIN;
+
+	if (cdev->online && cdev->drv && cdev->drv->prepare)
+		return cdev->drv->prepare(cdev);
+
+	return 0;
+}
+
+static void ccw_device_pm_complete(struct device *dev)
+{
+	struct ccw_device *cdev = to_ccwdev(dev);
+
+	if (cdev->online && cdev->drv && cdev->drv->complete)
+		cdev->drv->complete(cdev);
+}
+
+static int ccw_device_pm_freeze(struct device *dev)
+{
+	struct ccw_device *cdev = to_ccwdev(dev);
+	struct subchannel *sch = to_subchannel(cdev->dev.parent);
+	int ret, cm_enabled;
+
+	/* Fail suspend while device is in transitional state. */
+	if (!dev_fsm_final_state(cdev))
+		return -EAGAIN;
+	if (!cdev->online)
+		return 0;
+	if (cdev->drv && cdev->drv->freeze) {
+		ret = cdev->drv->freeze(cdev);
+		if (ret)
+			return ret;
+	}
+
+	spin_lock_irq(sch->lock);
+	cm_enabled = cdev->private->cmb != NULL;
+	spin_unlock_irq(sch->lock);
+	if (cm_enabled) {
+		/* Keep the css from writing measurement data to memory. */
+		ret = ccw_set_cmf(cdev, 0);
+		if (ret)
+			return ret;
+	}
+	/* From here on, disallow device driver I/O. */
+	spin_lock_irq(sch->lock);
+	ret = cio_disable_subchannel(sch);
+	spin_unlock_irq(sch->lock);
+
+	return ret;
+}
+
+static int ccw_device_pm_thaw(struct device *dev)
+{
+	struct ccw_device *cdev = to_ccwdev(dev);
+	struct subchannel *sch = to_subchannel(cdev->dev.parent);
+	int ret, cm_enabled;
+
+	if (!cdev->online)
+		return 0;
+
+	spin_lock_irq(sch->lock);
+	/* Allow device driver I/O again. */
+	ret = cio_enable_subchannel(sch, (u32)(addr_t)sch);
+	cm_enabled = cdev->private->cmb != NULL;
+	spin_unlock_irq(sch->lock);
+	if (ret)
+		return ret;
+
+	if (cm_enabled) {
+		ret = ccw_set_cmf(cdev, 1);
+		if (ret)
+			return ret;
+	}
+
+	if (cdev->drv && cdev->drv->thaw)
+		ret = cdev->drv->thaw(cdev);
+
+	return ret;
+}
+
+static void __ccw_device_pm_restore(struct ccw_device *cdev)
+{
+	struct subchannel *sch = to_subchannel(cdev->dev.parent);
+
+	spin_lock_irq(sch->lock);
+	if (cio_is_console(sch->schid)) {
+		cio_enable_subchannel(sch, (u32)(addr_t)sch);
+		goto out_unlock;
+	}
+	/*
+	 * While we were sleeping, devices may have gone or become
+	 * available again. Kick re-detection.
+	 */
+	cdev->private->flags.resuming = 1;
+	cdev->private->path_new_mask = LPM_ANYPATH;
+	css_sched_sch_todo(sch, SCH_TODO_EVAL);
+	spin_unlock_irq(sch->lock);
+	css_wait_for_slow_path();
+
+	/* cdev may have been moved to a different subchannel. */
+	sch = to_subchannel(cdev->dev.parent);
+	spin_lock_irq(sch->lock);
+	if (cdev->private->state != DEV_STATE_ONLINE &&
+	    cdev->private->state != DEV_STATE_OFFLINE)
+		goto out_unlock;
+
+	ccw_device_recognition(cdev);
+	spin_unlock_irq(sch->lock);
+	wait_event(cdev->private->wait_q, dev_fsm_final_state(cdev) ||
+		   cdev->private->state == DEV_STATE_DISCONNECTED);
+	spin_lock_irq(sch->lock);
+
+out_unlock:
+	cdev->private->flags.resuming = 0;
+	spin_unlock_irq(sch->lock);
+}
+
+static int resume_handle_boxed(struct ccw_device *cdev)
+{
+	cdev->private->state = DEV_STATE_BOXED;
+	if (ccw_device_notify(cdev, CIO_BOXED) == NOTIFY_OK)
+		return 0;
+	ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
+	return -ENODEV;
+}
+
+static int resume_handle_disc(struct ccw_device *cdev)
+{
+	cdev->private->state = DEV_STATE_DISCONNECTED;
+	if (ccw_device_notify(cdev, CIO_GONE) == NOTIFY_OK)
+		return 0;
+	ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
+	return -ENODEV;
+}
+
+static int ccw_device_pm_restore(struct device *dev)
+{
+	struct ccw_device *cdev = to_ccwdev(dev);
+	struct subchannel *sch;
+	int ret = 0;
+
+	__ccw_device_pm_restore(cdev);
+	sch = to_subchannel(cdev->dev.parent);
+	spin_lock_irq(sch->lock);
+	if (cio_is_console(sch->schid))
+		goto out_restore;
+
+	/* check recognition results */
+	switch (cdev->private->state) {
+	case DEV_STATE_OFFLINE:
+	case DEV_STATE_ONLINE:
+		cdev->private->flags.donotify = 0;
+		break;
+	case DEV_STATE_BOXED:
+		ret = resume_handle_boxed(cdev);
+		if (ret)
+			goto out_unlock;
+		goto out_restore;
+	default:
+		ret = resume_handle_disc(cdev);
+		if (ret)
+			goto out_unlock;
+		goto out_restore;
+	}
+	/* check if the device type has changed */
+	if (!ccw_device_test_sense_data(cdev)) {
+		ccw_device_update_sense_data(cdev);
+		ccw_device_sched_todo(cdev, CDEV_TODO_REBIND);
+		ret = -ENODEV;
+		goto out_unlock;
+	}
+	if (!cdev->online)
+		goto out_unlock;
+
+	if (ccw_device_online(cdev)) {
+		ret = resume_handle_disc(cdev);
+		if (ret)
+			goto out_unlock;
+		goto out_restore;
+	}
+	spin_unlock_irq(sch->lock);
+	wait_event(cdev->private->wait_q, dev_fsm_final_state(cdev));
+	spin_lock_irq(sch->lock);
+
+	if (ccw_device_notify(cdev, CIO_OPER) == NOTIFY_BAD) {
+		ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
+		ret = -ENODEV;
+		goto out_unlock;
+	}
+
+	/* reenable cmf, if needed */
+	if (cdev->private->cmb) {
+		spin_unlock_irq(sch->lock);
+		ret = ccw_set_cmf(cdev, 1);
+		spin_lock_irq(sch->lock);
+		if (ret) {
+			CIO_MSG_EVENT(2, "resume: cdev 0.%x.%04x: cmf failed "
+				      "(rc=%d)\n", cdev->private->dev_id.ssid,
+				      cdev->private->dev_id.devno, ret);
+			ret = 0;
+		}
+	}
+
+out_restore:
+	spin_unlock_irq(sch->lock);
+	if (cdev->online && cdev->drv && cdev->drv->restore)
+		ret = cdev->drv->restore(cdev);
+	return ret;
+
+out_unlock:
+	spin_unlock_irq(sch->lock);
+	return ret;
+}
+
+static const struct dev_pm_ops ccw_pm_ops = {
+	.prepare = ccw_device_pm_prepare,
+	.complete = ccw_device_pm_complete,
+	.freeze = ccw_device_pm_freeze,
+	.thaw = ccw_device_pm_thaw,
+	.restore = ccw_device_pm_restore,
+};
+
+static struct bus_type ccw_bus_type = {
+	.name   = "ccw",
+	.match  = ccw_bus_match,
+	.uevent = ccw_uevent,
+	.probe  = ccw_device_probe,
+	.remove = ccw_device_remove,
+	.shutdown = ccw_device_shutdown,
+	.pm = &ccw_pm_ops,
+};
+
+/**
+ * ccw_driver_register() - register a ccw driver
+ * @cdriver: driver to be registered
+ *
+ * This function is mainly a wrapper around driver_register().
+ * Returns:
+ *   %0 on success and a negative error value on failure.
+ */
+int ccw_driver_register(struct ccw_driver *cdriver)
+{
+	struct device_driver *drv = &cdriver->driver;
+
+	drv->bus = &ccw_bus_type;
+
+	return driver_register(drv);
+}
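+
+/*
+ * Usage sketch (hypothetical driver, not part of this file): a minimal
+ * ccw driver supplies a device id table and a probe callback, e.g.:
+ *
+ *	static struct ccw_device_id mydrv_ids[] = {
+ *		{ CCW_DEVICE_DEVTYPE(0x3990, 0, 0x3390, 0) },
+ *		{ },
+ *	};
+ *
+ *	static struct ccw_driver mydrv = {
+ *		.driver	= {
+ *			.name	= "mydrv",
+ *			.owner	= THIS_MODULE,
+ *		},
+ *		.ids	= mydrv_ids,
+ *		.probe	= mydrv_probe,
+ *	};
+ *
+ *	ret = ccw_driver_register(&mydrv);
+ */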
+
+/**
+ * ccw_driver_unregister() - deregister a ccw driver
+ * @cdriver: driver to be deregistered
+ *
+ * This function is mainly a wrapper around driver_unregister().
+ */
+void ccw_driver_unregister(struct ccw_driver *cdriver)
+{
+	driver_unregister(&cdriver->driver);
+}
+
+static void ccw_device_todo(struct work_struct *work)
+{
+	struct ccw_device_private *priv;
+	struct ccw_device *cdev;
+	struct subchannel *sch;
+	enum cdev_todo todo;
+
+	priv = container_of(work, struct ccw_device_private, todo_work);
+	cdev = priv->cdev;
+	sch = to_subchannel(cdev->dev.parent);
+	/* Find out todo. */
+	spin_lock_irq(cdev->ccwlock);
+	todo = priv->todo;
+	priv->todo = CDEV_TODO_NOTHING;
+	CIO_MSG_EVENT(4, "cdev_todo: cdev=0.%x.%04x todo=%d\n",
+		      priv->dev_id.ssid, priv->dev_id.devno, todo);
+	spin_unlock_irq(cdev->ccwlock);
+	/* Perform todo. */
+	switch (todo) {
+	case CDEV_TODO_ENABLE_CMF:
+		cmf_reenable(cdev);
+		break;
+	case CDEV_TODO_REBIND:
+		ccw_device_do_unbind_bind(cdev);
+		break;
+	case CDEV_TODO_REGISTER:
+		io_subchannel_register(cdev);
+		break;
+	case CDEV_TODO_UNREG_EVAL:
+		if (!sch_is_pseudo_sch(sch))
+			css_schedule_eval(sch->schid);
+		/* fall-through */
+	case CDEV_TODO_UNREG:
+		if (sch_is_pseudo_sch(sch))
+			ccw_device_unregister(cdev);
+		else
+			ccw_device_call_sch_unregister(cdev);
+		break;
+	default:
+		break;
+	}
+	/* Release workqueue ref. */
+	put_device(&cdev->dev);
+}
+
+/**
+ * ccw_device_sched_todo - schedule ccw device operation
+ * @cdev: ccw device
+ * @todo: todo
+ *
+ * Schedule the operation identified by @todo to be performed on the slow path
+ * workqueue. Do nothing if another operation with higher priority is already
+ * scheduled. Needs to be called with ccwdev lock held.
+ */
+void ccw_device_sched_todo(struct ccw_device *cdev, enum cdev_todo todo)
+{
+	CIO_MSG_EVENT(4, "cdev_todo: sched cdev=0.%x.%04x todo=%d\n",
+		      cdev->private->dev_id.ssid, cdev->private->dev_id.devno,
+		      todo);
+	if (cdev->private->todo >= todo)
+		return;
+	cdev->private->todo = todo;
+	/* Get workqueue ref. */
+	if (!get_device(&cdev->dev))
+		return;
+	if (!queue_work(cio_work_q, &cdev->private->todo_work)) {
+		/* Already queued, release workqueue ref. */
+		put_device(&cdev->dev);
+	}
+}
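+
+/*
+ * Illustration (assuming enum cdev_todo in io_sch.h orders its values by
+ * ascending priority, with CDEV_TODO_UNREG highest): once an unregister
+ * has been scheduled, a later lower-priority request has no effect:
+ *
+ *	ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
+ *	ccw_device_sched_todo(cdev, CDEV_TODO_REBIND);
+ *
+ * where the second call is dropped by the priority check above.
+ */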
+
+/**
+ * ccw_device_siosl() - initiate logging
+ * @cdev: ccw device
+ *
+ * This function is used to invoke model-dependent logging within the channel
+ * subsystem.
+ */
+int ccw_device_siosl(struct ccw_device *cdev)
+{
+	struct subchannel *sch = to_subchannel(cdev->dev.parent);
+
+	return chsc_siosl(sch->schid);
+}
+EXPORT_SYMBOL_GPL(ccw_device_siosl);
+
+EXPORT_SYMBOL(ccw_device_set_online);
+EXPORT_SYMBOL(ccw_device_set_offline);
+EXPORT_SYMBOL(ccw_driver_register);
+EXPORT_SYMBOL(ccw_driver_unregister);
+EXPORT_SYMBOL(get_ccwdev_by_busid);
diff --git a/drivers/s390/cio/device.h b/drivers/s390/cio/device.h
new file mode 100644
index 0000000..f5c427e
--- /dev/null
+++ b/drivers/s390/cio/device.h
@@ -0,0 +1,149 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef S390_DEVICE_H
+#define S390_DEVICE_H
+
+#include <asm/ccwdev.h>
+#include <linux/atomic.h>
+#include <linux/timer.h>
+#include <linux/wait.h>
+#include <linux/notifier.h>
+#include <linux/kernel_stat.h>
+#include "io_sch.h"
+
+/*
+ * states of the device statemachine
+ */
+enum dev_state {
+	DEV_STATE_NOT_OPER,
+	DEV_STATE_SENSE_ID,
+	DEV_STATE_OFFLINE,
+	DEV_STATE_VERIFY,
+	DEV_STATE_ONLINE,
+	DEV_STATE_W4SENSE,
+	DEV_STATE_DISBAND_PGID,
+	DEV_STATE_BOXED,
+	/* states to wait for i/o completion before doing something */
+	DEV_STATE_TIMEOUT_KILL,
+	DEV_STATE_QUIESCE,
+	/* special states for devices gone not operational */
+	DEV_STATE_DISCONNECTED,
+	DEV_STATE_DISCONNECTED_SENSE_ID,
+	DEV_STATE_CMFCHANGE,
+	DEV_STATE_CMFUPDATE,
+	DEV_STATE_STEAL_LOCK,
+	/* last element! */
+	NR_DEV_STATES
+};
+
+/*
+ * asynchronous events of the device statemachine
+ */
+enum dev_event {
+	DEV_EVENT_NOTOPER,
+	DEV_EVENT_INTERRUPT,
+	DEV_EVENT_TIMEOUT,
+	DEV_EVENT_VERIFY,
+	/* last element! */
+	NR_DEV_EVENTS
+};
+
+struct ccw_device;
+
+/*
+ * action called through jumptable
+ */
+typedef void (fsm_func_t)(struct ccw_device *, enum dev_event);
+extern fsm_func_t *dev_jumptable[NR_DEV_STATES][NR_DEV_EVENTS];
+
+static inline void
+dev_fsm_event(struct ccw_device *cdev, enum dev_event dev_event)
+{
+	int state = cdev->private->state;
+
+	if (dev_event == DEV_EVENT_INTERRUPT) {
+		if (state == DEV_STATE_ONLINE)
+			inc_irq_stat(cdev->private->int_class);
+		else if (state != DEV_STATE_CMFCHANGE &&
+			 state != DEV_STATE_CMFUPDATE)
+			inc_irq_stat(IRQIO_CIO);
+	}
+	dev_jumptable[state][dev_event](cdev, dev_event);
+}
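+
+/*
+ * For example, an interrupt for a device in DEV_STATE_ONLINE is dispatched
+ * as dev_jumptable[DEV_STATE_ONLINE][DEV_EVENT_INTERRUPT], which the table
+ * in device_fsm.c resolves to ccw_device_irq().
+ */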
+
+/*
+ * Returns 1 if the device state is final.
+ */
+static inline int
+dev_fsm_final_state(struct ccw_device *cdev)
+{
+	return (cdev->private->state == DEV_STATE_NOT_OPER ||
+		cdev->private->state == DEV_STATE_OFFLINE ||
+		cdev->private->state == DEV_STATE_ONLINE ||
+		cdev->private->state == DEV_STATE_BOXED);
+}
+
+int __init io_subchannel_init(void);
+
+void io_subchannel_recog_done(struct ccw_device *cdev);
+void io_subchannel_init_config(struct subchannel *sch);
+
+int ccw_device_cancel_halt_clear(struct ccw_device *);
+
+int ccw_device_is_orphan(struct ccw_device *);
+
+void ccw_device_recognition(struct ccw_device *);
+int ccw_device_online(struct ccw_device *);
+int ccw_device_offline(struct ccw_device *);
+void ccw_device_update_sense_data(struct ccw_device *);
+int ccw_device_test_sense_data(struct ccw_device *);
+void ccw_device_schedule_sch_unregister(struct ccw_device *);
+int ccw_purge_blacklisted(void);
+void ccw_device_sched_todo(struct ccw_device *cdev, enum cdev_todo todo);
+struct ccw_device *get_ccwdev_by_dev_id(struct ccw_dev_id *dev_id);
+
+/* Function prototypes for device status and basic sense stuff. */
+void ccw_device_accumulate_irb(struct ccw_device *, struct irb *);
+void ccw_device_accumulate_basic_sense(struct ccw_device *, struct irb *);
+int ccw_device_accumulate_and_sense(struct ccw_device *, struct irb *);
+int ccw_device_do_sense(struct ccw_device *, struct irb *);
+
+/* Function prototype for internal request handling. */
+int lpm_adjust(int lpm, int mask);
+void ccw_request_start(struct ccw_device *);
+int ccw_request_cancel(struct ccw_device *cdev);
+void ccw_request_handler(struct ccw_device *cdev);
+void ccw_request_timeout(struct ccw_device *cdev);
+void ccw_request_notoper(struct ccw_device *cdev);
+
+/* Function prototypes for sense id stuff. */
+void ccw_device_sense_id_start(struct ccw_device *);
+void ccw_device_sense_id_done(struct ccw_device *, int);
+
+/* Function prototypes for path grouping stuff. */
+void ccw_device_verify_start(struct ccw_device *);
+void ccw_device_verify_done(struct ccw_device *, int);
+
+void ccw_device_disband_start(struct ccw_device *);
+void ccw_device_disband_done(struct ccw_device *, int);
+
+int ccw_device_stlck(struct ccw_device *);
+
+/* Helper function for machine check handling. */
+void ccw_device_trigger_reprobe(struct ccw_device *);
+void ccw_device_kill_io(struct ccw_device *);
+int ccw_device_notify(struct ccw_device *, int);
+void ccw_device_set_disconnected(struct ccw_device *cdev);
+void ccw_device_set_notoper(struct ccw_device *cdev);
+
+void ccw_device_timeout(struct timer_list *t);
+void ccw_device_set_timeout(struct ccw_device *, int);
+void ccw_device_schedule_recovery(void);
+
+/* Channel measurement facility related */
+void retry_set_schib(struct ccw_device *cdev);
+void cmf_retry_copy_block(struct ccw_device *);
+int cmf_reenable(struct ccw_device *);
+void cmf_reactivate(void);
+int ccw_set_cmf(struct ccw_device *cdev, int enable);
+extern struct device_attribute dev_attr_cmb_enable;
+#endif
diff --git a/drivers/s390/cio/device_fsm.c b/drivers/s390/cio/device_fsm.c
new file mode 100644
index 0000000..9169af7
--- /dev/null
+++ b/drivers/s390/cio/device_fsm.c
@@ -0,0 +1,1125 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * finite state machine for device handling
+ *
+ *    Copyright IBM Corp. 2002, 2008
+ *    Author(s): Cornelia Huck (cornelia.huck@de.ibm.com)
+ *		 Martin Schwidefsky (schwidefsky@de.ibm.com)
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/jiffies.h>
+#include <linux/string.h>
+
+#include <asm/ccwdev.h>
+#include <asm/cio.h>
+#include <asm/chpid.h>
+
+#include "cio.h"
+#include "cio_debug.h"
+#include "css.h"
+#include "device.h"
+#include "chsc.h"
+#include "ioasm.h"
+#include "chp.h"
+
+static int timeout_log_enabled;
+
+static int __init ccw_timeout_log_setup(char *unused)
+{
+	timeout_log_enabled = 1;
+	return 1;
+}
+
+__setup("ccw_timeout_log", ccw_timeout_log_setup);
+
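+/*
+ * Dump the state of a ccw device on timeout; enabled by passing
+ * "ccw_timeout_log" on the kernel command line (see the __setup()
+ * handler above).
+ */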
+static void ccw_timeout_log(struct ccw_device *cdev)
+{
+	struct schib schib;
+	struct subchannel *sch;
+	struct io_subchannel_private *private;
+	union orb *orb;
+	int cc;
+
+	sch = to_subchannel(cdev->dev.parent);
+	private = to_io_private(sch);
+	orb = &private->orb;
+	cc = stsch(sch->schid, &schib);
+
+	printk(KERN_WARNING "cio: ccw device timeout occurred at %llx, "
+	       "device information:\n", get_tod_clock());
+	printk(KERN_WARNING "cio: orb:\n");
+	print_hex_dump(KERN_WARNING, "cio:  ", DUMP_PREFIX_NONE, 16, 1,
+		       orb, sizeof(*orb), 0);
+	printk(KERN_WARNING "cio: ccw device bus id: %s\n",
+	       dev_name(&cdev->dev));
+	printk(KERN_WARNING "cio: subchannel bus id: %s\n",
+	       dev_name(&sch->dev));
+	printk(KERN_WARNING "cio: subchannel lpm: %02x, opm: %02x, "
+	       "vpm: %02x\n", sch->lpm, sch->opm, sch->vpm);
+
+	if (orb->tm.b) {
+		printk(KERN_WARNING "cio: orb indicates transport mode\n");
+		printk(KERN_WARNING "cio: last tcw:\n");
+		print_hex_dump(KERN_WARNING, "cio:  ", DUMP_PREFIX_NONE, 16, 1,
+			       (void *)(addr_t)orb->tm.tcw,
+			       sizeof(struct tcw), 0);
+	} else {
+		printk(KERN_WARNING "cio: orb indicates command mode\n");
+		if ((void *)(addr_t)orb->cmd.cpa == &private->sense_ccw ||
+		    (void *)(addr_t)orb->cmd.cpa == cdev->private->iccws)
+			printk(KERN_WARNING "cio: last channel program "
+			       "(intern):\n");
+		else
+			printk(KERN_WARNING "cio: last channel program:\n");
+
+		print_hex_dump(KERN_WARNING, "cio:  ", DUMP_PREFIX_NONE, 16, 1,
+			       (void *)(addr_t)orb->cmd.cpa,
+			       sizeof(struct ccw1), 0);
+	}
+	printk(KERN_WARNING "cio: ccw device state: %d\n",
+	       cdev->private->state);
+	printk(KERN_WARNING "cio: store subchannel returned: cc=%d\n", cc);
+	printk(KERN_WARNING "cio: schib:\n");
+	print_hex_dump(KERN_WARNING, "cio:  ", DUMP_PREFIX_NONE, 16, 1,
+		       &schib, sizeof(schib), 0);
+	printk(KERN_WARNING "cio: ccw device flags:\n");
+	print_hex_dump(KERN_WARNING, "cio:  ", DUMP_PREFIX_NONE, 16, 1,
+		       &cdev->private->flags, sizeof(cdev->private->flags), 0);
+}
+
+/*
+ * Timeout function. It just triggers a DEV_EVENT_TIMEOUT.
+ */
+void
+ccw_device_timeout(struct timer_list *t)
+{
+	struct ccw_device_private *priv = from_timer(priv, t, timer);
+	struct ccw_device *cdev = priv->cdev;
+
+	spin_lock_irq(cdev->ccwlock);
+	if (timeout_log_enabled)
+		ccw_timeout_log(cdev);
+	dev_fsm_event(cdev, DEV_EVENT_TIMEOUT);
+	spin_unlock_irq(cdev->ccwlock);
+}
+
+/*
+ * Set timeout
+ */
+void
+ccw_device_set_timeout(struct ccw_device *cdev, int expires)
+{
+	if (expires == 0) {
+		del_timer(&cdev->private->timer);
+		return;
+	}
+	if (timer_pending(&cdev->private->timer)) {
+		if (mod_timer(&cdev->private->timer, jiffies + expires))
+			return;
+	}
+	cdev->private->timer.expires = jiffies + expires;
+	add_timer(&cdev->private->timer);
+}
+
+int
+ccw_device_cancel_halt_clear(struct ccw_device *cdev)
+{
+	struct subchannel *sch;
+	int ret;
+
+	sch = to_subchannel(cdev->dev.parent);
+	ret = cio_cancel_halt_clear(sch, &cdev->private->iretry);
+
+	if (ret == -EIO)
+		CIO_MSG_EVENT(0, "0.%x.%04x: could not stop I/O\n",
+			      cdev->private->dev_id.ssid,
+			      cdev->private->dev_id.devno);
+
+	return ret;
+}
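+
+/*
+ * Callers drive the cancel/halt/clear escalation in a retry loop, in the
+ * style of io_subchannel_quiesce() in device.c (a sketch):
+ *
+ *	cdev->private->iretry = 255;
+ *	ret = ccw_device_cancel_halt_clear(cdev);
+ *	if (ret == -EBUSY)
+ *		re-arm a timer and retry on the next timeout event
+ */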
+
+void ccw_device_update_sense_data(struct ccw_device *cdev)
+{
+	memset(&cdev->id, 0, sizeof(cdev->id));
+	cdev->id.cu_type   = cdev->private->senseid.cu_type;
+	cdev->id.cu_model  = cdev->private->senseid.cu_model;
+	cdev->id.dev_type  = cdev->private->senseid.dev_type;
+	cdev->id.dev_model = cdev->private->senseid.dev_model;
+}
+
+int ccw_device_test_sense_data(struct ccw_device *cdev)
+{
+	return cdev->id.cu_type == cdev->private->senseid.cu_type &&
+		cdev->id.cu_model == cdev->private->senseid.cu_model &&
+		cdev->id.dev_type == cdev->private->senseid.dev_type &&
+		cdev->id.dev_model == cdev->private->senseid.dev_model;
+}
+
+/*
+ * The machine won't give us any notification by machine check if a chpid has
+ * been varied online on the SE so we have to find out by magic (i. e. driving
+ * the channel subsystem to device selection and updating our path masks).
+ */
+static void
+__recover_lost_chpids(struct subchannel *sch, int old_lpm)
+{
+	int mask, i;
+	struct chp_id chpid;
+
+	chp_id_init(&chpid);
+	for (i = 0; i<8; i++) {
+		mask = 0x80 >> i;
+		if (!(sch->lpm & mask))
+			continue;
+		if (old_lpm & mask)
+			continue;
+		chpid.id = sch->schib.pmcw.chpid[i];
+		if (!chp_is_registered(chpid))
+			css_schedule_eval_all();
+	}
+}
+
+/*
+ * Stop device recognition.
+ */
+static void
+ccw_device_recog_done(struct ccw_device *cdev, int state)
+{
+	struct subchannel *sch;
+	int old_lpm;
+
+	sch = to_subchannel(cdev->dev.parent);
+
+	if (cio_disable_subchannel(sch))
+		state = DEV_STATE_NOT_OPER;
+	/*
+	 * Now that we tried recognition, we have performed device selection
+	 * through ssch() and the path information is up to date.
+	 */
+	old_lpm = sch->lpm;
+
+	/* Check since device may again have become not operational. */
+	if (cio_update_schib(sch))
+		state = DEV_STATE_NOT_OPER;
+	else
+		sch->lpm = sch->schib.pmcw.pam & sch->opm;
+
+	if (cdev->private->state == DEV_STATE_DISCONNECTED_SENSE_ID)
+		/* Force reprobe on all chpids. */
+		old_lpm = 0;
+	if (sch->lpm != old_lpm)
+		__recover_lost_chpids(sch, old_lpm);
+	if (cdev->private->state == DEV_STATE_DISCONNECTED_SENSE_ID &&
+	    (state == DEV_STATE_NOT_OPER || state == DEV_STATE_BOXED)) {
+		cdev->private->flags.recog_done = 1;
+		cdev->private->state = DEV_STATE_DISCONNECTED;
+		wake_up(&cdev->private->wait_q);
+		return;
+	}
+	if (cdev->private->flags.resuming) {
+		cdev->private->state = state;
+		cdev->private->flags.recog_done = 1;
+		wake_up(&cdev->private->wait_q);
+		return;
+	}
+	switch (state) {
+	case DEV_STATE_NOT_OPER:
+		break;
+	case DEV_STATE_OFFLINE:
+		if (!cdev->online) {
+			ccw_device_update_sense_data(cdev);
+			break;
+		}
+		cdev->private->state = DEV_STATE_OFFLINE;
+		cdev->private->flags.recog_done = 1;
+		if (ccw_device_test_sense_data(cdev)) {
+			cdev->private->flags.donotify = 1;
+			ccw_device_online(cdev);
+			wake_up(&cdev->private->wait_q);
+		} else {
+			ccw_device_update_sense_data(cdev);
+			ccw_device_sched_todo(cdev, CDEV_TODO_REBIND);
+		}
+		return;
+	case DEV_STATE_BOXED:
+		if (cdev->id.cu_type != 0) { /* device was recognized before */
+			cdev->private->flags.recog_done = 1;
+			cdev->private->state = DEV_STATE_BOXED;
+			wake_up(&cdev->private->wait_q);
+			return;
+		}
+		break;
+	}
+	cdev->private->state = state;
+	io_subchannel_recog_done(cdev);
+	wake_up(&cdev->private->wait_q);
+}
+
+/*
+ * Function called from device_id.c after sense id has completed.
+ */
+void
+ccw_device_sense_id_done(struct ccw_device *cdev, int err)
+{
+	switch (err) {
+	case 0:
+		ccw_device_recog_done(cdev, DEV_STATE_OFFLINE);
+		break;
+	case -ETIME:		/* Sense id stopped by timeout. */
+		ccw_device_recog_done(cdev, DEV_STATE_BOXED);
+		break;
+	default:
+		ccw_device_recog_done(cdev, DEV_STATE_NOT_OPER);
+		break;
+	}
+}
+
+/**
+ * ccw_device_notify() - inform the device's driver about an event
+ * @cdev: device for which an event occurred
+ * @event: event that occurred
+ *
+ * Returns:
+ *   -%EINVAL if the device is offline or has no driver.
+ *   -%EOPNOTSUPP if the device's driver has no notifier registered.
+ *   %NOTIFY_OK if the driver wants to keep the device.
+ *   %NOTIFY_BAD if the driver doesn't want to keep the device.
+ */
+int ccw_device_notify(struct ccw_device *cdev, int event)
+{
+	int ret = -EINVAL;
+
+	if (!cdev->drv)
+		goto out;
+	if (!cdev->online)
+		goto out;
+	CIO_MSG_EVENT(2, "notify called for 0.%x.%04x, event=%d\n",
+		      cdev->private->dev_id.ssid, cdev->private->dev_id.devno,
+		      event);
+	if (!cdev->drv->notify) {
+		ret = -EOPNOTSUPP;
+		goto out;
+	}
+	if (cdev->drv->notify(cdev, event))
+		ret = NOTIFY_OK;
+	else
+		ret = NOTIFY_BAD;
+out:
+	return ret;
+}
+
+static void ccw_device_oper_notify(struct ccw_device *cdev)
+{
+	struct subchannel *sch = to_subchannel(cdev->dev.parent);
+
+	if (ccw_device_notify(cdev, CIO_OPER) == NOTIFY_OK) {
+		/* Reenable channel measurements, if needed. */
+		ccw_device_sched_todo(cdev, CDEV_TODO_ENABLE_CMF);
+		/* Save indication for new paths. */
+		cdev->private->path_new_mask = sch->vpm;
+		return;
+	}
+	/* Driver doesn't want device back. */
+	ccw_device_set_notoper(cdev);
+	ccw_device_sched_todo(cdev, CDEV_TODO_REBIND);
+}
+
+/*
+ * Finished with online/offline processing.
+ */
+static void
+ccw_device_done(struct ccw_device *cdev, int state)
+{
+	struct subchannel *sch;
+
+	sch = to_subchannel(cdev->dev.parent);
+
+	ccw_device_set_timeout(cdev, 0);
+
+	if (state != DEV_STATE_ONLINE)
+		cio_disable_subchannel(sch);
+
+	/* Reset device status. */
+	memset(&cdev->private->irb, 0, sizeof(struct irb));
+
+	cdev->private->state = state;
+
+	switch (state) {
+	case DEV_STATE_BOXED:
+		CIO_MSG_EVENT(0, "Boxed device %04x on subchannel %04x\n",
+			      cdev->private->dev_id.devno, sch->schid.sch_no);
+		if (cdev->online &&
+		    ccw_device_notify(cdev, CIO_BOXED) != NOTIFY_OK)
+			ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
+		cdev->private->flags.donotify = 0;
+		break;
+	case DEV_STATE_NOT_OPER:
+		CIO_MSG_EVENT(0, "Device %04x gone on subchannel %04x\n",
+			      cdev->private->dev_id.devno, sch->schid.sch_no);
+		if (ccw_device_notify(cdev, CIO_GONE) != NOTIFY_OK)
+			ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
+		else
+			ccw_device_set_disconnected(cdev);
+		cdev->private->flags.donotify = 0;
+		break;
+	case DEV_STATE_DISCONNECTED:
+		CIO_MSG_EVENT(0, "Disconnected device %04x on subchannel "
+			      "%04x\n", cdev->private->dev_id.devno,
+			      sch->schid.sch_no);
+		if (ccw_device_notify(cdev, CIO_NO_PATH) != NOTIFY_OK) {
+			cdev->private->state = DEV_STATE_NOT_OPER;
+			ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
+		} else
+			ccw_device_set_disconnected(cdev);
+		cdev->private->flags.donotify = 0;
+		break;
+	default:
+		break;
+	}
+
+	if (cdev->private->flags.donotify) {
+		cdev->private->flags.donotify = 0;
+		ccw_device_oper_notify(cdev);
+	}
+	wake_up(&cdev->private->wait_q);
+}
+
+/*
+ * Start device recognition.
+ */
+void ccw_device_recognition(struct ccw_device *cdev)
+{
+	struct subchannel *sch = to_subchannel(cdev->dev.parent);
+
+	/*
+	 * We used to start here with a sense pgid to find out whether a device
+	 * is locked by someone else. Unfortunately, the sense pgid command
+	 * code has other meanings on devices predating the path grouping
+	 * algorithm, so we start with sense id and box the device after a
+	 * timeout (or if sense pgid during path verification detects the device
+	 * is locked, as may happen on newer devices).
+	 */
+	cdev->private->flags.recog_done = 0;
+	cdev->private->state = DEV_STATE_SENSE_ID;
+	if (cio_enable_subchannel(sch, (u32) (addr_t) sch)) {
+		ccw_device_recog_done(cdev, DEV_STATE_NOT_OPER);
+		return;
+	}
+	ccw_device_sense_id_start(cdev);
+}
+
+/*
+ * Handle events for states that use the ccw request infrastructure.
+ */
+static void ccw_device_request_event(struct ccw_device *cdev, enum dev_event e)
+{
+	switch (e) {
+	case DEV_EVENT_NOTOPER:
+		ccw_request_notoper(cdev);
+		break;
+	case DEV_EVENT_INTERRUPT:
+		ccw_request_handler(cdev);
+		break;
+	case DEV_EVENT_TIMEOUT:
+		ccw_request_timeout(cdev);
+		break;
+	default:
+		break;
+	}
+}
+
+static void ccw_device_report_path_events(struct ccw_device *cdev)
+{
+	struct subchannel *sch = to_subchannel(cdev->dev.parent);
+	int path_event[8];
+	int chp, mask;
+
+	for (chp = 0, mask = 0x80; chp < 8; chp++, mask >>= 1) {
+		path_event[chp] = PE_NONE;
+		if (mask & cdev->private->path_gone_mask & ~(sch->vpm))
+			path_event[chp] |= PE_PATH_GONE;
+		if (mask & cdev->private->path_new_mask & sch->vpm)
+			path_event[chp] |= PE_PATH_AVAILABLE;
+		if (mask & cdev->private->pgid_reset_mask & sch->vpm)
+			path_event[chp] |= PE_PATHGROUP_ESTABLISHED;
+	}
+	if (cdev->online && cdev->drv->path_event)
+		cdev->drv->path_event(cdev, path_event);
+}
+
+static void ccw_device_reset_path_events(struct ccw_device *cdev)
+{
+	cdev->private->path_gone_mask = 0;
+	cdev->private->path_new_mask = 0;
+	cdev->private->pgid_reset_mask = 0;
+}
+
+static void create_fake_irb(struct irb *irb, int type)
+{
+	memset(irb, 0, sizeof(*irb));
+	if (type == FAKE_CMD_IRB) {
+		struct cmd_scsw *scsw = &irb->scsw.cmd;
+		scsw->cc = 1;
+		scsw->fctl = SCSW_FCTL_START_FUNC;
+		scsw->actl = SCSW_ACTL_START_PEND;
+		scsw->stctl = SCSW_STCTL_STATUS_PEND;
+	} else if (type == FAKE_TM_IRB) {
+		struct tm_scsw *scsw = &irb->scsw.tm;
+		scsw->x = 1;
+		scsw->cc = 1;
+		scsw->fctl = SCSW_FCTL_START_FUNC;
+		scsw->actl = SCSW_ACTL_START_PEND;
+		scsw->stctl = SCSW_STCTL_STATUS_PEND;
+	}
+}
+
+static void ccw_device_handle_broken_paths(struct ccw_device *cdev)
+{
+	struct subchannel *sch = to_subchannel(cdev->dev.parent);
+	u8 broken_paths = (sch->schib.pmcw.pam & sch->opm) ^ sch->vpm;
+
+	if (broken_paths && (cdev->private->path_broken_mask != broken_paths))
+		ccw_device_schedule_recovery();
+
+	cdev->private->path_broken_mask = broken_paths;
+}
+
+void ccw_device_verify_done(struct ccw_device *cdev, int err)
+{
+	struct subchannel *sch;
+
+	sch = to_subchannel(cdev->dev.parent);
+	/* Update schib - pom may have changed. */
+	if (cio_update_schib(sch)) {
+		err = -ENODEV;
+		goto callback;
+	}
+	/* Update lpm with verified path mask. */
+	sch->lpm = sch->vpm;
+	/* Repeat path verification? */
+	if (cdev->private->flags.doverify) {
+		ccw_device_verify_start(cdev);
+		return;
+	}
+callback:
+	switch (err) {
+	case 0:
+		ccw_device_done(cdev, DEV_STATE_ONLINE);
+		/* Deliver fake irb to device driver, if needed. */
+		if (cdev->private->flags.fake_irb) {
+			create_fake_irb(&cdev->private->irb,
+					cdev->private->flags.fake_irb);
+			cdev->private->flags.fake_irb = 0;
+			if (cdev->handler)
+				cdev->handler(cdev, cdev->private->intparm,
+					      &cdev->private->irb);
+			memset(&cdev->private->irb, 0, sizeof(struct irb));
+		}
+		ccw_device_report_path_events(cdev);
+		ccw_device_handle_broken_paths(cdev);
+		break;
+	case -ETIME:
+	case -EUSERS:
+		/* Reset oper notify indication after verify error. */
+		cdev->private->flags.donotify = 0;
+		ccw_device_done(cdev, DEV_STATE_BOXED);
+		break;
+	case -EACCES:
+		/* Reset oper notify indication after verify error. */
+		cdev->private->flags.donotify = 0;
+		ccw_device_done(cdev, DEV_STATE_DISCONNECTED);
+		break;
+	default:
+		/* Reset oper notify indication after verify error. */
+		cdev->private->flags.donotify = 0;
+		ccw_device_done(cdev, DEV_STATE_NOT_OPER);
+		break;
+	}
+	ccw_device_reset_path_events(cdev);
+}
+
+/*
+ * Get device online.
+ */
+int
+ccw_device_online(struct ccw_device *cdev)
+{
+	struct subchannel *sch;
+	int ret;
+
+	if ((cdev->private->state != DEV_STATE_OFFLINE) &&
+	    (cdev->private->state != DEV_STATE_BOXED))
+		return -EINVAL;
+	sch = to_subchannel(cdev->dev.parent);
+	ret = cio_enable_subchannel(sch, (u32)(addr_t)sch);
+	if (ret != 0) {
+		/* Couldn't enable the subchannel for i/o. Sick device. */
+		if (ret == -ENODEV)
+			dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
+		return ret;
+	}
+	/* Start initial path verification. */
+	cdev->private->state = DEV_STATE_VERIFY;
+	ccw_device_verify_start(cdev);
+	return 0;
+}
+
+void
+ccw_device_disband_done(struct ccw_device *cdev, int err)
+{
+	switch (err) {
+	case 0:
+		ccw_device_done(cdev, DEV_STATE_OFFLINE);
+		break;
+	case -ETIME:
+		ccw_device_done(cdev, DEV_STATE_BOXED);
+		break;
+	default:
+		cdev->private->flags.donotify = 0;
+		ccw_device_done(cdev, DEV_STATE_NOT_OPER);
+		break;
+	}
+}
+
+/*
+ * Shutdown device.
+ */
+int
+ccw_device_offline(struct ccw_device *cdev)
+{
+	struct subchannel *sch;
+
+	/* Allow ccw_device_offline while disconnected. */
+	if (cdev->private->state == DEV_STATE_DISCONNECTED ||
+	    cdev->private->state == DEV_STATE_NOT_OPER) {
+		cdev->private->flags.donotify = 0;
+		ccw_device_done(cdev, DEV_STATE_NOT_OPER);
+		return 0;
+	}
+	if (cdev->private->state == DEV_STATE_BOXED) {
+		ccw_device_done(cdev, DEV_STATE_BOXED);
+		return 0;
+	}
+	if (ccw_device_is_orphan(cdev)) {
+		ccw_device_done(cdev, DEV_STATE_OFFLINE);
+		return 0;
+	}
+	sch = to_subchannel(cdev->dev.parent);
+	if (cio_update_schib(sch))
+		return -ENODEV;
+	if (scsw_actl(&sch->schib.scsw) != 0)
+		return -EBUSY;
+	if (cdev->private->state != DEV_STATE_ONLINE)
+		return -EINVAL;
+	/* Are we doing path grouping? */
+	if (!cdev->private->flags.pgroup) {
+		/* No, set state offline immediately. */
+		ccw_device_done(cdev, DEV_STATE_OFFLINE);
+		return 0;
+	}
+	/* Start Set Path Group commands. */
+	cdev->private->state = DEV_STATE_DISBAND_PGID;
+	ccw_device_disband_start(cdev);
+	return 0;
+}
+
+/*
+ * Handle not operational event in non-special state.
+ */
+static void ccw_device_generic_notoper(struct ccw_device *cdev,
+				       enum dev_event dev_event)
+{
+	if (ccw_device_notify(cdev, CIO_GONE) != NOTIFY_OK)
+		ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
+	else
+		ccw_device_set_disconnected(cdev);
+}
+
+/*
+ * Handle path verification event in offline state.
+ */
+static void ccw_device_offline_verify(struct ccw_device *cdev,
+				      enum dev_event dev_event)
+{
+	struct subchannel *sch = to_subchannel(cdev->dev.parent);
+
+	css_schedule_eval(sch->schid);
+}
+
+/*
+ * Handle path verification event.
+ */
+static void
+ccw_device_online_verify(struct ccw_device *cdev, enum dev_event dev_event)
+{
+	struct subchannel *sch;
+
+	if (cdev->private->state == DEV_STATE_W4SENSE) {
+		cdev->private->flags.doverify = 1;
+		return;
+	}
+	sch = to_subchannel(cdev->dev.parent);
+	/*
+	 * Since we might not just be coming from an interrupt from the
+	 * subchannel we have to update the schib.
+	 */
+	if (cio_update_schib(sch)) {
+		ccw_device_verify_done(cdev, -ENODEV);
+		return;
+	}
+
+	if (scsw_actl(&sch->schib.scsw) != 0 ||
+	    (scsw_stctl(&sch->schib.scsw) & SCSW_STCTL_STATUS_PEND) ||
+	    (scsw_stctl(&cdev->private->irb.scsw) & SCSW_STCTL_STATUS_PEND)) {
+		/*
+		 * No final status yet or final status not yet delivered
+		 * to the device driver. Can't do path verification now,
+		 * delay until the final status has been delivered.
+		 */
+		cdev->private->flags.doverify = 1;
+		return;
+	}
+	/* Device is idle, we can do the path verification. */
+	cdev->private->state = DEV_STATE_VERIFY;
+	ccw_device_verify_start(cdev);
+}
+
+/*
+ * Handle path verification event in boxed state.
+ */
+static void ccw_device_boxed_verify(struct ccw_device *cdev,
+				    enum dev_event dev_event)
+{
+	struct subchannel *sch = to_subchannel(cdev->dev.parent);
+
+	if (cdev->online) {
+		if (cio_enable_subchannel(sch, (u32) (addr_t) sch))
+			ccw_device_done(cdev, DEV_STATE_NOT_OPER);
+		else
+			ccw_device_online_verify(cdev, dev_event);
+	} else
+		css_schedule_eval(sch->schid);
+}
+
+/*
+ * Pass interrupt to device driver.
+ */
+static int ccw_device_call_handler(struct ccw_device *cdev)
+{
+	unsigned int stctl;
+	int ending_status;
+
+	/*
+	 * We call the device action handler if:
+	 *  - we received ending status
+	 *  - the action handler requested to see all interrupts
+	 *  - we received an intermediate status
+	 *  - fast notification was requested (primary status)
+	 *  - unsolicited interrupts
+	 */
+	stctl = scsw_stctl(&cdev->private->irb.scsw);
+	ending_status = (stctl & SCSW_STCTL_SEC_STATUS) ||
+		(stctl == (SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND)) ||
+		(stctl == SCSW_STCTL_STATUS_PEND);
+	if (!ending_status &&
+	    !cdev->private->options.repall &&
+	    !(stctl & SCSW_STCTL_INTER_STATUS) &&
+	    !(cdev->private->options.fast &&
+	      (stctl & SCSW_STCTL_PRIM_STATUS)))
+		return 0;
+
+	if (ending_status)
+		ccw_device_set_timeout(cdev, 0);
+
+	if (cdev->handler)
+		cdev->handler(cdev, cdev->private->intparm,
+			      &cdev->private->irb);
+
+	memset(&cdev->private->irb, 0, sizeof(struct irb));
+	return 1;
+}
+
+/*
+ * Got an interrupt for a normal io (state online).
+ */
+static void
+ccw_device_irq(struct ccw_device *cdev, enum dev_event dev_event)
+{
+	struct irb *irb;
+	int is_cmd;
+
+	irb = this_cpu_ptr(&cio_irb);
+	is_cmd = !scsw_is_tm(&irb->scsw);
+	/* Check for unsolicited interrupt. */
+	if (!scsw_is_solicited(&irb->scsw)) {
+		if (is_cmd && (irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK) &&
+		    !irb->esw.esw0.erw.cons) {
+			/* Unit check but no sense data. Need basic sense. */
+			if (ccw_device_do_sense(cdev, irb) != 0)
+				goto call_handler_unsol;
+			memcpy(&cdev->private->irb, irb, sizeof(struct irb));
+			cdev->private->state = DEV_STATE_W4SENSE;
+			cdev->private->intparm = 0;
+			return;
+		}
+call_handler_unsol:
+		if (cdev->handler)
+			cdev->handler (cdev, 0, irb);
+		if (cdev->private->flags.doverify)
+			ccw_device_online_verify(cdev, 0);
+		return;
+	}
+	/* Accumulate status and find out if a basic sense is needed. */
+	ccw_device_accumulate_irb(cdev, irb);
+	if (is_cmd && cdev->private->flags.dosense) {
+		if (ccw_device_do_sense(cdev, irb) == 0) {
+			cdev->private->state = DEV_STATE_W4SENSE;
+		}
+		return;
+	}
+	/* Call the handler. */
+	if (ccw_device_call_handler(cdev) && cdev->private->flags.doverify)
+		/* Start delayed path verification. */
+		ccw_device_online_verify(cdev, 0);
+}
+
+/*
+ * Got a timeout in online state.
+ */
+static void
+ccw_device_online_timeout(struct ccw_device *cdev, enum dev_event dev_event)
+{
+	int ret;
+
+	ccw_device_set_timeout(cdev, 0);
+	cdev->private->iretry = 255;
+	cdev->private->async_kill_io_rc = -ETIMEDOUT;
+	ret = ccw_device_cancel_halt_clear(cdev);
+	if (ret == -EBUSY) {
+		ccw_device_set_timeout(cdev, 3*HZ);
+		cdev->private->state = DEV_STATE_TIMEOUT_KILL;
+		return;
+	}
+	if (ret)
+		dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
+	else if (cdev->handler)
+		cdev->handler(cdev, cdev->private->intparm,
+			      ERR_PTR(-ETIMEDOUT));
+}
+
+/*
+ * Got an interrupt for a basic sense.
+ */
+static void
+ccw_device_w4sense(struct ccw_device *cdev, enum dev_event dev_event)
+{
+	struct irb *irb;
+
+	irb = this_cpu_ptr(&cio_irb);
+	/* Check for unsolicited interrupt. */
+	if (scsw_stctl(&irb->scsw) ==
+	    (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) {
+		if (scsw_cc(&irb->scsw) == 1)
+			/* Basic sense hasn't started. Try again. */
+			ccw_device_do_sense(cdev, irb);
+		else {
+			CIO_MSG_EVENT(0, "0.%x.%04x: unsolicited "
+				      "interrupt during w4sense...\n",
+				      cdev->private->dev_id.ssid,
+				      cdev->private->dev_id.devno);
+			if (cdev->handler)
+				cdev->handler (cdev, 0, irb);
+		}
+		return;
+	}
+	/*
+	 * Check if a halt or clear has been issued in the meantime. If so,
+	 * only deliver the halt/clear interrupt to the device driver as if it
+	 * had killed the original request.
+	 */
+	if (scsw_fctl(&irb->scsw) &
+	    (SCSW_FCTL_CLEAR_FUNC | SCSW_FCTL_HALT_FUNC)) {
+		cdev->private->flags.dosense = 0;
+		memset(&cdev->private->irb, 0, sizeof(struct irb));
+		ccw_device_accumulate_irb(cdev, irb);
+		goto call_handler;
+	}
+	/* Add basic sense info to irb. */
+	ccw_device_accumulate_basic_sense(cdev, irb);
+	if (cdev->private->flags.dosense) {
+		/* Another basic sense is needed. */
+		ccw_device_do_sense(cdev, irb);
+		return;
+	}
+call_handler:
+	cdev->private->state = DEV_STATE_ONLINE;
+	/* In case sensing interfered with setting the device online */
+	wake_up(&cdev->private->wait_q);
+	/* Call the handler. */
+	if (ccw_device_call_handler(cdev) && cdev->private->flags.doverify)
+		/* Start delayed path verification. */
+		ccw_device_online_verify(cdev, 0);
+}
+
+static void
+ccw_device_killing_irq(struct ccw_device *cdev, enum dev_event dev_event)
+{
+	ccw_device_set_timeout(cdev, 0);
+	/* Start delayed path verification. */
+	ccw_device_online_verify(cdev, 0);
+	/* OK, i/o is dead now. Call interrupt handler. */
+	if (cdev->handler)
+		cdev->handler(cdev, cdev->private->intparm,
+			      ERR_PTR(cdev->private->async_kill_io_rc));
+}
+
+static void
+ccw_device_killing_timeout(struct ccw_device *cdev, enum dev_event dev_event)
+{
+	int ret;
+
+	ret = ccw_device_cancel_halt_clear(cdev);
+	if (ret == -EBUSY) {
+		ccw_device_set_timeout(cdev, 3*HZ);
+		return;
+	}
+	/* Start delayed path verification. */
+	ccw_device_online_verify(cdev, 0);
+	if (cdev->handler)
+		cdev->handler(cdev, cdev->private->intparm,
+			      ERR_PTR(cdev->private->async_kill_io_rc));
+}
+
+void ccw_device_kill_io(struct ccw_device *cdev)
+{
+	int ret;
+
+	ccw_device_set_timeout(cdev, 0);
+	cdev->private->iretry = 255;
+	cdev->private->async_kill_io_rc = -EIO;
+	ret = ccw_device_cancel_halt_clear(cdev);
+	if (ret == -EBUSY) {
+		ccw_device_set_timeout(cdev, 3*HZ);
+		cdev->private->state = DEV_STATE_TIMEOUT_KILL;
+		return;
+	}
+	/* Start delayed path verification. */
+	ccw_device_online_verify(cdev, 0);
+	if (cdev->handler)
+		cdev->handler(cdev, cdev->private->intparm,
+			      ERR_PTR(-EIO));
+}
+
+static void
+ccw_device_delay_verify(struct ccw_device *cdev, enum dev_event dev_event)
+{
+	/* Start verification after current task finished. */
+	cdev->private->flags.doverify = 1;
+}
+
+static void
+ccw_device_start_id(struct ccw_device *cdev, enum dev_event dev_event)
+{
+	struct subchannel *sch;
+
+	sch = to_subchannel(cdev->dev.parent);
+	if (cio_enable_subchannel(sch, (u32)(addr_t)sch) != 0)
+		/* Couldn't enable the subchannel for i/o. Sick device. */
+		return;
+	cdev->private->state = DEV_STATE_DISCONNECTED_SENSE_ID;
+	ccw_device_sense_id_start(cdev);
+}
+
+void ccw_device_trigger_reprobe(struct ccw_device *cdev)
+{
+	struct subchannel *sch;
+
+	if (cdev->private->state != DEV_STATE_DISCONNECTED)
+		return;
+
+	sch = to_subchannel(cdev->dev.parent);
+	/* Update some values. */
+	if (cio_update_schib(sch))
+		return;
+	/*
+	 * The pim, pam, pom values may not be accurate, but they are the best
+	 * we have before performing device selection :/
+	 */
+	sch->lpm = sch->schib.pmcw.pam & sch->opm;
+	/*
+	 * Use the initial configuration since we can't be sure that the old
+	 * paths are valid.
+	 */
+	io_subchannel_init_config(sch);
+	if (cio_commit_config(sch))
+		return;
+
+	/* We should also update ssd info, but this has to wait. */
+	/* Check if this is another device which appeared on the same sch. */
+	if (sch->schib.pmcw.dev != cdev->private->dev_id.devno)
+		css_schedule_eval(sch->schid);
+	else
+		ccw_device_start_id(cdev, 0);
+}
+
+static void ccw_device_disabled_irq(struct ccw_device *cdev,
+				    enum dev_event dev_event)
+{
+	struct subchannel *sch;
+
+	sch = to_subchannel(cdev->dev.parent);
+	/*
+	 * An interrupt in a disabled state means a previous disable was not
+	 * successful - should not happen, but we try to disable again.
+	 */
+	cio_disable_subchannel(sch);
+}
+
+static void
+ccw_device_change_cmfstate(struct ccw_device *cdev, enum dev_event dev_event)
+{
+	retry_set_schib(cdev);
+	cdev->private->state = DEV_STATE_ONLINE;
+	dev_fsm_event(cdev, dev_event);
+}
+
+static void ccw_device_update_cmfblock(struct ccw_device *cdev,
+				       enum dev_event dev_event)
+{
+	cmf_retry_copy_block(cdev);
+	cdev->private->state = DEV_STATE_ONLINE;
+	dev_fsm_event(cdev, dev_event);
+}
+
+static void
+ccw_device_quiesce_done(struct ccw_device *cdev, enum dev_event dev_event)
+{
+	ccw_device_set_timeout(cdev, 0);
+	cdev->private->state = DEV_STATE_NOT_OPER;
+	wake_up(&cdev->private->wait_q);
+}
+
+static void
+ccw_device_quiesce_timeout(struct ccw_device *cdev, enum dev_event dev_event)
+{
+	int ret;
+
+	ret = ccw_device_cancel_halt_clear(cdev);
+	if (ret == -EBUSY) {
+		ccw_device_set_timeout(cdev, HZ/10);
+	} else {
+		cdev->private->state = DEV_STATE_NOT_OPER;
+		wake_up(&cdev->private->wait_q);
+	}
+}
+
+/*
+ * No operation action. This is used e.g. to ignore a timeout event in
+ * state offline.
+ */
+static void
+ccw_device_nop(struct ccw_device *cdev, enum dev_event dev_event)
+{
+}
+
+/*
+ * device state machine
+ */
+fsm_func_t *dev_jumptable[NR_DEV_STATES][NR_DEV_EVENTS] = {
+	[DEV_STATE_NOT_OPER] = {
+		[DEV_EVENT_NOTOPER]	= ccw_device_nop,
+		[DEV_EVENT_INTERRUPT]	= ccw_device_disabled_irq,
+		[DEV_EVENT_TIMEOUT]	= ccw_device_nop,
+		[DEV_EVENT_VERIFY]	= ccw_device_nop,
+	},
+	[DEV_STATE_SENSE_ID] = {
+		[DEV_EVENT_NOTOPER]	= ccw_device_request_event,
+		[DEV_EVENT_INTERRUPT]	= ccw_device_request_event,
+		[DEV_EVENT_TIMEOUT]	= ccw_device_request_event,
+		[DEV_EVENT_VERIFY]	= ccw_device_nop,
+	},
+	[DEV_STATE_OFFLINE] = {
+		[DEV_EVENT_NOTOPER]	= ccw_device_generic_notoper,
+		[DEV_EVENT_INTERRUPT]	= ccw_device_disabled_irq,
+		[DEV_EVENT_TIMEOUT]	= ccw_device_nop,
+		[DEV_EVENT_VERIFY]	= ccw_device_offline_verify,
+	},
+	[DEV_STATE_VERIFY] = {
+		[DEV_EVENT_NOTOPER]	= ccw_device_request_event,
+		[DEV_EVENT_INTERRUPT]	= ccw_device_request_event,
+		[DEV_EVENT_TIMEOUT]	= ccw_device_request_event,
+		[DEV_EVENT_VERIFY]	= ccw_device_delay_verify,
+	},
+	[DEV_STATE_ONLINE] = {
+		[DEV_EVENT_NOTOPER]	= ccw_device_generic_notoper,
+		[DEV_EVENT_INTERRUPT]	= ccw_device_irq,
+		[DEV_EVENT_TIMEOUT]	= ccw_device_online_timeout,
+		[DEV_EVENT_VERIFY]	= ccw_device_online_verify,
+	},
+	[DEV_STATE_W4SENSE] = {
+		[DEV_EVENT_NOTOPER]	= ccw_device_generic_notoper,
+		[DEV_EVENT_INTERRUPT]	= ccw_device_w4sense,
+		[DEV_EVENT_TIMEOUT]	= ccw_device_nop,
+		[DEV_EVENT_VERIFY]	= ccw_device_online_verify,
+	},
+	[DEV_STATE_DISBAND_PGID] = {
+		[DEV_EVENT_NOTOPER]	= ccw_device_request_event,
+		[DEV_EVENT_INTERRUPT]	= ccw_device_request_event,
+		[DEV_EVENT_TIMEOUT]	= ccw_device_request_event,
+		[DEV_EVENT_VERIFY]	= ccw_device_nop,
+	},
+	[DEV_STATE_BOXED] = {
+		[DEV_EVENT_NOTOPER]	= ccw_device_generic_notoper,
+		[DEV_EVENT_INTERRUPT]	= ccw_device_nop,
+		[DEV_EVENT_TIMEOUT]	= ccw_device_nop,
+		[DEV_EVENT_VERIFY]	= ccw_device_boxed_verify,
+	},
+	/* states to wait for i/o completion before doing something */
+	[DEV_STATE_TIMEOUT_KILL] = {
+		[DEV_EVENT_NOTOPER]	= ccw_device_generic_notoper,
+		[DEV_EVENT_INTERRUPT]	= ccw_device_killing_irq,
+		[DEV_EVENT_TIMEOUT]	= ccw_device_killing_timeout,
+		[DEV_EVENT_VERIFY]	= ccw_device_nop, /* FIXME */
+	},
+	[DEV_STATE_QUIESCE] = {
+		[DEV_EVENT_NOTOPER]	= ccw_device_quiesce_done,
+		[DEV_EVENT_INTERRUPT]	= ccw_device_quiesce_done,
+		[DEV_EVENT_TIMEOUT]	= ccw_device_quiesce_timeout,
+		[DEV_EVENT_VERIFY]	= ccw_device_nop,
+	},
+	/* special states for devices gone not operational */
+	[DEV_STATE_DISCONNECTED] = {
+		[DEV_EVENT_NOTOPER]	= ccw_device_nop,
+		[DEV_EVENT_INTERRUPT]	= ccw_device_start_id,
+		[DEV_EVENT_TIMEOUT]	= ccw_device_nop,
+		[DEV_EVENT_VERIFY]	= ccw_device_start_id,
+	},
+	[DEV_STATE_DISCONNECTED_SENSE_ID] = {
+		[DEV_EVENT_NOTOPER]	= ccw_device_request_event,
+		[DEV_EVENT_INTERRUPT]	= ccw_device_request_event,
+		[DEV_EVENT_TIMEOUT]	= ccw_device_request_event,
+		[DEV_EVENT_VERIFY]	= ccw_device_nop,
+	},
+	[DEV_STATE_CMFCHANGE] = {
+		[DEV_EVENT_NOTOPER]	= ccw_device_change_cmfstate,
+		[DEV_EVENT_INTERRUPT]	= ccw_device_change_cmfstate,
+		[DEV_EVENT_TIMEOUT]	= ccw_device_change_cmfstate,
+		[DEV_EVENT_VERIFY]	= ccw_device_change_cmfstate,
+	},
+	[DEV_STATE_CMFUPDATE] = {
+		[DEV_EVENT_NOTOPER]	= ccw_device_update_cmfblock,
+		[DEV_EVENT_INTERRUPT]	= ccw_device_update_cmfblock,
+		[DEV_EVENT_TIMEOUT]	= ccw_device_update_cmfblock,
+		[DEV_EVENT_VERIFY]	= ccw_device_update_cmfblock,
+	},
+	[DEV_STATE_STEAL_LOCK] = {
+		[DEV_EVENT_NOTOPER]	= ccw_device_request_event,
+		[DEV_EVENT_INTERRUPT]	= ccw_device_request_event,
+		[DEV_EVENT_TIMEOUT]	= ccw_device_request_event,
+		[DEV_EVENT_VERIFY]	= ccw_device_nop,
+	},
+};
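+
+/*
+ * Editorial sketch (the real dispatcher lives in device.h): events are
+ * routed through the table above essentially as
+ *
+ *	static inline void dev_fsm_event(struct ccw_device *cdev,
+ *					 enum dev_event dev_event)
+ *	{
+ *		dev_jumptable[cdev->private->state][dev_event](cdev, dev_event);
+ *	}
+ */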
+
+EXPORT_SYMBOL_GPL(ccw_device_set_timeout);
diff --git a/drivers/s390/cio/device_id.c b/drivers/s390/cio/device_id.c
new file mode 100644
index 0000000..f6df83a
--- /dev/null
+++ b/drivers/s390/cio/device_id.c
@@ -0,0 +1,223 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ *  CCW device SENSE ID I/O handling.
+ *
+ *    Copyright IBM Corp. 2002, 2009
+ *    Author(s): Cornelia Huck <cornelia.huck@de.ibm.com>
+ *		 Martin Schwidefsky <schwidefsky@de.ibm.com>
+ *		 Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <asm/ccwdev.h>
+#include <asm/setup.h>
+#include <asm/cio.h>
+#include <asm/diag.h>
+
+#include "cio.h"
+#include "cio_debug.h"
+#include "device.h"
+#include "io_sch.h"
+
+#define SENSE_ID_RETRIES	256
+#define SENSE_ID_TIMEOUT	(10 * HZ)
+#define SENSE_ID_MIN_LEN	4
+#define SENSE_ID_BASIC_LEN	7
+
+/**
+ * diag210_to_senseid - convert diag 0x210 data to sense id information
+ * @senseid: sense id
+ * @diag: diag 0x210 data
+ *
+ * Return 0 on success, non-zero otherwise.
+ */
+static int diag210_to_senseid(struct senseid *senseid, struct diag210 *diag)
+{
+	static struct {
+		int class, type, cu_type;
+	} vm_devices[] = {
+		{ 0x08, 0x01, 0x3480 },
+		{ 0x08, 0x02, 0x3430 },
+		{ 0x08, 0x10, 0x3420 },
+		{ 0x08, 0x42, 0x3424 },
+		{ 0x08, 0x44, 0x9348 },
+		{ 0x08, 0x81, 0x3490 },
+		{ 0x08, 0x82, 0x3422 },
+		{ 0x10, 0x41, 0x1403 },
+		{ 0x10, 0x42, 0x3211 },
+		{ 0x10, 0x43, 0x3203 },
+		{ 0x10, 0x45, 0x3800 },
+		{ 0x10, 0x47, 0x3262 },
+		{ 0x10, 0x48, 0x3820 },
+		{ 0x10, 0x49, 0x3800 },
+		{ 0x10, 0x4a, 0x4245 },
+		{ 0x10, 0x4b, 0x4248 },
+		{ 0x10, 0x4d, 0x3800 },
+		{ 0x10, 0x4e, 0x3820 },
+		{ 0x10, 0x4f, 0x3820 },
+		{ 0x10, 0x82, 0x2540 },
+		{ 0x10, 0x84, 0x3525 },
+		{ 0x20, 0x81, 0x2501 },
+		{ 0x20, 0x82, 0x2540 },
+		{ 0x20, 0x84, 0x3505 },
+		{ 0x40, 0x01, 0x3278 },
+		{ 0x40, 0x04, 0x3277 },
+		{ 0x40, 0x80, 0x2250 },
+		{ 0x40, 0xc0, 0x5080 },
+		{ 0x80, 0x00, 0x3215 },
+	};
+	int i;
+
+	/* Special case for osa devices. */
+	if (diag->vrdcvcla == 0x02 && diag->vrdcvtyp == 0x20) {
+		senseid->cu_type = 0x3088;
+		senseid->cu_model = 0x60;
+		senseid->reserved = 0xff;
+		return 0;
+	}
+	for (i = 0; i < ARRAY_SIZE(vm_devices); i++) {
+		if (diag->vrdcvcla == vm_devices[i].class &&
+		    diag->vrdcvtyp == vm_devices[i].type) {
+			senseid->cu_type = vm_devices[i].cu_type;
+			senseid->reserved = 0xff;
+			return 0;
+		}
+	}
+
+	return -ENODEV;
+}
+
+/**
+ * diag210_get_dev_info - retrieve device information via diag 0x210
+ * @cdev: ccw device
+ *
+ * Returns zero on success, non-zero otherwise.
+ */
+static int diag210_get_dev_info(struct ccw_device *cdev)
+{
+	struct ccw_dev_id *dev_id = &cdev->private->dev_id;
+	struct senseid *senseid = &cdev->private->senseid;
+	struct diag210 diag_data;
+	int rc;
+
+	if (dev_id->ssid != 0)
+		return -ENODEV;
+	memset(&diag_data, 0, sizeof(diag_data));
+	diag_data.vrdcdvno	= dev_id->devno;
+	diag_data.vrdclen	= sizeof(diag_data);
+	rc = diag210(&diag_data);
+	CIO_TRACE_EVENT(4, "diag210");
+	CIO_HEX_EVENT(4, &rc, sizeof(rc));
+	CIO_HEX_EVENT(4, &diag_data, sizeof(diag_data));
+	if (rc != 0 && rc != 2)
+		goto err_failed;
+	if (diag210_to_senseid(senseid, &diag_data))
+		goto err_unknown;
+	return 0;
+
+err_unknown:
+	CIO_MSG_EVENT(0, "snsid: device 0.%x.%04x: unknown diag210 data\n",
+		      dev_id->ssid, dev_id->devno);
+	return -ENODEV;
+err_failed:
+	CIO_MSG_EVENT(0, "snsid: device 0.%x.%04x: diag210 failed (rc=%d)\n",
+		      dev_id->ssid, dev_id->devno, rc);
+	return -ENODEV;
+}
+
+/*
+ * Initialize SENSE ID data.
+ */
+static void snsid_init(struct ccw_device *cdev)
+{
+	cdev->private->flags.esid = 0;
+	memset(&cdev->private->senseid, 0, sizeof(cdev->private->senseid));
+	cdev->private->senseid.cu_type = 0xffff;
+}
+
+/*
+ * Check for complete SENSE ID data.
+ */
+static int snsid_check(struct ccw_device *cdev, void *data)
+{
+	struct cmd_scsw *scsw = &cdev->private->irb.scsw.cmd;
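+	/*
+	 * scsw->count holds the residual count, so len is the number of
+	 * SENSE ID bytes the device actually transferred.
+	 */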
+	int len = sizeof(struct senseid) - scsw->count;
+
+	/* Check for incomplete SENSE ID data. */
+	if (len < SENSE_ID_MIN_LEN)
+		goto out_restart;
+	if (cdev->private->senseid.cu_type == 0xffff)
+		goto out_restart;
+	/* Check for incompatible SENSE ID data. */
+	if (cdev->private->senseid.reserved != 0xff)
+		return -EOPNOTSUPP;
+	/* Check for extended-identification information. */
+	if (len > SENSE_ID_BASIC_LEN)
+		cdev->private->flags.esid = 1;
+	return 0;
+
+out_restart:
+	snsid_init(cdev);
+	return -EAGAIN;
+}
+
+/*
+ * Process SENSE ID request result.
+ */
+static void snsid_callback(struct ccw_device *cdev, void *data, int rc)
+{
+	struct ccw_dev_id *id = &cdev->private->dev_id;
+	struct senseid *senseid = &cdev->private->senseid;
+	int vm = 0;
+
+	if (rc && MACHINE_IS_VM) {
+		/* Try diag 0x210 fallback on z/VM. */
+		snsid_init(cdev);
+		if (diag210_get_dev_info(cdev) == 0) {
+			rc = 0;
+			vm = 1;
+		}
+	}
+	CIO_MSG_EVENT(2, "snsid: device 0.%x.%04x: rc=%d %04x/%02x "
+		      "%04x/%02x%s\n", id->ssid, id->devno, rc,
+		      senseid->cu_type, senseid->cu_model, senseid->dev_type,
+		      senseid->dev_model, vm ? " (diag210)" : "");
+	ccw_device_sense_id_done(cdev, rc);
+}
+
+/**
+ * ccw_device_sense_id_start - perform SENSE ID
+ * @cdev: ccw device
+ *
+ * Execute a SENSE ID channel program on @cdev to update its sense id
+ * information. When finished, call ccw_device_sense_id_done with a
+ * return code specifying the result.
+ */
+void ccw_device_sense_id_start(struct ccw_device *cdev)
+{
+	struct subchannel *sch = to_subchannel(cdev->dev.parent);
+	struct ccw_request *req = &cdev->private->req;
+	struct ccw1 *cp = cdev->private->iccws;
+
+	CIO_TRACE_EVENT(4, "snsid");
+	CIO_HEX_EVENT(4, &cdev->private->dev_id, sizeof(cdev->private->dev_id));
+	/* Data setup. */
+	snsid_init(cdev);
+	/* Channel program setup. */
+	cp->cmd_code	= CCW_CMD_SENSE_ID;
+	cp->cda		= (u32) (addr_t) &cdev->private->senseid;
+	cp->count	= sizeof(struct senseid);
+	cp->flags	= CCW_FLAG_SLI;
+	/* Request setup. */
+	memset(req, 0, sizeof(*req));
+	req->cp		= cp;
+	req->timeout	= SENSE_ID_TIMEOUT;
+	req->maxretries	= SENSE_ID_RETRIES;
+	req->lpm	= sch->schib.pmcw.pam & sch->opm;
+	req->check	= snsid_check;
+	req->callback	= snsid_callback;
+	ccw_request_start(cdev);
+}
diff --git a/drivers/s390/cio/device_ops.c b/drivers/s390/cio/device_ops.c
new file mode 100644
index 0000000..4435ae0
--- /dev/null
+++ b/drivers/s390/cio/device_ops.c
@@ -0,0 +1,715 @@
+// SPDX-License-Identifier: GPL-1.0+
+/*
+ * Copyright IBM Corp. 2002, 2009
+ *
+ * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
+ *	      Cornelia Huck (cornelia.huck@de.ibm.com)
+ */
+#include <linux/export.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/list.h>
+#include <linux/device.h>
+#include <linux/delay.h>
+#include <linux/completion.h>
+
+#include <asm/ccwdev.h>
+#include <asm/idals.h>
+#include <asm/chpid.h>
+#include <asm/fcx.h>
+
+#include "cio.h"
+#include "cio_debug.h"
+#include "css.h"
+#include "chsc.h"
+#include "device.h"
+#include "chp.h"
+
+/**
+ * ccw_device_set_options_mask() - set some options and unset the rest
+ * @cdev: device for which the options are to be set
+ * @flags: options to be set
+ *
+ * All flags specified in @flags are set, all flags not specified in @flags
+ * are cleared.
+ * Returns:
+ *   %0 on success, -%EINVAL on an invalid flag combination.
+ */
+int ccw_device_set_options_mask(struct ccw_device *cdev, unsigned long flags)
+{
+	/*
+	 * The flag usage is mutually exclusive ...
+	 */
+	if ((flags & CCWDEV_EARLY_NOTIFICATION) &&
+	    (flags & CCWDEV_REPORT_ALL))
+		return -EINVAL;
+	cdev->private->options.fast = (flags & CCWDEV_EARLY_NOTIFICATION) != 0;
+	cdev->private->options.repall = (flags & CCWDEV_REPORT_ALL) != 0;
+	cdev->private->options.pgroup = (flags & CCWDEV_DO_PATHGROUP) != 0;
+	cdev->private->options.force = (flags & CCWDEV_ALLOW_FORCE) != 0;
+	cdev->private->options.mpath = (flags & CCWDEV_DO_MULTIPATH) != 0;
+	return 0;
+}
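+
+/*
+ * Illustrative example (not part of the original source): a driver that
+ * wants path grouping and multipathing, and nothing else, could call from
+ * its probe routine:
+ *
+ *	rc = ccw_device_set_options_mask(cdev, CCWDEV_DO_PATHGROUP |
+ *						CCWDEV_DO_MULTIPATH);
+ *	if (rc)
+ *		return rc;
+ */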
+
+/**
+ * ccw_device_set_options() - set some options
+ * @cdev: device for which the options are to be set
+ * @flags: options to be set
+ *
+ * All flags specified in @flags are set, the remainder is left untouched.
+ * Returns:
+ *   %0 on success, -%EINVAL if an invalid flag combination would ensue.
+ */
+int ccw_device_set_options(struct ccw_device *cdev, unsigned long flags)
+{
+	/*
+	 * The flag usage is mutually exclusive ...
+	 */
+	if (((flags & CCWDEV_EARLY_NOTIFICATION) &&
+	    (flags & CCWDEV_REPORT_ALL)) ||
+	    ((flags & CCWDEV_EARLY_NOTIFICATION) &&
+	     cdev->private->options.repall) ||
+	    ((flags & CCWDEV_REPORT_ALL) &&
+	     cdev->private->options.fast))
+		return -EINVAL;
+	cdev->private->options.fast |= (flags & CCWDEV_EARLY_NOTIFICATION) != 0;
+	cdev->private->options.repall |= (flags & CCWDEV_REPORT_ALL) != 0;
+	cdev->private->options.pgroup |= (flags & CCWDEV_DO_PATHGROUP) != 0;
+	cdev->private->options.force |= (flags & CCWDEV_ALLOW_FORCE) != 0;
+	cdev->private->options.mpath |= (flags & CCWDEV_DO_MULTIPATH) != 0;
+	return 0;
+}
+
+/**
+ * ccw_device_clear_options() - clear some options
+ * @cdev: device for which the options are to be cleared
+ * @flags: options to be cleared
+ *
+ * All flags specified in @flags are cleared, the remainder is left untouched.
+ */
+void ccw_device_clear_options(struct ccw_device *cdev, unsigned long flags)
+{
+	cdev->private->options.fast &= (flags & CCWDEV_EARLY_NOTIFICATION) == 0;
+	cdev->private->options.repall &= (flags & CCWDEV_REPORT_ALL) == 0;
+	cdev->private->options.pgroup &= (flags & CCWDEV_DO_PATHGROUP) == 0;
+	cdev->private->options.force &= (flags & CCWDEV_ALLOW_FORCE) == 0;
+	cdev->private->options.mpath &= (flags & CCWDEV_DO_MULTIPATH) == 0;
+}
+
+/**
+ * ccw_device_is_pathgroup() - determine if paths to this device are grouped
+ * @cdev: ccw device
+ *
+ * Return non-zero if there is a path group, zero otherwise.
+ */
+int ccw_device_is_pathgroup(struct ccw_device *cdev)
+{
+	return cdev->private->flags.pgroup;
+}
+EXPORT_SYMBOL(ccw_device_is_pathgroup);
+
+/**
+ * ccw_device_is_multipath() - determine if device is operating in multipath mode
+ * @cdev: ccw device
+ *
+ * Return non-zero if device is operating in multipath mode, zero otherwise.
+ */
+int ccw_device_is_multipath(struct ccw_device *cdev)
+{
+	return cdev->private->flags.mpath;
+}
+EXPORT_SYMBOL(ccw_device_is_multipath);
+
+/**
+ * ccw_device_clear() - terminate I/O request processing
+ * @cdev: target ccw device
+ * @intparm: interruption parameter; value is only used if no I/O is
+ *	     outstanding, otherwise the intparm associated with the I/O request
+ *	     is returned
+ *
+ * ccw_device_clear() calls csch on @cdev's subchannel.
+ * Returns:
+ *  %0 on success,
+ *  -%ENODEV on device not operational,
+ *  -%EINVAL on invalid device state.
+ * Context:
+ *  Interrupts disabled, ccw device lock held
+ */
+int ccw_device_clear(struct ccw_device *cdev, unsigned long intparm)
+{
+	struct subchannel *sch;
+	int ret;
+
+	if (!cdev || !cdev->dev.parent)
+		return -ENODEV;
+	sch = to_subchannel(cdev->dev.parent);
+	if (!sch->schib.pmcw.ena)
+		return -EINVAL;
+	if (cdev->private->state == DEV_STATE_NOT_OPER)
+		return -ENODEV;
+	if (cdev->private->state != DEV_STATE_ONLINE &&
+	    cdev->private->state != DEV_STATE_W4SENSE)
+		return -EINVAL;
+
+	ret = cio_clear(sch);
+	if (ret == 0)
+		cdev->private->intparm = intparm;
+	return ret;
+}
+
+/**
+ * ccw_device_start_timeout_key() - start an s390 channel program with timeout and key
+ * @cdev: target ccw device
+ * @cpa: logical start address of channel program
+ * @intparm: user specific interruption parameter; will be presented back to
+ *	     @cdev's interrupt handler. Allows a device driver to associate
+ *	     the interrupt with a particular I/O request.
+ * @lpm: defines the channel path to be used for a specific I/O request. A
+ *	 value of 0 will make cio use the opm.
+ * @key: storage key to be used for the I/O
+ * @flags: additional flags; defines the action to be performed for I/O
+ *	   processing.
+ * @expires: timeout value in jiffies
+ *
+ * Start an S/390 channel program. When the interrupt arrives, the
+ * IRQ handler is called, either immediately, delayed (dev-end missing,
+ * or sense required) or never (no IRQ handler registered).
+ * This function notifies the device driver if the channel program has not
+ * completed during the time specified by @expires. If a timeout occurs, the
+ * channel program is terminated via xsch, hsch or csch, and the device's
+ * interrupt handler will be called with an irb containing ERR_PTR(-%ETIMEDOUT).
+ * Returns:
+ *  %0, if the operation was successful;
+ *  -%EBUSY, if the device is busy, or status pending;
+ *  -%EACCES, if no path specified in @lpm is operational;
+ *  -%ENODEV, if the device is not operational.
+ * Context:
+ *  Interrupts disabled, ccw device lock held
+ */
+int ccw_device_start_timeout_key(struct ccw_device *cdev, struct ccw1 *cpa,
+				 unsigned long intparm, __u8 lpm, __u8 key,
+				 unsigned long flags, int expires)
+{
+	struct subchannel *sch;
+	int ret;
+
+	if (!cdev || !cdev->dev.parent)
+		return -ENODEV;
+	sch = to_subchannel(cdev->dev.parent);
+	if (!sch->schib.pmcw.ena)
+		return -EINVAL;
+	if (cdev->private->state == DEV_STATE_NOT_OPER)
+		return -ENODEV;
+	if (cdev->private->state == DEV_STATE_VERIFY) {
+		/* Remember to fake irb when finished. */
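+		/*
+		 * (Editorial note: the faked irb is delivered from the FSM
+		 * once verification completes, so the request appears
+		 * deferred rather than rejected.)
+		 */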
+		if (!cdev->private->flags.fake_irb) {
+			cdev->private->flags.fake_irb = FAKE_CMD_IRB;
+			cdev->private->intparm = intparm;
+			return 0;
+		} else
+			/* There's already a fake I/O around. */
+			return -EBUSY;
+	}
+	if (cdev->private->state != DEV_STATE_ONLINE ||
+	    ((sch->schib.scsw.cmd.stctl & SCSW_STCTL_PRIM_STATUS) &&
+	     !(sch->schib.scsw.cmd.stctl & SCSW_STCTL_SEC_STATUS)) ||
+	    cdev->private->flags.doverify)
+		return -EBUSY;
+	ret = cio_set_options(sch, flags);
+	if (ret)
+		return ret;
+	/* Adjust requested path mask to exclude unusable paths. */
+	if (lpm) {
+		lpm &= sch->lpm;
+		if (lpm == 0)
+			return -EACCES;
+	}
+	ret = cio_start_key(sch, cpa, lpm, key);
+	switch (ret) {
+	case 0:
+		cdev->private->intparm = intparm;
+		if (expires)
+			ccw_device_set_timeout(cdev, expires);
+		break;
+	case -EACCES:
+	case -ENODEV:
+		dev_fsm_event(cdev, DEV_EVENT_VERIFY);
+		break;
+	}
+	return ret;
+}
+
+/**
+ * ccw_device_start_key() - start an s390 channel program with key
+ * @cdev: target ccw device
+ * @cpa: logical start address of channel program
+ * @intparm: user specific interruption parameter; will be presented back to
+ *	     @cdev's interrupt handler. Allows a device driver to associate
+ *	     the interrupt with a particular I/O request.
+ * @lpm: defines the channel path to be used for a specific I/O request. A
+ *	 value of 0 will make cio use the opm.
+ * @key: storage key to be used for the I/O
+ * @flags: additional flags; defines the action to be performed for I/O
+ *	   processing.
+ *
+ * Start an S/390 channel program. When the interrupt arrives, the
+ * IRQ handler is called, either immediately, delayed (dev-end missing,
+ * or sense required) or never (no IRQ handler registered).
+ * Returns:
+ *  %0, if the operation was successful;
+ *  -%EBUSY, if the device is busy, or status pending;
+ *  -%EACCES, if no path specified in @lpm is operational;
+ *  -%ENODEV, if the device is not operational.
+ * Context:
+ *  Interrupts disabled, ccw device lock held
+ */
+int ccw_device_start_key(struct ccw_device *cdev, struct ccw1 *cpa,
+			 unsigned long intparm, __u8 lpm, __u8 key,
+			 unsigned long flags)
+{
+	return ccw_device_start_timeout_key(cdev, cpa, intparm, lpm, key,
+					    flags, 0);
+}
+
+/**
+ * ccw_device_start() - start an s390 channel program
+ * @cdev: target ccw device
+ * @cpa: logical start address of channel program
+ * @intparm: user specific interruption parameter; will be presented back to
+ *	     @cdev's interrupt handler. Allows a device driver to associate
+ *	     the interrupt with a particular I/O request.
+ * @lpm: defines the channel path to be used for a specific I/O request. A
+ *	 value of 0 will make cio use the opm.
+ * @flags: additional flags; defines the action to be performed for I/O
+ *	   processing.
+ *
+ * Start an S/390 channel program. When the interrupt arrives, the
+ * IRQ handler is called, either immediately, delayed (dev-end missing,
+ * or sense required) or never (no IRQ handler registered).
+ * Returns:
+ *  %0, if the operation was successful;
+ *  -%EBUSY, if the device is busy, or status pending;
+ *  -%EACCES, if no path specified in @lpm is operational;
+ *  -%ENODEV, if the device is not operational.
+ * Context:
+ *  Interrupts disabled, ccw device lock held
+ */
+int ccw_device_start(struct ccw_device *cdev, struct ccw1 *cpa,
+		     unsigned long intparm, __u8 lpm, unsigned long flags)
+{
+	return ccw_device_start_key(cdev, cpa, intparm, lpm,
+				    PAGE_DEFAULT_KEY, flags);
+}
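+
+/*
+ * Illustrative example (not from the original source; "cp" and "my_req"
+ * are hypothetical driver objects): ccw_device_start() is called with the
+ * ccw device lock held and interrupts disabled, e.g.:
+ *
+ *	spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
+ *	rc = ccw_device_start(cdev, cp, (unsigned long)my_req, 0x00, 0);
+ *	spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
+ */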
+
+/**
+ * ccw_device_start_timeout() - start an s390 channel program with timeout
+ * @cdev: target ccw device
+ * @cpa: logical start address of channel program
+ * @intparm: user specific interruption parameter; will be presented back to
+ *	     @cdev's interrupt handler. Allows a device driver to associate
+ *	     the interrupt with a particular I/O request.
+ * @lpm: defines the channel path to be used for a specific I/O request. A
+ *	 value of 0 will make cio use the opm.
+ * @flags: additional flags; defines the action to be performed for I/O
+ *	   processing.
+ * @expires: timeout value in jiffies
+ *
+ * Start an S/390 channel program. When the interrupt arrives, the
+ * IRQ handler is called, either immediately, delayed (dev-end missing,
+ * or sense required) or never (no IRQ handler registered).
+ * This function notifies the device driver if the channel program has not
+ * completed during the time specified by @expires. If a timeout occurs, the
+ * channel program is terminated via xsch, hsch or csch, and the device's
+ * interrupt handler will be called with an irb containing ERR_PTR(-%ETIMEDOUT).
+ * Returns:
+ *  %0, if the operation was successful;
+ *  -%EBUSY, if the device is busy, or status pending;
+ *  -%EACCES, if no path specified in @lpm is operational;
+ *  -%ENODEV, if the device is not operational.
+ * Context:
+ *  Interrupts disabled, ccw device lock held
+ */
+int ccw_device_start_timeout(struct ccw_device *cdev, struct ccw1 *cpa,
+			     unsigned long intparm, __u8 lpm,
+			     unsigned long flags, int expires)
+{
+	return ccw_device_start_timeout_key(cdev, cpa, intparm, lpm,
+					    PAGE_DEFAULT_KEY, flags,
+					    expires);
+}
+
+/**
+ * ccw_device_halt() - halt I/O request processing
+ * @cdev: target ccw device
+ * @intparm: interruption parameter; value is only used if no I/O is
+ *	     outstanding, otherwise the intparm associated with the I/O request
+ *	     is returned
+ *
+ * ccw_device_halt() calls hsch on @cdev's subchannel.
+ * Returns:
+ *  %0 on success,
+ *  -%ENODEV on device not operational,
+ *  -%EINVAL on invalid device state,
+ *  -%EBUSY on device busy or interrupt pending.
+ * Context:
+ *  Interrupts disabled, ccw device lock held
+ */
+int ccw_device_halt(struct ccw_device *cdev, unsigned long intparm)
+{
+	struct subchannel *sch;
+	int ret;
+
+	if (!cdev || !cdev->dev.parent)
+		return -ENODEV;
+	sch = to_subchannel(cdev->dev.parent);
+	if (!sch->schib.pmcw.ena)
+		return -EINVAL;
+	if (cdev->private->state == DEV_STATE_NOT_OPER)
+		return -ENODEV;
+	if (cdev->private->state != DEV_STATE_ONLINE &&
+	    cdev->private->state != DEV_STATE_W4SENSE)
+		return -EINVAL;
+
+	ret = cio_halt(sch);
+	if (ret == 0)
+		cdev->private->intparm = intparm;
+	return ret;
+}
+
+/**
+ * ccw_device_resume() - resume channel program execution
+ * @cdev: target ccw device
+ *
+ * ccw_device_resume() calls rsch on @cdev's subchannel.
+ * Returns:
+ *  %0 on success,
+ *  -%ENODEV on device not operational,
+ *  -%EINVAL on invalid device state,
+ *  -%EBUSY on device busy or interrupt pending.
+ * Context:
+ *  Interrupts disabled, ccw device lock held
+ */
+int ccw_device_resume(struct ccw_device *cdev)
+{
+	struct subchannel *sch;
+
+	if (!cdev || !cdev->dev.parent)
+		return -ENODEV;
+	sch = to_subchannel(cdev->dev.parent);
+	if (!sch->schib.pmcw.ena)
+		return -EINVAL;
+	if (cdev->private->state == DEV_STATE_NOT_OPER)
+		return -ENODEV;
+	if (cdev->private->state != DEV_STATE_ONLINE ||
+	    !(sch->schib.scsw.cmd.actl & SCSW_ACTL_SUSPENDED))
+		return -EINVAL;
+	return cio_resume(sch);
+}
+
+/**
+ * ccw_device_get_ciw() - Search for CIW command in extended sense data.
+ * @cdev: ccw device to inspect
+ * @ct: command type to look for
+ *
+ * During SenseID, command information words (CIWs) describing special
+ * commands available to the device may have been stored in the extended
+ * sense data. This function searches for CIWs of a specified command
+ * type in the extended sense data.
+ * Returns:
+ *  %NULL if no extended sense data has been stored or if no CIW of the
+ *  specified command type could be found,
+ *  else a pointer to the CIW of the specified command type.
+ */
+struct ciw *ccw_device_get_ciw(struct ccw_device *cdev, __u32 ct)
+{
+	int ciw_cnt;
+
+	if (cdev->private->flags.esid == 0)
+		return NULL;
+	for (ciw_cnt = 0; ciw_cnt < MAX_CIWS; ciw_cnt++)
+		if (cdev->private->senseid.ciw[ciw_cnt].ct == ct)
+			return cdev->private->senseid.ciw + ciw_cnt;
+	return NULL;
+}
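+
+/*
+ * Illustrative example (not part of the original source): looking up the
+ * read-configuration-data command advertised by a device:
+ *
+ *	struct ciw *ciw = ccw_device_get_ciw(cdev, CIW_TYPE_RCD);
+ *
+ * If non-NULL, ciw->cmd and ciw->count can be used to build the
+ * corresponding channel program.
+ */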
+
+/**
+ * ccw_device_get_path_mask() - get currently available paths
+ * @cdev: ccw device to be queried
+ * Returns:
+ *  %0 if no subchannel for the device is available,
+ *  else the mask of currently available paths for the ccw device's subchannel.
+ */
+__u8 ccw_device_get_path_mask(struct ccw_device *cdev)
+{
+	struct subchannel *sch;
+
+	if (!cdev->dev.parent)
+		return 0;
+
+	sch = to_subchannel(cdev->dev.parent);
+	return sch->lpm;
+}
+
+/**
+ * ccw_device_get_chp_desc() - return newly allocated channel-path descriptor
+ * @cdev: device to obtain the descriptor for
+ * @chp_idx: index of the channel path
+ *
+ * On success return a newly allocated copy of the channel-path description
+ * data associated with the given channel path. Return %NULL on error.
+ */
+struct channel_path_desc_fmt0 *ccw_device_get_chp_desc(struct ccw_device *cdev,
+						       int chp_idx)
+{
+	struct subchannel *sch;
+	struct chp_id chpid;
+
+	sch = to_subchannel(cdev->dev.parent);
+	chp_id_init(&chpid);
+	chpid.id = sch->schib.pmcw.chpid[chp_idx];
+	return chp_get_chp_desc(chpid);
+}
+
+/**
+ * ccw_device_get_util_str() - return newly allocated utility strings
+ * @cdev: device to obtain the utility strings for
+ * @chp_idx: index of the channel path
+ *
+ * On success return a newly allocated copy of the utility strings
+ * associated with the given channel path. Return %NULL on error.
+ */
+u8 *ccw_device_get_util_str(struct ccw_device *cdev, int chp_idx)
+{
+	struct subchannel *sch = to_subchannel(cdev->dev.parent);
+	struct channel_path *chp;
+	struct chp_id chpid;
+	u8 *util_str;
+
+	chp_id_init(&chpid);
+	chpid.id = sch->schib.pmcw.chpid[chp_idx];
+	chp = chpid_to_chp(chpid);
+
+	util_str = kmalloc(sizeof(chp->desc_fmt3.util_str), GFP_KERNEL);
+	if (!util_str)
+		return NULL;
+
+	mutex_lock(&chp->lock);
+	memcpy(util_str, chp->desc_fmt3.util_str, sizeof(chp->desc_fmt3.util_str));
+	mutex_unlock(&chp->lock);
+
+	return util_str;
+}
+
+/**
+ * ccw_device_get_id() - obtain a ccw device id
+ * @cdev: device to obtain the id for
+ * @dev_id: where to fill in the values
+ */
+void ccw_device_get_id(struct ccw_device *cdev, struct ccw_dev_id *dev_id)
+{
+	*dev_id = cdev->private->dev_id;
+}
+EXPORT_SYMBOL(ccw_device_get_id);
+
+/**
+ * ccw_device_tm_start_timeout_key() - perform start function with timeout and key
+ * @cdev: ccw device on which to perform the start function
+ * @tcw: transport-command word to be started
+ * @intparm: user defined parameter to be passed to the interrupt handler
+ * @lpm: mask of paths to use
+ * @key: storage key to use for storage access
+ * @expires: time span in jiffies after which to abort request
+ *
+ * Start the tcw on the given ccw device. Return zero on success, non-zero
+ * otherwise.
+ */
+int ccw_device_tm_start_timeout_key(struct ccw_device *cdev, struct tcw *tcw,
+				    unsigned long intparm, u8 lpm, u8 key,
+				    int expires)
+{
+	struct subchannel *sch;
+	int rc;
+
+	sch = to_subchannel(cdev->dev.parent);
+	if (!sch->schib.pmcw.ena)
+		return -EINVAL;
+	if (cdev->private->state == DEV_STATE_VERIFY) {
+		/* Remember to fake irb when finished. */
+		if (!cdev->private->flags.fake_irb) {
+			cdev->private->flags.fake_irb = FAKE_TM_IRB;
+			cdev->private->intparm = intparm;
+			return 0;
+		} else
+			/* There's already a fake I/O around. */
+			return -EBUSY;
+	}
+	if (cdev->private->state != DEV_STATE_ONLINE)
+		return -EIO;
+	/* Adjust requested path mask to exclude unusable paths. */
+	if (lpm) {
+		lpm &= sch->lpm;
+		if (lpm == 0)
+			return -EACCES;
+	}
+	rc = cio_tm_start_key(sch, tcw, lpm, key);
+	if (rc == 0) {
+		cdev->private->intparm = intparm;
+		if (expires)
+			ccw_device_set_timeout(cdev, expires);
+	}
+	return rc;
+}
+EXPORT_SYMBOL(ccw_device_tm_start_timeout_key);
+
+/**
+ * ccw_device_tm_start_key() - perform start function with key
+ * @cdev: ccw device on which to perform the start function
+ * @tcw: transport-command word to be started
+ * @intparm: user defined parameter to be passed to the interrupt handler
+ * @lpm: mask of paths to use
+ * @key: storage key to use for storage access
+ *
+ * Start the tcw on the given ccw device. Return zero on success, non-zero
+ * otherwise.
+ */
+int ccw_device_tm_start_key(struct ccw_device *cdev, struct tcw *tcw,
+			    unsigned long intparm, u8 lpm, u8 key)
+{
+	return ccw_device_tm_start_timeout_key(cdev, tcw, intparm, lpm, key, 0);
+}
+EXPORT_SYMBOL(ccw_device_tm_start_key);
+
+/**
+ * ccw_device_tm_start() - perform start function
+ * @cdev: ccw device on which to perform the start function
+ * @tcw: transport-command word to be started
+ * @intparm: user defined parameter to be passed to the interrupt handler
+ * @lpm: mask of paths to use
+ *
+ * Start the tcw on the given ccw device. Return zero on success, non-zero
+ * otherwise.
+ */
+int ccw_device_tm_start(struct ccw_device *cdev, struct tcw *tcw,
+			unsigned long intparm, u8 lpm)
+{
+	return ccw_device_tm_start_key(cdev, tcw, intparm, lpm,
+				       PAGE_DEFAULT_KEY);
+}
+EXPORT_SYMBOL(ccw_device_tm_start);
+
+/**
+ * ccw_device_tm_start_timeout() - perform start function with timeout
+ * @cdev: ccw device on which to perform the start function
+ * @tcw: transport-command word to be started
+ * @intparm: user defined parameter to be passed to the interrupt handler
+ * @lpm: mask of paths to use
+ * @expires: time span in jiffies after which to abort request
+ *
+ * Start the tcw on the given ccw device. Return zero on success, non-zero
+ * otherwise.
+ */
+int ccw_device_tm_start_timeout(struct ccw_device *cdev, struct tcw *tcw,
+			       unsigned long intparm, u8 lpm, int expires)
+{
+	return ccw_device_tm_start_timeout_key(cdev, tcw, intparm, lpm,
+					       PAGE_DEFAULT_KEY, expires);
+}
+EXPORT_SYMBOL(ccw_device_tm_start_timeout);
+
+/**
+ * ccw_device_get_mdc() - accumulate max data count
+ * @cdev: ccw device for which the max data count is accumulated
+ * @mask: mask of paths to use
+ *
+ * Return the number of 64K-byte blocks that all paths support at minimum
+ * for a transport command. Return values <= 0 indicate failures.
+ */
+int ccw_device_get_mdc(struct ccw_device *cdev, u8 mask)
+{
+	struct subchannel *sch = to_subchannel(cdev->dev.parent);
+	struct channel_path *chp;
+	struct chp_id chpid;
+	int mdc = 0, i;
+
+	/* Adjust requested path mask to exclude varied-off paths. */
+	if (mask)
+		mask &= sch->lpm;
+	else
+		mask = sch->lpm;
+
+	chp_id_init(&chpid);
+	for (i = 0; i < 8; i++) {
+		if (!(mask & (0x80 >> i)))
+			continue;
+		chpid.id = sch->schib.pmcw.chpid[i];
+		chp = chpid_to_chp(chpid);
+		if (!chp)
+			continue;
+
+		mutex_lock(&chp->lock);
+		if (!chp->desc_fmt1.f) {
+			mutex_unlock(&chp->lock);
+			return 0;
+		}
+		if (!chp->desc_fmt1.r)
+			mdc = 1;
+		mdc = mdc ? min_t(int, mdc, chp->desc_fmt1.mdc) :
+			    chp->desc_fmt1.mdc;
+		mutex_unlock(&chp->lock);
+	}
+
+	return mdc;
+}
+EXPORT_SYMBOL(ccw_device_get_mdc);
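+
+/*
+ * Editorial note: the value returned above counts 64K-byte blocks, so a
+ * caller needing a byte limit would convert it roughly as
+ * max_bytes = (u64)mdc * 64 * 1024 (illustrative, not code from this file).
+ */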
+
+/**
+ * ccw_device_tm_intrg() - perform interrogate function
+ * @cdev: ccw device on which to perform the interrogate function
+ *
+ * Perform an interrogate function on the given ccw device. Return zero on
+ * success, non-zero otherwise.
+ */
+int ccw_device_tm_intrg(struct ccw_device *cdev)
+{
+	struct subchannel *sch = to_subchannel(cdev->dev.parent);
+
+	if (!sch->schib.pmcw.ena)
+		return -EINVAL;
+	if (cdev->private->state != DEV_STATE_ONLINE)
+		return -EIO;
+	if (!scsw_is_tm(&sch->schib.scsw) ||
+	    !(scsw_actl(&sch->schib.scsw) & SCSW_ACTL_START_PEND))
+		return -EINVAL;
+	return cio_tm_intrg(sch);
+}
+EXPORT_SYMBOL(ccw_device_tm_intrg);
+
+/**
+ * ccw_device_get_schid() - obtain a subchannel id
+ * @cdev: device to obtain the id for
+ * @schid: where to fill in the values
+ */
+void ccw_device_get_schid(struct ccw_device *cdev, struct subchannel_id *schid)
+{
+	struct subchannel *sch = to_subchannel(cdev->dev.parent);
+
+	*schid = sch->schid;
+}
+EXPORT_SYMBOL_GPL(ccw_device_get_schid);
+
+EXPORT_SYMBOL(ccw_device_set_options_mask);
+EXPORT_SYMBOL(ccw_device_set_options);
+EXPORT_SYMBOL(ccw_device_clear_options);
+EXPORT_SYMBOL(ccw_device_clear);
+EXPORT_SYMBOL(ccw_device_halt);
+EXPORT_SYMBOL(ccw_device_resume);
+EXPORT_SYMBOL(ccw_device_start_timeout);
+EXPORT_SYMBOL(ccw_device_start);
+EXPORT_SYMBOL(ccw_device_start_timeout_key);
+EXPORT_SYMBOL(ccw_device_start_key);
+EXPORT_SYMBOL(ccw_device_get_ciw);
+EXPORT_SYMBOL(ccw_device_get_path_mask);
+EXPORT_SYMBOL_GPL(ccw_device_get_chp_desc);
+EXPORT_SYMBOL_GPL(ccw_device_get_util_str);
diff --git a/drivers/s390/cio/device_pgid.c b/drivers/s390/cio/device_pgid.c
new file mode 100644
index 0000000..d30a3ba
--- /dev/null
+++ b/drivers/s390/cio/device_pgid.c
@@ -0,0 +1,724 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ *  CCW device PGID and path verification I/O handling.
+ *
+ *    Copyright IBM Corp. 2002, 2009
+ *    Author(s): Cornelia Huck <cornelia.huck@de.ibm.com>
+ *		 Martin Schwidefsky <schwidefsky@de.ibm.com>
+ *		 Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/bitops.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <asm/ccwdev.h>
+#include <asm/cio.h>
+
+#include "cio.h"
+#include "cio_debug.h"
+#include "device.h"
+#include "io_sch.h"
+
+#define PGID_RETRIES	256
+#define PGID_TIMEOUT	(10 * HZ)
+
+static void verify_start(struct ccw_device *cdev);
+
+/*
+ * Process path verification data and report result.
+ */
+static void verify_done(struct ccw_device *cdev, int rc)
+{
+	struct subchannel *sch = to_subchannel(cdev->dev.parent);
+	struct ccw_dev_id *id = &cdev->private->dev_id;
+	int mpath = cdev->private->flags.mpath;
+	int pgroup = cdev->private->flags.pgroup;
+
+	if (rc)
+		goto out;
+	/* Ensure consistent multipathing state at device and channel. */
+	if (sch->config.mp != mpath) {
+		sch->config.mp = mpath;
+		rc = cio_commit_config(sch);
+	}
+out:
+	CIO_MSG_EVENT(2, "vrfy: device 0.%x.%04x: rc=%d pgroup=%d mpath=%d "
+			 "vpm=%02x\n", id->ssid, id->devno, rc, pgroup, mpath,
+			 sch->vpm);
+	ccw_device_verify_done(cdev, rc);
+}
+
+/*
+ * Create channel program to perform a NOOP.
+ */
+static void nop_build_cp(struct ccw_device *cdev)
+{
+	struct ccw_request *req = &cdev->private->req;
+	struct ccw1 *cp = cdev->private->iccws;
+
+	cp->cmd_code	= CCW_CMD_NOOP;
+	cp->cda		= 0;
+	cp->count	= 0;
+	cp->flags	= CCW_FLAG_SLI;
+	req->cp		= cp;
+}
+
+/*
+ * Perform NOOP on a single path.
+ */
+static void nop_do(struct ccw_device *cdev)
+{
+	struct subchannel *sch = to_subchannel(cdev->dev.parent);
+	struct ccw_request *req = &cdev->private->req;
+
+	req->lpm = lpm_adjust(req->lpm, sch->schib.pmcw.pam & sch->opm &
+			      ~cdev->private->path_noirq_mask);
+	if (!req->lpm)
+		goto out_nopath;
+	nop_build_cp(cdev);
+	ccw_request_start(cdev);
+	return;
+
+out_nopath:
+	verify_done(cdev, sch->vpm ? 0 : -EACCES);
+}
+
+/*
+ * Adjust NOOP I/O status.
+ */
+static enum io_status nop_filter(struct ccw_device *cdev, void *data,
+				 struct irb *irb, enum io_status status)
+{
+	/* Only subchannel status might indicate a path error. */
+	if (status == IO_STATUS_ERROR && irb->scsw.cmd.cstat == 0)
+		return IO_DONE;
+	return status;
+}
+
+/*
+ * Process NOOP request result for a single path.
+ */
+static void nop_callback(struct ccw_device *cdev, void *data, int rc)
+{
+	struct subchannel *sch = to_subchannel(cdev->dev.parent);
+	struct ccw_request *req = &cdev->private->req;
+
+	switch (rc) {
+	case 0:
+		sch->vpm |= req->lpm;
+		break;
+	case -ETIME:
+		cdev->private->path_noirq_mask |= req->lpm;
+		break;
+	case -EACCES:
+		cdev->private->path_notoper_mask |= req->lpm;
+		break;
+	default:
+		goto err;
+	}
+	/* Continue on the next path. */
+	req->lpm >>= 1;
+	nop_do(cdev);
+	return;
+
+err:
+	verify_done(cdev, rc);
+}
+
+/*
+ * Create channel program to perform SET PGID on a single path.
+ */
+static void spid_build_cp(struct ccw_device *cdev, u8 fn)
+{
+	struct ccw_request *req = &cdev->private->req;
+	struct ccw1 *cp = cdev->private->iccws;
+	int i = pathmask_to_pos(req->lpm);
+	struct pgid *pgid = &cdev->private->pgid[i];
+
+	pgid->inf.fc	= fn;
+	cp->cmd_code	= CCW_CMD_SET_PGID;
+	cp->cda		= (u32) (addr_t) pgid;
+	cp->count	= sizeof(*pgid);
+	cp->flags	= CCW_FLAG_SLI;
+	req->cp		= cp;
+}
+
+static void pgid_wipeout_callback(struct ccw_device *cdev, void *data, int rc)
+{
+	if (rc) {
+		/* We don't know the path groups' state. Abort. */
+		verify_done(cdev, rc);
+		return;
+	}
+	/*
+	 * Path groups have been reset. Restart path verification but
+	 * leave paths in path_noirq_mask out.
+	 */
+	cdev->private->flags.pgid_unknown = 0;
+	verify_start(cdev);
+}
+
+/*
+ * Reset pathgroups and restart path verification, leave unusable paths out.
+ */
+static void pgid_wipeout_start(struct ccw_device *cdev)
+{
+	struct subchannel *sch = to_subchannel(cdev->dev.parent);
+	struct ccw_dev_id *id = &cdev->private->dev_id;
+	struct ccw_request *req = &cdev->private->req;
+	u8 fn;
+
+	CIO_MSG_EVENT(2, "wipe: device 0.%x.%04x: pvm=%02x nim=%02x\n",
+		      id->ssid, id->devno, cdev->private->pgid_valid_mask,
+		      cdev->private->path_noirq_mask);
+
+	/* Initialize request data. */
+	memset(req, 0, sizeof(*req));
+	req->timeout	= PGID_TIMEOUT;
+	req->maxretries	= PGID_RETRIES;
+	req->lpm	= sch->schib.pmcw.pam;
+	req->callback	= pgid_wipeout_callback;
+	fn = SPID_FUNC_DISBAND;
+	if (cdev->private->flags.mpath)
+		fn |= SPID_FUNC_MULTI_PATH;
+	spid_build_cp(cdev, fn);
+	ccw_request_start(cdev);
+}
+
+/*
+ * Perform establish/resign SET PGID on a single path.
+ */
+static void spid_do(struct ccw_device *cdev)
+{
+	struct subchannel *sch = to_subchannel(cdev->dev.parent);
+	struct ccw_request *req = &cdev->private->req;
+	u8 fn;
+
+	/* Use the next available path that is not already in the correct state. */
+	req->lpm = lpm_adjust(req->lpm, cdev->private->pgid_todo_mask);
+	if (!req->lpm)
+		goto out_nopath;
+	/* Channel program setup. */
+	if (req->lpm & sch->opm)
+		fn = SPID_FUNC_ESTABLISH;
+	else
+		fn = SPID_FUNC_RESIGN;
+	if (cdev->private->flags.mpath)
+		fn |= SPID_FUNC_MULTI_PATH;
+	spid_build_cp(cdev, fn);
+	ccw_request_start(cdev);
+	return;
+
+out_nopath:
+	if (cdev->private->flags.pgid_unknown) {
+		/* At least one SPID could be partially done. */
+		pgid_wipeout_start(cdev);
+		return;
+	}
+	verify_done(cdev, sch->vpm ? 0 : -EACCES);
+}
+
+/*
+ * Process SET PGID request result for a single path.
+ */
+static void spid_callback(struct ccw_device *cdev, void *data, int rc)
+{
+	struct subchannel *sch = to_subchannel(cdev->dev.parent);
+	struct ccw_request *req = &cdev->private->req;
+
+	switch (rc) {
+	case 0:
+		sch->vpm |= req->lpm & sch->opm;
+		break;
+	case -ETIME:
+		cdev->private->flags.pgid_unknown = 1;
+		cdev->private->path_noirq_mask |= req->lpm;
+		break;
+	case -EACCES:
+		cdev->private->path_notoper_mask |= req->lpm;
+		break;
+	case -EOPNOTSUPP:
+		if (cdev->private->flags.mpath) {
+			/* Try without multipathing. */
+			cdev->private->flags.mpath = 0;
+			goto out_restart;
+		}
+		/* Try without pathgrouping. */
+		cdev->private->flags.pgroup = 0;
+		goto out_restart;
+	default:
+		goto err;
+	}
+	req->lpm >>= 1;
+	spid_do(cdev);
+	return;
+
+out_restart:
+	verify_start(cdev);
+	return;
+err:
+	verify_done(cdev, rc);
+}
+
+static void spid_start(struct ccw_device *cdev)
+{
+	struct ccw_request *req = &cdev->private->req;
+
+	/* Initialize request data. */
+	memset(req, 0, sizeof(*req));
+	req->timeout	= PGID_TIMEOUT;
+	req->maxretries	= PGID_RETRIES;
+	req->lpm	= 0x80;
+	req->singlepath	= 1;
+	req->callback	= spid_callback;
+	spid_do(cdev);
+}
+
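+/*
+ * Byte 0 of struct pgid holds the SPID function code (inf.fc), which
+ * spid_build_cp() rewrites before each SET PGID. The two helpers below
+ * therefore skip the first byte when testing for a reset PGID and when
+ * comparing PGIDs for equality.
+ */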
+static int pgid_is_reset(struct pgid *p)
+{
+	char *c;
+
+	for (c = (char *)p + 1; c < (char *)(p + 1); c++) {
+		if (*c != 0)
+			return 0;
+	}
+	return 1;
+}
+
+static int pgid_cmp(struct pgid *p1, struct pgid *p2)
+{
+	return memcmp((char *) p1 + 1, (char *) p2 + 1,
+		      sizeof(struct pgid) - 1);
+}
+
+/*
+ * Determine pathgroup state from PGID data.
+ */
+static void pgid_analyze(struct ccw_device *cdev, struct pgid **p,
+			 int *mismatch, u8 *reserved, u8 *reset)
+{
+	struct pgid *pgid = &cdev->private->pgid[0];
+	struct pgid *first = NULL;
+	int lpm;
+	int i;
+
+	*mismatch = 0;
+	*reserved = 0;
+	*reset = 0;
+	for (i = 0, lpm = 0x80; i < 8; i++, pgid++, lpm >>= 1) {
+		if ((cdev->private->pgid_valid_mask & lpm) == 0)
+			continue;
+		if (pgid->inf.ps.state2 == SNID_STATE2_RESVD_ELSE)
+			*reserved |= lpm;
+		if (pgid_is_reset(pgid)) {
+			*reset |= lpm;
+			continue;
+		}
+		if (!first) {
+			first = pgid;
+			continue;
+		}
+		if (pgid_cmp(pgid, first) != 0)
+			*mismatch = 1;
+	}
+	if (!first)
+		first = &channel_subsystems[0]->global_pgid;
+	*p = first;
+}
+
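+/*
+ * Compute the mask of paths whose path-group state already matches the
+ * target configuration; snid_done() drops these from pgid_todo_mask so
+ * that SET PGID is only issued on the remaining paths.
+ */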
+static u8 pgid_to_donepm(struct ccw_device *cdev)
+{
+	struct subchannel *sch = to_subchannel(cdev->dev.parent);
+	struct pgid *pgid;
+	int i;
+	int lpm;
+	u8 donepm = 0;
+
+	/* Set bits for paths which are already in the target state. */
+	for (i = 0; i < 8; i++) {
+		lpm = 0x80 >> i;
+		if ((cdev->private->pgid_valid_mask & lpm) == 0)
+			continue;
+		pgid = &cdev->private->pgid[i];
+		if (sch->opm & lpm) {
+			if (pgid->inf.ps.state1 != SNID_STATE1_GROUPED)
+				continue;
+		} else {
+			if (pgid->inf.ps.state1 != SNID_STATE1_UNGROUPED)
+				continue;
+		}
+		if (cdev->private->flags.mpath) {
+			if (pgid->inf.ps.state3 != SNID_STATE3_MULTI_PATH)
+				continue;
+		} else {
+			if (pgid->inf.ps.state3 != SNID_STATE3_SINGLE_PATH)
+				continue;
+		}
+		donepm |= lpm;
+	}
+
+	return donepm;
+}
+
+static void pgid_fill(struct ccw_device *cdev, struct pgid *pgid)
+{
+	int i;
+
+	for (i = 0; i < 8; i++)
+		memcpy(&cdev->private->pgid[i], pgid, sizeof(struct pgid));
+}
+
+/*
+ * Process SENSE PGID data and report result.
+ */
+static void snid_done(struct ccw_device *cdev, int rc)
+{
+	struct ccw_dev_id *id = &cdev->private->dev_id;
+	struct subchannel *sch = to_subchannel(cdev->dev.parent);
+	struct pgid *pgid;
+	int mismatch = 0;
+	u8 reserved = 0;
+	u8 reset = 0;
+	u8 donepm;
+
+	if (rc)
+		goto out;
+	pgid_analyze(cdev, &pgid, &mismatch, &reserved, &reset);
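+	/* All valid paths report a reservation held by another system. */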
+	if (reserved == cdev->private->pgid_valid_mask)
+		rc = -EUSERS;
+	else if (mismatch)
+		rc = -EOPNOTSUPP;
+	else {
+		donepm = pgid_to_donepm(cdev);
+		sch->vpm = donepm & sch->opm;
+		cdev->private->pgid_reset_mask |= reset;
+		cdev->private->pgid_todo_mask &=
+			~(donepm | cdev->private->path_noirq_mask);
+		pgid_fill(cdev, pgid);
+	}
+out:
+	CIO_MSG_EVENT(2, "snid: device 0.%x.%04x: rc=%d pvm=%02x vpm=%02x "
+		      "todo=%02x mism=%d rsvd=%02x reset=%02x\n", id->ssid,
+		      id->devno, rc, cdev->private->pgid_valid_mask, sch->vpm,
+		      cdev->private->pgid_todo_mask, mismatch, reserved, reset);
+	switch (rc) {
+	case 0:
+		if (cdev->private->flags.pgid_unknown) {
+			pgid_wipeout_start(cdev);
+			return;
+		}
+		/* Anything left to do? */
+		if (cdev->private->pgid_todo_mask == 0) {
+			verify_done(cdev, sch->vpm == 0 ? -EACCES : 0);
+			return;
+		}
+		/* Perform path-grouping. */
+		spid_start(cdev);
+		break;
+	case -EOPNOTSUPP:
+		/* Path-grouping not supported. */
+		cdev->private->flags.pgroup = 0;
+		cdev->private->flags.mpath = 0;
+		verify_start(cdev);
+		break;
+	default:
+		verify_done(cdev, rc);
+	}
+}
+
+/*
+ * Create channel program to perform a SENSE PGID on a single path.
+ */
+static void snid_build_cp(struct ccw_device *cdev)
+{
+	struct ccw_request *req = &cdev->private->req;
+	struct ccw1 *cp = cdev->private->iccws;
+	int i = pathmask_to_pos(req->lpm);
+
+	/* Channel program setup. */
+	cp->cmd_code	= CCW_CMD_SENSE_PGID;
+	cp->cda		= (u32) (addr_t) &cdev->private->pgid[i];
+	cp->count	= sizeof(struct pgid);
+	cp->flags	= CCW_FLAG_SLI;
+	req->cp		= cp;
+}
+
+/*
+ * Perform SENSE PGID on a single path.
+ */
+static void snid_do(struct ccw_device *cdev)
+{
+	struct subchannel *sch = to_subchannel(cdev->dev.parent);
+	struct ccw_request *req = &cdev->private->req;
+	int ret;
+
+	req->lpm = lpm_adjust(req->lpm, sch->schib.pmcw.pam &
+			      ~cdev->private->path_noirq_mask);
+	if (!req->lpm)
+		goto out_nopath;
+	snid_build_cp(cdev);
+	ccw_request_start(cdev);
+	return;
+
+out_nopath:
+	if (cdev->private->pgid_valid_mask)
+		ret = 0;
+	else if (cdev->private->path_noirq_mask)
+		ret = -ETIME;
+	else
+		ret = -EACCES;
+	snid_done(cdev, ret);
+}
+
+/*
+ * Process SENSE PGID request result for a single path.
+ */
+static void snid_callback(struct ccw_device *cdev, void *data, int rc)
+{
+	struct ccw_request *req = &cdev->private->req;
+
+	switch (rc) {
+	case 0:
+		cdev->private->pgid_valid_mask |= req->lpm;
+		break;
+	case -ETIME:
+		cdev->private->flags.pgid_unknown = 1;
+		cdev->private->path_noirq_mask |= req->lpm;
+		break;
+	case -EACCES:
+		cdev->private->path_notoper_mask |= req->lpm;
+		break;
+	default:
+		goto err;
+	}
+	/* Continue on the next path. */
+	req->lpm >>= 1;
+	snid_do(cdev);
+	return;
+
+err:
+	snid_done(cdev, rc);
+}
+
+/*
+ * Perform path verification.
+ */
+static void verify_start(struct ccw_device *cdev)
+{
+	struct subchannel *sch = to_subchannel(cdev->dev.parent);
+	struct ccw_request *req = &cdev->private->req;
+	struct ccw_dev_id *devid = &cdev->private->dev_id;
+
+	sch->vpm = 0;
+	sch->lpm = sch->schib.pmcw.pam;
+
+	/* Initialize PGID data. */
+	memset(cdev->private->pgid, 0, sizeof(cdev->private->pgid));
+	cdev->private->pgid_valid_mask = 0;
+	cdev->private->pgid_todo_mask = sch->schib.pmcw.pam;
+	cdev->private->path_notoper_mask = 0;
+
+	/* Initialize request data. */
+	memset(req, 0, sizeof(*req));
+	req->timeout	= PGID_TIMEOUT;
+	req->maxretries	= PGID_RETRIES;
+	req->lpm	= 0x80;
+	req->singlepath	= 1;
+	if (cdev->private->flags.pgroup) {
+		CIO_TRACE_EVENT(4, "snid");
+		CIO_HEX_EVENT(4, devid, sizeof(*devid));
+		req->callback	= snid_callback;
+		snid_do(cdev);
+	} else {
+		CIO_TRACE_EVENT(4, "nop");
+		CIO_HEX_EVENT(4, devid, sizeof(*devid));
+		req->filter	= nop_filter;
+		req->callback	= nop_callback;
+		nop_do(cdev);
+	}
+}
+
+/**
+ * ccw_device_verify_start - perform path verification
+ * @cdev: ccw device
+ *
+ * Perform an I/O on each available channel path to @cdev to determine which
+ * paths are operational. The resulting path mask is stored in sch->vpm.
+ * If device options specify pathgrouping, establish a pathgroup for the
+ * operational paths. When finished, call ccw_device_verify_done with a
+ * return code specifying the result.
+ */
+void ccw_device_verify_start(struct ccw_device *cdev)
+{
+	CIO_TRACE_EVENT(4, "vrfy");
+	CIO_HEX_EVENT(4, &cdev->private->dev_id, sizeof(cdev->private->dev_id));
+	/*
+	 * Initialize pathgroup and multipath state with target values.
+	 * They may change in the course of path verification.
+	 */
+	cdev->private->flags.pgroup = cdev->private->options.pgroup;
+	cdev->private->flags.mpath = cdev->private->options.mpath;
+	cdev->private->flags.doverify = 0;
+	cdev->private->path_noirq_mask = 0;
+	verify_start(cdev);
+}
+
+/*
+ * Process disband SET PGID request result.
+ */
+static void disband_callback(struct ccw_device *cdev, void *data, int rc)
+{
+	struct subchannel *sch = to_subchannel(cdev->dev.parent);
+	struct ccw_dev_id *id = &cdev->private->dev_id;
+
+	if (rc)
+		goto out;
+	/* Ensure consistent multipathing state at device and channel. */
+	cdev->private->flags.mpath = 0;
+	if (sch->config.mp) {
+		sch->config.mp = 0;
+		rc = cio_commit_config(sch);
+	}
+out:
+	CIO_MSG_EVENT(0, "disb: device 0.%x.%04x: rc=%d\n", id->ssid, id->devno,
+		      rc);
+	ccw_device_disband_done(cdev, rc);
+}
+
+/**
+ * ccw_device_disband_start - disband pathgroup
+ * @cdev: ccw device
+ *
+ * Execute a SET PGID channel program on @cdev to disband a previously
+ * established pathgroup. When finished, call ccw_device_disband_done with
+ * a return code specifying the result.
+ */
+void ccw_device_disband_start(struct ccw_device *cdev)
+{
+	struct subchannel *sch = to_subchannel(cdev->dev.parent);
+	struct ccw_request *req = &cdev->private->req;
+	u8 fn;
+
+	CIO_TRACE_EVENT(4, "disb");
+	CIO_HEX_EVENT(4, &cdev->private->dev_id, sizeof(cdev->private->dev_id));
+	/* Request setup. */
+	memset(req, 0, sizeof(*req));
+	req->timeout	= PGID_TIMEOUT;
+	req->maxretries	= PGID_RETRIES;
+	req->lpm	= sch->schib.pmcw.pam & sch->opm;
+	req->singlepath	= 1;
+	req->callback	= disband_callback;
+	fn = SPID_FUNC_DISBAND;
+	if (cdev->private->flags.mpath)
+		fn |= SPID_FUNC_MULTI_PATH;
+	spid_build_cp(cdev, fn);
+	ccw_request_start(cdev);
+}
+
+struct stlck_data {
+	struct completion done;
+	int rc;
+};
+
+static void stlck_build_cp(struct ccw_device *cdev, void *buf1, void *buf2)
+{
+	struct ccw_request *req = &cdev->private->req;
+	struct ccw1 *cp = cdev->private->iccws;
+
+	cp[0].cmd_code = CCW_CMD_STLCK;
+	cp[0].cda = (u32) (addr_t) buf1;
+	cp[0].count = 32;
+	cp[0].flags = CCW_FLAG_CC;
+	cp[1].cmd_code = CCW_CMD_RELEASE;
+	cp[1].cda = (u32) (addr_t) buf2;
+	cp[1].count = 32;
+	cp[1].flags = 0;
+	req->cp = cp;
+}
+
+static void stlck_callback(struct ccw_device *cdev, void *data, int rc)
+{
+	struct stlck_data *sdata = data;
+
+	sdata->rc = rc;
+	complete(&sdata->done);
+}
+
+/**
+ * ccw_device_stlck_start - perform unconditional release
+ * @cdev: ccw device
+ * @data: data pointer to be passed to ccw_device_stlck_done
+ * @buf1: data pointer used in channel program
+ * @buf2: data pointer used in channel program
+ *
+ * Execute a channel program on @cdev to release an existing PGID reservation.
+ */
+static void ccw_device_stlck_start(struct ccw_device *cdev, void *data,
+				   void *buf1, void *buf2)
+{
+	struct subchannel *sch = to_subchannel(cdev->dev.parent);
+	struct ccw_request *req = &cdev->private->req;
+
+	CIO_TRACE_EVENT(4, "stlck");
+	CIO_HEX_EVENT(4, &cdev->private->dev_id, sizeof(cdev->private->dev_id));
+	/* Request setup. */
+	memset(req, 0, sizeof(*req));
+	req->timeout	= PGID_TIMEOUT;
+	req->maxretries	= PGID_RETRIES;
+	req->lpm	= sch->schib.pmcw.pam & sch->opm;
+	req->data	= data;
+	req->callback	= stlck_callback;
+	stlck_build_cp(cdev, buf1, buf2);
+	ccw_request_start(cdev);
+}
+
+/*
+ * Perform unconditional reserve + release.
+ */
+int ccw_device_stlck(struct ccw_device *cdev)
+{
+	struct subchannel *sch = to_subchannel(cdev->dev.parent);
+	struct stlck_data data;
+	u8 *buffer;
+	int rc;
+
+	/* Check if steal lock operation is valid for this device. */
+	if (cdev->drv) {
+		if (!cdev->private->options.force)
+			return -EINVAL;
+	}
+	buffer = kzalloc(64, GFP_DMA | GFP_KERNEL);
+	if (!buffer)
+		return -ENOMEM;
+	init_completion(&data.done);
+	data.rc = -EIO;
+	spin_lock_irq(sch->lock);
+	rc = cio_enable_subchannel(sch, (u32) (addr_t) sch);
+	if (rc)
+		goto out_unlock;
+	/* Perform operation. */
+	cdev->private->state = DEV_STATE_STEAL_LOCK;
+	ccw_device_stlck_start(cdev, &data, &buffer[0], &buffer[32]);
+	spin_unlock_irq(sch->lock);
+	/* Wait for operation to finish. */
+	if (wait_for_completion_interruptible(&data.done)) {
+		/* Got a signal. */
+		spin_lock_irq(sch->lock);
+		ccw_request_cancel(cdev);
+		spin_unlock_irq(sch->lock);
+		wait_for_completion(&data.done);
+	}
+	rc = data.rc;
+	/* Check results. */
+	spin_lock_irq(sch->lock);
+	cio_disable_subchannel(sch);
+	cdev->private->state = DEV_STATE_BOXED;
+out_unlock:
+	spin_unlock_irq(sch->lock);
+	kfree(buffer);
+
+	return rc;
+}
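+
+/*
+ * Editorial note: this "steal lock" sequence is typically reached when a
+ * boxed (reserved by another system) device is forced online with the
+ * force option set. On completion the device is left in DEV_STATE_BOXED
+ * and must pass path verification again before it can be used.
+ */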
diff --git a/drivers/s390/cio/device_status.c b/drivers/s390/cio/device_status.c
new file mode 100644
index 0000000..7d5c789
--- /dev/null
+++ b/drivers/s390/cio/device_status.c
@@ -0,0 +1,399 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ *    Copyright IBM Corp. 2002
+ *    Author(s): Cornelia Huck (cornelia.huck@de.ibm.com)
+ *		 Martin Schwidefsky (schwidefsky@de.ibm.com)
+ *
+ * Status accumulation and basic sense functions.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+
+#include <asm/ccwdev.h>
+#include <asm/cio.h>
+
+#include "cio.h"
+#include "cio_debug.h"
+#include "css.h"
+#include "device.h"
+#include "ioasm.h"
+#include "io_sch.h"
+
+/*
+ * Check for any kind of channel or interface control check but don't
+ * issue the message for the console device
+ */
+static void
+ccw_device_msg_control_check(struct ccw_device *cdev, struct irb *irb)
+{
+	struct subchannel *sch = to_subchannel(cdev->dev.parent);
+	char dbf_text[15];
+
+	if (!scsw_is_valid_cstat(&irb->scsw) ||
+	    !(scsw_cstat(&irb->scsw) & (SCHN_STAT_CHN_DATA_CHK |
+	      SCHN_STAT_CHN_CTRL_CHK | SCHN_STAT_INTF_CTRL_CHK)))
+		return;
+	CIO_MSG_EVENT(0, "Channel-Check or Interface-Control-Check "
+		      "received"
+		      " ... device %04x on subchannel 0.%x.%04x, dev_stat "
+		      ": %02X sch_stat : %02X\n",
+		      cdev->private->dev_id.devno, sch->schid.ssid,
+		      sch->schid.sch_no,
+		      scsw_dstat(&irb->scsw), scsw_cstat(&irb->scsw));
+	sprintf(dbf_text, "chk%x", sch->schid.sch_no);
+	CIO_TRACE_EVENT(0, dbf_text);
+	CIO_HEX_EVENT(0, irb, sizeof(struct irb));
+}
+
+/*
+ * Some paths have become non-operational (the pno bit in the scsw is set).
+ */
+static void
+ccw_device_path_notoper(struct ccw_device *cdev)
+{
+	struct subchannel *sch;
+
+	sch = to_subchannel(cdev->dev.parent);
+	if (cio_update_schib(sch))
+		goto doverify;
+
+	CIO_MSG_EVENT(0, "%s(0.%x.%04x) - path(s) %02x are "
+		      "not operational \n", __func__,
+		      sch->schid.ssid, sch->schid.sch_no,
+		      sch->schib.pmcw.pnom);
+
+	sch->lpm &= ~sch->schib.pmcw.pnom;
+doverify:
+	cdev->private->flags.doverify = 1;
+}
+
+/*
+ * Copy valid bits from the extended control word to device irb.
+ */
+static void
+ccw_device_accumulate_ecw(struct ccw_device *cdev, struct irb *irb)
+{
+	/*
+	 * Copy the extended control bit if it is valid... yes, there
+	 * are conditions that have to be met for the extended control
+	 * bit to have meaning. Sick.
+	 */
+	cdev->private->irb.scsw.cmd.ectl = 0;
+	if ((irb->scsw.cmd.stctl & SCSW_STCTL_ALERT_STATUS) &&
+	    !(irb->scsw.cmd.stctl & SCSW_STCTL_INTER_STATUS))
+		cdev->private->irb.scsw.cmd.ectl = irb->scsw.cmd.ectl;
+	/* Check if extended control word is valid. */
+	if (!cdev->private->irb.scsw.cmd.ectl)
+		return;
+	/* Copy concurrent sense / model dependent information. */
+	memcpy(&cdev->private->irb.ecw, irb->ecw, sizeof(irb->ecw));
+}
+
+/*
+ * Check if extended status word is valid.
+ */
+static int
+ccw_device_accumulate_esw_valid(struct irb *irb)
+{
+	if (!irb->scsw.cmd.eswf &&
+	    (irb->scsw.cmd.stctl == SCSW_STCTL_STATUS_PEND))
+		return 0;
+	if (irb->scsw.cmd.stctl ==
+			(SCSW_STCTL_INTER_STATUS|SCSW_STCTL_STATUS_PEND) &&
+	    !(irb->scsw.cmd.actl & SCSW_ACTL_SUSPENDED))
+		return 0;
+	return 1;
+}
+
+/*
+ * Copy valid bits from the extended status word to device irb.
+ */
+static void
+ccw_device_accumulate_esw(struct ccw_device *cdev, struct irb *irb)
+{
+	struct irb *cdev_irb;
+	struct sublog *cdev_sublog, *sublog;
+
+	if (!ccw_device_accumulate_esw_valid(irb))
+		return;
+
+	cdev_irb = &cdev->private->irb;
+
+	/* Copy last path used mask. */
+	cdev_irb->esw.esw1.lpum = irb->esw.esw1.lpum;
+
+	/* Copy subchannel logout information if esw is of format 0. */
+	if (irb->scsw.cmd.eswf) {
+		cdev_sublog = &cdev_irb->esw.esw0.sublog;
+		sublog = &irb->esw.esw0.sublog;
+		/* Copy extended status flags. */
+		cdev_sublog->esf = sublog->esf;
+		/*
+		 * Copy fields that have a meaning for channel data check
+		 * channel control check and interface control check.
+		 */
+		if (irb->scsw.cmd.cstat & (SCHN_STAT_CHN_DATA_CHK |
+				       SCHN_STAT_CHN_CTRL_CHK |
+				       SCHN_STAT_INTF_CTRL_CHK)) {
+			/* Copy ancillary report bit. */
+			cdev_sublog->arep = sublog->arep;
+			/* Copy field-validity-flags. */
+			cdev_sublog->fvf = sublog->fvf;
+			/* Copy storage access code. */
+			cdev_sublog->sacc = sublog->sacc;
+			/* Copy termination code. */
+			cdev_sublog->termc = sublog->termc;
+			/* Copy sequence code. */
+			cdev_sublog->seqc = sublog->seqc;
+		}
+		/* Copy device status check. */
+		cdev_sublog->devsc = sublog->devsc;
+		/* Copy secondary error. */
+		cdev_sublog->serr = sublog->serr;
+		/* Copy i/o-error alert. */
+		cdev_sublog->ioerr = sublog->ioerr;
+		/* Copy channel path timeout bit. */
+		if (irb->scsw.cmd.cstat & SCHN_STAT_INTF_CTRL_CHK)
+			cdev_irb->esw.esw0.erw.cpt = irb->esw.esw0.erw.cpt;
+		/* Copy failing storage address validity flag. */
+		cdev_irb->esw.esw0.erw.fsavf = irb->esw.esw0.erw.fsavf;
+		if (cdev_irb->esw.esw0.erw.fsavf) {
+			/* ... and copy the failing storage address. */
+			memcpy(cdev_irb->esw.esw0.faddr, irb->esw.esw0.faddr,
+			       sizeof(irb->esw.esw0.faddr));
+			/* ... and copy the failing storage address format. */
+			cdev_irb->esw.esw0.erw.fsaf = irb->esw.esw0.erw.fsaf;
+		}
+		/* Copy secondary ccw address validity bit. */
+		cdev_irb->esw.esw0.erw.scavf = irb->esw.esw0.erw.scavf;
+		if (irb->esw.esw0.erw.scavf)
+			/* ... and copy the secondary ccw address. */
+			cdev_irb->esw.esw0.saddr = irb->esw.esw0.saddr;
+
+	}
+	/* FIXME: DCTI for format 2? */
+
+	/* Copy authorization bit. */
+	cdev_irb->esw.esw0.erw.auth = irb->esw.esw0.erw.auth;
+	/* Copy path verification required flag. */
+	cdev_irb->esw.esw0.erw.pvrf = irb->esw.esw0.erw.pvrf;
+	if (irb->esw.esw0.erw.pvrf)
+		cdev->private->flags.doverify = 1;
+	/* Copy concurrent sense bit. */
+	cdev_irb->esw.esw0.erw.cons = irb->esw.esw0.erw.cons;
+	if (irb->esw.esw0.erw.cons)
+		cdev_irb->esw.esw0.erw.scnt = irb->esw.esw0.erw.scnt;
+}
+
+/*
+ * Accumulate status from irb to devstat.
+ */
+void
+ccw_device_accumulate_irb(struct ccw_device *cdev, struct irb *irb)
+{
+	struct irb *cdev_irb;
+
+	/*
+	 * Check if the status pending bit is set in stctl.
+	 * If not, the remaining bits have no meaning and we must ignore them.
+	 * The esw is not meaningful either.
+	 */
+	if (!(scsw_stctl(&irb->scsw) & SCSW_STCTL_STATUS_PEND))
+		return;
+
+	/* Check for channel checks and interface control checks. */
+	ccw_device_msg_control_check(cdev, irb);
+
+	/* Check for path not operational. */
+	if (scsw_is_valid_pno(&irb->scsw) && scsw_pno(&irb->scsw))
+		ccw_device_path_notoper(cdev);
+	/* No irb accumulation for transport mode irbs. */
+	if (scsw_is_tm(&irb->scsw)) {
+		memcpy(&cdev->private->irb, irb, sizeof(struct irb));
+		return;
+	}
+	/*
+	 * Don't accumulate unsolicited interrupts.
+	 */
+	if (!scsw_is_solicited(&irb->scsw))
+		return;
+
+	cdev_irb = &cdev->private->irb;
+
+	/*
+	 * If the clear function had been performed, all formerly pending
+	 * status at the subchannel has been cleared and we must not pass
+	 * intermediate accumulated status to the device driver.
+	 */
+	if (irb->scsw.cmd.fctl & SCSW_FCTL_CLEAR_FUNC)
+		memset(&cdev->private->irb, 0, sizeof(struct irb));
+
+	/* Copy bits which are valid only for the start function. */
+	if (irb->scsw.cmd.fctl & SCSW_FCTL_START_FUNC) {
+		/* Copy key. */
+		cdev_irb->scsw.cmd.key = irb->scsw.cmd.key;
+		/* Copy suspend control bit. */
+		cdev_irb->scsw.cmd.sctl = irb->scsw.cmd.sctl;
+		/* Accumulate deferred condition code. */
+		cdev_irb->scsw.cmd.cc |= irb->scsw.cmd.cc;
+		/* Copy ccw format bit. */
+		cdev_irb->scsw.cmd.fmt = irb->scsw.cmd.fmt;
+		/* Copy prefetch bit. */
+		cdev_irb->scsw.cmd.pfch = irb->scsw.cmd.pfch;
+		/* Copy initial-status-interruption-control. */
+		cdev_irb->scsw.cmd.isic = irb->scsw.cmd.isic;
+		/* Copy address limit checking control. */
+		cdev_irb->scsw.cmd.alcc = irb->scsw.cmd.alcc;
+		/* Copy suppress suspend bit. */
+		cdev_irb->scsw.cmd.ssi = irb->scsw.cmd.ssi;
+	}
+
+	/* Take care of the extended control bit and extended control word. */
+	ccw_device_accumulate_ecw(cdev, irb);
+
+	/* Accumulate function control. */
+	cdev_irb->scsw.cmd.fctl |= irb->scsw.cmd.fctl;
+	/* Copy activity control. */
+	cdev_irb->scsw.cmd.actl = irb->scsw.cmd.actl;
+	/* Accumulate status control. */
+	cdev_irb->scsw.cmd.stctl |= irb->scsw.cmd.stctl;
+	/*
+	 * Copy ccw address if it is valid. This is a bit simplified
+	 * but should be close enough for all practical purposes.
+	 */
+	if ((irb->scsw.cmd.stctl & SCSW_STCTL_PRIM_STATUS) ||
+	    ((irb->scsw.cmd.stctl ==
+	      (SCSW_STCTL_INTER_STATUS|SCSW_STCTL_STATUS_PEND)) &&
+	     (irb->scsw.cmd.actl & SCSW_ACTL_DEVACT) &&
+	     (irb->scsw.cmd.actl & SCSW_ACTL_SCHACT)) ||
+	    (irb->scsw.cmd.actl & SCSW_ACTL_SUSPENDED))
+		cdev_irb->scsw.cmd.cpa = irb->scsw.cmd.cpa;
+	/* Accumulate device status, but not the device busy flag. */
+	cdev_irb->scsw.cmd.dstat &= ~DEV_STAT_BUSY;
+	/* dstat is not always valid. */
+	if (irb->scsw.cmd.stctl &
+	    (SCSW_STCTL_PRIM_STATUS | SCSW_STCTL_SEC_STATUS
+	     | SCSW_STCTL_INTER_STATUS | SCSW_STCTL_ALERT_STATUS))
+		cdev_irb->scsw.cmd.dstat |= irb->scsw.cmd.dstat;
+	/* Accumulate subchannel status. */
+	cdev_irb->scsw.cmd.cstat |= irb->scsw.cmd.cstat;
+	/* Copy residual count if it is valid. */
+	if ((irb->scsw.cmd.stctl & SCSW_STCTL_PRIM_STATUS) &&
+	    (irb->scsw.cmd.cstat & ~(SCHN_STAT_PCI | SCHN_STAT_INCORR_LEN))
+	     == 0)
+		cdev_irb->scsw.cmd.count = irb->scsw.cmd.count;
+
+	/* Take care of bits in the extended status word. */
+	ccw_device_accumulate_esw(cdev, irb);
+
+	/*
+	 * Check whether we must issue a SENSE CCW ourselves if there is no
+	 * concurrent sense facility installed for the subchannel.
+	 * No sense is required if no delayed sense is pending
+	 * and we did not get a unit check without sense information.
+	 *
+	 * Note: We should check for ioinfo[irq]->flags.consns but VM
+	 *	 violates the ESA/390 architecture and doesn't present an
+	 *	 operand exception for virtual devices without concurrent
+	 *	 sense facility available/supported when enabling the
+	 *	 concurrent sense facility.
+	 */
+	if ((cdev_irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK) &&
+	    !(cdev_irb->esw.esw0.erw.cons))
+		cdev->private->flags.dosense = 1;
+}
+
+/*
+ * Do a basic sense.
+ */
+int
+ccw_device_do_sense(struct ccw_device *cdev, struct irb *irb)
+{
+	struct subchannel *sch;
+	struct ccw1 *sense_ccw;
+	int rc;
+
+	sch = to_subchannel(cdev->dev.parent);
+
+	/* A sense is required, can we do it now ? */
+	if (scsw_actl(&irb->scsw) & (SCSW_ACTL_DEVACT | SCSW_ACTL_SCHACT))
+		/*
+		 * We received a unit check but have no final status
+		 * yet, therefore we must delay the SENSE processing.
+		 * We must not report this intermediate status to the
+		 * device interrupt handler.
+		 */
+		return -EBUSY;
+
+	/*
+	 * We have ending status but no sense information. Do a basic sense.
+	 */
+	sense_ccw = &to_io_private(sch)->sense_ccw;
+	sense_ccw->cmd_code = CCW_CMD_BASIC_SENSE;
+	sense_ccw->cda = (__u32) __pa(cdev->private->irb.ecw);
+	sense_ccw->count = SENSE_MAX_COUNT;
+	sense_ccw->flags = CCW_FLAG_SLI;
+
+	rc = cio_start(sch, sense_ccw, 0xff);
+	if (rc == -ENODEV || rc == -EACCES)
+		dev_fsm_event(cdev, DEV_EVENT_VERIFY);
+	return rc;
+}
+
+/*
+ * Add information from basic sense to devstat.
+ */
+void
+ccw_device_accumulate_basic_sense(struct ccw_device *cdev, struct irb *irb)
+{
+	/*
+	 * Check if the status pending bit is set in stctl.
+	 * If not, the remaining bits have no meaning and we must ignore them.
+	 * The esw is not meaningful either.
+	 */
+	if (!(scsw_stctl(&irb->scsw) & SCSW_STCTL_STATUS_PEND))
+		return;
+
+	/* Check for channel checks and interface control checks. */
+	ccw_device_msg_control_check(cdev, irb);
+
+	/* Check for path not operational. */
+	if (scsw_is_valid_pno(&irb->scsw) && scsw_pno(&irb->scsw))
+		ccw_device_path_notoper(cdev);
+
+	if (!(irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK) &&
+	    (irb->scsw.cmd.dstat & DEV_STAT_CHN_END)) {
+		cdev->private->irb.esw.esw0.erw.cons = 1;
+		cdev->private->flags.dosense = 0;
+	}
+	/* Check if path verification is required. */
+	if (ccw_device_accumulate_esw_valid(irb) &&
+	    irb->esw.esw0.erw.pvrf)
+		cdev->private->flags.doverify = 1;
+}
+
+/*
+ * This function accumulates the status into the private devstat and
+ * starts a basic sense if one is needed.
+ */
+int
+ccw_device_accumulate_and_sense(struct ccw_device *cdev, struct irb *irb)
+{
+	ccw_device_accumulate_irb(cdev, irb);
+	if ((irb->scsw.cmd.actl  & (SCSW_ACTL_DEVACT | SCSW_ACTL_SCHACT)) != 0)
+		return -EBUSY;
+	/* Check for basic sense. */
+	if (cdev->private->flags.dosense &&
+	    !(irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK)) {
+		cdev->private->irb.esw.esw0.erw.cons = 1;
+		cdev->private->flags.dosense = 0;
+		return 0;
+	}
+	if (cdev->private->flags.dosense) {
+		ccw_device_do_sense(cdev, irb);
+		return -EBUSY;
+	}
+	return 0;
+}
+
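
ccw_device_accumulate_and_sense() is the building block an interrupt state machine uses: accumulate first, stay busy while the device is active or a basic sense is outstanding, and only then report the accumulated irb. A hedged sketch of such a caller follows; the function name is hypothetical, but the fields it touches are the ones defined in this file and in io_sch.h.

#include <linux/string.h>
#include <asm/ccwdev.h>

/* Hypothetical FSM fragment: called for each interrupt while waiting
 * for final status on cdev. */
static void my_irq_handler(struct ccw_device *cdev, struct irb *irb)
{
	/* -EBUSY: still active, or a basic sense was just started;
	 * wait for the next interrupt. */
	if (ccw_device_accumulate_and_sense(cdev, irb) == -EBUSY)
		return;
	/* Final status: hand the accumulated irb to the driver ... */
	if (cdev->handler)
		cdev->handler(cdev, cdev->private->intparm,
			      &cdev->private->irb);
	/* ... and reset the accumulation area for the next request. */
	memset(&cdev->private->irb, 0, sizeof(struct irb));
}
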
diff --git a/drivers/s390/cio/eadm_sch.c b/drivers/s390/cio/eadm_sch.c
new file mode 100644
index 0000000..53468ae
--- /dev/null
+++ b/drivers/s390/cio/eadm_sch.c
@@ -0,0 +1,411 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Driver for s390 eadm subchannels
+ *
+ * Copyright IBM Corp. 2012
+ * Author(s): Sebastian Ott <sebott@linux.vnet.ibm.com>
+ */
+
+#include <linux/kernel_stat.h>
+#include <linux/completion.h>
+#include <linux/workqueue.h>
+#include <linux/spinlock.h>
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/timer.h>
+#include <linux/slab.h>
+#include <linux/list.h>
+
+#include <asm/css_chars.h>
+#include <asm/debug.h>
+#include <asm/isc.h>
+#include <asm/cio.h>
+#include <asm/scsw.h>
+#include <asm/eadm.h>
+
+#include "eadm_sch.h"
+#include "ioasm.h"
+#include "cio.h"
+#include "css.h"
+#include "orb.h"
+
+MODULE_DESCRIPTION("driver for s390 eadm subchannels");
+MODULE_LICENSE("GPL");
+
+#define EADM_TIMEOUT (7 * HZ)
+static DEFINE_SPINLOCK(list_lock);
+static LIST_HEAD(eadm_list);
+
+static debug_info_t *eadm_debug;
+
+#define EADM_LOG(imp, txt) do {					\
+		debug_text_event(eadm_debug, imp, txt);		\
+	} while (0)
+
+static void EADM_LOG_HEX(int level, void *data, int length)
+{
+	debug_event(eadm_debug, level, data, length);
+}
+
+static void orb_init(union orb *orb)
+{
+	memset(orb, 0, sizeof(union orb));
+	orb->eadm.compat1 = 1;
+	orb->eadm.compat2 = 1;
+	orb->eadm.fmt = 1;
+	orb->eadm.x = 1;
+}
+
+static int eadm_subchannel_start(struct subchannel *sch, struct aob *aob)
+{
+	union orb *orb = &get_eadm_private(sch)->orb;
+	int cc;
+
+	orb_init(orb);
+	orb->eadm.aob = (u32)__pa(aob);
+	orb->eadm.intparm = (u32)(addr_t)sch;
+	orb->eadm.key = PAGE_DEFAULT_KEY >> 4;
+
+	EADM_LOG(6, "start");
+	EADM_LOG_HEX(6, &sch->schid, sizeof(sch->schid));
+
+	cc = ssch(sch->schid, orb);
+	switch (cc) {
+	case 0:
+		sch->schib.scsw.eadm.actl |= SCSW_ACTL_START_PEND;
+		break;
+	case 1:		/* status pending */
+	case 2:		/* busy */
+		return -EBUSY;
+	case 3:		/* not operational */
+		return -ENODEV;
+	}
+	return 0;
+}
+
+static int eadm_subchannel_clear(struct subchannel *sch)
+{
+	int cc;
+
+	cc = csch(sch->schid);
+	if (cc)
+		return -ENODEV;
+
+	sch->schib.scsw.eadm.actl |= SCSW_ACTL_CLEAR_PEND;
+	return 0;
+}
+
+static void eadm_subchannel_timeout(struct timer_list *t)
+{
+	struct eadm_private *private = from_timer(private, t, timer);
+	struct subchannel *sch = private->sch;
+
+	spin_lock_irq(sch->lock);
+	EADM_LOG(1, "timeout");
+	EADM_LOG_HEX(1, &sch->schid, sizeof(sch->schid));
+	if (eadm_subchannel_clear(sch))
+		EADM_LOG(0, "clear failed");
+	spin_unlock_irq(sch->lock);
+}
+
+static void eadm_subchannel_set_timeout(struct subchannel *sch, int expires)
+{
+	struct eadm_private *private = get_eadm_private(sch);
+
+	if (expires == 0) {
+		del_timer(&private->timer);
+		return;
+	}
+	if (timer_pending(&private->timer)) {
+		if (mod_timer(&private->timer, jiffies + expires))
+			return;
+	}
+	private->timer.expires = jiffies + expires;
+	add_timer(&private->timer);
+}
+
+static void eadm_subchannel_irq(struct subchannel *sch)
+{
+	struct eadm_private *private = get_eadm_private(sch);
+	struct eadm_scsw *scsw = &sch->schib.scsw.eadm;
+	struct irb *irb = this_cpu_ptr(&cio_irb);
+	blk_status_t error = BLK_STS_OK;
+
+	EADM_LOG(6, "irq");
+	EADM_LOG_HEX(6, irb, sizeof(*irb));
+
+	inc_irq_stat(IRQIO_ADM);
+
+	if ((scsw->stctl & (SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND))
+	    && scsw->eswf == 1 && irb->esw.eadm.erw.r)
+		error = BLK_STS_IOERR;
+
+	if (scsw->fctl & SCSW_FCTL_CLEAR_FUNC)
+		error = BLK_STS_TIMEOUT;
+
+	eadm_subchannel_set_timeout(sch, 0);
+
+	if (private->state != EADM_BUSY) {
+		EADM_LOG(1, "irq unsol");
+		EADM_LOG_HEX(1, irb, sizeof(*irb));
+		private->state = EADM_NOT_OPER;
+		css_sched_sch_todo(sch, SCH_TODO_EVAL);
+		return;
+	}
+	scm_irq_handler((struct aob *)(unsigned long)scsw->aob, error);
+	private->state = EADM_IDLE;
+
+	if (private->completion)
+		complete(private->completion);
+}
+
+static struct subchannel *eadm_get_idle_sch(void)
+{
+	struct eadm_private *private;
+	struct subchannel *sch;
+	unsigned long flags;
+
+	spin_lock_irqsave(&list_lock, flags);
+	list_for_each_entry(private, &eadm_list, head) {
+		sch = private->sch;
+		spin_lock(sch->lock);
+		if (private->state == EADM_IDLE) {
+			private->state = EADM_BUSY;
+			list_move_tail(&private->head, &eadm_list);
+			spin_unlock(sch->lock);
+			spin_unlock_irqrestore(&list_lock, flags);
+
+			return sch;
+		}
+		spin_unlock(sch->lock);
+	}
+	spin_unlock_irqrestore(&list_lock, flags);
+
+	return NULL;
+}
+
+int eadm_start_aob(struct aob *aob)
+{
+	struct eadm_private *private;
+	struct subchannel *sch;
+	unsigned long flags;
+	int ret;
+
+	sch = eadm_get_idle_sch();
+	if (!sch)
+		return -EBUSY;
+
+	spin_lock_irqsave(sch->lock, flags);
+	eadm_subchannel_set_timeout(sch, EADM_TIMEOUT);
+	ret = eadm_subchannel_start(sch, aob);
+	if (!ret)
+		goto out_unlock;
+
+	/* Handle start subchannel failure. */
+	eadm_subchannel_set_timeout(sch, 0);
+	private = get_eadm_private(sch);
+	private->state = EADM_NOT_OPER;
+	css_sched_sch_todo(sch, SCH_TODO_EVAL);
+
+out_unlock:
+	spin_unlock_irqrestore(sch->lock, flags);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(eadm_start_aob);
+
+static int eadm_subchannel_probe(struct subchannel *sch)
+{
+	struct eadm_private *private;
+	int ret;
+
+	private = kzalloc(sizeof(*private), GFP_KERNEL | GFP_DMA);
+	if (!private)
+		return -ENOMEM;
+
+	INIT_LIST_HEAD(&private->head);
+	timer_setup(&private->timer, eadm_subchannel_timeout, 0);
+
+	spin_lock_irq(sch->lock);
+	set_eadm_private(sch, private);
+	private->state = EADM_IDLE;
+	private->sch = sch;
+	sch->isc = EADM_SCH_ISC;
+	ret = cio_enable_subchannel(sch, (u32)(unsigned long)sch);
+	if (ret) {
+		set_eadm_private(sch, NULL);
+		spin_unlock_irq(sch->lock);
+		kfree(private);
+		goto out;
+	}
+	spin_unlock_irq(sch->lock);
+
+	spin_lock_irq(&list_lock);
+	list_add(&private->head, &eadm_list);
+	spin_unlock_irq(&list_lock);
+
+	if (dev_get_uevent_suppress(&sch->dev)) {
+		dev_set_uevent_suppress(&sch->dev, 0);
+		kobject_uevent(&sch->dev.kobj, KOBJ_ADD);
+	}
+out:
+	return ret;
+}
+
+static void eadm_quiesce(struct subchannel *sch)
+{
+	struct eadm_private *private = get_eadm_private(sch);
+	DECLARE_COMPLETION_ONSTACK(completion);
+	int ret;
+
+	spin_lock_irq(sch->lock);
+	if (private->state != EADM_BUSY)
+		goto disable;
+
+	if (eadm_subchannel_clear(sch))
+		goto disable;
+
+	private->completion = &completion;
+	spin_unlock_irq(sch->lock);
+
+	wait_for_completion_io(&completion);
+
+	spin_lock_irq(sch->lock);
+	private->completion = NULL;
+
+disable:
+	eadm_subchannel_set_timeout(sch, 0);
+	do {
+		ret = cio_disable_subchannel(sch);
+	} while (ret == -EBUSY);
+
+	spin_unlock_irq(sch->lock);
+}
+
+static int eadm_subchannel_remove(struct subchannel *sch)
+{
+	struct eadm_private *private = get_eadm_private(sch);
+
+	spin_lock_irq(&list_lock);
+	list_del(&private->head);
+	spin_unlock_irq(&list_lock);
+
+	eadm_quiesce(sch);
+
+	spin_lock_irq(sch->lock);
+	set_eadm_private(sch, NULL);
+	spin_unlock_irq(sch->lock);
+
+	kfree(private);
+
+	return 0;
+}
+
+static void eadm_subchannel_shutdown(struct subchannel *sch)
+{
+	eadm_quiesce(sch);
+}
+
+static int eadm_subchannel_freeze(struct subchannel *sch)
+{
+	return cio_disable_subchannel(sch);
+}
+
+static int eadm_subchannel_restore(struct subchannel *sch)
+{
+	return cio_enable_subchannel(sch, (u32)(unsigned long)sch);
+}
+
+/**
+ * eadm_subchannel_sch_event - process subchannel event
+ * @sch: subchannel
+ * @process: non-zero if function is called in process context
+ *
+ * An unspecified event occurred for this subchannel. Adjust data according
+ * to the current operational state of the subchannel. Return zero when the
+ * event has been handled sufficiently or -EAGAIN when this function should
+ * be called again in process context.
+ */
+static int eadm_subchannel_sch_event(struct subchannel *sch, int process)
+{
+	struct eadm_private *private;
+	unsigned long flags;
+
+	spin_lock_irqsave(sch->lock, flags);
+	if (!device_is_registered(&sch->dev))
+		goto out_unlock;
+
+	if (work_pending(&sch->todo_work))
+		goto out_unlock;
+
+	if (cio_update_schib(sch)) {
+		css_sched_sch_todo(sch, SCH_TODO_UNREG);
+		goto out_unlock;
+	}
+	private = get_eadm_private(sch);
+	if (private->state == EADM_NOT_OPER)
+		private->state = EADM_IDLE;
+
+out_unlock:
+	spin_unlock_irqrestore(sch->lock, flags);
+
+	return 0;
+}
+
+static struct css_device_id eadm_subchannel_ids[] = {
+	{ .match_flags = 0x1, .type = SUBCHANNEL_TYPE_ADM, },
+	{ /* end of list */ },
+};
+MODULE_DEVICE_TABLE(css, eadm_subchannel_ids);
+
+static struct css_driver eadm_subchannel_driver = {
+	.drv = {
+		.name = "eadm_subchannel",
+		.owner = THIS_MODULE,
+	},
+	.subchannel_type = eadm_subchannel_ids,
+	.irq = eadm_subchannel_irq,
+	.probe = eadm_subchannel_probe,
+	.remove = eadm_subchannel_remove,
+	.shutdown = eadm_subchannel_shutdown,
+	.sch_event = eadm_subchannel_sch_event,
+	.freeze = eadm_subchannel_freeze,
+	.thaw = eadm_subchannel_restore,
+	.restore = eadm_subchannel_restore,
+};
+
+static int __init eadm_sch_init(void)
+{
+	int ret;
+
+	if (!css_general_characteristics.eadm)
+		return -ENXIO;
+
+	eadm_debug = debug_register("eadm_log", 16, 1, 16);
+	if (!eadm_debug)
+		return -ENOMEM;
+
+	debug_register_view(eadm_debug, &debug_hex_ascii_view);
+	debug_set_level(eadm_debug, 2);
+
+	isc_register(EADM_SCH_ISC);
+	ret = css_driver_register(&eadm_subchannel_driver);
+	if (ret)
+		goto cleanup;
+
+	return ret;
+
+cleanup:
+	isc_unregister(EADM_SCH_ISC);
+	debug_unregister(eadm_debug);
+	return ret;
+}
+
+static void __exit eadm_sch_exit(void)
+{
+	css_driver_unregister(&eadm_subchannel_driver);
+	isc_unregister(EADM_SCH_ISC);
+	debug_unregister(eadm_debug);
+}
+module_init(eadm_sch_init);
+module_exit(eadm_sch_exit);
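
eadm_start_aob() is the driver's only external entry point; completion of the AOB is signalled asynchronously through scm_irq_handler() from eadm_subchannel_irq(). Here is a hedged sketch of a caller with an illustrative retry policy for the -EBUSY (no idle subchannel) case; the retry counts are not the scm_block ones.

#include <linux/delay.h>
#include <linux/errno.h>
#include <asm/eadm.h>

/* Illustrative: try to start an AOB, backing off briefly while all
 * eadm subchannels are busy. Must be called from process context. */
static int start_scm_request(struct aob *aob)
{
	int retries = 5;	/* illustrative */
	int ret;

	while ((ret = eadm_start_aob(aob)) == -EBUSY && --retries)
		usleep_range(100, 200);
	return ret;
}
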
diff --git a/drivers/s390/cio/eadm_sch.h b/drivers/s390/cio/eadm_sch.h
new file mode 100644
index 0000000..390ab5a
--- /dev/null
+++ b/drivers/s390/cio/eadm_sch.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef EADM_SCH_H
+#define EADM_SCH_H
+
+#include <linux/completion.h>
+#include <linux/device.h>
+#include <linux/timer.h>
+#include <linux/list.h>
+#include "orb.h"
+
+struct eadm_private {
+	union orb orb;
+	enum {EADM_IDLE, EADM_BUSY, EADM_NOT_OPER} state;
+	struct completion *completion;
+	struct subchannel *sch;
+	struct timer_list timer;
+	struct list_head head;
+} __aligned(8);
+
+#define get_eadm_private(n) ((struct eadm_private *)dev_get_drvdata(&n->dev))
+#define set_eadm_private(n, p) (dev_set_drvdata(&n->dev, p))
+
+#endif
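
The accessor macros above wrap the driver-core drvdata pointer; eadm_sch.c pairs them around the private area's lifetime, allocating with GFP_DMA so the embedded ORB is addressable by the channel subsystem. A condensed sketch of that pairing, with hypothetical helper names:

#include <linux/slab.h>

/* Illustrative probe-side setup (assuming sch is a bound subchannel). */
static int attach_private(struct subchannel *sch)
{
	struct eadm_private *private;

	private = kzalloc(sizeof(*private), GFP_KERNEL | GFP_DMA);
	if (!private)
		return -ENOMEM;
	set_eadm_private(sch, private);
	return 0;
}

/* ... and the matching remove-side teardown. */
static void detach_private(struct subchannel *sch)
{
	struct eadm_private *private = get_eadm_private(sch);

	set_eadm_private(sch, NULL);
	kfree(private);
}
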
diff --git a/drivers/s390/cio/fcx.c b/drivers/s390/cio/fcx.c
new file mode 100644
index 0000000..99c900c
--- /dev/null
+++ b/drivers/s390/cio/fcx.c
@@ -0,0 +1,351 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ *  Functions for assembling fcx enabled I/O control blocks.
+ *
+ *    Copyright IBM Corp. 2008
+ *    Author(s): Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <asm/fcx.h>
+#include "cio.h"
+
+/**
+ * tcw_get_intrg - return pointer to associated interrogate tcw
+ * @tcw: pointer to the original tcw
+ *
+ * Return a pointer to the interrogate tcw associated with the specified tcw
+ * or %NULL if there is no associated interrogate tcw.
+ */
+struct tcw *tcw_get_intrg(struct tcw *tcw)
+{
+	return (struct tcw *) ((addr_t) tcw->intrg);
+}
+EXPORT_SYMBOL(tcw_get_intrg);
+
+/**
+ * tcw_get_data - return pointer to input/output data associated with tcw
+ * @tcw: pointer to the tcw
+ *
+ * Return the input or output data address specified in the tcw depending
+ * on whether the r-bit or the w-bit is set. If neither bit is set, return
+ * %NULL.
+ */
+void *tcw_get_data(struct tcw *tcw)
+{
+	if (tcw->r)
+		return (void *) ((addr_t) tcw->input);
+	if (tcw->w)
+		return (void *) ((addr_t) tcw->output);
+	return NULL;
+}
+EXPORT_SYMBOL(tcw_get_data);
+
+/**
+ * tcw_get_tccb - return pointer to tccb associated with tcw
+ * @tcw: pointer to the tcw
+ *
+ * Return pointer to the tccb associated with this tcw.
+ */
+struct tccb *tcw_get_tccb(struct tcw *tcw)
+{
+	return (struct tccb *) ((addr_t) tcw->tccb);
+}
+EXPORT_SYMBOL(tcw_get_tccb);
+
+/**
+ * tcw_get_tsb - return pointer to tsb associated with tcw
+ * @tcw: pointer to the tcw
+ *
+ * Return pointer to the tsb associated with this tcw.
+ */
+struct tsb *tcw_get_tsb(struct tcw *tcw)
+{
+	return (struct tsb *) ((addr_t) tcw->tsb);
+}
+EXPORT_SYMBOL(tcw_get_tsb);
+
+/**
+ * tcw_init - initialize tcw data structure
+ * @tcw: pointer to the tcw to be initialized
+ * @r: initial value of the r-bit
+ * @w: initial value of the w-bit
+ *
+ * Initialize all fields of the specified tcw data structure with zero and
+ * fill in the format, flags, r and w fields.
+ */
+void tcw_init(struct tcw *tcw, int r, int w)
+{
+	memset(tcw, 0, sizeof(struct tcw));
+	tcw->format = TCW_FORMAT_DEFAULT;
+	tcw->flags = TCW_FLAGS_TIDAW_FORMAT(TCW_TIDAW_FORMAT_DEFAULT);
+	if (r)
+		tcw->r = 1;
+	if (w)
+		tcw->w = 1;
+}
+EXPORT_SYMBOL(tcw_init);
+
+static inline size_t tca_size(struct tccb *tccb)
+{
+	return tccb->tcah.tcal - 12;
+}
+
+static u32 calc_dcw_count(struct tccb *tccb)
+{
+	int offset;
+	struct dcw *dcw;
+	u32 count = 0;
+	size_t size;
+
+	size = tca_size(tccb);
+	for (offset = 0; offset < size;) {
+		dcw = (struct dcw *) &tccb->tca[offset];
+		count += dcw->count;
+		if (!(dcw->flags & DCW_FLAGS_CC))
+			break;
+		offset += sizeof(struct dcw) + ALIGN((int) dcw->cd_count, 4);
+	}
+	return count;
+}
+
+static u32 calc_cbc_size(struct tidaw *tidaw, int num)
+{
+	int i;
+	u32 cbc_data;
+	u32 cbc_count = 0;
+	u64 data_count = 0;
+
+	for (i = 0; i < num; i++) {
+		if (tidaw[i].flags & TIDAW_FLAGS_LAST)
+			break;
+		/* TODO: find out if padding applies to total of data
+		 * transferred or data transferred by this tidaw. Assumption:
+		 * applies to total. */
+		data_count += tidaw[i].count;
+		if (tidaw[i].flags & TIDAW_FLAGS_INSERT_CBC) {
+			cbc_data = 4 + ALIGN(data_count, 4) - data_count;
+			cbc_count += cbc_data;
+			data_count += cbc_data;
+		}
+	}
+	return cbc_count;
+}
+
+/**
+ * tcw_finalize - finalize tcw length fields and tidaw list
+ * @tcw: pointer to the tcw
+ * @num_tidaws: the number of tidaws used to address input/output data or zero
+ * if no tida is used
+ *
+ * Calculate the input/output count and the tccbl field in the tcw, add a
+ * tcat to the tccb and terminate the data tidaw list if one is used.
+ *
+ * Note: if an input or output tida is used, the tidaw-list must be stored
+ * in contiguous storage (no ttic). The tcal field in the tccb must be
+ * up-to-date.
+ */
+void tcw_finalize(struct tcw *tcw, int num_tidaws)
+{
+	struct tidaw *tidaw;
+	struct tccb *tccb;
+	struct tccb_tcat *tcat;
+	u32 count;
+
+	/* Terminate tidaw list. */
+	tidaw = tcw_get_data(tcw);
+	if (num_tidaws > 0)
+		tidaw[num_tidaws - 1].flags |= TIDAW_FLAGS_LAST;
+	/* Add tcat to tccb. */
+	tccb = tcw_get_tccb(tcw);
+	tcat = (struct tccb_tcat *) &tccb->tca[tca_size(tccb)];
+	memset(tcat, 0, sizeof(*tcat));
+	/* Calculate tcw input/output count and tcat transport count. */
+	count = calc_dcw_count(tccb);
+	if (tcw->w && (tcw->flags & TCW_FLAGS_OUTPUT_TIDA))
+		count += calc_cbc_size(tidaw, num_tidaws);
+	if (tcw->r)
+		tcw->input_count = count;
+	else if (tcw->w)
+		tcw->output_count = count;
+	tcat->count = ALIGN(count, 4) + 4;
+	/* Calculate tccbl. */
+	tcw->tccbl = (sizeof(struct tccb) + tca_size(tccb) +
+		      sizeof(struct tccb_tcat) - 20) >> 2;
+}
+EXPORT_SYMBOL(tcw_finalize);
+
+/**
+ * tcw_set_intrg - set the interrogate tcw address of a tcw
+ * @tcw: the tcw address
+ * @intrg_tcw: the address of the interrogate tcw
+ *
+ * Set the address of the interrogate tcw in the specified tcw.
+ */
+void tcw_set_intrg(struct tcw *tcw, struct tcw *intrg_tcw)
+{
+	tcw->intrg = (u32) ((addr_t) intrg_tcw);
+}
+EXPORT_SYMBOL(tcw_set_intrg);
+
+/**
+ * tcw_set_data - set data address and tida flag of a tcw
+ * @tcw: the tcw address
+ * @data: the data address
+ * @use_tidal: zero if the data address specifies a contiguous block of data,
+ * non-zero if it specifies a list of tidaws.
+ *
+ * Set the input/output data address of a tcw (depending on the value of the
+ * r-flag and w-flag). If @use_tidal is non-zero, the corresponding tida flag
+ * is set as well.
+ */
+void tcw_set_data(struct tcw *tcw, void *data, int use_tidal)
+{
+	if (tcw->r) {
+		tcw->input = (u64) ((addr_t) data);
+		if (use_tidal)
+			tcw->flags |= TCW_FLAGS_INPUT_TIDA;
+	} else if (tcw->w) {
+		tcw->output = (u64) ((addr_t) data);
+		if (use_tidal)
+			tcw->flags |= TCW_FLAGS_OUTPUT_TIDA;
+	}
+}
+EXPORT_SYMBOL(tcw_set_data);
+
+/**
+ * tcw_set_tccb - set tccb address of a tcw
+ * @tcw: the tcw address
+ * @tccb: the tccb address
+ *
+ * Set the address of the tccb in the specified tcw.
+ */
+void tcw_set_tccb(struct tcw *tcw, struct tccb *tccb)
+{
+	tcw->tccb = (u64) ((addr_t) tccb);
+}
+EXPORT_SYMBOL(tcw_set_tccb);
+
+/**
+ * tcw_set_tsb - set tsb address of a tcw
+ * @tcw: the tcw address
+ * @tsb: the tsb address
+ *
+ * Set the address of the tsb in the specified tcw.
+ */
+void tcw_set_tsb(struct tcw *tcw, struct tsb *tsb)
+{
+	tcw->tsb = (u64) ((addr_t) tsb);
+}
+EXPORT_SYMBOL(tcw_set_tsb);
+
+/**
+ * tccb_init - initialize tccb
+ * @tccb: the tccb address
+ * @size: the maximum size of the tccb
+ * @sac: the service-action-code to be used
+ *
+ * Initialize the header of the specified tccb by resetting all values to zero
+ * and filling in defaults for format, sac and initial tcal fields.
+ */
+void tccb_init(struct tccb *tccb, size_t size, u32 sac)
+{
+	memset(tccb, 0, size);
+	tccb->tcah.format = TCCB_FORMAT_DEFAULT;
+	tccb->tcah.sac = sac;
+	tccb->tcah.tcal = 12;
+}
+EXPORT_SYMBOL(tccb_init);
+
+/**
+ * tsb_init - initialize tsb
+ * @tsb: the tsb address
+ *
+ * Initialize the specified tsb by resetting all values to zero.
+ */
+void tsb_init(struct tsb *tsb)
+{
+	memset(tsb, 0, sizeof(*tsb));
+}
+EXPORT_SYMBOL(tsb_init);
+
+/**
+ * tccb_add_dcw - add a dcw to the tccb
+ * @tccb: the tccb address
+ * @tccb_size: the maximum tccb size
+ * @cmd: the dcw command
+ * @flags: flags for the dcw
+ * @cd: pointer to control data for this dcw or NULL if none is required
+ * @cd_count: number of control data bytes for this dcw
+ * @count: number of data bytes for this dcw
+ *
+ * Add a new dcw to the specified tccb by writing the dcw information specified
+ * by @cmd, @flags, @cd, @cd_count and @count to the tca of the tccb. Return
+ * a pointer to the newly added dcw on success or -%ENOSPC if the new dcw
+ * would exceed the available space as defined by @tccb_size.
+ *
+ * Note: the tcal field of the tccb header will be updated to reflect added
+ * content.
+ */
+struct dcw *tccb_add_dcw(struct tccb *tccb, size_t tccb_size, u8 cmd, u8 flags,
+			 void *cd, u8 cd_count, u32 count)
+{
+	struct dcw *dcw;
+	int size;
+	int tca_offset;
+
+	/* Check for space. */
+	tca_offset = tca_size(tccb);
+	size = ALIGN(sizeof(struct dcw) + cd_count, 4);
+	if (sizeof(struct tccb_tcah) + tca_offset + size +
+	    sizeof(struct tccb_tcat) > tccb_size)
+		return ERR_PTR(-ENOSPC);
+	/* Add dcw to tca. */
+	dcw = (struct dcw *) &tccb->tca[tca_offset];
+	memset(dcw, 0, size);
+	dcw->cmd = cmd;
+	dcw->flags = flags;
+	dcw->count = count;
+	dcw->cd_count = cd_count;
+	if (cd)
+		memcpy(&dcw->cd[0], cd, cd_count);
+	tccb->tcah.tcal += size;
+	return dcw;
+}
+EXPORT_SYMBOL(tccb_add_dcw);
+
+/**
+ * tcw_add_tidaw - add a tidaw to a tcw
+ * @tcw: the tcw address
+ * @num_tidaws: the current number of tidaws
+ * @flags: flags for the new tidaw
+ * @addr: address value for the new tidaw
+ * @count: count value for the new tidaw
+ *
+ * Add a new tidaw to the input/output data tidaw-list of the specified tcw
+ * (depending on the value of the r-flag and w-flag) and return a pointer to
+ * the new tidaw.
+ *
+ * Note: the tidaw-list is assumed to be contiguous with no ttics. The caller
+ * must ensure that there is enough space for the new tidaw. The last-tidaw
+ * flag for the last tidaw in the list will be set by tcw_finalize.
+ */
+struct tidaw *tcw_add_tidaw(struct tcw *tcw, int num_tidaws, u8 flags,
+			    void *addr, u32 count)
+{
+	struct tidaw *tidaw;
+
+	/* Add tidaw to tidaw-list. */
+	tidaw = ((struct tidaw *) tcw_get_data(tcw)) + num_tidaws;
+	memset(tidaw, 0, sizeof(struct tidaw));
+	tidaw->flags = flags;
+	tidaw->count = count;
+	tidaw->addr = (u64) ((addr_t) addr);
+	return tidaw;
+}
+EXPORT_SYMBOL(tcw_add_tidaw);
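
Taken together, these helpers assemble a transport-mode channel program in a fixed order: tcw_init(), tccb_init() plus dcws, attach tsb and data, then tcw_finalize() to fix up the counts and terminate the tidaw list. Below is a hedged sketch for a single-dcw read through a two-entry tidaw list; the command code 0x02 and the byte counts are illustrative, and the strict alignment rules (e.g. 64-byte tcw) are glossed over here since the itcw helpers in itcw.c handle them properly.

#include <linux/slab.h>
#include <asm/fcx.h>

/* Illustrative: build a read tcw that transfers 4k into two 2k
 * buffers. All control blocks must be 31-bit addressable (GFP_DMA). */
static struct tcw *build_read_tcw(void *buf1, void *buf2)
{
	struct tcw *tcw = kzalloc(sizeof(*tcw), GFP_KERNEL | GFP_DMA);
	struct tccb *tccb = kzalloc(TCCB_MAX_SIZE, GFP_KERNEL | GFP_DMA);
	struct tsb *tsb = kzalloc(sizeof(*tsb), GFP_KERNEL | GFP_DMA);
	struct tidaw *tidaws = kcalloc(2, sizeof(*tidaws),
				       GFP_KERNEL | GFP_DMA);

	if (!tcw || !tccb || !tsb || !tidaws)
		goto err;

	tcw_init(tcw, 1, 0);			/* read: r=1, w=0 */
	tccb_init(tccb, TCCB_MAX_SIZE, TCCB_SAC_DEFAULT);
	tcw_set_tccb(tcw, tccb);
	tsb_init(tsb);
	tcw_set_tsb(tcw, tsb);
	tcw_set_data(tcw, tidaws, 1);		/* data via tidaw list */

	/* One dcw (0x02 is an illustrative read command), 4k of data. */
	tccb_add_dcw(tccb, TCCB_MAX_SIZE, 0x02, 0, NULL, 0, 4096);
	tcw_add_tidaw(tcw, 0, 0, buf1, 2048);
	tcw_add_tidaw(tcw, 1, 0, buf2, 2048);
	tcw_finalize(tcw, 2);	/* terminates list, sets counts/tccbl */
	return tcw;
err:
	kfree(tidaws);
	kfree(tsb);
	kfree(tccb);
	kfree(tcw);
	return NULL;
}
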
diff --git a/drivers/s390/cio/idset.c b/drivers/s390/cio/idset.c
new file mode 100644
index 0000000..835de44
--- /dev/null
+++ b/drivers/s390/cio/idset.c
@@ -0,0 +1,112 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ *    Copyright IBM Corp. 2007, 2012
+ *    Author(s): Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
+ */
+
+#include <linux/vmalloc.h>
+#include <linux/bitmap.h>
+#include <linux/bitops.h>
+#include "idset.h"
+#include "css.h"
+
+struct idset {
+	int num_ssid;
+	int num_id;
+	unsigned long bitmap[];
+};
+
+static inline unsigned long bitmap_size(int num_ssid, int num_id)
+{
+	return BITS_TO_LONGS(num_ssid * num_id) * sizeof(unsigned long);
+}
+
+static struct idset *idset_new(int num_ssid, int num_id)
+{
+	struct idset *set;
+
+	set = vmalloc(sizeof(struct idset) + bitmap_size(num_ssid, num_id));
+	if (set) {
+		set->num_ssid = num_ssid;
+		set->num_id = num_id;
+		memset(set->bitmap, 0, bitmap_size(num_ssid, num_id));
+	}
+	return set;
+}
+
+void idset_free(struct idset *set)
+{
+	vfree(set);
+}
+
+void idset_fill(struct idset *set)
+{
+	memset(set->bitmap, 0xff, bitmap_size(set->num_ssid, set->num_id));
+}
+
+static inline void idset_add(struct idset *set, int ssid, int id)
+{
+	set_bit(ssid * set->num_id + id, set->bitmap);
+}
+
+static inline void idset_del(struct idset *set, int ssid, int id)
+{
+	clear_bit(ssid * set->num_id + id, set->bitmap);
+}
+
+static inline int idset_contains(struct idset *set, int ssid, int id)
+{
+	return test_bit(ssid * set->num_id + id, set->bitmap);
+}
+
+static inline int idset_get_first(struct idset *set, int *ssid, int *id)
+{
+	int bitnum;
+
+	bitnum = find_first_bit(set->bitmap, set->num_ssid * set->num_id);
+	if (bitnum >= set->num_ssid * set->num_id)
+		return 0;
+	*ssid = bitnum / set->num_id;
+	*id = bitnum % set->num_id;
+	return 1;
+}
+
+struct idset *idset_sch_new(void)
+{
+	return idset_new(max_ssid + 1, __MAX_SUBCHANNEL + 1);
+}
+
+void idset_sch_add(struct idset *set, struct subchannel_id schid)
+{
+	idset_add(set, schid.ssid, schid.sch_no);
+}
+
+void idset_sch_del(struct idset *set, struct subchannel_id schid)
+{
+	idset_del(set, schid.ssid, schid.sch_no);
+}
+
+/* Clear ids starting from @schid up to end of subchannel set. */
+void idset_sch_del_subseq(struct idset *set, struct subchannel_id schid)
+{
+	int pos = schid.ssid * set->num_id + schid.sch_no;
+
+	bitmap_clear(set->bitmap, pos, set->num_id - schid.sch_no);
+}
+
+int idset_sch_contains(struct idset *set, struct subchannel_id schid)
+{
+	return idset_contains(set, schid.ssid, schid.sch_no);
+}
+
+int idset_is_empty(struct idset *set)
+{
+	return bitmap_empty(set->bitmap, set->num_ssid * set->num_id);
+}
+
+void idset_add_set(struct idset *to, struct idset *from)
+{
+	int len = min(to->num_ssid * to->num_id, from->num_ssid * from->num_id);
+
+	bitmap_or(to->bitmap, to->bitmap, from->bitmap, len);
+}
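
The css code uses idsets of this kind to remember which subchannel IDs still need work between evaluation passes. A small hedged sketch of the API follows (the function is hypothetical; note that idset_sch_new() allocates with vmalloc and may sleep):

#include <linux/errno.h>
#include <linux/bug.h>
#include <asm/schid.h>

/* Illustrative: record one subchannel id and query the set. */
static int remember_schid(struct subchannel_id schid)
{
	struct idset *set = idset_sch_new();

	if (!set)
		return -ENOMEM;
	idset_sch_add(set, schid);
	WARN_ON(!idset_sch_contains(set, schid));
	WARN_ON(idset_is_empty(set));
	idset_sch_del(set, schid);
	WARN_ON(!idset_is_empty(set));
	idset_free(set);
	return 0;
}
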
diff --git a/drivers/s390/cio/idset.h b/drivers/s390/cio/idset.h
new file mode 100644
index 0000000..a3ece8d
--- /dev/null
+++ b/drivers/s390/cio/idset.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ *    Copyright IBM Corp. 2007, 2012
+ *    Author(s): Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
+ */
+
+#ifndef S390_IDSET_H
+#define S390_IDSET_H
+
+#include <asm/schid.h>
+
+struct idset;
+
+void idset_free(struct idset *set);
+void idset_fill(struct idset *set);
+
+struct idset *idset_sch_new(void);
+void idset_sch_add(struct idset *set, struct subchannel_id id);
+void idset_sch_del(struct idset *set, struct subchannel_id id);
+void idset_sch_del_subseq(struct idset *set, struct subchannel_id schid);
+int idset_sch_contains(struct idset *set, struct subchannel_id id);
+int idset_is_empty(struct idset *set);
+void idset_add_set(struct idset *to, struct idset *from);
+
+#endif /* S390_IDSET_H */
diff --git a/drivers/s390/cio/io_sch.h b/drivers/s390/cio/io_sch.h
new file mode 100644
index 0000000..90e4e3a
--- /dev/null
+++ b/drivers/s390/cio/io_sch.h
@@ -0,0 +1,175 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef S390_IO_SCH_H
+#define S390_IO_SCH_H
+
+#include <linux/types.h>
+#include <asm/schid.h>
+#include <asm/ccwdev.h>
+#include <asm/irq.h>
+#include "css.h"
+#include "orb.h"
+
+struct io_subchannel_private {
+	union orb orb;		/* operation request block */
+	struct ccw1 sense_ccw;	/* static ccw for sense command */
+	struct ccw_device *cdev;/* pointer to the child ccw device */
+	struct {
+		unsigned int suspend:1;	/* allow suspend */
+		unsigned int prefetch:1;/* deny prefetch */
+		unsigned int inter:1;	/* suppress intermediate interrupts */
+	} __packed options;
+} __aligned(8);
+
+#define to_io_private(n) ((struct io_subchannel_private *) \
+			  dev_get_drvdata(&(n)->dev))
+#define set_io_private(n, p) (dev_set_drvdata(&(n)->dev, p))
+
+static inline struct ccw_device *sch_get_cdev(struct subchannel *sch)
+{
+	struct io_subchannel_private *priv = to_io_private(sch);
+	return priv ? priv->cdev : NULL;
+}
+
+static inline void sch_set_cdev(struct subchannel *sch,
+				struct ccw_device *cdev)
+{
+	struct io_subchannel_private *priv = to_io_private(sch);
+	if (priv)
+		priv->cdev = cdev;
+}
+
+#define MAX_CIWS 8
+
+/*
+ * Possible status values for a CCW request's I/O.
+ */
+enum io_status {
+	IO_DONE,
+	IO_RUNNING,
+	IO_STATUS_ERROR,
+	IO_PATH_ERROR,
+	IO_REJECTED,
+	IO_KILLED
+};
+
+/**
+ * ccw_request - Internal CCW request.
+ * @cp: channel program to start
+ * @timeout: maximum allowable time in jiffies between start I/O and interrupt
+ * @maxretries: number of retries per I/O operation and path
+ * @lpm: mask of paths to use
+ * @check: optional callback that determines if results are final
+ * @filter: optional callback to adjust request status based on IRB data
+ * @callback: final callback
+ * @data: user-defined pointer passed to all callbacks
+ * @singlepath: if set, use only one path from @lpm per start I/O
+ * @cancel: non-zero if request was cancelled
+ * @done: non-zero if request was finished
+ * @mask: current path mask
+ * @retries: current number of retries
+ * @drc: delayed return code
+ */
+struct ccw_request {
+	struct ccw1 *cp;
+	unsigned long timeout;
+	u16 maxretries;
+	u8 lpm;
+	int (*check)(struct ccw_device *, void *);
+	enum io_status (*filter)(struct ccw_device *, void *, struct irb *,
+				 enum io_status);
+	void (*callback)(struct ccw_device *, void *, int);
+	void *data;
+	unsigned int singlepath:1;
+	/* These fields are used internally. */
+	unsigned int cancel:1;
+	unsigned int done:1;
+	u16 mask;
+	u16 retries;
+	int drc;
+} __attribute__((packed));
+
+/*
+ * sense-id response buffer layout
+ */
+struct senseid {
+	/* common part */
+	u8  reserved;	/* always 0xff */
+	u16 cu_type;	/* control unit type */
+	u8  cu_model;	/* control unit model */
+	u16 dev_type;	/* device type */
+	u8  dev_model;	/* device model */
+	u8  unused;	/* padding byte */
+	/* extended part */
+	struct ciw ciw[MAX_CIWS];	/* variable # of CIWs */
+}  __attribute__ ((packed, aligned(4)));
+
+enum cdev_todo {
+	CDEV_TODO_NOTHING,
+	CDEV_TODO_ENABLE_CMF,
+	CDEV_TODO_REBIND,
+	CDEV_TODO_REGISTER,
+	CDEV_TODO_UNREG,
+	CDEV_TODO_UNREG_EVAL,
+};
+
+#define FAKE_CMD_IRB	1
+#define FAKE_TM_IRB	2
+
+struct ccw_device_private {
+	struct ccw_device *cdev;
+	struct subchannel *sch;
+	int state;		/* device state */
+	atomic_t onoff;
+	struct ccw_dev_id dev_id;	/* device id */
+	struct ccw_request req;		/* internal I/O request */
+	int iretry;
+	u8 pgid_valid_mask;	/* mask of valid PGIDs */
+	u8 pgid_todo_mask;	/* mask of PGIDs to be adjusted */
+	u8 pgid_reset_mask;	/* mask of PGIDs which were reset */
+	u8 path_noirq_mask;	/* mask of paths for which no irq was
+				   received */
+	u8 path_notoper_mask;	/* mask of paths which were found
+				   not operable */
+	u8 path_gone_mask;	/* mask of paths that became unavailable */
+	u8 path_new_mask;	/* mask of paths that became available */
+	u8 path_broken_mask;	/* mask of paths that were found to be
+				   unusable */
+	struct {
+		unsigned int fast:1;	/* post with "channel end" */
+		unsigned int repall:1;	/* report every interrupt status */
+		unsigned int pgroup:1;	/* do path grouping */
+		unsigned int force:1;	/* allow forced online */
+		unsigned int mpath:1;	/* do multipathing */
+	} __attribute__ ((packed)) options;
+	struct {
+		unsigned int esid:1;	    /* Ext. SenseID supported by HW */
+		unsigned int dosense:1;	    /* delayed SENSE required */
+		unsigned int doverify:1;    /* delayed path verification */
+		unsigned int donotify:1;    /* call notify function */
+		unsigned int recog_done:1;  /* dev. recog. complete */
+		unsigned int fake_irb:2;    /* deliver faked irb */
+		unsigned int resuming:1;    /* recognition while resume */
+		unsigned int pgroup:1;	    /* pathgroup is set up */
+		unsigned int mpath:1;	    /* multipathing is set up */
+		unsigned int pgid_unknown:1;/* unknown pgid state */
+		unsigned int initialized:1; /* set if initial reference held */
+	} __attribute__((packed)) flags;
+	unsigned long intparm;	/* user interruption parameter */
+	struct qdio_irq *qdio_data;
+	struct irb irb;		/* device status */
+	int async_kill_io_rc;
+	struct senseid senseid;	/* SenseID info */
+	struct pgid pgid[8];	/* path group IDs per chpid*/
+	struct ccw1 iccws[2];	/* ccws for SNID/SID/SPGID commands */
+	struct work_struct todo_work;
+	enum cdev_todo todo;
+	wait_queue_head_t wait_q;
+	struct timer_list timer;
+	void *cmb;			/* measurement information */
+	struct list_head cmb_list;	/* list of measured devices */
+	u64 cmb_start_time;		/* clock value of cmb reset */
+	void *cmb_wait;			/* deferred cmb enable/disable */
+	enum interruption_class int_class;
+};
+
+#endif
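
A struct ccw_request embedded in ccw_device_private is filled in by the caller down to @singlepath; everything below the "used internally" comment belongs to the request infrastructure (ccw_request_start() and friends). A hedged sketch of such a setup, mirroring the shape of ccw_device_stlck_start() earlier; the timeout, retry count and path mask are illustrative values, and done_cb is a hypothetical final callback.

#include <linux/string.h>
#include <linux/jiffies.h>
#include <asm/ccwdev.h>

/* Illustrative: prepare the internal request for channel program cp. */
static void setup_request(struct ccw_device *cdev, struct ccw1 *cp,
			  void (*done_cb)(struct ccw_device *, void *, int),
			  void *data)
{
	struct ccw_request *req = &cdev->private->req;

	memset(req, 0, sizeof(*req));
	req->cp		= cp;
	req->timeout	= 10 * HZ;	/* illustrative */
	req->maxretries	= 5;		/* illustrative */
	req->lpm	= 0x80;		/* illustrative: first path only */
	req->singlepath	= 1;
	req->callback	= done_cb;
	req->data	= data;
}
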
diff --git a/drivers/s390/cio/ioasm.c b/drivers/s390/cio/ioasm.c
new file mode 100644
index 0000000..14d3283
--- /dev/null
+++ b/drivers/s390/cio/ioasm.c
@@ -0,0 +1,276 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Channel subsystem I/O instructions.
+ */
+
+#include <linux/export.h>
+
+#include <asm/chpid.h>
+#include <asm/schid.h>
+#include <asm/crw.h>
+
+#include "ioasm.h"
+#include "orb.h"
+#include "cio.h"
+
+static inline int __stsch(struct subchannel_id schid, struct schib *addr)
+{
+	register struct subchannel_id reg1 asm ("1") = schid;
+	int ccode = -EIO;
+
+	asm volatile(
+		"	stsch	0(%3)\n"
+		"0:	ipm	%0\n"
+		"	srl	%0,28\n"
+		"1:\n"
+		EX_TABLE(0b, 1b)
+		: "+d" (ccode), "=m" (*addr)
+		: "d" (reg1), "a" (addr)
+		: "cc");
+	return ccode;
+}
+
+int stsch(struct subchannel_id schid, struct schib *addr)
+{
+	int ccode;
+
+	ccode = __stsch(schid, addr);
+	trace_s390_cio_stsch(schid, addr, ccode);
+
+	return ccode;
+}
+EXPORT_SYMBOL(stsch);
+
+static inline int __msch(struct subchannel_id schid, struct schib *addr)
+{
+	register struct subchannel_id reg1 asm ("1") = schid;
+	int ccode = -EIO;
+
+	asm volatile(
+		"	msch	0(%2)\n"
+		"0:	ipm	%0\n"
+		"	srl	%0,28\n"
+		"1:\n"
+		EX_TABLE(0b, 1b)
+		: "+d" (ccode)
+		: "d" (reg1), "a" (addr), "m" (*addr)
+		: "cc");
+	return ccode;
+}
+
+int msch(struct subchannel_id schid, struct schib *addr)
+{
+	int ccode;
+
+	ccode = __msch(schid, addr);
+	trace_s390_cio_msch(schid, addr, ccode);
+
+	return ccode;
+}
+
+static inline int __tsch(struct subchannel_id schid, struct irb *addr)
+{
+	register struct subchannel_id reg1 asm ("1") = schid;
+	int ccode;
+
+	asm volatile(
+		"	tsch	0(%3)\n"
+		"	ipm	%0\n"
+		"	srl	%0,28"
+		: "=d" (ccode), "=m" (*addr)
+		: "d" (reg1), "a" (addr)
+		: "cc");
+	return ccode;
+}
+
+int tsch(struct subchannel_id schid, struct irb *addr)
+{
+	int ccode;
+
+	ccode = __tsch(schid, addr);
+	trace_s390_cio_tsch(schid, addr, ccode);
+
+	return ccode;
+}
+
+static inline int __ssch(struct subchannel_id schid, union orb *addr)
+{
+	register struct subchannel_id reg1 asm("1") = schid;
+	int ccode = -EIO;
+
+	asm volatile(
+		"	ssch	0(%2)\n"
+		"0:	ipm	%0\n"
+		"	srl	%0,28\n"
+		"1:\n"
+		EX_TABLE(0b, 1b)
+		: "+d" (ccode)
+		: "d" (reg1), "a" (addr), "m" (*addr)
+		: "cc", "memory");
+	return ccode;
+}
+
+int ssch(struct subchannel_id schid, union orb *addr)
+{
+	int ccode;
+
+	ccode = __ssch(schid, addr);
+	trace_s390_cio_ssch(schid, addr, ccode);
+
+	return ccode;
+}
+EXPORT_SYMBOL(ssch);
+
+static inline int __csch(struct subchannel_id schid)
+{
+	register struct subchannel_id reg1 asm("1") = schid;
+	int ccode;
+
+	asm volatile(
+		"	csch\n"
+		"	ipm	%0\n"
+		"	srl	%0,28"
+		: "=d" (ccode)
+		: "d" (reg1)
+		: "cc");
+	return ccode;
+}
+
+int csch(struct subchannel_id schid)
+{
+	int ccode;
+
+	ccode = __csch(schid);
+	trace_s390_cio_csch(schid, ccode);
+
+	return ccode;
+}
+EXPORT_SYMBOL(csch);
+
+int tpi(struct tpi_info *addr)
+{
+	int ccode;
+
+	asm volatile(
+		"	tpi	0(%2)\n"
+		"	ipm	%0\n"
+		"	srl	%0,28"
+		: "=d" (ccode), "=m" (*addr)
+		: "a" (addr)
+		: "cc");
+	trace_s390_cio_tpi(addr, ccode);
+
+	return ccode;
+}
+
+int chsc(void *chsc_area)
+{
+	typedef struct { char _[4096]; } addr_type;
+	int cc = -EIO;
+
+	asm volatile(
+		"	.insn	rre,0xb25f0000,%2,0\n"
+		"0:	ipm	%0\n"
+		"	srl	%0,28\n"
+		"1:\n"
+		EX_TABLE(0b, 1b)
+		: "+d" (cc), "=m" (*(addr_type *) chsc_area)
+		: "d" (chsc_area), "m" (*(addr_type *) chsc_area)
+		: "cc");
+	trace_s390_cio_chsc(chsc_area, cc);
+
+	return cc;
+}
+EXPORT_SYMBOL(chsc);
+
+static inline int __rsch(struct subchannel_id schid)
+{
+	register struct subchannel_id reg1 asm("1") = schid;
+	int ccode;
+
+	asm volatile(
+		"	rsch\n"
+		"	ipm	%0\n"
+		"	srl	%0,28"
+		: "=d" (ccode)
+		: "d" (reg1)
+		: "cc", "memory");
+
+	return ccode;
+}
+
+int rsch(struct subchannel_id schid)
+{
+	int ccode;
+
+	ccode = __rsch(schid);
+	trace_s390_cio_rsch(schid, ccode);
+
+	return ccode;
+}
+
+static inline int __hsch(struct subchannel_id schid)
+{
+	register struct subchannel_id reg1 asm("1") = schid;
+	int ccode;
+
+	asm volatile(
+		"	hsch\n"
+		"	ipm	%0\n"
+		"	srl	%0,28"
+		: "=d" (ccode)
+		: "d" (reg1)
+		: "cc");
+	return ccode;
+}
+
+int hsch(struct subchannel_id schid)
+{
+	int ccode;
+
+	ccode = __hsch(schid);
+	trace_s390_cio_hsch(schid, ccode);
+
+	return ccode;
+}
+
+static inline int __xsch(struct subchannel_id schid)
+{
+	register struct subchannel_id reg1 asm("1") = schid;
+	int ccode;
+
+	asm volatile(
+		"	xsch\n"
+		"	ipm	%0\n"
+		"	srl	%0,28"
+		: "=d" (ccode)
+		: "d" (reg1)
+		: "cc");
+	return ccode;
+}
+
+int xsch(struct subchannel_id schid)
+{
+	int ccode;
+
+	ccode = __xsch(schid);
+	trace_s390_cio_xsch(schid, ccode);
+
+	return ccode;
+}
+
+int stcrw(struct crw *crw)
+{
+	int ccode;
+
+	asm volatile(
+		"	stcrw	0(%2)\n"
+		"	ipm	%0\n"
+		"	srl	%0,28\n"
+		: "=d" (ccode), "=m" (*crw)
+		: "a" (crw)
+		: "cc");
+	trace_s390_cio_stcrw(crw, ccode);
+
+	return ccode;
+}
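
Each wrapper in this file returns the raw condition code (0-3) extracted via ipm/srl, preinitialized to -EIO where an extable entry lets the instruction be skipped on an exception. Callers translate the cc into errno values; the following is a hedged sketch of that convention around stsch(), with a hypothetical function name.

#include <linux/errno.h>

/* Illustrative cc-to-errno translation for store subchannel. */
static int query_subchannel(struct subchannel_id schid, struct schib *schib)
{
	switch (stsch(schid, schib)) {
	case 0:
		return 0;	/* schib stored */
	case 3:
		return -ENODEV;	/* subchannel not operational */
	default:
		return -EIO;	/* unexpected cc or faulted instruction */
	}
}
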
diff --git a/drivers/s390/cio/ioasm.h b/drivers/s390/cio/ioasm.h
new file mode 100644
index 0000000..4be539c
--- /dev/null
+++ b/drivers/s390/cio/ioasm.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef S390_CIO_IOASM_H
+#define S390_CIO_IOASM_H
+
+#include <asm/chpid.h>
+#include <asm/schid.h>
+#include <asm/crw.h>
+#include "orb.h"
+#include "cio.h"
+#include "trace.h"
+
+/*
+ * Some S390 specific IO instructions
+ */
+
+int stsch(struct subchannel_id schid, struct schib *addr);
+int msch(struct subchannel_id schid, struct schib *addr);
+int tsch(struct subchannel_id schid, struct irb *addr);
+int ssch(struct subchannel_id schid, union orb *addr);
+int csch(struct subchannel_id schid);
+int tpi(struct tpi_info *addr);
+int chsc(void *chsc_area);
+int rsch(struct subchannel_id schid);
+int hsch(struct subchannel_id schid);
+int xsch(struct subchannel_id schid);
+int stcrw(struct crw *crw);
+
+#endif
diff --git a/drivers/s390/cio/isc.c b/drivers/s390/cio/isc.c
new file mode 100644
index 0000000..77fde9f
--- /dev/null
+++ b/drivers/s390/cio/isc.c
@@ -0,0 +1,69 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Functions for registration of I/O interruption subclasses on s390.
+ *
+ * Copyright IBM Corp. 2008
+ * Authors: Sebastian Ott <sebott@linux.vnet.ibm.com>
+ */
+
+#include <linux/spinlock.h>
+#include <linux/module.h>
+#include <asm/isc.h>
+
+static unsigned int isc_refs[MAX_ISC + 1];
+static DEFINE_SPINLOCK(isc_ref_lock);
+
+
+/**
+ * isc_register - register an I/O interruption subclass.
+ * @isc: I/O interruption subclass to register
+ *
+ * The number of users for @isc is increased. If this is the first user to
+ * register @isc, the corresponding I/O interruption subclass mask is enabled.
+ *
+ * Context:
+ *   This function must not be called in interrupt context.
+ */
+void isc_register(unsigned int isc)
+{
+	if (isc > MAX_ISC) {
+		WARN_ON(1);
+		return;
+	}
+
+	spin_lock(&isc_ref_lock);
+	if (isc_refs[isc] == 0)
+		ctl_set_bit(6, 31 - isc);
+	isc_refs[isc]++;
+	spin_unlock(&isc_ref_lock);
+}
+EXPORT_SYMBOL_GPL(isc_register);
+
+/**
+ * isc_unregister - unregister an I/O interruption subclass.
+ * @isc: I/O interruption subclass to unregister
+ *
+ * The number of users for @isc is decreased. If this is the last user to
+ * unregister @isc, the corresponding I/O interruption subclass mask is
+ * disabled.
+ * Note: This function must not be called if isc_register() hasn't been called
+ * before by the driver for @isc.
+ *
+ * Context:
+ *   This function must not be called in interrupt context.
+ */
+void isc_unregister(unsigned int isc)
+{
+	spin_lock(&isc_ref_lock);
+	/* check for misuse */
+	if (isc > MAX_ISC || isc_refs[isc] == 0) {
+		WARN_ON(1);
+		goto out_unlock;
+	}
+	if (isc_refs[isc] == 1)
+		ctl_clear_bit(6, 31 - isc);
+	isc_refs[isc]--;
+out_unlock:
+	spin_unlock(&isc_ref_lock);
+}
+EXPORT_SYMBOL_GPL(isc_unregister);
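
Registration is reference counted, so a driver pairs isc_register() with exactly one isc_unregister() over the lifetime of its subchannel class, just as eadm_sch_init()/eadm_sch_exit() do above. A minimal hedged sketch; MY_ISC is a hypothetical subclass number (real ones, such as EADM_SCH_ISC, come from asm/isc.h).

#include <linux/module.h>
#include <asm/isc.h>

#define MY_ISC 5	/* hypothetical I/O interruption subclass */

static int __init my_driver_init(void)
{
	isc_register(MY_ISC);	/* enables the ISC mask on first use */
	return 0;
}

static void __exit my_driver_exit(void)
{
	isc_unregister(MY_ISC);	/* disables the mask with the last user */
}

module_init(my_driver_init);
module_exit(my_driver_exit);
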
diff --git a/drivers/s390/cio/itcw.c b/drivers/s390/cio/itcw.c
new file mode 100644
index 0000000..19e4636
--- /dev/null
+++ b/drivers/s390/cio/itcw.c
@@ -0,0 +1,370 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ *  Functions for incremental construction of fcx enabled I/O control blocks.
+ *
+ *    Copyright IBM Corp. 2008
+ *    Author(s): Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <asm/fcx.h>
+#include <asm/itcw.h>
+
+/*
+ * struct itcw - incremental tcw helper data type
+ *
+ * This structure serves as a handle for the incremental construction of a
+ * tcw and associated tccb, tsb, data tidaw-list plus an optional interrogate
+ * tcw and associated data. The data structures are contained inside a single
+ * contiguous buffer provided by the user.
+ *
+ * The itcw construction functions take care of overall data integrity:
+ * - reset unused fields to zero
+ * - fill in required pointers
+ * - ensure required alignment for data structures
+ * - prevent data structures from crossing a 4k-byte boundary where required
+ * - calculate tccb-related length fields
+ * - optionally provide ready-made interrogate tcw and associated structures
+ *
+ * Restrictions apply to the itcws created with these construction functions:
+ * - tida only supported for data address, not for tccb
+ * - only contiguous tidaw-lists (no ttic)
+ * - total number of bytes required per itcw may not exceed 4k bytes
+ * - either read or write operation (r=0 together with w=0 is not supported)
+ *
+ * Example:
+ * struct itcw *itcw;
+ * void *buffer;
+ * size_t size;
+ *
+ * size = itcw_calc_size(1, 2, 0);
+ * buffer = kmalloc(size, GFP_KERNEL | GFP_DMA);
+ * if (!buffer)
+ *	return -ENOMEM;
+ * itcw = itcw_init(buffer, size, ITCW_OP_READ, 1, 2, 0);
+ * if (IS_ERR(itcw))
+ *	return PTR_ERR(itcw);
+ * itcw_add_dcw(itcw, 0x2, 0, NULL, 0, 72);
+ * itcw_add_tidaw(itcw, 0, 0x30000, 20);
+ * itcw_add_tidaw(itcw, 0, 0x40000, 52);
+ * itcw_finalize(itcw);
+ *
+ */
+struct itcw {
+	struct tcw *tcw;
+	struct tcw *intrg_tcw;
+	int num_tidaws;
+	int max_tidaws;
+	int intrg_num_tidaws;
+	int intrg_max_tidaws;
+};
+
+/**
+ * itcw_get_tcw - return pointer to tcw associated with the itcw
+ * @itcw: address of the itcw
+ *
+ * Return pointer to the tcw associated with the itcw.
+ */
+struct tcw *itcw_get_tcw(struct itcw *itcw)
+{
+	return itcw->tcw;
+}
+EXPORT_SYMBOL(itcw_get_tcw);
+
+/**
+ * itcw_calc_size - return the size of an itcw with the given parameters
+ * @intrg: if non-zero, add an interrogate tcw
+ * @max_tidaws: maximum number of tidaws to be used for data addressing or zero
+ * if no tida is to be used.
+ * @intrg_max_tidaws: maximum number of tidaws to be used for data addressing
+ * by the interrogate tcw, if specified
+ *
+ * Calculate and return the number of bytes required to hold an itcw with the
+ * given parameters and assuming tccbs with maximum size.
+ *
+ * Note that the resulting size also contains bytes needed for alignment
+ * padding as well as padding to ensure that data structures don't cross a
+ * 4k-boundary where required.
+ */
+size_t itcw_calc_size(int intrg, int max_tidaws, int intrg_max_tidaws)
+{
+	size_t len;
+	int cross_count;
+
+	/* Main data. */
+	len = sizeof(struct itcw);
+	len += /* TCW */ sizeof(struct tcw) + /* TCCB */ TCCB_MAX_SIZE +
+	       /* TSB */ sizeof(struct tsb) +
+	       /* TIDAL */ max_tidaws * sizeof(struct tidaw);
+	/* Interrogate data. */
+	if (intrg) {
+		len += /* TCW */ sizeof(struct tcw) + /* TCCB */ TCCB_MAX_SIZE +
+		       /* TSB */ sizeof(struct tsb) +
+		       /* TIDAL */ intrg_max_tidaws * sizeof(struct tidaw);
+	}
+
+	/* Maximum required alignment padding. */
+	len += /* Initial TCW */ 63 + /* Interrogate TCCB */ 7;
+
+	/* TIDAW lists may not cross a 4k boundary. To cross a
+	 * boundary we need to add a TTIC TIDAW. We need to reserve
+	 * one additional TIDAW for a TTIC that we may need to add due
+	 * to the placement of the data chunk in memory, and a further
+	 * TIDAW for each page boundary that the TIDAW list may cross
+	 * due to its own size.
+	 */
+	if (max_tidaws) {
+		cross_count = 1 + ((max_tidaws * sizeof(struct tidaw) - 1)
+				   >> PAGE_SHIFT);
+		len += cross_count * sizeof(struct tidaw);
+	}
+	if (intrg_max_tidaws) {
+		cross_count = 1 + ((intrg_max_tidaws * sizeof(struct tidaw) - 1)
+				   >> PAGE_SHIFT);
+		len += cross_count * sizeof(struct tidaw);
+	}
+	return len;
+}
+EXPORT_SYMBOL(itcw_calc_size);
+
+#define CROSS4K(x, l)	(((x) & ~4095) != (((x) + (l)) & ~4095))
+
+static inline void *fit_chunk(addr_t *start, addr_t end, size_t len,
+			      int align, int check_4k)
+{
+	addr_t addr;
+
+	addr = ALIGN(*start, align);
+	if (check_4k && CROSS4K(addr, len)) {
+		addr = ALIGN(addr, 4096);
+		addr = ALIGN(addr, align);
+	}
+	if (addr + len > end)
+		return ERR_PTR(-ENOSPC);
+	*start = addr + len;
+	return (void *) addr;
+}
+
+/**
+ * itcw_init - initialize incremental tcw data structure
+ * @buffer: address of buffer to use for data structures
+ * @size: number of bytes in buffer
+ * @op: %ITCW_OP_READ for a read operation tcw, %ITCW_OP_WRITE for a write
+ * operation tcw
+ * @intrg: if non-zero, add and initialize an interrogate tcw
+ * @max_tidaws: maximum number of tidaws to be used for data addressing or zero
+ * if no tida is to be used.
+ * @intrg_max_tidaws: maximum number of tidaws to be used for data addressing
+ * by the interrogate tcw, if specified
+ *
+ * Prepare the specified buffer to be used as an incremental tcw, i.e. a
+ * helper data structure that can be used to construct a valid tcw by
+ * successive calls to other helper functions. Note: the buffer needs to be
+ * located below the 2G address limit. The resulting tcw has the following
+ * restrictions:
+ *  - no tccb tidal
+ *  - input/output tidal is contiguous (no ttic)
+ *  - total data should not exceed 4k
+ *  - tcw specifies either read or write operation
+ *
+ * On success, return pointer to the resulting incremental tcw data structure,
+ * ERR_PTR otherwise.
+ */
+struct itcw *itcw_init(void *buffer, size_t size, int op, int intrg,
+		       int max_tidaws, int intrg_max_tidaws)
+{
+	struct itcw *itcw;
+	void *chunk;
+	addr_t start;
+	addr_t end;
+	int cross_count;
+
+	/* Check for 2G limit. */
+	start = (addr_t) buffer;
+	end = start + size;
+	if (end > (1 << 31))
+		return ERR_PTR(-EINVAL);
+	memset(buffer, 0, size);
+	/* ITCW. */
+	chunk = fit_chunk(&start, end, sizeof(struct itcw), 1, 0);
+	if (IS_ERR(chunk))
+		return chunk;
+	itcw = chunk;
+	/* allow for TTIC tidaws that may be needed to cross a page boundary */
+	cross_count = 0;
+	if (max_tidaws)
+		cross_count = 1 + ((max_tidaws * sizeof(struct tidaw) - 1)
+				   >> PAGE_SHIFT);
+	itcw->max_tidaws = max_tidaws + cross_count;
+	cross_count = 0;
+	if (intrg_max_tidaws)
+		cross_count = 1 + ((intrg_max_tidaws * sizeof(struct tidaw) - 1)
+				   >> PAGE_SHIFT);
+	itcw->intrg_max_tidaws = intrg_max_tidaws + cross_count;
+	/* Main TCW. */
+	chunk = fit_chunk(&start, end, sizeof(struct tcw), 64, 0);
+	if (IS_ERR(chunk))
+		return chunk;
+	itcw->tcw = chunk;
+	tcw_init(itcw->tcw, (op == ITCW_OP_READ) ? 1 : 0,
+		 (op == ITCW_OP_WRITE) ? 1 : 0);
+	/* Interrogate TCW. */
+	if (intrg) {
+		chunk = fit_chunk(&start, end, sizeof(struct tcw), 64, 0);
+		if (IS_ERR(chunk))
+			return chunk;
+		itcw->intrg_tcw = chunk;
+		tcw_init(itcw->intrg_tcw, 1, 0);
+		tcw_set_intrg(itcw->tcw, itcw->intrg_tcw);
+	}
+	/* Data TIDAL. */
+	if (max_tidaws > 0) {
+		chunk = fit_chunk(&start, end, sizeof(struct tidaw) *
+				  itcw->max_tidaws, 16, 0);
+		if (IS_ERR(chunk))
+			return chunk;
+		tcw_set_data(itcw->tcw, chunk, 1);
+	}
+	/* Interrogate data TIDAL. */
+	if (intrg && (intrg_max_tidaws > 0)) {
+		chunk = fit_chunk(&start, end, sizeof(struct tidaw) *
+				  itcw->intrg_max_tidaws, 16, 0);
+		if (IS_ERR(chunk))
+			return chunk;
+		tcw_set_data(itcw->intrg_tcw, chunk, 1);
+	}
+	/* TSB. */
+	chunk = fit_chunk(&start, end, sizeof(struct tsb), 8, 0);
+	if (IS_ERR(chunk))
+		return chunk;
+	tsb_init(chunk);
+	tcw_set_tsb(itcw->tcw, chunk);
+	/* Interrogate TSB. */
+	if (intrg) {
+		chunk = fit_chunk(&start, end, sizeof(struct tsb), 8, 0);
+		if (IS_ERR(chunk))
+			return chunk;
+		tsb_init(chunk);
+		tcw_set_tsb(itcw->intrg_tcw, chunk);
+	}
+	/* TCCB. */
+	chunk = fit_chunk(&start, end, TCCB_MAX_SIZE, 8, 0);
+	if (IS_ERR(chunk))
+		return chunk;
+	tccb_init(chunk, TCCB_MAX_SIZE, TCCB_SAC_DEFAULT);
+	tcw_set_tccb(itcw->tcw, chunk);
+	/* Interrogate TCCB. */
+	if (intrg) {
+		chunk = fit_chunk(&start, end, TCCB_MAX_SIZE, 8, 0);
+		if (IS_ERR(chunk))
+			return chunk;
+		tccb_init(chunk, TCCB_MAX_SIZE, TCCB_SAC_INTRG);
+		tcw_set_tccb(itcw->intrg_tcw, chunk);
+		tccb_add_dcw(chunk, TCCB_MAX_SIZE, DCW_CMD_INTRG, 0, NULL,
+			     sizeof(struct dcw_intrg_data), 0);
+		tcw_finalize(itcw->intrg_tcw, 0);
+	}
+	return itcw;
+}
+EXPORT_SYMBOL(itcw_init);
+
+/**
+ * itcw_add_dcw - add a dcw to the itcw
+ * @itcw: address of the itcw
+ * @cmd: the dcw command
+ * @flags: flags for the dcw
+ * @cd: address of control data for this dcw or NULL if none is required
+ * @cd_count: number of control data bytes for this dcw
+ * @count: number of data bytes for this dcw
+ *
+ * Add a new dcw to the specified itcw by writing the dcw information specified
+ * by @cmd, @flags, @cd, @cd_count and @count to the tca of the tccb. Return
+ * a pointer to the newly added dcw on success or -%ENOSPC if the new dcw
+ * would exceed the available space.
+ *
+ * Note: the tcal field of the tccb header will be updated to reflect added
+ * content.
+ */
+struct dcw *itcw_add_dcw(struct itcw *itcw, u8 cmd, u8 flags, void *cd,
+			 u8 cd_count, u32 count)
+{
+	return tccb_add_dcw(tcw_get_tccb(itcw->tcw), TCCB_MAX_SIZE, cmd,
+			    flags, cd, cd_count, count);
+}
+EXPORT_SYMBOL(itcw_add_dcw);
+
+/**
+ * itcw_add_tidaw - add a tidaw to the itcw
+ * @itcw: address of the itcw
+ * @flags: flags for the new tidaw
+ * @addr: address value for the new tidaw
+ * @count: count value for the new tidaw
+ *
+ * Add a new tidaw to the input/output data tidaw-list of the specified itcw
+ * (depending on the value of the r-flag and w-flag). Return a pointer to
+ * the new tidaw on success or -%ENOSPC if the new tidaw would exceed the
+ * available space.
+ *
+ * Note: TTIC tidaws are automatically added when needed, so explicitly calling
+ * this interface with the TTIC flag is not supported. The last-tidaw flag
+ * for the last tidaw in the list will be set by itcw_finalize.
+ */
+struct tidaw *itcw_add_tidaw(struct itcw *itcw, u8 flags, void *addr, u32 count)
+{
+	struct tidaw *following;
+
+	if (itcw->num_tidaws >= itcw->max_tidaws)
+		return ERR_PTR(-ENOSPC);
+	/*
+	 * Is the tidaw, which follows the one we are about to fill, on the next
+	 * page? Then we have to insert a TTIC tidaw first, that points to the
+	 * tidaw on the new page.
+	 */
+	following = ((struct tidaw *) tcw_get_data(itcw->tcw))
+		+ itcw->num_tidaws + 1;
+	if (itcw->num_tidaws && !((unsigned long) following & ~PAGE_MASK)) {
+		tcw_add_tidaw(itcw->tcw, itcw->num_tidaws++,
+			      TIDAW_FLAGS_TTIC, following, 0);
+		if (itcw->num_tidaws >= itcw->max_tidaws)
+			return ERR_PTR(-ENOSPC);
+	}
+	return tcw_add_tidaw(itcw->tcw, itcw->num_tidaws++, flags, addr, count);
+}
+EXPORT_SYMBOL(itcw_add_tidaw);
+
+/**
+ * itcw_set_data - set data address and tida flag of the itcw
+ * @itcw: address of the itcw
+ * @addr: the data address
+ * @use_tidal: zero if the data address specifies a contiguous block of data,
+ * non-zero if it specifies a list of tidaws.
+ *
+ * Set the input/output data address of the itcw (depending on the value of the
+ * r-flag and w-flag). If @use_tidal is non-zero, the corresponding tida flag
+ * is set as well.
+ */
+void itcw_set_data(struct itcw *itcw, void *addr, int use_tidal)
+{
+	tcw_set_data(itcw->tcw, addr, use_tidal);
+}
+EXPORT_SYMBOL(itcw_set_data);
+
+/**
+ * itcw_finalize - calculate length and count fields of the itcw
+ * @itcw: address of the itcw
+ *
+ * Calculate tcw input-/output-count and tccbl fields and add a tcat to the
+ * tccb. In case input- or output-tida is used, the tidaw-list must be stored
+ * in contiguous storage (no ttic). The tcal field in the tccb must be
+ * up-to-date.
+ */
+void itcw_finalize(struct itcw *itcw)
+{
+	tcw_finalize(itcw->tcw, itcw->num_tidaws);
+}
+EXPORT_SYMBOL(itcw_finalize);
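+
+/*
+ * Usage sketch (illustrative only; error handling is omitted and the dcw
+ * command, buffers and tidaw counts are placeholders): building a
+ * transport-mode channel program with two tidaws could look like this.
+ * GFP_DMA keeps the buffer below the required 2G limit.
+ *
+ *	size_t size = itcw_calc_size(0, 2, 0);
+ *	void *buf = kzalloc(size, GFP_KERNEL | GFP_DMA);
+ *	struct itcw *itcw = itcw_init(buf, size, ITCW_OP_WRITE, 0, 2, 0);
+ *
+ *	itcw_add_dcw(itcw, dcw_cmd, 0, NULL, 0, 4096);
+ *	itcw_add_tidaw(itcw, 0, buf1, 2048);
+ *	itcw_add_tidaw(itcw, 0, buf2, 2048);
+ *	itcw_finalize(itcw);
+ *
+ * The finished tcw (see itcw_get_tcw()) is then ready to be started via a
+ * transport-mode ORB.
+ */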
diff --git a/drivers/s390/cio/orb.h b/drivers/s390/cio/orb.h
new file mode 100644
index 0000000..a2d3778
--- /dev/null
+++ b/drivers/s390/cio/orb.h
@@ -0,0 +1,92 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Orb related data structures.
+ *
+ * Copyright IBM Corp. 2007, 2011
+ *
+ * Author(s): Cornelia Huck <cornelia.huck@de.ibm.com>
+ *	      Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
+ *	      Sebastian Ott <sebott@linux.vnet.ibm.com>
+ */
+
+#ifndef S390_ORB_H
+#define S390_ORB_H
+
+/*
+ * Command-mode operation request block
+ */
+struct cmd_orb {
+	u32 intparm;	/* interruption parameter */
+	u32 key:4;	/* flags, like key, suspend control, etc. */
+	u32 spnd:1;	/* suspend control */
+	u32 res1:1;	/* reserved */
+	u32 mod:1;	/* modification control */
+	u32 sync:1;	/* synchronize control */
+	u32 fmt:1;	/* format control */
+	u32 pfch:1;	/* prefetch control */
+	u32 isic:1;	/* initial-status-interruption control */
+	u32 alcc:1;	/* address-limit-checking control */
+	u32 ssic:1;	/* suppress-suspended-interr. control */
+	u32 res2:1;	/* reserved */
+	u32 c64:1;	/* IDAW/QDIO 64 bit control  */
+	u32 i2k:1;	/* IDAW 2/4kB block size control */
+	u32 lpm:8;	/* logical path mask */
+	u32 ils:1;	/* incorrect length */
+	u32 zero:6;	/* reserved zeros */
+	u32 orbx:1;	/* ORB extension control */
+	u32 cpa;	/* channel program address */
+}  __packed __aligned(4);
+
+/*
+ * Transport-mode operation request block
+ */
+struct tm_orb {
+	u32 intparm;
+	u32 key:4;
+	u32:9;
+	u32 b:1;
+	u32:2;
+	u32 lpm:8;
+	u32:7;
+	u32 x:1;
+	u32 tcw;
+	u32 prio:8;
+	u32:8;
+	u32 rsvpgm:8;
+	u32:8;
+	u32:32;
+	u32:32;
+	u32:32;
+	u32:32;
+}  __packed __aligned(4);
+
+/*
+ * eadm operation request block
+ */
+struct eadm_orb {
+	u32 intparm;
+	u32 key:4;
+	u32:4;
+	u32 compat1:1;
+	u32 compat2:1;
+	u32:21;
+	u32 x:1;
+	u32 aob;
+	u32 css_prio:8;
+	u32:8;
+	u32 scm_prio:8;
+	u32:8;
+	u32:29;
+	u32 fmt:3;
+	u32:32;
+	u32:32;
+	u32:32;
+}  __packed __aligned(4);
+
+union orb {
+	struct cmd_orb cmd;
+	struct tm_orb tm;
+	struct eadm_orb eadm;
+}  __packed __aligned(4);
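+
+/*
+ * Illustrative sketch (not part of the original header): a command-mode
+ * ORB for a format-1 channel program might be filled like this, with
+ * "intparm", "lpm" and "ccw" supplied by the caller:
+ *
+ *	union orb orb = {};
+ *
+ *	orb.cmd.intparm = intparm;
+ *	orb.cmd.fmt = 1;
+ *	orb.cmd.c64 = 1;
+ *	orb.cmd.lpm = lpm;
+ *	orb.cmd.cpa = (u32) (addr_t) ccw;
+ */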
+
+#endif /* S390_ORB_H */
diff --git a/drivers/s390/cio/qdio.h b/drivers/s390/cio/qdio.h
new file mode 100644
index 0000000..a6f7c29
--- /dev/null
+++ b/drivers/s390/cio/qdio.h
@@ -0,0 +1,419 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright IBM Corp. 2000, 2009
+ * Author(s): Utz Bacher <utz.bacher@de.ibm.com>
+ *	      Jan Glauber <jang@linux.vnet.ibm.com>
+ */
+#ifndef _CIO_QDIO_H
+#define _CIO_QDIO_H
+
+#include <asm/page.h>
+#include <asm/schid.h>
+#include <asm/debug.h>
+#include "chsc.h"
+
+#define QDIO_BUSY_BIT_PATIENCE		(100 << 12)	/* 100 microseconds */
+#define QDIO_BUSY_BIT_RETRY_DELAY	10		/* 10 milliseconds */
+#define QDIO_BUSY_BIT_RETRIES		1000		/* = 10s retry time */
+#define QDIO_INPUT_THRESHOLD		(500 << 12)	/* 500 microseconds */
+
+enum qdio_irq_states {
+	QDIO_IRQ_STATE_INACTIVE,
+	QDIO_IRQ_STATE_ESTABLISHED,
+	QDIO_IRQ_STATE_ACTIVE,
+	QDIO_IRQ_STATE_STOPPED,
+	QDIO_IRQ_STATE_CLEANUP,
+	QDIO_IRQ_STATE_ERR,
+	NR_QDIO_IRQ_STATES,
+};
+
+/* used as intparm in do_IO */
+#define QDIO_DOING_ESTABLISH	1
+#define QDIO_DOING_ACTIVATE	2
+#define QDIO_DOING_CLEANUP	3
+
+#define SLSB_STATE_NOT_INIT	0x0
+#define SLSB_STATE_EMPTY	0x1
+#define SLSB_STATE_PRIMED	0x2
+#define SLSB_STATE_PENDING	0x3
+#define SLSB_STATE_HALTED	0xe
+#define SLSB_STATE_ERROR	0xf
+#define SLSB_TYPE_INPUT		0x0
+#define SLSB_TYPE_OUTPUT	0x20
+#define SLSB_OWNER_PROG		0x80
+#define SLSB_OWNER_CU		0x40
+
+#define SLSB_P_INPUT_NOT_INIT	\
+	(SLSB_OWNER_PROG | SLSB_TYPE_INPUT | SLSB_STATE_NOT_INIT)  /* 0x80 */
+#define SLSB_P_INPUT_ACK	\
+	(SLSB_OWNER_PROG | SLSB_TYPE_INPUT | SLSB_STATE_EMPTY)	   /* 0x81 */
+#define SLSB_CU_INPUT_EMPTY	\
+	(SLSB_OWNER_CU | SLSB_TYPE_INPUT | SLSB_STATE_EMPTY)	   /* 0x41 */
+#define SLSB_P_INPUT_PRIMED	\
+	(SLSB_OWNER_PROG | SLSB_TYPE_INPUT | SLSB_STATE_PRIMED)	   /* 0x82 */
+#define SLSB_P_INPUT_HALTED	\
+	(SLSB_OWNER_PROG | SLSB_TYPE_INPUT | SLSB_STATE_HALTED)	   /* 0x8e */
+#define SLSB_P_INPUT_ERROR	\
+	(SLSB_OWNER_PROG | SLSB_TYPE_INPUT | SLSB_STATE_ERROR)	   /* 0x8f */
+#define SLSB_P_OUTPUT_NOT_INIT	\
+	(SLSB_OWNER_PROG | SLSB_TYPE_OUTPUT | SLSB_STATE_NOT_INIT) /* 0xa0 */
+#define SLSB_P_OUTPUT_EMPTY	\
+	(SLSB_OWNER_PROG | SLSB_TYPE_OUTPUT | SLSB_STATE_EMPTY)	   /* 0xa1 */
+#define SLSB_P_OUTPUT_PENDING \
+	(SLSB_OWNER_PROG | SLSB_TYPE_OUTPUT | SLSB_STATE_PENDING)  /* 0xa3 */
+#define SLSB_CU_OUTPUT_PRIMED	\
+	(SLSB_OWNER_CU | SLSB_TYPE_OUTPUT | SLSB_STATE_PRIMED)	   /* 0x62 */
+#define SLSB_P_OUTPUT_HALTED	\
+	(SLSB_OWNER_PROG | SLSB_TYPE_OUTPUT | SLSB_STATE_HALTED)   /* 0xae */
+#define SLSB_P_OUTPUT_ERROR	\
+	(SLSB_OWNER_PROG | SLSB_TYPE_OUTPUT | SLSB_STATE_ERROR)	   /* 0xaf */
+
+#define SLSB_ERROR_DURING_LOOKUP  0xff
+
+/* additional CIWs returned by extended Sense-ID */
+#define CIW_TYPE_EQUEUE			0x3 /* establish QDIO queues */
+#define CIW_TYPE_AQUEUE			0x4 /* activate QDIO queues */
+
+/* flags for st qdio sch data */
+#define CHSC_FLAG_QDIO_CAPABILITY	0x80
+#define CHSC_FLAG_VALIDITY		0x40
+
+/* SIGA flags */
+#define QDIO_SIGA_WRITE		0x00
+#define QDIO_SIGA_READ		0x01
+#define QDIO_SIGA_SYNC		0x02
+#define QDIO_SIGA_WRITEQ	0x04
+#define QDIO_SIGA_QEBSM_FLAG	0x80
+
+static inline int do_sqbs(u64 token, unsigned char state, int queue,
+			  int *start, int *count)
+{
+	register unsigned long _ccq asm ("0") = *count;
+	register unsigned long _token asm ("1") = token;
+	unsigned long _queuestart = ((unsigned long)queue << 32) | *start;
+
+	asm volatile(
+		"	.insn	rsy,0xeb000000008A,%1,0,0(%2)"
+		: "+d" (_ccq), "+d" (_queuestart)
+		: "d" ((unsigned long)state), "d" (_token)
+		: "memory", "cc");
+	*count = _ccq & 0xff;
+	*start = _queuestart & 0xff;
+
+	return (_ccq >> 32) & 0xff;
+}
+
+static inline int do_eqbs(u64 token, unsigned char *state, int queue,
+			  int *start, int *count, int ack)
+{
+	register unsigned long _ccq asm ("0") = *count;
+	register unsigned long _token asm ("1") = token;
+	unsigned long _queuestart = ((unsigned long)queue << 32) | *start;
+	unsigned long _state = (unsigned long)ack << 63;
+
+	asm volatile(
+		"	.insn	rrf,0xB99c0000,%1,%2,0,0"
+		: "+d" (_ccq), "+d" (_queuestart), "+d" (_state)
+		: "d" (_token)
+		: "memory", "cc");
+	*count = _ccq & 0xff;
+	*start = _queuestart & 0xff;
+	*state = _state & 0xff;
+
+	return (_ccq >> 32) & 0xff;
+}
+
+struct qdio_irq;
+
+struct siga_flag {
+	u8 input:1;
+	u8 output:1;
+	u8 sync:1;
+	u8 sync_after_ai:1;
+	u8 sync_out_after_pci:1;
+	u8:3;
+} __attribute__ ((packed));
+
+struct qdio_dev_perf_stat {
+	unsigned int adapter_int;
+	unsigned int qdio_int;
+	unsigned int pci_request_int;
+
+	unsigned int tasklet_inbound;
+	unsigned int tasklet_inbound_resched;
+	unsigned int tasklet_inbound_resched2;
+	unsigned int tasklet_outbound;
+
+	unsigned int siga_read;
+	unsigned int siga_write;
+	unsigned int siga_sync;
+
+	unsigned int inbound_call;
+	unsigned int inbound_handler;
+	unsigned int stop_polling;
+	unsigned int inbound_queue_full;
+	unsigned int outbound_call;
+	unsigned int outbound_handler;
+	unsigned int outbound_queue_full;
+	unsigned int fast_requeue;
+	unsigned int target_full;
+	unsigned int eqbs;
+	unsigned int eqbs_partial;
+	unsigned int sqbs;
+	unsigned int sqbs_partial;
+	unsigned int int_discarded;
+} ____cacheline_aligned;
+
+struct qdio_queue_perf_stat {
+	/*
+	 * Sorted into power-of-two buckets: 1, 2-3, 4-7, ... 64-127, 128.
+	 * Since at most 127 SBALs are scanned, the entry for 128 is reused
+	 * for "queue full", i.e. 127 SBALs found.
+	 */
+	unsigned int nr_sbals[8];
+	unsigned int nr_sbal_error;
+	unsigned int nr_sbal_nop;
+	unsigned int nr_sbal_total;
+};
+
+enum qdio_queue_irq_states {
+	QDIO_QUEUE_IRQS_DISABLED,
+};
+
+struct qdio_input_q {
+	/* input buffer acknowledgement flag */
+	int polling;
+	/* first ACK'ed buffer */
+	int ack_start;
+	/* how many SBALs are acknowledged with qebsm */
+	int ack_count;
+	/* last time of noticing incoming data */
+	u64 timestamp;
+	/* upper-layer polling flag */
+	unsigned long queue_irq_state;
+	/* callback to start upper-layer polling */
+	void (*queue_start_poll) (struct ccw_device *, int, unsigned long);
+};
+
+struct qdio_output_q {
+	/* PCIs are enabled for the queue */
+	int pci_out_enabled;
+	/* cq: use asynchronous output buffers */
+	int use_cq;
+	/* cq: aobs used for a particular SBAL */
+	struct qaob **aobs;
+	/* cq: sbal state related to asynchronous operation */
+	struct qdio_outbuf_state *sbal_state;
+	/* timer to check for more outbound work */
+	struct timer_list timer;
+	/* used SBALs before tasklet schedule */
+	int scan_threshold;
+};
+
+/*
+ * Note on cache alignment: slsb and write-mostly data are grouped at the
+ * beginning; sbal[] is read-only and starts on a new cacheline, followed by
+ * the read-mostly fields.
+ */
+struct qdio_q {
+	struct slsb slsb;
+
+	union {
+		struct qdio_input_q in;
+		struct qdio_output_q out;
+	} u;
+
+	/*
+	 * inbound: next buffer the program should check for
+	 * outbound: next buffer to check if adapter processed it
+	 */
+	int first_to_check;
+
+	/* first_to_check of the last time */
+	int last_move;
+
+	/* beginning position for calling the program */
+	int first_to_kick;
+
+	/* number of buffers in use by the adapter */
+	atomic_t nr_buf_used;
+
+	/* error condition during a data transfer */
+	unsigned int qdio_error;
+
+	/* last scan of the queue */
+	u64 timestamp;
+
+	struct tasklet_struct tasklet;
+	struct qdio_queue_perf_stat q_stats;
+
+	struct qdio_buffer *sbal[QDIO_MAX_BUFFERS_PER_Q] ____cacheline_aligned;
+
+	/* queue number */
+	int nr;
+
+	/* bitmask of queue number */
+	int mask;
+
+	/* input or output queue */
+	int is_input_q;
+
+	/* list of thinint input queues */
+	struct list_head entry;
+
+	/* upper-layer program handler */
+	qdio_handler_t (*handler);
+
+	struct dentry *debugfs_q;
+	struct qdio_irq *irq_ptr;
+	struct sl *sl;
+	/*
+	 * A page is allocated under this pointer and used for slib and sl.
+	 * slib is 2048 bytes big and sl points to offset PAGE_SIZE / 2.
+	 */
+	struct slib *slib;
+} __attribute__ ((aligned(256)));
+
+struct qdio_irq {
+	struct qib qib;
+	u32 *dsci;		/* address of device state change indicator */
+	struct ccw_device *cdev;
+	struct dentry *debugfs_dev;
+	struct dentry *debugfs_perf;
+
+	unsigned long int_parm;
+	struct subchannel_id schid;
+	unsigned long sch_token;	/* QEBSM facility */
+
+	enum qdio_irq_states state;
+
+	struct siga_flag siga_flag;	/* siga sync information from qdioac */
+
+	int nr_input_qs;
+	int nr_output_qs;
+
+	struct ccw1 ccw;
+	struct ciw equeue;
+	struct ciw aqueue;
+
+	struct qdio_ssqd_desc ssqd_desc;
+	void (*orig_handler) (struct ccw_device *, unsigned long, struct irb *);
+
+	int perf_stat_enabled;
+
+	struct qdr *qdr;
+	unsigned long chsc_page;
+
+	struct qdio_q *input_qs[QDIO_MAX_QUEUES_PER_IRQ];
+	struct qdio_q *output_qs[QDIO_MAX_QUEUES_PER_IRQ];
+
+	debug_info_t *debug_area;
+	struct mutex setup_mutex;
+	struct qdio_dev_perf_stat perf_stat;
+};
+
+/* helper functions */
+#define queue_type(q)	((q)->irq_ptr->qib.qfmt)
+#define SCH_NO(q)	((q)->irq_ptr->schid.sch_no)
+
+#define is_thinint_irq(irq) \
+	(irq->qib.qfmt == QDIO_IQDIO_QFMT || \
+	 css_general_characteristics.aif_osa)
+
+#define qperf(__qdev, __attr)	((__qdev)->perf_stat.__attr)
+
+#define qperf_inc(__q, __attr)						\
+({									\
+	struct qdio_irq *qdev = (__q)->irq_ptr;				\
+	if (qdev->perf_stat_enabled)					\
+		(qdev->perf_stat.__attr)++;				\
+})
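+
+/*
+ * Example (illustrative): qperf_inc(q, siga_sync) increments
+ * q->irq_ptr->perf_stat.siga_sync, but only while statistics gathering
+ * is enabled for the device.
+ */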
+
+static inline void account_sbals_error(struct qdio_q *q, int count)
+{
+	q->q_stats.nr_sbal_error += count;
+	q->q_stats.nr_sbal_total += count;
+}
+
+/* the highest iqdio queue is used for multicast */
+static inline int multicast_outbound(struct qdio_q *q)
+{
+	return (q->irq_ptr->nr_output_qs > 1) &&
+	       (q->nr == q->irq_ptr->nr_output_qs - 1);
+}
+
+#define pci_out_supported(q) \
+	(q->irq_ptr->qib.ac & QIB_AC_OUTBOUND_PCI_SUPPORTED)
+#define is_qebsm(q)			(q->irq_ptr->sch_token != 0)
+
+#define need_siga_in(q)			(q->irq_ptr->siga_flag.input)
+#define need_siga_out(q)		(q->irq_ptr->siga_flag.output)
+#define need_siga_sync(q)		(unlikely(q->irq_ptr->siga_flag.sync))
+#define need_siga_sync_after_ai(q)	\
+	(unlikely(q->irq_ptr->siga_flag.sync_after_ai))
+#define need_siga_sync_out_after_pci(q)	\
+	(unlikely(q->irq_ptr->siga_flag.sync_out_after_pci))
+
+#define for_each_input_queue(irq_ptr, q, i)		\
+	for (i = 0; i < irq_ptr->nr_input_qs &&		\
+		({ q = irq_ptr->input_qs[i]; 1; }); i++)
+#define for_each_output_queue(irq_ptr, q, i)		\
+	for (i = 0; i < irq_ptr->nr_output_qs &&	\
+		({ q = irq_ptr->output_qs[i]; 1; }); i++)
+
+#define prev_buf(bufnr)	\
+	((bufnr + QDIO_MAX_BUFFERS_MASK) & QDIO_MAX_BUFFERS_MASK)
+#define next_buf(bufnr)	\
+	((bufnr + 1) & QDIO_MAX_BUFFERS_MASK)
+#define add_buf(bufnr, inc) \
+	((bufnr + inc) & QDIO_MAX_BUFFERS_MASK)
+#define sub_buf(bufnr, dec) \
+	((bufnr - dec) & QDIO_MAX_BUFFERS_MASK)
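+
+/*
+ * Example (illustrative): with QDIO_MAX_BUFFERS_MASK == 127 these helpers
+ * perform modulo-128 ring arithmetic, e.g. next_buf(127) == 0,
+ * prev_buf(0) == 127 and sub_buf(5, 10) == 123.
+ */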
+
+#define queue_irqs_enabled(q)			\
+	(test_bit(QDIO_QUEUE_IRQS_DISABLED, &q->u.in.queue_irq_state) == 0)
+#define queue_irqs_disabled(q)			\
+	(test_bit(QDIO_QUEUE_IRQS_DISABLED, &q->u.in.queue_irq_state) != 0)
+
+extern u64 last_ai_time;
+
+/* prototypes for thin interrupt */
+void qdio_setup_thinint(struct qdio_irq *irq_ptr);
+int qdio_establish_thinint(struct qdio_irq *irq_ptr);
+void qdio_shutdown_thinint(struct qdio_irq *irq_ptr);
+void tiqdio_add_input_queues(struct qdio_irq *irq_ptr);
+void tiqdio_remove_input_queues(struct qdio_irq *irq_ptr);
+void tiqdio_inbound_processing(unsigned long q);
+int tiqdio_allocate_memory(void);
+void tiqdio_free_memory(void);
+int tiqdio_register_thinints(void);
+void tiqdio_unregister_thinints(void);
+void clear_nonshared_ind(struct qdio_irq *);
+int test_nonshared_ind(struct qdio_irq *);
+
+/* prototypes for setup */
+void qdio_inbound_processing(unsigned long data);
+void qdio_outbound_processing(unsigned long data);
+void qdio_outbound_timer(struct timer_list *t);
+void qdio_int_handler(struct ccw_device *cdev, unsigned long intparm,
+		      struct irb *irb);
+int qdio_allocate_qs(struct qdio_irq *irq_ptr, int nr_input_qs,
+		     int nr_output_qs);
+void qdio_setup_ssqd_info(struct qdio_irq *irq_ptr);
+int qdio_setup_get_ssqd(struct qdio_irq *irq_ptr,
+			struct subchannel_id *schid,
+			struct qdio_ssqd_desc *data);
+int qdio_setup_irq(struct qdio_initialize *init_data);
+void qdio_print_subchannel_info(struct qdio_irq *irq_ptr,
+				struct ccw_device *cdev);
+void qdio_release_memory(struct qdio_irq *irq_ptr);
+int qdio_setup_create_sysfs(struct ccw_device *cdev);
+void qdio_setup_destroy_sysfs(struct ccw_device *cdev);
+int qdio_setup_init(void);
+void qdio_setup_exit(void);
+int qdio_enable_async_operation(struct qdio_output_q *q);
+void qdio_disable_async_operation(struct qdio_output_q *q);
+struct qaob *qdio_allocate_aob(void);
+
+int debug_get_buf_state(struct qdio_q *q, unsigned int bufnr,
+			unsigned char *state);
+#endif /* _CIO_QDIO_H */
diff --git a/drivers/s390/cio/qdio_debug.c b/drivers/s390/cio/qdio_debug.c
new file mode 100644
index 0000000..68a82f3
--- /dev/null
+++ b/drivers/s390/cio/qdio_debug.c
@@ -0,0 +1,372 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ *  Copyright IBM Corp. 2008, 2009
+ *
+ *  Author: Jan Glauber (jang@linux.vnet.ibm.com)
+ */
+#include <linux/seq_file.h>
+#include <linux/debugfs.h>
+#include <linux/uaccess.h>
+#include <linux/export.h>
+#include <linux/slab.h>
+#include <asm/debug.h>
+#include "qdio_debug.h"
+#include "qdio.h"
+
+debug_info_t *qdio_dbf_setup;
+debug_info_t *qdio_dbf_error;
+
+static struct dentry *debugfs_root;
+#define QDIO_DEBUGFS_NAME_LEN	10
+#define QDIO_DBF_NAME_LEN	20
+
+struct qdio_dbf_entry {
+	char dbf_name[QDIO_DBF_NAME_LEN];
+	debug_info_t *dbf_info;
+	struct list_head dbf_list;
+};
+
+static LIST_HEAD(qdio_dbf_list);
+static DEFINE_MUTEX(qdio_dbf_list_mutex);
+
+static debug_info_t *qdio_get_dbf_entry(char *name)
+{
+	struct qdio_dbf_entry *entry;
+	debug_info_t *rc = NULL;
+
+	mutex_lock(&qdio_dbf_list_mutex);
+	list_for_each_entry(entry, &qdio_dbf_list, dbf_list) {
+		if (strcmp(entry->dbf_name, name) == 0) {
+			rc = entry->dbf_info;
+			break;
+		}
+	}
+	mutex_unlock(&qdio_dbf_list_mutex);
+	return rc;
+}
+
+static void qdio_clear_dbf_list(void)
+{
+	struct qdio_dbf_entry *entry, *tmp;
+
+	mutex_lock(&qdio_dbf_list_mutex);
+	list_for_each_entry_safe(entry, tmp, &qdio_dbf_list, dbf_list) {
+		list_del(&entry->dbf_list);
+		debug_unregister(entry->dbf_info);
+		kfree(entry);
+	}
+	mutex_unlock(&qdio_dbf_list_mutex);
+}
+
+int qdio_allocate_dbf(struct qdio_initialize *init_data,
+		       struct qdio_irq *irq_ptr)
+{
+	char text[QDIO_DBF_NAME_LEN];
+	struct qdio_dbf_entry *new_entry;
+
+	DBF_EVENT("qfmt:%1d", init_data->q_format);
+	DBF_HEX(init_data->adapter_name, 8);
+	DBF_EVENT("qpff%4x", init_data->qib_param_field_format);
+	DBF_HEX(&init_data->qib_param_field, sizeof(void *));
+	DBF_HEX(&init_data->input_slib_elements, sizeof(void *));
+	DBF_HEX(&init_data->output_slib_elements, sizeof(void *));
+	DBF_EVENT("niq:%1d noq:%1d", init_data->no_input_qs,
+		  init_data->no_output_qs);
+	DBF_HEX(&init_data->input_handler, sizeof(void *));
+	DBF_HEX(&init_data->output_handler, sizeof(void *));
+	DBF_HEX(&init_data->int_parm, sizeof(long));
+	DBF_HEX(&init_data->input_sbal_addr_array, sizeof(void *));
+	DBF_HEX(&init_data->output_sbal_addr_array, sizeof(void *));
+	DBF_EVENT("irq:%8lx", (unsigned long)irq_ptr);
+
+	/* allocate trace view for the interface */
+	snprintf(text, QDIO_DBF_NAME_LEN, "qdio_%s",
+					dev_name(&init_data->cdev->dev));
+	irq_ptr->debug_area = qdio_get_dbf_entry(text);
+	if (irq_ptr->debug_area)
+		DBF_DEV_EVENT(DBF_ERR, irq_ptr, "dbf reused");
+	else {
+		irq_ptr->debug_area = debug_register(text, 2, 1, 16);
+		if (!irq_ptr->debug_area)
+			return -ENOMEM;
+		if (debug_register_view(irq_ptr->debug_area,
+						&debug_hex_ascii_view)) {
+			debug_unregister(irq_ptr->debug_area);
+			return -ENOMEM;
+		}
+		debug_set_level(irq_ptr->debug_area, DBF_WARN);
+		DBF_DEV_EVENT(DBF_ERR, irq_ptr, "dbf created");
+		new_entry = kzalloc(sizeof(struct qdio_dbf_entry), GFP_KERNEL);
+		if (!new_entry) {
+			debug_unregister(irq_ptr->debug_area);
+			return -ENOMEM;
+		}
+		strlcpy(new_entry->dbf_name, text, QDIO_DBF_NAME_LEN);
+		new_entry->dbf_info = irq_ptr->debug_area;
+		mutex_lock(&qdio_dbf_list_mutex);
+		list_add(&new_entry->dbf_list, &qdio_dbf_list);
+		mutex_unlock(&qdio_dbf_list_mutex);
+	}
+	return 0;
+}
+
+static int qstat_show(struct seq_file *m, void *v)
+{
+	unsigned char state;
+	struct qdio_q *q = m->private;
+	int i;
+
+	if (!q)
+		return 0;
+
+	seq_printf(m, "Timestamp: %Lx  Last AI: %Lx\n",
+		   q->timestamp, last_ai_time);
+	seq_printf(m, "nr_used: %d  ftc: %d  last_move: %d\n",
+		   atomic_read(&q->nr_buf_used),
+		   q->first_to_check, q->last_move);
+	if (q->is_input_q) {
+		seq_printf(m, "polling: %d  ack start: %d  ack count: %d\n",
+			   q->u.in.polling, q->u.in.ack_start,
+			   q->u.in.ack_count);
+		seq_printf(m, "DSCI: %d   IRQs disabled: %u\n",
+			   *(u32 *)q->irq_ptr->dsci,
+			   test_bit(QDIO_QUEUE_IRQS_DISABLED,
+			   &q->u.in.queue_irq_state));
+	}
+	seq_printf(m, "SBAL states:\n");
+	seq_printf(m, "|0      |8      |16     |24     |32     |40     |48     |56  63|\n");
+
+	for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; i++) {
+		debug_get_buf_state(q, i, &state);
+		switch (state) {
+		case SLSB_P_INPUT_NOT_INIT:
+		case SLSB_P_OUTPUT_NOT_INIT:
+			seq_printf(m, "N");
+			break;
+		case SLSB_P_OUTPUT_PENDING:
+			seq_printf(m, "P");
+			break;
+		case SLSB_P_INPUT_PRIMED:
+		case SLSB_CU_OUTPUT_PRIMED:
+			seq_printf(m, "+");
+			break;
+		case SLSB_P_INPUT_ACK:
+			seq_printf(m, "A");
+			break;
+		case SLSB_P_INPUT_ERROR:
+		case SLSB_P_OUTPUT_ERROR:
+			seq_printf(m, "x");
+			break;
+		case SLSB_CU_INPUT_EMPTY:
+		case SLSB_P_OUTPUT_EMPTY:
+			seq_printf(m, "-");
+			break;
+		case SLSB_P_INPUT_HALTED:
+		case SLSB_P_OUTPUT_HALTED:
+			seq_printf(m, ".");
+			break;
+		default:
+			seq_printf(m, "?");
+		}
+		if (i == 63)
+			seq_printf(m, "\n");
+	}
+	seq_printf(m, "\n");
+	seq_printf(m, "|64     |72     |80     |88     |96     |104    |112    |   127|\n");
+
+	seq_printf(m, "\nSBAL statistics:");
+	if (!q->irq_ptr->perf_stat_enabled) {
+		seq_printf(m, " disabled\n");
+		return 0;
+	}
+
+	seq_printf(m, "\n1          2..        4..        8..        "
+		   "16..       32..       64..       127\n");
+	for (i = 0; i < ARRAY_SIZE(q->q_stats.nr_sbals); i++)
+		seq_printf(m, "%-10u ", q->q_stats.nr_sbals[i]);
+	seq_printf(m, "\nError      NOP        Total\n%-10u %-10u %-10u\n\n",
+		   q->q_stats.nr_sbal_error, q->q_stats.nr_sbal_nop,
+		   q->q_stats.nr_sbal_total);
+	return 0;
+}
+
+static int qstat_seq_open(struct inode *inode, struct file *filp)
+{
+	return single_open(filp, qstat_show,
+			   file_inode(filp)->i_private);
+}
+
+static const struct file_operations debugfs_fops = {
+	.owner	 = THIS_MODULE,
+	.open	 = qstat_seq_open,
+	.read	 = seq_read,
+	.llseek  = seq_lseek,
+	.release = single_release,
+};
+
+static char *qperf_names[] = {
+	"Assumed adapter interrupts",
+	"QDIO interrupts",
+	"Requested PCIs",
+	"Inbound tasklet runs",
+	"Inbound tasklet resched",
+	"Inbound tasklet resched2",
+	"Outbound tasklet runs",
+	"SIGA read",
+	"SIGA write",
+	"SIGA sync",
+	"Inbound calls",
+	"Inbound handler",
+	"Inbound stop_polling",
+	"Inbound queue full",
+	"Outbound calls",
+	"Outbound handler",
+	"Outbound queue full",
+	"Outbound fast_requeue",
+	"Outbound target_full",
+	"QEBSM eqbs",
+	"QEBSM eqbs partial",
+	"QEBSM sqbs",
+	"QEBSM sqbs partial",
+	"Discarded interrupts"
+};
+
+static int qperf_show(struct seq_file *m, void *v)
+{
+	struct qdio_irq *irq_ptr = m->private;
+	unsigned int *stat;
+	int i;
+
+	if (!irq_ptr)
+		return 0;
+	if (!irq_ptr->perf_stat_enabled) {
+		seq_printf(m, "disabled\n");
+		return 0;
+	}
+	stat = (unsigned int *)&irq_ptr->perf_stat;
+
+	for (i = 0; i < ARRAY_SIZE(qperf_names); i++)
+		seq_printf(m, "%26s:\t%u\n",
+			   qperf_names[i], *(stat + i));
+	return 0;
+}
+
+static ssize_t qperf_seq_write(struct file *file, const char __user *ubuf,
+			       size_t count, loff_t *off)
+{
+	struct seq_file *seq = file->private_data;
+	struct qdio_irq *irq_ptr = seq->private;
+	struct qdio_q *q;
+	unsigned long val;
+	int ret, i;
+
+	if (!irq_ptr)
+		return 0;
+
+	ret = kstrtoul_from_user(ubuf, count, 10, &val);
+	if (ret)
+		return ret;
+
+	switch (val) {
+	case 0:
+		irq_ptr->perf_stat_enabled = 0;
+		memset(&irq_ptr->perf_stat, 0, sizeof(irq_ptr->perf_stat));
+		for_each_input_queue(irq_ptr, q, i)
+			memset(&q->q_stats, 0, sizeof(q->q_stats));
+		for_each_output_queue(irq_ptr, q, i)
+			memset(&q->q_stats, 0, sizeof(q->q_stats));
+		break;
+	case 1:
+		irq_ptr->perf_stat_enabled = 1;
+		break;
+	}
+	return count;
+}
+
+static int qperf_seq_open(struct inode *inode, struct file *filp)
+{
+	return single_open(filp, qperf_show,
+			   file_inode(filp)->i_private);
+}
+
+static const struct file_operations debugfs_perf_fops = {
+	.owner	 = THIS_MODULE,
+	.open	 = qperf_seq_open,
+	.read	 = seq_read,
+	.write	 = qperf_seq_write,
+	.llseek  = seq_lseek,
+	.release = single_release,
+};
+
+static void setup_debugfs_entry(struct qdio_q *q)
+{
+	char name[QDIO_DEBUGFS_NAME_LEN];
+
+	snprintf(name, QDIO_DEBUGFS_NAME_LEN, "%s_%d",
+		 q->is_input_q ? "input" : "output",
+		 q->nr);
+	q->debugfs_q = debugfs_create_file(name, S_IFREG | S_IRUGO | S_IWUSR,
+				q->irq_ptr->debugfs_dev, q, &debugfs_fops);
+	if (IS_ERR(q->debugfs_q))
+		q->debugfs_q = NULL;
+}
+
+void qdio_setup_debug_entries(struct qdio_irq *irq_ptr, struct ccw_device *cdev)
+{
+	struct qdio_q *q;
+	int i;
+
+	irq_ptr->debugfs_dev = debugfs_create_dir(dev_name(&cdev->dev),
+						  debugfs_root);
+	if (IS_ERR(irq_ptr->debugfs_dev))
+		irq_ptr->debugfs_dev = NULL;
+
+	irq_ptr->debugfs_perf = debugfs_create_file("statistics",
+				S_IFREG | S_IRUGO | S_IWUSR,
+				irq_ptr->debugfs_dev, irq_ptr,
+				&debugfs_perf_fops);
+	if (IS_ERR(irq_ptr->debugfs_perf))
+		irq_ptr->debugfs_perf = NULL;
+
+	for_each_input_queue(irq_ptr, q, i)
+		setup_debugfs_entry(q);
+	for_each_output_queue(irq_ptr, q, i)
+		setup_debugfs_entry(q);
+}
+
+void qdio_shutdown_debug_entries(struct qdio_irq *irq_ptr)
+{
+	struct qdio_q *q;
+	int i;
+
+	for_each_input_queue(irq_ptr, q, i)
+		debugfs_remove(q->debugfs_q);
+	for_each_output_queue(irq_ptr, q, i)
+		debugfs_remove(q->debugfs_q);
+	debugfs_remove(irq_ptr->debugfs_perf);
+	debugfs_remove(irq_ptr->debugfs_dev);
+}
+
+int __init qdio_debug_init(void)
+{
+	debugfs_root = debugfs_create_dir("qdio", NULL);
+
+	qdio_dbf_setup = debug_register("qdio_setup", 16, 1, 16);
+	debug_register_view(qdio_dbf_setup, &debug_hex_ascii_view);
+	debug_set_level(qdio_dbf_setup, DBF_INFO);
+	DBF_EVENT("dbf created\n");
+
+	qdio_dbf_error = debug_register("qdio_error", 4, 1, 16);
+	debug_register_view(qdio_dbf_error, &debug_hex_ascii_view);
+	debug_set_level(qdio_dbf_error, DBF_INFO);
+	DBF_ERROR("dbf created\n");
+	return 0;
+}
+
+void qdio_debug_exit(void)
+{
+	qdio_clear_dbf_list();
+	debugfs_remove(debugfs_root);
+	debug_unregister(qdio_dbf_setup);
+	debug_unregister(qdio_dbf_error);
+}
diff --git a/drivers/s390/cio/qdio_debug.h b/drivers/s390/cio/qdio_debug.h
new file mode 100644
index 0000000..f85f5fa
--- /dev/null
+++ b/drivers/s390/cio/qdio_debug.h
@@ -0,0 +1,75 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ *  Copyright IBM Corp. 2008
+ *
+ *  Author: Jan Glauber (jang@linux.vnet.ibm.com)
+ */
+#ifndef QDIO_DEBUG_H
+#define QDIO_DEBUG_H
+
+#include <asm/debug.h>
+#include <asm/qdio.h>
+#include "qdio.h"
+
+/* that gives us 31 characters in the text event views */
+#define QDIO_DBF_LEN	32
+
+extern debug_info_t *qdio_dbf_setup;
+extern debug_info_t *qdio_dbf_error;
+
+#define DBF_ERR		3	/* error conditions	*/
+#define DBF_WARN	4	/* warning conditions	*/
+#define DBF_INFO	6	/* informational	*/
+
+#undef DBF_EVENT
+#undef DBF_ERROR
+#undef DBF_DEV_EVENT
+
+#define DBF_EVENT(text...) \
+	do { \
+		char debug_buffer[QDIO_DBF_LEN]; \
+		snprintf(debug_buffer, QDIO_DBF_LEN, text); \
+		debug_text_event(qdio_dbf_setup, DBF_ERR, debug_buffer); \
+	} while (0)
+
+static inline void DBF_HEX(void *addr, int len)
+{
+	debug_event(qdio_dbf_setup, DBF_ERR, addr, len);
+}
+
+#define DBF_ERROR(text...) \
+	do { \
+		char debug_buffer[QDIO_DBF_LEN]; \
+		snprintf(debug_buffer, QDIO_DBF_LEN, text); \
+		debug_text_event(qdio_dbf_error, DBF_ERR, debug_buffer); \
+	} while (0)
+
+static inline void DBF_ERROR_HEX(void *addr, int len)
+{
+	debug_event(qdio_dbf_error, DBF_ERR, addr, len);
+}
+
+#define DBF_DEV_EVENT(level, device, text...) \
+	do { \
+		char debug_buffer[QDIO_DBF_LEN]; \
+		if (debug_level_enabled(device->debug_area, level)) { \
+			snprintf(debug_buffer, QDIO_DBF_LEN, text); \
+			debug_text_event(device->debug_area, level, debug_buffer); \
+		} \
+	} while (0)
+
+static inline void DBF_DEV_HEX(struct qdio_irq *dev, void *addr,
+			       int len, int level)
+{
+	debug_event(dev->debug_area, level, addr, len);
+}
+
+int qdio_allocate_dbf(struct qdio_initialize *init_data,
+		       struct qdio_irq *irq_ptr);
+void qdio_setup_debug_entries(struct qdio_irq *irq_ptr,
+			      struct ccw_device *cdev);
+void qdio_shutdown_debug_entries(struct qdio_irq *irq_ptr);
+int qdio_debug_init(void);
+void qdio_debug_exit(void);
+
+#endif
diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c
new file mode 100644
index 0000000..9c7d9da
--- /dev/null
+++ b/drivers/s390/cio/qdio_main.c
@@ -0,0 +1,1872 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Linux for s390 qdio support, buffer handling, qdio API and module support.
+ *
+ * Copyright IBM Corp. 2000, 2008
+ * Author(s): Utz Bacher <utz.bacher@de.ibm.com>
+ *	      Jan Glauber <jang@linux.vnet.ibm.com>
+ * 2.6 cio integration by Cornelia Huck <cornelia.huck@de.ibm.com>
+ */
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/timer.h>
+#include <linux/delay.h>
+#include <linux/gfp.h>
+#include <linux/io.h>
+#include <linux/atomic.h>
+#include <asm/debug.h>
+#include <asm/qdio.h>
+#include <asm/ipl.h>
+
+#include "cio.h"
+#include "css.h"
+#include "device.h"
+#include "qdio.h"
+#include "qdio_debug.h"
+
+MODULE_AUTHOR("Utz Bacher <utz.bacher@de.ibm.com>,"\
+	"Jan Glauber <jang@linux.vnet.ibm.com>");
+MODULE_DESCRIPTION("QDIO base support");
+MODULE_LICENSE("GPL");
+
+static inline int do_siga_sync(unsigned long schid,
+			       unsigned int out_mask, unsigned int in_mask,
+			       unsigned int fc)
+{
+	register unsigned long __fc asm ("0") = fc;
+	register unsigned long __schid asm ("1") = schid;
+	register unsigned long out asm ("2") = out_mask;
+	register unsigned long in asm ("3") = in_mask;
+	int cc;
+
+	asm volatile(
+		"	siga	0\n"
+		"	ipm	%0\n"
+		"	srl	%0,28\n"
+		: "=d" (cc)
+		: "d" (__fc), "d" (__schid), "d" (out), "d" (in) : "cc");
+	return cc;
+}
+
+static inline int do_siga_input(unsigned long schid, unsigned int mask,
+				unsigned int fc)
+{
+	register unsigned long __fc asm ("0") = fc;
+	register unsigned long __schid asm ("1") = schid;
+	register unsigned long __mask asm ("2") = mask;
+	int cc;
+
+	asm volatile(
+		"	siga	0\n"
+		"	ipm	%0\n"
+		"	srl	%0,28\n"
+		: "=d" (cc)
+		: "d" (__fc), "d" (__schid), "d" (__mask) : "cc");
+	return cc;
+}
+
+/**
+ * do_siga_output - perform SIGA-w/wt function
+ * @schid: subchannel id or in case of QEBSM the subchannel token
+ * @mask: which output queues to process
+ * @bb: busy bit indicator, set only if SIGA-w/wt could not access a buffer
+ * @fc: function code to perform
+ * @aob: asynchronous operation block
+ *
+ * Returns condition code.
+ * Note: For IQDC unicast queues only the highest priority queue is processed.
+ */
+static inline int do_siga_output(unsigned long schid, unsigned long mask,
+				 unsigned int *bb, unsigned int fc,
+				 unsigned long aob)
+{
+	register unsigned long __fc asm("0") = fc;
+	register unsigned long __schid asm("1") = schid;
+	register unsigned long __mask asm("2") = mask;
+	register unsigned long __aob asm("3") = aob;
+	int cc;
+
+	asm volatile(
+		"	siga	0\n"
+		"	ipm	%0\n"
+		"	srl	%0,28\n"
+		: "=d" (cc), "+d" (__fc), "+d" (__aob)
+		: "d" (__schid), "d" (__mask)
+		: "cc");
+	*bb = __fc >> 31;
+	return cc;
+}
+
+/**
+ * qdio_do_eqbs - extract buffer states for QEBSM
+ * @q: queue to manipulate
+ * @state: state of the extracted buffers
+ * @start: buffer number to start at
+ * @count: count of buffers to examine
+ * @auto_ack: automatically acknowledge buffers
+ *
+ * Returns the number of successfully extracted equal buffer states.
+ * Stops processing if a state is different from the last buffer's state.
+ */
+static int qdio_do_eqbs(struct qdio_q *q, unsigned char *state,
+			int start, int count, int auto_ack)
+{
+	int tmp_count = count, tmp_start = start, nr = q->nr;
+	unsigned int ccq = 0;
+
+	qperf_inc(q, eqbs);
+
+	if (!q->is_input_q)
+		nr += q->irq_ptr->nr_input_qs;
+again:
+	ccq = do_eqbs(q->irq_ptr->sch_token, state, nr, &tmp_start, &tmp_count,
+		      auto_ack);
+
+	switch (ccq) {
+	case 0:
+	case 32:
+		/* all done, or next buffer state different */
+		return count - tmp_count;
+	case 96:
+		/* not all buffers processed */
+		qperf_inc(q, eqbs_partial);
+		DBF_DEV_EVENT(DBF_WARN, q->irq_ptr, "EQBS part:%02x",
+			tmp_count);
+		return count - tmp_count;
+	case 97:
+		/* no buffer processed */
+		DBF_DEV_EVENT(DBF_WARN, q->irq_ptr, "EQBS again:%2d", ccq);
+		goto again;
+	default:
+		DBF_ERROR("%4x ccq:%3d", SCH_NO(q), ccq);
+		DBF_ERROR("%4x EQBS ERROR", SCH_NO(q));
+		DBF_ERROR("%3d%3d%2d", count, tmp_count, nr);
+		q->handler(q->irq_ptr->cdev, QDIO_ERROR_GET_BUF_STATE, q->nr,
+			   q->first_to_kick, count, q->irq_ptr->int_parm);
+		return 0;
+	}
+}
+
+/**
+ * qdio_do_sqbs - set buffer states for QEBSM
+ * @q: queue to manipulate
+ * @state: new state of the buffers
+ * @start: first buffer number to change
+ * @count: how many buffers to change
+ *
+ * Returns the number of successfully changed buffers.
+ * Retries until the specified count of buffer states is set or an
+ * error occurs.
+ */
+static int qdio_do_sqbs(struct qdio_q *q, unsigned char state, int start,
+			int count)
+{
+	unsigned int ccq = 0;
+	int tmp_count = count, tmp_start = start;
+	int nr = q->nr;
+
+	if (!count)
+		return 0;
+	qperf_inc(q, sqbs);
+
+	if (!q->is_input_q)
+		nr += q->irq_ptr->nr_input_qs;
+again:
+	ccq = do_sqbs(q->irq_ptr->sch_token, state, nr, &tmp_start, &tmp_count);
+
+	switch (ccq) {
+	case 0:
+	case 32:
+		/* all done, or active buffer adapter-owned */
+		WARN_ON_ONCE(tmp_count);
+		return count - tmp_count;
+	case 96:
+		/* not all buffers processed */
+		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "SQBS again:%2d", ccq);
+		qperf_inc(q, sqbs_partial);
+		goto again;
+	default:
+		DBF_ERROR("%4x ccq:%3d", SCH_NO(q), ccq);
+		DBF_ERROR("%4x SQBS ERROR", SCH_NO(q));
+		DBF_ERROR("%3d%3d%2d", count, tmp_count, nr);
+		q->handler(q->irq_ptr->cdev, QDIO_ERROR_SET_BUF_STATE, q->nr,
+			   q->first_to_kick, count, q->irq_ptr->int_parm);
+		return 0;
+	}
+}
+
+/*
+ * Returns number of examined buffers and their common state in *state.
+ * Requested number of buffers-to-examine must be > 0.
+ */
+static inline int get_buf_states(struct qdio_q *q, unsigned int bufnr,
+				 unsigned char *state, unsigned int count,
+				 int auto_ack, int merge_pending)
+{
+	unsigned char __state = 0;
+	int i;
+
+	if (is_qebsm(q))
+		return qdio_do_eqbs(q, state, bufnr, count, auto_ack);
+
+	/* get initial state: */
+	__state = q->slsb.val[bufnr];
+	if (merge_pending && __state == SLSB_P_OUTPUT_PENDING)
+		__state = SLSB_P_OUTPUT_EMPTY;
+
+	for (i = 1; i < count; i++) {
+		bufnr = next_buf(bufnr);
+
+		/* merge PENDING into EMPTY: */
+		if (merge_pending &&
+		    q->slsb.val[bufnr] == SLSB_P_OUTPUT_PENDING &&
+		    __state == SLSB_P_OUTPUT_EMPTY)
+			continue;
+
+		/* stop if next state differs from initial state: */
+		if (q->slsb.val[bufnr] != __state)
+			break;
+	}
+	*state = __state;
+	return i;
+}
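+
+/*
+ * Illustrative example (not from the original source): with merge_pending
+ * set, get_buf_states() above folds PENDING into EMPTY, so the state
+ * sequence PENDING, EMPTY, PENDING is reported as three buffers in state
+ * SLSB_P_OUTPUT_EMPTY instead of stopping at the first state change.
+ */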
+
+static inline int get_buf_state(struct qdio_q *q, unsigned int bufnr,
+				unsigned char *state, int auto_ack)
+{
+	return get_buf_states(q, bufnr, state, 1, auto_ack, 0);
+}
+
+/* wrap-around safe setting of slsb states, returns number of changed buffers */
+static inline int set_buf_states(struct qdio_q *q, int bufnr,
+				 unsigned char state, int count)
+{
+	int i;
+
+	if (is_qebsm(q))
+		return qdio_do_sqbs(q, state, bufnr, count);
+
+	for (i = 0; i < count; i++) {
+		xchg(&q->slsb.val[bufnr], state);
+		bufnr = next_buf(bufnr);
+	}
+	return count;
+}
+
+static inline int set_buf_state(struct qdio_q *q, int bufnr,
+				unsigned char state)
+{
+	return set_buf_states(q, bufnr, state, 1);
+}
+
+/* set slsb states to initial state */
+static void qdio_init_buf_states(struct qdio_irq *irq_ptr)
+{
+	struct qdio_q *q;
+	int i;
+
+	for_each_input_queue(irq_ptr, q, i)
+		set_buf_states(q, 0, SLSB_P_INPUT_NOT_INIT,
+			       QDIO_MAX_BUFFERS_PER_Q);
+	for_each_output_queue(irq_ptr, q, i)
+		set_buf_states(q, 0, SLSB_P_OUTPUT_NOT_INIT,
+			       QDIO_MAX_BUFFERS_PER_Q);
+}
+
+static inline int qdio_siga_sync(struct qdio_q *q, unsigned int output,
+			  unsigned int input)
+{
+	unsigned long schid = *((u32 *) &q->irq_ptr->schid);
+	unsigned int fc = QDIO_SIGA_SYNC;
+	int cc;
+
+	DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-s:%1d", q->nr);
+	qperf_inc(q, siga_sync);
+
+	if (is_qebsm(q)) {
+		schid = q->irq_ptr->sch_token;
+		fc |= QDIO_SIGA_QEBSM_FLAG;
+	}
+
+	cc = do_siga_sync(schid, output, input, fc);
+	if (unlikely(cc))
+		DBF_ERROR("%4x SIGA-S:%2d", SCH_NO(q), cc);
+	return (cc) ? -EIO : 0;
+}
+
+static inline int qdio_siga_sync_q(struct qdio_q *q)
+{
+	if (q->is_input_q)
+		return qdio_siga_sync(q, 0, q->mask);
+	else
+		return qdio_siga_sync(q, q->mask, 0);
+}
+
+static int qdio_siga_output(struct qdio_q *q, unsigned int *busy_bit,
+	unsigned long aob)
+{
+	unsigned long schid = *((u32 *) &q->irq_ptr->schid);
+	unsigned int fc = QDIO_SIGA_WRITE;
+	u64 start_time = 0;
+	int retries = 0, cc;
+	unsigned long laob = 0;
+
+	WARN_ON_ONCE(aob && ((queue_type(q) != QDIO_IQDIO_QFMT) ||
+			     !q->u.out.use_cq));
+	if (q->u.out.use_cq && aob != 0) {
+		fc = QDIO_SIGA_WRITEQ;
+		laob = aob;
+	}
+
+	if (is_qebsm(q)) {
+		schid = q->irq_ptr->sch_token;
+		fc |= QDIO_SIGA_QEBSM_FLAG;
+	}
+again:
+	cc = do_siga_output(schid, q->mask, busy_bit, fc, laob);
+
+	/* hipersocket busy condition */
+	if (unlikely(*busy_bit)) {
+		retries++;
+
+		if (!start_time) {
+			start_time = get_tod_clock_fast();
+			goto again;
+		}
+		if (get_tod_clock_fast() - start_time < QDIO_BUSY_BIT_PATIENCE)
+			goto again;
+	}
+	if (retries) {
+		DBF_DEV_EVENT(DBF_WARN, q->irq_ptr,
+			      "%4x cc2 BB1:%1d", SCH_NO(q), q->nr);
+		DBF_DEV_EVENT(DBF_WARN, q->irq_ptr, "count:%u", retries);
+	}
+	return cc;
+}
+
+static inline int qdio_siga_input(struct qdio_q *q)
+{
+	unsigned long schid = *((u32 *) &q->irq_ptr->schid);
+	unsigned int fc = QDIO_SIGA_READ;
+	int cc;
+
+	DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-r:%1d", q->nr);
+	qperf_inc(q, siga_read);
+
+	if (is_qebsm(q)) {
+		schid = q->irq_ptr->sch_token;
+		fc |= QDIO_SIGA_QEBSM_FLAG;
+	}
+
+	cc = do_siga_input(schid, q->mask, fc);
+	if (unlikely(cc))
+		DBF_ERROR("%4x SIGA-R:%2d", SCH_NO(q), cc);
+	return (cc) ? -EIO : 0;
+}
+
+#define qdio_siga_sync_out(q) qdio_siga_sync(q, ~0U, 0)
+#define qdio_siga_sync_all(q) qdio_siga_sync(q, ~0U, ~0U)
+
+static inline void qdio_sync_queues(struct qdio_q *q)
+{
+	/* PCI capable outbound queues will also be scanned so sync them too */
+	if (pci_out_supported(q))
+		qdio_siga_sync_all(q);
+	else
+		qdio_siga_sync_q(q);
+}
+
+int debug_get_buf_state(struct qdio_q *q, unsigned int bufnr,
+			unsigned char *state)
+{
+	if (need_siga_sync(q))
+		qdio_siga_sync_q(q);
+	return get_buf_states(q, bufnr, state, 1, 0, 0);
+}
+
+static inline void qdio_stop_polling(struct qdio_q *q)
+{
+	if (!q->u.in.polling)
+		return;
+
+	q->u.in.polling = 0;
+	qperf_inc(q, stop_polling);
+
+	/* show the card that we are not polling anymore */
+	if (is_qebsm(q)) {
+		set_buf_states(q, q->u.in.ack_start, SLSB_P_INPUT_NOT_INIT,
+			       q->u.in.ack_count);
+		q->u.in.ack_count = 0;
+	} else
+		set_buf_state(q, q->u.in.ack_start, SLSB_P_INPUT_NOT_INIT);
+}
+
+static inline void account_sbals(struct qdio_q *q, unsigned int count)
+{
+	int pos;
+
+	q->q_stats.nr_sbal_total += count;
+	if (count == QDIO_MAX_BUFFERS_MASK) {
+		q->q_stats.nr_sbals[7]++;
+		return;
+	}
+	pos = ilog2(count);
+	q->q_stats.nr_sbals[pos]++;
+}
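+
+/*
+ * Illustrative mapping (not from the original source): account_sbals()
+ * above files count 1 into nr_sbals[0], counts 2-3 into nr_sbals[1], and
+ * so on via ilog2(); the maximum scan count of 127 (QDIO_MAX_BUFFERS_MASK)
+ * is special-cased into nr_sbals[7], the "queue full" bucket.
+ */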
+
+static void process_buffer_error(struct qdio_q *q, int count)
+{
+	unsigned char state = (q->is_input_q) ? SLSB_P_INPUT_NOT_INIT :
+					SLSB_P_OUTPUT_NOT_INIT;
+
+	q->qdio_error = QDIO_ERROR_SLSB_STATE;
+
+	/* special handling for no target buffer empty */
+	if (queue_type(q) == QDIO_IQDIO_QFMT && !q->is_input_q &&
+	    q->sbal[q->first_to_check]->element[15].sflags == 0x10) {
+		qperf_inc(q, target_full);
+		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "OUTFULL FTC:%02x",
+			      q->first_to_check);
+		goto set;
+	}
+
+	DBF_ERROR("%4x BUF ERROR", SCH_NO(q));
+	DBF_ERROR((q->is_input_q) ? "IN:%2d" : "OUT:%2d", q->nr);
+	DBF_ERROR("FTC:%3d C:%3d", q->first_to_check, count);
+	DBF_ERROR("F14:%2x F15:%2x",
+		  q->sbal[q->first_to_check]->element[14].sflags,
+		  q->sbal[q->first_to_check]->element[15].sflags);
+
+set:
+	/*
+	 * Interrupts may be avoided as long as the error is present
+	 * so change the buffer state immediately to avoid starvation.
+	 */
+	set_buf_states(q, q->first_to_check, state, count);
+}
+
+static inline void inbound_primed(struct qdio_q *q, int count)
+{
+	int new;
+
+	DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in prim:%1d %02x", q->nr, count);
+
+	/* for QEBSM the ACK was already set by EQBS */
+	if (is_qebsm(q)) {
+		if (!q->u.in.polling) {
+			q->u.in.polling = 1;
+			q->u.in.ack_count = count;
+			q->u.in.ack_start = q->first_to_check;
+			return;
+		}
+
+		/* delete the previous ACK's */
+		set_buf_states(q, q->u.in.ack_start, SLSB_P_INPUT_NOT_INIT,
+			       q->u.in.ack_count);
+		q->u.in.ack_count = count;
+		q->u.in.ack_start = q->first_to_check;
+		return;
+	}
+
+	/*
+	 * ACK the newest buffer. The ACK will be removed in qdio_stop_polling
+	 * or by the next inbound run.
+	 */
+	new = add_buf(q->first_to_check, count - 1);
+	if (q->u.in.polling) {
+		/* reset the previous ACK but first set the new one */
+		set_buf_state(q, new, SLSB_P_INPUT_ACK);
+		set_buf_state(q, q->u.in.ack_start, SLSB_P_INPUT_NOT_INIT);
+	} else {
+		q->u.in.polling = 1;
+		set_buf_state(q, new, SLSB_P_INPUT_ACK);
+	}
+
+	q->u.in.ack_start = new;
+	count--;
+	if (!count)
+		return;
+	/* need to change ALL buffers to get more interrupts */
+	set_buf_states(q, q->first_to_check, SLSB_P_INPUT_NOT_INIT, count);
+}
+
+static int get_inbound_buffer_frontier(struct qdio_q *q)
+{
+	unsigned char state = 0;
+	int count;
+
+	q->timestamp = get_tod_clock_fast();
+
+	/*
+	 * Don't check 128 buffers, as otherwise qdio_inbound_q_moved
+	 * would return 0.
+	 */
+	count = min(atomic_read(&q->nr_buf_used), QDIO_MAX_BUFFERS_MASK);
+	if (!count)
+		goto out;
+
+	/*
+	 * No siga sync here: either a PCI interrupt already synced the
+	 * queues, or we did so after a thin interrupt.
+	 */
+	count = get_buf_states(q, q->first_to_check, &state, count, 1, 0);
+	if (!count)
+		goto out;
+
+	switch (state) {
+	case SLSB_P_INPUT_PRIMED:
+		inbound_primed(q, count);
+		q->first_to_check = add_buf(q->first_to_check, count);
+		if (atomic_sub_return(count, &q->nr_buf_used) == 0)
+			qperf_inc(q, inbound_queue_full);
+		if (q->irq_ptr->perf_stat_enabled)
+			account_sbals(q, count);
+		break;
+	case SLSB_P_INPUT_ERROR:
+		process_buffer_error(q, count);
+		q->first_to_check = add_buf(q->first_to_check, count);
+		if (atomic_sub_return(count, &q->nr_buf_used) == 0)
+			qperf_inc(q, inbound_queue_full);
+		if (q->irq_ptr->perf_stat_enabled)
+			account_sbals_error(q, count);
+		break;
+	case SLSB_CU_INPUT_EMPTY:
+	case SLSB_P_INPUT_NOT_INIT:
+	case SLSB_P_INPUT_ACK:
+		if (q->irq_ptr->perf_stat_enabled)
+			q->q_stats.nr_sbal_nop++;
+		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in nop:%1d %#02x",
+			q->nr, q->first_to_check);
+		break;
+	default:
+		WARN_ON_ONCE(1);
+	}
+out:
+	return q->first_to_check;
+}
+
+static int qdio_inbound_q_moved(struct qdio_q *q)
+{
+	int bufnr;
+
+	bufnr = get_inbound_buffer_frontier(q);
+
+	if (bufnr != q->last_move) {
+		q->last_move = bufnr;
+		if (!is_thinint_irq(q->irq_ptr) && MACHINE_IS_LPAR)
+			q->u.in.timestamp = get_tod_clock();
+		return 1;
+	} else
+		return 0;
+}
+
+static inline int qdio_inbound_q_done(struct qdio_q *q)
+{
+	unsigned char state = 0;
+
+	if (!atomic_read(&q->nr_buf_used))
+		return 1;
+
+	if (need_siga_sync(q))
+		qdio_siga_sync_q(q);
+	get_buf_state(q, q->first_to_check, &state, 0);
+
+	if (state == SLSB_P_INPUT_PRIMED || state == SLSB_P_INPUT_ERROR)
+		/* more work coming */
+		return 0;
+
+	if (is_thinint_irq(q->irq_ptr))
+		return 1;
+
+	/* don't poll under z/VM */
+	if (MACHINE_IS_VM)
+		return 1;
+
+	/*
+	 * At this point we know that inbound first_to_check
+	 * has (probably) not moved (see qdio_inbound_processing).
+	 */
+	if (get_tod_clock_fast() > q->u.in.timestamp + QDIO_INPUT_THRESHOLD) {
+		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in done:%02x",
+			      q->first_to_check);
+		return 1;
+	} else
+		return 0;
+}
+
+static inline int contains_aobs(struct qdio_q *q)
+{
+	return !q->is_input_q && q->u.out.use_cq;
+}
+
+static inline void qdio_handle_aobs(struct qdio_q *q, int start, int count)
+{
+	unsigned char state = 0;
+	int j, b = start;
+
+	if (!contains_aobs(q))
+		return;
+
+	for (j = 0; j < count; ++j) {
+		get_buf_state(q, b, &state, 0);
+		if (state == SLSB_P_OUTPUT_PENDING) {
+			struct qaob *aob = q->u.out.aobs[b];
+			if (aob == NULL)
+				continue;
+
+			q->u.out.sbal_state[b].flags |=
+				QDIO_OUTBUF_STATE_FLAG_PENDING;
+			q->u.out.aobs[b] = NULL;
+		} else if (state == SLSB_P_OUTPUT_EMPTY) {
+			q->u.out.sbal_state[b].aob = NULL;
+		}
+		b = next_buf(b);
+	}
+}
+
+static inline unsigned long qdio_aob_for_buffer(struct qdio_output_q *q,
+					int bufnr)
+{
+	unsigned long phys_aob = 0;
+
+	if (!q->use_cq)
+		return 0;
+
+	if (!q->aobs[bufnr]) {
+		struct qaob *aob = qdio_allocate_aob();
+		q->aobs[bufnr] = aob;
+	}
+	if (q->aobs[bufnr]) {
+		q->sbal_state[bufnr].aob = q->aobs[bufnr];
+		q->aobs[bufnr]->user1 = (u64) q->sbal_state[bufnr].user;
+		phys_aob = virt_to_phys(q->aobs[bufnr]);
+		WARN_ON_ONCE(phys_aob & 0xFF);
+	}
+
+	q->sbal_state[bufnr].flags = 0;
+	return phys_aob;
+}
+
+static void qdio_kick_handler(struct qdio_q *q)
+{
+	int start = q->first_to_kick;
+	int end = q->first_to_check;
+	int count;
+
+	if (unlikely(q->irq_ptr->state != QDIO_IRQ_STATE_ACTIVE))
+		return;
+
+	count = sub_buf(end, start);
+
+	if (q->is_input_q) {
+		qperf_inc(q, inbound_handler);
+		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "kih s:%02x c:%02x", start, count);
+	} else {
+		qperf_inc(q, outbound_handler);
+		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "koh: s:%02x c:%02x",
+			      start, count);
+	}
+
+	qdio_handle_aobs(q, start, count);
+
+	q->handler(q->irq_ptr->cdev, q->qdio_error, q->nr, start, count,
+		   q->irq_ptr->int_parm);
+
+	/* for the next time */
+	q->first_to_kick = end;
+	q->qdio_error = 0;
+}
+
+static inline int qdio_tasklet_schedule(struct qdio_q *q)
+{
+	if (likely(q->irq_ptr->state == QDIO_IRQ_STATE_ACTIVE)) {
+		tasklet_schedule(&q->tasklet);
+		return 0;
+	}
+	return -EPERM;
+}
+
+static void __qdio_inbound_processing(struct qdio_q *q)
+{
+	qperf_inc(q, tasklet_inbound);
+
+	if (!qdio_inbound_q_moved(q))
+		return;
+
+	qdio_kick_handler(q);
+
+	if (!qdio_inbound_q_done(q)) {
+		/* means poll time is not yet over */
+		qperf_inc(q, tasklet_inbound_resched);
+		if (!qdio_tasklet_schedule(q))
+			return;
+	}
+
+	qdio_stop_polling(q);
+	/*
+	 * We need to check again to not lose initiative after
+	 * resetting the ACK state.
+	 */
+	if (!qdio_inbound_q_done(q)) {
+		qperf_inc(q, tasklet_inbound_resched2);
+		qdio_tasklet_schedule(q);
+	}
+}
+
+void qdio_inbound_processing(unsigned long data)
+{
+	struct qdio_q *q = (struct qdio_q *)data;
+	__qdio_inbound_processing(q);
+}
+
+static int get_outbound_buffer_frontier(struct qdio_q *q)
+{
+	unsigned char state = 0;
+	int count;
+
+	q->timestamp = get_tod_clock_fast();
+
+	if (need_siga_sync(q))
+		if (((queue_type(q) != QDIO_IQDIO_QFMT) &&
+		    !pci_out_supported(q)) ||
+		    (queue_type(q) == QDIO_IQDIO_QFMT &&
+		    multicast_outbound(q)))
+			qdio_siga_sync_q(q);
+
+	/*
+	 * Don't check 128 buffers, as otherwise qdio_outbound_q_moved
+	 * would return 0.
+	 */
+	count = min(atomic_read(&q->nr_buf_used), QDIO_MAX_BUFFERS_MASK);
+	if (!count)
+		goto out;
+
+	count = get_buf_states(q, q->first_to_check, &state, count, 0,
+			       q->u.out.use_cq);
+	if (!count)
+		goto out;
+
+	switch (state) {
+	case SLSB_P_OUTPUT_EMPTY:
+		/* the adapter got it */
+		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr,
+			"out empty:%1d %02x", q->nr, count);
+
+		atomic_sub(count, &q->nr_buf_used);
+		q->first_to_check = add_buf(q->first_to_check, count);
+		if (q->irq_ptr->perf_stat_enabled)
+			account_sbals(q, count);
+
+		break;
+	case SLSB_P_OUTPUT_ERROR:
+		process_buffer_error(q, count);
+		q->first_to_check = add_buf(q->first_to_check, count);
+		atomic_sub(count, &q->nr_buf_used);
+		if (q->irq_ptr->perf_stat_enabled)
+			account_sbals_error(q, count);
+		break;
+	case SLSB_CU_OUTPUT_PRIMED:
+		/* the adapter has not fetched the output yet */
+		if (q->irq_ptr->perf_stat_enabled)
+			q->q_stats.nr_sbal_nop++;
+		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "out primed:%1d",
+			      q->nr);
+		break;
+	case SLSB_P_OUTPUT_NOT_INIT:
+	case SLSB_P_OUTPUT_HALTED:
+		break;
+	default:
+		WARN_ON_ONCE(1);
+	}
+
+out:
+	return q->first_to_check;
+}
+
+/* all buffers processed? */
+static inline int qdio_outbound_q_done(struct qdio_q *q)
+{
+	return atomic_read(&q->nr_buf_used) == 0;
+}
+
+static inline int qdio_outbound_q_moved(struct qdio_q *q)
+{
+	int bufnr;
+
+	bufnr = get_outbound_buffer_frontier(q);
+
+	if (bufnr != q->last_move) {
+		q->last_move = bufnr;
+		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "out moved:%1d", q->nr);
+		return 1;
+	} else
+		return 0;
+}
+
+static int qdio_kick_outbound_q(struct qdio_q *q, unsigned long aob)
+{
+	int retries = 0, cc;
+	unsigned int busy_bit;
+
+	if (!need_siga_out(q))
+		return 0;
+
+	DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-w:%1d", q->nr);
+retry:
+	qperf_inc(q, siga_write);
+
+	cc = qdio_siga_output(q, &busy_bit, aob);
+	switch (cc) {
+	case 0:
+		break;
+	case 2:
+		if (busy_bit) {
+			while (++retries < QDIO_BUSY_BIT_RETRIES) {
+				mdelay(QDIO_BUSY_BIT_RETRY_DELAY);
+				goto retry;
+			}
+			DBF_ERROR("%4x cc2 BBC:%1d", SCH_NO(q), q->nr);
+			cc = -EBUSY;
+		} else {
+			DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-w cc2:%1d", q->nr);
+			cc = -ENOBUFS;
+		}
+		break;
+	case 1:
+	case 3:
+		DBF_ERROR("%4x SIGA-W:%1d", SCH_NO(q), cc);
+		cc = -EIO;
+		break;
+	}
+	if (retries) {
+		DBF_ERROR("%4x cc2 BB2:%1d", SCH_NO(q), q->nr);
+		DBF_ERROR("count:%u", retries);
+	}
+	return cc;
+}
+
+static void __qdio_outbound_processing(struct qdio_q *q)
+{
+	qperf_inc(q, tasklet_outbound);
+	WARN_ON_ONCE(atomic_read(&q->nr_buf_used) < 0);
+
+	if (qdio_outbound_q_moved(q))
+		qdio_kick_handler(q);
+
+	if (queue_type(q) == QDIO_ZFCP_QFMT)
+		if (!pci_out_supported(q) && !qdio_outbound_q_done(q))
+			goto sched;
+
+	if (q->u.out.pci_out_enabled)
+		return;
+
+	/*
+	 * Now we know that queue type is either qeth without pci enabled
+	 * or HiperSockets. Make sure buffer switch from PRIMED to EMPTY
+	 * is noticed and outbound_handler is called after some time.
+	 */
+	if (qdio_outbound_q_done(q))
+		del_timer_sync(&q->u.out.timer);
+	else
+		if (!timer_pending(&q->u.out.timer) &&
+		    likely(q->irq_ptr->state == QDIO_IRQ_STATE_ACTIVE))
+			mod_timer(&q->u.out.timer, jiffies + 10 * HZ);
+	return;
+
+sched:
+	qdio_tasklet_schedule(q);
+}
+
+/* outbound tasklet */
+void qdio_outbound_processing(unsigned long data)
+{
+	struct qdio_q *q = (struct qdio_q *)data;
+	__qdio_outbound_processing(q);
+}
+
+void qdio_outbound_timer(struct timer_list *t)
+{
+	struct qdio_q *q = from_timer(q, t, u.out.timer);
+
+	qdio_tasklet_schedule(q);
+}
+
+static inline void qdio_check_outbound_after_thinint(struct qdio_q *q)
+{
+	struct qdio_q *out;
+	int i;
+
+	if (!pci_out_supported(q))
+		return;
+
+	for_each_output_queue(q->irq_ptr, out, i)
+		if (!qdio_outbound_q_done(out))
+			qdio_tasklet_schedule(out);
+}
+
+static void __tiqdio_inbound_processing(struct qdio_q *q)
+{
+	qperf_inc(q, tasklet_inbound);
+	if (need_siga_sync(q) && need_siga_sync_after_ai(q))
+		qdio_sync_queues(q);
+
+	/*
+	 * The interrupt could be caused by a PCI request. Check the
+	 * PCI capable outbound queues.
+	 */
+	qdio_check_outbound_after_thinint(q);
+
+	if (!qdio_inbound_q_moved(q))
+		return;
+
+	qdio_kick_handler(q);
+
+	if (!qdio_inbound_q_done(q)) {
+		qperf_inc(q, tasklet_inbound_resched);
+		if (!qdio_tasklet_schedule(q))
+			return;
+	}
+
+	qdio_stop_polling(q);
+	/*
+	 * We need to check again to not lose initiative after
+	 * resetting the ACK state.
+	 */
+	if (!qdio_inbound_q_done(q)) {
+		qperf_inc(q, tasklet_inbound_resched2);
+		qdio_tasklet_schedule(q);
+	}
+}
+
+void tiqdio_inbound_processing(unsigned long data)
+{
+	struct qdio_q *q = (struct qdio_q *)data;
+	__tiqdio_inbound_processing(q);
+}
+
+static inline void qdio_set_state(struct qdio_irq *irq_ptr,
+				  enum qdio_irq_states state)
+{
+	DBF_DEV_EVENT(DBF_INFO, irq_ptr, "newstate: %1d", state);
+
+	irq_ptr->state = state;
+	mb();
+}
+
+static void qdio_irq_check_sense(struct qdio_irq *irq_ptr, struct irb *irb)
+{
+	if (irb->esw.esw0.erw.cons) {
+		DBF_ERROR("%4x sense:", irq_ptr->schid.sch_no);
+		DBF_ERROR_HEX(irb, 64);
+		DBF_ERROR_HEX(irb->ecw, 64);
+	}
+}
+
+/* PCI interrupt handler */
+static void qdio_int_handler_pci(struct qdio_irq *irq_ptr)
+{
+	int i;
+	struct qdio_q *q;
+
+	if (unlikely(irq_ptr->state != QDIO_IRQ_STATE_ACTIVE))
+		return;
+
+	for_each_input_queue(irq_ptr, q, i) {
+		if (q->u.in.queue_start_poll) {
+			/* skip if polling is enabled or already in work */
+			if (test_and_set_bit(QDIO_QUEUE_IRQS_DISABLED,
+				     &q->u.in.queue_irq_state)) {
+				qperf_inc(q, int_discarded);
+				continue;
+			}
+			q->u.in.queue_start_poll(q->irq_ptr->cdev, q->nr,
+						 q->irq_ptr->int_parm);
+		} else {
+			tasklet_schedule(&q->tasklet);
+		}
+	}
+
+	if (!(irq_ptr->qib.ac & QIB_AC_OUTBOUND_PCI_SUPPORTED))
+		return;
+
+	for_each_output_queue(irq_ptr, q, i) {
+		if (qdio_outbound_q_done(q))
+			continue;
+		if (need_siga_sync(q) && need_siga_sync_out_after_pci(q))
+			qdio_siga_sync_q(q);
+		qdio_tasklet_schedule(q);
+	}
+}
+
+static void qdio_handle_activate_check(struct ccw_device *cdev,
+				unsigned long intparm, int cstat, int dstat)
+{
+	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
+	struct qdio_q *q;
+	int count;
+
+	DBF_ERROR("%4x ACT CHECK", irq_ptr->schid.sch_no);
+	DBF_ERROR("intp :%lx", intparm);
+	DBF_ERROR("ds: %2x cs:%2x", dstat, cstat);
+
+	if (irq_ptr->nr_input_qs) {
+		q = irq_ptr->input_qs[0];
+	} else if (irq_ptr->nr_output_qs) {
+		q = irq_ptr->output_qs[0];
+	} else {
+		dump_stack();
+		goto no_handler;
+	}
+
+	count = sub_buf(q->first_to_check, q->first_to_kick);
+	q->handler(q->irq_ptr->cdev, QDIO_ERROR_ACTIVATE,
+		   q->nr, q->first_to_kick, count, irq_ptr->int_parm);
+no_handler:
+	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_STOPPED);
+	/*
+	 * In case of z/VM LGR (Live Guest Relocation) QDIO recovery will happen.
+	 * Therefore we call the LGR detection function here.
+	 */
+	lgr_info_log();
+}
+
+static void qdio_establish_handle_irq(struct ccw_device *cdev, int cstat,
+				      int dstat)
+{
+	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
+
+	DBF_DEV_EVENT(DBF_INFO, irq_ptr, "qest irq");
+
+	if (cstat)
+		goto error;
+	if (dstat & ~(DEV_STAT_DEV_END | DEV_STAT_CHN_END))
+		goto error;
+	if (!(dstat & DEV_STAT_DEV_END))
+		goto error;
+	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ESTABLISHED);
+	return;
+
+error:
+	DBF_ERROR("%4x EQ:error", irq_ptr->schid.sch_no);
+	DBF_ERROR("ds: %2x cs:%2x", dstat, cstat);
+	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR);
+}
+
+/* qdio interrupt handler */
+void qdio_int_handler(struct ccw_device *cdev, unsigned long intparm,
+		      struct irb *irb)
+{
+	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
+	struct subchannel_id schid;
+	int cstat, dstat;
+
+	if (!intparm || !irq_ptr) {
+		ccw_device_get_schid(cdev, &schid);
+		DBF_ERROR("qint:%4x", schid.sch_no);
+		return;
+	}
+
+	if (irq_ptr->perf_stat_enabled)
+		irq_ptr->perf_stat.qdio_int++;
+
+	if (IS_ERR(irb)) {
+		DBF_ERROR("%4x IO error", irq_ptr->schid.sch_no);
+		qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR);
+		wake_up(&cdev->private->wait_q);
+		return;
+	}
+	qdio_irq_check_sense(irq_ptr, irb);
+	cstat = irb->scsw.cmd.cstat;
+	dstat = irb->scsw.cmd.dstat;
+
+	switch (irq_ptr->state) {
+	case QDIO_IRQ_STATE_INACTIVE:
+		qdio_establish_handle_irq(cdev, cstat, dstat);
+		break;
+	case QDIO_IRQ_STATE_CLEANUP:
+		qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
+		break;
+	case QDIO_IRQ_STATE_ESTABLISHED:
+	case QDIO_IRQ_STATE_ACTIVE:
+		if (cstat & SCHN_STAT_PCI) {
+			qdio_int_handler_pci(irq_ptr);
+			return;
+		}
+		if (cstat || dstat)
+			qdio_handle_activate_check(cdev, intparm, cstat,
+						   dstat);
+		break;
+	case QDIO_IRQ_STATE_STOPPED:
+		break;
+	default:
+		WARN_ON_ONCE(1);
+	}
+	wake_up(&cdev->private->wait_q);
+}
+
+/**
+ * qdio_get_ssqd_desc - get qdio subchannel description
+ * @cdev: ccw device to get description for
+ * @data: where to store the ssqd
+ *
+ * Returns 0 or an error code. The results of the chsc are stored in the
+ * specified structure.
+ */
+int qdio_get_ssqd_desc(struct ccw_device *cdev,
+		       struct qdio_ssqd_desc *data)
+{
+	struct subchannel_id schid;
+
+	if (!cdev || !cdev->private)
+		return -EINVAL;
+
+	ccw_device_get_schid(cdev, &schid);
+	DBF_EVENT("get ssqd:%4x", schid.sch_no);
+	return qdio_setup_get_ssqd(NULL, &schid, data);
+}
+EXPORT_SYMBOL_GPL(qdio_get_ssqd_desc);
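+
+/*
+ * Usage sketch (hypothetical caller, not part of this file): a driver
+ * that wants to inspect the subchannel's QDIO characteristics before
+ * queue setup might do:
+ *
+ *	struct qdio_ssqd_desc ssqd;
+ *
+ *	rc = qdio_get_ssqd_desc(cdev, &ssqd);
+ *	if (!rc)
+ *		pr_debug("qdioac1 %x\n", ssqd.qdioac1);
+ */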
+
+static void qdio_shutdown_queues(struct ccw_device *cdev)
+{
+	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
+	struct qdio_q *q;
+	int i;
+
+	for_each_input_queue(irq_ptr, q, i)
+		tasklet_kill(&q->tasklet);
+
+	for_each_output_queue(irq_ptr, q, i) {
+		del_timer_sync(&q->u.out.timer);
+		tasklet_kill(&q->tasklet);
+	}
+}
+
+/**
+ * qdio_shutdown - shut down a qdio subchannel
+ * @cdev: associated ccw device
+ * @how: use halt or clear to shut down
+ */
+int qdio_shutdown(struct ccw_device *cdev, int how)
+{
+	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
+	struct subchannel_id schid;
+	int rc;
+
+	if (!irq_ptr)
+		return -ENODEV;
+
+	WARN_ON_ONCE(irqs_disabled());
+	ccw_device_get_schid(cdev, &schid);
+	DBF_EVENT("qshutdown:%4x", schid.sch_no);
+
+	mutex_lock(&irq_ptr->setup_mutex);
+	/*
+	 * Subchannel was already shot down. We cannot prevent being called
+	 * twice since cio may trigger a shutdown asynchronously.
+	 */
+	if (irq_ptr->state == QDIO_IRQ_STATE_INACTIVE) {
+		mutex_unlock(&irq_ptr->setup_mutex);
+		return 0;
+	}
+
+	/*
+	 * Indicate that the device is going down. Scheduling the queue
+	 * tasklets is forbidden from here on.
+	 */
+	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_STOPPED);
+
+	tiqdio_remove_input_queues(irq_ptr);
+	qdio_shutdown_queues(cdev);
+	qdio_shutdown_debug_entries(irq_ptr);
+
+	/* cleanup subchannel */
+	spin_lock_irq(get_ccwdev_lock(cdev));
+
+	if (how & QDIO_FLAG_CLEANUP_USING_CLEAR)
+		rc = ccw_device_clear(cdev, QDIO_DOING_CLEANUP);
+	else
+		/* default behaviour is halt */
+		rc = ccw_device_halt(cdev, QDIO_DOING_CLEANUP);
+	if (rc) {
+		DBF_ERROR("%4x SHUTD ERR", irq_ptr->schid.sch_no);
+		DBF_ERROR("rc:%4d", rc);
+		goto no_cleanup;
+	}
+
+	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_CLEANUP);
+	spin_unlock_irq(get_ccwdev_lock(cdev));
+	wait_event_interruptible_timeout(cdev->private->wait_q,
+		irq_ptr->state == QDIO_IRQ_STATE_INACTIVE ||
+		irq_ptr->state == QDIO_IRQ_STATE_ERR,
+		10 * HZ);
+	spin_lock_irq(get_ccwdev_lock(cdev));
+
+no_cleanup:
+	qdio_shutdown_thinint(irq_ptr);
+
+	/* restore interrupt handler */
+	if ((void *)cdev->handler == (void *)qdio_int_handler) {
+		cdev->handler = irq_ptr->orig_handler;
+		cdev->private->intparm = 0;
+	}
+	spin_unlock_irq(get_ccwdev_lock(cdev));
+
+	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
+	mutex_unlock(&irq_ptr->setup_mutex);
+	return rc;
+}
+EXPORT_SYMBOL_GPL(qdio_shutdown);
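+
+/*
+ * Teardown sketch (hypothetical error-recovery path): a driver giving up
+ * its data connection typically shuts down with clear and then releases
+ * the qdio data structures:
+ *
+ *	qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
+ *	qdio_free(cdev);
+ */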
+
+/**
+ * qdio_free - free data structures for a qdio subchannel
+ * @cdev: associated ccw device
+ */
+int qdio_free(struct ccw_device *cdev)
+{
+	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
+	struct subchannel_id schid;
+
+	if (!irq_ptr)
+		return -ENODEV;
+
+	ccw_device_get_schid(cdev, &schid);
+	DBF_EVENT("qfree:%4x", schid.sch_no);
+	DBF_DEV_EVENT(DBF_ERR, irq_ptr, "dbf abandoned");
+	mutex_lock(&irq_ptr->setup_mutex);
+
+	irq_ptr->debug_area = NULL;
+	cdev->private->qdio_data = NULL;
+	mutex_unlock(&irq_ptr->setup_mutex);
+
+	qdio_release_memory(irq_ptr);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(qdio_free);
+
+/**
+ * qdio_allocate - allocate qdio queues and associated data
+ * @init_data: initialization data
+ */
+int qdio_allocate(struct qdio_initialize *init_data)
+{
+	struct subchannel_id schid;
+	struct qdio_irq *irq_ptr;
+
+	ccw_device_get_schid(init_data->cdev, &schid);
+	DBF_EVENT("qallocate:%4x", schid.sch_no);
+
+	if ((init_data->no_input_qs && !init_data->input_handler) ||
+	    (init_data->no_output_qs && !init_data->output_handler))
+		return -EINVAL;
+
+	if ((init_data->no_input_qs > QDIO_MAX_QUEUES_PER_IRQ) ||
+	    (init_data->no_output_qs > QDIO_MAX_QUEUES_PER_IRQ))
+		return -EINVAL;
+
+	if ((!init_data->input_sbal_addr_array) ||
+	    (!init_data->output_sbal_addr_array))
+		return -EINVAL;
+
+	/* irq_ptr must be in GFP_DMA since it contains ccw1.cda */
+	irq_ptr = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
+	if (!irq_ptr)
+		goto out_err;
+
+	mutex_init(&irq_ptr->setup_mutex);
+	if (qdio_allocate_dbf(init_data, irq_ptr))
+		goto out_rel;
+
+	/*
+	 * Allocate a page for the chsc calls in qdio_establish.
+	 * Must be pre-allocated since a zfcp recovery will call
+	 * qdio_establish. In case of low memory and swap on a zfcp disk
+	 * we may not be able to allocate memory otherwise.
+	 */
+	irq_ptr->chsc_page = get_zeroed_page(GFP_KERNEL);
+	if (!irq_ptr->chsc_page)
+		goto out_rel;
+
+	/* qdr is used in ccw1.cda which is u32 */
+	irq_ptr->qdr = (struct qdr *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
+	if (!irq_ptr->qdr)
+		goto out_rel;
+
+	if (qdio_allocate_qs(irq_ptr, init_data->no_input_qs,
+			     init_data->no_output_qs))
+		goto out_rel;
+
+	init_data->cdev->private->qdio_data = irq_ptr;
+	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
+	return 0;
+out_rel:
+	qdio_release_memory(irq_ptr);
+out_err:
+	return -ENOMEM;
+}
+EXPORT_SYMBOL_GPL(qdio_allocate);
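+
+/*
+ * Setup sketch (hypothetical driver code; the handler and array names are
+ * placeholders): the caller fills a struct qdio_initialize before calling
+ * qdio_allocate(). Only the fields validated above are shown:
+ *
+ *	struct qdio_initialize init = {
+ *		.cdev			= cdev,
+ *		.no_input_qs		= 1,
+ *		.no_output_qs		= 1,
+ *		.input_handler		= my_input_handler,
+ *		.output_handler		= my_output_handler,
+ *		.input_sbal_addr_array	= in_sbals,
+ *		.output_sbal_addr_array	= out_sbals,
+ *	};
+ *
+ *	rc = qdio_allocate(&init);
+ */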
+
+static void qdio_detect_hsicq(struct qdio_irq *irq_ptr)
+{
+	struct qdio_q *q = irq_ptr->input_qs[0];
+	int i, use_cq = 0;
+
+	if (irq_ptr->nr_input_qs > 1 && queue_type(q) == QDIO_IQDIO_QFMT)
+		use_cq = 1;
+
+	for_each_output_queue(irq_ptr, q, i) {
+		if (use_cq) {
+			if (qdio_enable_async_operation(&q->u.out) < 0) {
+				use_cq = 0;
+				continue;
+			}
+		} else
+			qdio_disable_async_operation(&q->u.out);
+	}
+	DBF_EVENT("use_cq:%d", use_cq);
+}
+
+/**
+ * qdio_establish - establish queues on a qdio subchannel
+ * @init_data: initialization data
+ */
+int qdio_establish(struct qdio_initialize *init_data)
+{
+	struct ccw_device *cdev = init_data->cdev;
+	struct subchannel_id schid;
+	struct qdio_irq *irq_ptr;
+	int rc;
+
+	ccw_device_get_schid(cdev, &schid);
+	DBF_EVENT("qestablish:%4x", schid.sch_no);
+
+	irq_ptr = cdev->private->qdio_data;
+	if (!irq_ptr)
+		return -ENODEV;
+
+	mutex_lock(&irq_ptr->setup_mutex);
+	qdio_setup_irq(init_data);
+
+	rc = qdio_establish_thinint(irq_ptr);
+	if (rc) {
+		mutex_unlock(&irq_ptr->setup_mutex);
+		qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
+		return rc;
+	}
+
+	/* establish q */
+	irq_ptr->ccw.cmd_code = irq_ptr->equeue.cmd;
+	irq_ptr->ccw.flags = CCW_FLAG_SLI;
+	irq_ptr->ccw.count = irq_ptr->equeue.count;
+	irq_ptr->ccw.cda = (u32)((addr_t)irq_ptr->qdr);
+
+	spin_lock_irq(get_ccwdev_lock(cdev));
+	ccw_device_set_options_mask(cdev, 0);
+
+	rc = ccw_device_start(cdev, &irq_ptr->ccw, QDIO_DOING_ESTABLISH, 0, 0);
+	spin_unlock_irq(get_ccwdev_lock(cdev));
+	if (rc) {
+		DBF_ERROR("%4x est IO ERR", irq_ptr->schid.sch_no);
+		DBF_ERROR("rc:%4x", rc);
+		mutex_unlock(&irq_ptr->setup_mutex);
+		qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
+		return rc;
+	}
+
+	wait_event_interruptible_timeout(cdev->private->wait_q,
+		irq_ptr->state == QDIO_IRQ_STATE_ESTABLISHED ||
+		irq_ptr->state == QDIO_IRQ_STATE_ERR, HZ);
+
+	if (irq_ptr->state != QDIO_IRQ_STATE_ESTABLISHED) {
+		mutex_unlock(&irq_ptr->setup_mutex);
+		qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
+		return -EIO;
+	}
+
+	qdio_setup_ssqd_info(irq_ptr);
+
+	qdio_detect_hsicq(irq_ptr);
+
+	/* qebsm is now setup if available, initialize buffer states */
+	qdio_init_buf_states(irq_ptr);
+
+	mutex_unlock(&irq_ptr->setup_mutex);
+	qdio_print_subchannel_info(irq_ptr, cdev);
+	qdio_setup_debug_entries(irq_ptr, cdev);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(qdio_establish);
+
+/**
+ * qdio_activate - activate queues on a qdio subchannel
+ * @cdev: associated cdev
+ */
+int qdio_activate(struct ccw_device *cdev)
+{
+	struct subchannel_id schid;
+	struct qdio_irq *irq_ptr;
+	int rc;
+
+	ccw_device_get_schid(cdev, &schid);
+	DBF_EVENT("qactivate:%4x", schid.sch_no);
+
+	irq_ptr = cdev->private->qdio_data;
+	if (!irq_ptr)
+		return -ENODEV;
+
+	mutex_lock(&irq_ptr->setup_mutex);
+	if (irq_ptr->state == QDIO_IRQ_STATE_INACTIVE) {
+		rc = -EBUSY;
+		goto out;
+	}
+
+	irq_ptr->ccw.cmd_code = irq_ptr->aqueue.cmd;
+	irq_ptr->ccw.flags = CCW_FLAG_SLI;
+	irq_ptr->ccw.count = irq_ptr->aqueue.count;
+	irq_ptr->ccw.cda = 0;
+
+	spin_lock_irq(get_ccwdev_lock(cdev));
+	ccw_device_set_options(cdev, CCWDEV_REPORT_ALL);
+
+	rc = ccw_device_start(cdev, &irq_ptr->ccw, QDIO_DOING_ACTIVATE,
+			      0, DOIO_DENY_PREFETCH);
+	spin_unlock_irq(get_ccwdev_lock(cdev));
+	if (rc) {
+		DBF_ERROR("%4x act IO ERR", irq_ptr->schid.sch_no);
+		DBF_ERROR("rc:%4x", rc);
+		goto out;
+	}
+
+	if (is_thinint_irq(irq_ptr))
+		tiqdio_add_input_queues(irq_ptr);
+
+	/* wait for subchannel to become active */
+	msleep(5);
+
+	switch (irq_ptr->state) {
+	case QDIO_IRQ_STATE_STOPPED:
+	case QDIO_IRQ_STATE_ERR:
+		rc = -EIO;
+		break;
+	default:
+		qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ACTIVE);
+		rc = 0;
+	}
+out:
+	mutex_unlock(&irq_ptr->setup_mutex);
+	return rc;
+}
+EXPORT_SYMBOL_GPL(qdio_activate);
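+
+/*
+ * Bring-up sketch (hypothetical, continuing the qdio_allocate() example
+ * above; the error labels are placeholders): establish and activate the
+ * queues. Note that qdio_establish() already shuts the subchannel down
+ * again on failure:
+ *
+ *	rc = qdio_establish(&init);
+ *	if (rc)
+ *		goto err_free;
+ *	rc = qdio_activate(cdev);
+ *	if (rc)
+ *		goto err_shutdown;
+ */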
+
+static inline int buf_in_between(int bufnr, int start, int count)
+{
+	int end = add_buf(start, count);
+
+	if (end > start) {
+		if (bufnr >= start && bufnr < end)
+			return 1;
+		else
+			return 0;
+	}
+
+	/* wrap-around case */
+	if ((bufnr >= start && bufnr <= QDIO_MAX_BUFFERS_PER_Q) ||
+	    (bufnr < end))
+		return 1;
+	else
+		return 0;
+}
+
+/**
+ * handle_inbound - reset processed input buffers
+ * @q: queue containing the buffers
+ * @callflags: flags
+ * @bufnr: first buffer to process
+ * @count: how many buffers are emptied
+ */
+static int handle_inbound(struct qdio_q *q, unsigned int callflags,
+			  int bufnr, int count)
+{
+	int diff;
+
+	qperf_inc(q, inbound_call);
+
+	if (!q->u.in.polling)
+		goto set;
+
+	/* protect against stop polling setting an ACK for an emptied slsb */
+	if (count == QDIO_MAX_BUFFERS_PER_Q) {
+		/* overwriting everything, just delete polling status */
+		q->u.in.polling = 0;
+		q->u.in.ack_count = 0;
+		goto set;
+	} else if (buf_in_between(q->u.in.ack_start, bufnr, count)) {
+		if (is_qebsm(q)) {
+			/* partial overwrite, just update ack_start */
+			diff = add_buf(bufnr, count);
+			diff = sub_buf(diff, q->u.in.ack_start);
+			q->u.in.ack_count -= diff;
+			if (q->u.in.ack_count <= 0) {
+				q->u.in.polling = 0;
+				q->u.in.ack_count = 0;
+				goto set;
+			}
+			q->u.in.ack_start = add_buf(q->u.in.ack_start, diff);
+		}
+		else
+			/* the only ACK will be deleted, so stop polling */
+			q->u.in.polling = 0;
+	}
+
+set:
+	count = set_buf_states(q, bufnr, SLSB_CU_INPUT_EMPTY, count);
+	atomic_add(count, &q->nr_buf_used);
+
+	if (need_siga_in(q))
+		return qdio_siga_input(q);
+
+	return 0;
+}
+
+/**
+ * handle_outbound - process filled outbound buffers
+ * @q: queue containing the buffers
+ * @callflags: flags
+ * @bufnr: first buffer to process
+ * @count: how many buffers are filled
+ */
+static int handle_outbound(struct qdio_q *q, unsigned int callflags,
+			   int bufnr, int count)
+{
+	unsigned char state = 0;
+	int used, rc = 0;
+
+	qperf_inc(q, outbound_call);
+
+	count = set_buf_states(q, bufnr, SLSB_CU_OUTPUT_PRIMED, count);
+	used = atomic_add_return(count, &q->nr_buf_used);
+
+	if (used == QDIO_MAX_BUFFERS_PER_Q)
+		qperf_inc(q, outbound_queue_full);
+
+	if (callflags & QDIO_FLAG_PCI_OUT) {
+		q->u.out.pci_out_enabled = 1;
+		qperf_inc(q, pci_request_int);
+	} else
+		q->u.out.pci_out_enabled = 0;
+
+	if (queue_type(q) == QDIO_IQDIO_QFMT) {
+		unsigned long phys_aob = 0;
+
+		/* One SIGA-W per buffer required for unicast HSI */
+		WARN_ON_ONCE(count > 1 && !multicast_outbound(q));
+
+		phys_aob = qdio_aob_for_buffer(&q->u.out, bufnr);
+
+		rc = qdio_kick_outbound_q(q, phys_aob);
+	} else if (need_siga_sync(q)) {
+		rc = qdio_siga_sync_q(q);
+	} else {
+		/* try to fast requeue buffers */
+		get_buf_state(q, prev_buf(bufnr), &state, 0);
+		if (state != SLSB_CU_OUTPUT_PRIMED)
+			rc = qdio_kick_outbound_q(q, 0);
+		else
+			qperf_inc(q, fast_requeue);
+	}
+
+	/* in case of SIGA errors we must process the error immediately */
+	if (used >= q->u.out.scan_threshold || rc)
+		qdio_tasklet_schedule(q);
+	else
+		/* free the SBALs in case of no further traffic */
+		if (!timer_pending(&q->u.out.timer) &&
+		    likely(q->irq_ptr->state == QDIO_IRQ_STATE_ACTIVE))
+			mod_timer(&q->u.out.timer, jiffies + HZ);
+	return rc;
+}
+
+/**
+ * do_QDIO - process input or output buffers
+ * @cdev: associated ccw_device for the qdio subchannel
+ * @callflags: input or output and special flags from the program
+ * @q_nr: queue number
+ * @bufnr: buffer number
+ * @count: how many buffers to process
+ */
+int do_QDIO(struct ccw_device *cdev, unsigned int callflags,
+	    int q_nr, unsigned int bufnr, unsigned int count)
+{
+	struct qdio_irq *irq_ptr;
+
+	if (bufnr >= QDIO_MAX_BUFFERS_PER_Q || count > QDIO_MAX_BUFFERS_PER_Q)
+		return -EINVAL;
+
+	irq_ptr = cdev->private->qdio_data;
+	if (!irq_ptr)
+		return -ENODEV;
+
+	DBF_DEV_EVENT(DBF_INFO, irq_ptr,
+		      "do%02x b:%02x c:%02x", callflags, bufnr, count);
+
+	if (irq_ptr->state != QDIO_IRQ_STATE_ACTIVE)
+		return -EIO;
+	if (!count)
+		return 0;
+	if (callflags & QDIO_FLAG_SYNC_INPUT)
+		return handle_inbound(irq_ptr->input_qs[q_nr],
+				      callflags, bufnr, count);
+	else if (callflags & QDIO_FLAG_SYNC_OUTPUT)
+		return handle_outbound(irq_ptr->output_qs[q_nr],
+				       callflags, bufnr, count);
+	return -EINVAL;
+}
+EXPORT_SYMBOL_GPL(do_QDIO);
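+
+/*
+ * Usage sketch (hypothetical): handing one emptied buffer back to input
+ * queue 0, or submitting one filled buffer on output queue 0:
+ *
+ *	rc = do_QDIO(cdev, QDIO_FLAG_SYNC_INPUT, 0, bufnr, 1);
+ *	rc = do_QDIO(cdev, QDIO_FLAG_SYNC_OUTPUT, 0, bufnr, 1);
+ */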
+
+/**
+ * qdio_start_irq - process input buffers
+ * @cdev: associated ccw_device for the qdio subchannel
+ * @nr: input queue number
+ *
+ * Return codes
+ *   0 - success
+ *   1 - irqs not started since new data is available
+ */
+int qdio_start_irq(struct ccw_device *cdev, int nr)
+{
+	struct qdio_q *q;
+	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
+
+	if (!irq_ptr)
+		return -ENODEV;
+	q = irq_ptr->input_qs[nr];
+
+	clear_nonshared_ind(irq_ptr);
+	qdio_stop_polling(q);
+	clear_bit(QDIO_QUEUE_IRQS_DISABLED, &q->u.in.queue_irq_state);
+
+	/*
+	 * We need to check again to not lose initiative after
+	 * resetting the ACK state.
+	 */
+	if (test_nonshared_ind(irq_ptr))
+		goto rescan;
+	if (!qdio_inbound_q_done(q))
+		goto rescan;
+	return 0;
+
+rescan:
+	if (test_and_set_bit(QDIO_QUEUE_IRQS_DISABLED,
+			     &q->u.in.queue_irq_state))
+		return 0;
+	else
+		return 1;
+}
+EXPORT_SYMBOL(qdio_start_irq);
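+
+/*
+ * Usage sketch (hypothetical): a driver leaving its polling loop
+ * re-enables the interrupt and must resume polling if the return code
+ * is 1, since new data may have arrived in the meantime:
+ *
+ *	rc = qdio_start_irq(cdev, 0);
+ *	if (rc == 1)
+ *		goto poll_again;
+ */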
+
+/**
+ * qdio_get_next_buffers - process input buffers
+ * @cdev: associated ccw_device for the qdio subchannel
+ * @nr: input queue number
+ * @bufnr: first filled buffer number
+ * @error: buffers are in error state
+ *
+ * Return codes
+ *   < 0 - error
+ *   = 0 - no new buffers found
+ *   > 0 - number of processed buffers
+ */
+int qdio_get_next_buffers(struct ccw_device *cdev, int nr, int *bufnr,
+			  int *error)
+{
+	struct qdio_q *q;
+	int start, end;
+	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
+
+	if (!irq_ptr)
+		return -ENODEV;
+	q = irq_ptr->input_qs[nr];
+
+	/*
+	 * Cannot rely on automatic sync after interrupt since queues may
+	 * also be examined without interrupt.
+	 */
+	if (need_siga_sync(q))
+		qdio_sync_queues(q);
+
+	/* check the PCI capable outbound queues. */
+	qdio_check_outbound_after_thinint(q);
+
+	if (!qdio_inbound_q_moved(q))
+		return 0;
+
+	/* Note: upper-layer MUST stop processing immediately here ... */
+	if (unlikely(q->irq_ptr->state != QDIO_IRQ_STATE_ACTIVE))
+		return -EIO;
+
+	start = q->first_to_kick;
+	end = q->first_to_check;
+	*bufnr = start;
+	*error = q->qdio_error;
+
+	/* for the next time */
+	q->first_to_kick = end;
+	q->qdio_error = 0;
+	return sub_buf(end, start);
+}
+EXPORT_SYMBOL(qdio_get_next_buffers);
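+
+/*
+ * Polling-loop sketch (hypothetical; my_consume() is a placeholder for
+ * the driver's buffer processing): with interrupts disabled via
+ * qdio_stop_irq(), input queue 0 can be drained like this:
+ *
+ *	int bufnr, error, n;
+ *
+ *	while ((n = qdio_get_next_buffers(cdev, 0, &bufnr, &error)) > 0)
+ *		my_consume(bufnr, n, error);
+ */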
+
+/**
+ * qdio_stop_irq - disable interrupt processing for the device
+ * @cdev: associated ccw_device for the qdio subchannel
+ * @nr: input queue number
+ *
+ * Return codes
+ *   0 - interrupts were already disabled
+ *   1 - interrupts successfully disabled
+ */
+int qdio_stop_irq(struct ccw_device *cdev, int nr)
+{
+	struct qdio_q *q;
+	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
+
+	if (!irq_ptr)
+		return -ENODEV;
+	q = irq_ptr->input_qs[nr];
+
+	if (test_and_set_bit(QDIO_QUEUE_IRQS_DISABLED,
+			     &q->u.in.queue_irq_state))
+		return 0;
+	else
+		return 1;
+}
+EXPORT_SYMBOL(qdio_stop_irq);
+
+/**
+ * qdio_pnso_brinfo() - perform network subchannel op #0 - bridge info.
+ * @schid:		Subchannel ID.
+ * @cnc:		Boolean Change-Notification Control
+ * @response:		Response code will be stored at this address
+ * @cb: 		Callback function will be executed for each element
+ *			of the address list
+ * @priv:		Pointer to pass to the callback function.
+ *
+ * Performs "Store-network-bridging-information list" operation and calls
+ * the callback function for every entry in the list. If "change-
+ * notification-control" is set, further changes in the address list
+ * will be reported via the IPA command.
+ */
+int qdio_pnso_brinfo(struct subchannel_id schid,
+		int cnc, u16 *response,
+		void (*cb)(void *priv, enum qdio_brinfo_entry_type type,
+				void *entry),
+		void *priv)
+{
+	struct chsc_pnso_area *rr;
+	int rc;
+	u32 prev_instance = 0;
+	int isfirstblock = 1;
+	int i, size, elems;
+
+	rr = (struct chsc_pnso_area *)get_zeroed_page(GFP_KERNEL);
+	if (rr == NULL)
+		return -ENOMEM;
+	do {
+		/* on the first iteration, naihdr.resume_token will be zero */
+		rc = chsc_pnso_brinfo(schid, rr, rr->naihdr.resume_token, cnc);
+		if (rc != 0 && rc != -EBUSY)
+			goto out;
+		if (rr->response.code != 1) {
+			rc = -EIO;
+			continue;
+		} else
+			rc = 0;
+
+		if (cb == NULL)
+			continue;
+
+		size = rr->naihdr.naids;
+		elems = (rr->response.length -
+				sizeof(struct chsc_header) -
+				sizeof(struct chsc_brinfo_naihdr)) /
+				size;
+
+		if (!isfirstblock && (rr->naihdr.instance != prev_instance)) {
+			/*
+			 * Inform the caller that they need to scrap
+			 * the data that was already reported via cb.
+			 */
+			rc = -EAGAIN;
+			break;
+		}
+		isfirstblock = 0;
+		prev_instance = rr->naihdr.instance;
+		for (i = 0; i < elems; i++)
+			switch (size) {
+			case sizeof(struct qdio_brinfo_entry_l3_ipv6):
+				(*cb)(priv, l3_ipv6_addr,
+						&rr->entries.l3_ipv6[i]);
+				break;
+			case sizeof(struct qdio_brinfo_entry_l3_ipv4):
+				(*cb)(priv, l3_ipv4_addr,
+						&rr->entries.l3_ipv4[i]);
+				break;
+			case sizeof(struct qdio_brinfo_entry_l2):
+				(*cb)(priv, l2_addr_lnid,
+						&rr->entries.l2[i]);
+				break;
+			default:
+				WARN_ON_ONCE(1);
+				rc = -EIO;
+				goto out;
+			}
+	} while (rr->response.code == 0x0107 ||  /* channel busy */
+		  (rr->response.code == 1 && /* list stored */
+		   /* resume token is non-zero => list incomplete */
+		   (rr->naihdr.resume_token.t1 || rr->naihdr.resume_token.t2)));
+	(*response) = rr->response.code;
+
+out:
+	free_page((unsigned long)rr);
+	return rc;
+}
+EXPORT_SYMBOL_GPL(qdio_pnso_brinfo);
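+
+/*
+ * Callback sketch (hypothetical; my_brinfo_cb() and handle() are
+ * placeholders): the callback is invoked once per list entry and casts
+ * based on the reported entry type:
+ *
+ *	static void my_brinfo_cb(void *priv,
+ *				 enum qdio_brinfo_entry_type type,
+ *				 void *entry)
+ *	{
+ *		if (type == l2_addr_lnid)
+ *			handle((struct qdio_brinfo_entry_l2 *)entry);
+ *	}
+ *
+ *	rc = qdio_pnso_brinfo(schid, 1, &response, my_brinfo_cb, NULL);
+ */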
+
+static int __init init_QDIO(void)
+{
+	int rc;
+
+	rc = qdio_debug_init();
+	if (rc)
+		return rc;
+	rc = qdio_setup_init();
+	if (rc)
+		goto out_debug;
+	rc = tiqdio_allocate_memory();
+	if (rc)
+		goto out_cache;
+	rc = tiqdio_register_thinints();
+	if (rc)
+		goto out_ti;
+	return 0;
+
+out_ti:
+	tiqdio_free_memory();
+out_cache:
+	qdio_setup_exit();
+out_debug:
+	qdio_debug_exit();
+	return rc;
+}
+
+static void __exit exit_QDIO(void)
+{
+	tiqdio_unregister_thinints();
+	tiqdio_free_memory();
+	qdio_setup_exit();
+	qdio_debug_exit();
+}
+
+module_init(init_QDIO);
+module_exit(exit_QDIO);
diff --git a/drivers/s390/cio/qdio_setup.c b/drivers/s390/cio/qdio_setup.c
new file mode 100644
index 0000000..78f1be4
--- /dev/null
+++ b/drivers/s390/cio/qdio_setup.c
@@ -0,0 +1,593 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * qdio queue initialization
+ *
+ * Copyright IBM Corp. 2008
+ * Author(s): Jan Glauber <jang@linux.vnet.ibm.com>
+ */
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/export.h>
+#include <asm/qdio.h>
+
+#include "cio.h"
+#include "css.h"
+#include "device.h"
+#include "ioasm.h"
+#include "chsc.h"
+#include "qdio.h"
+#include "qdio_debug.h"
+
+#define QBUFF_PER_PAGE (PAGE_SIZE / sizeof(struct qdio_buffer))
+
+static struct kmem_cache *qdio_q_cache;
+static struct kmem_cache *qdio_aob_cache;
+
+struct qaob *qdio_allocate_aob(void)
+{
+	return kmem_cache_zalloc(qdio_aob_cache, GFP_ATOMIC);
+}
+EXPORT_SYMBOL_GPL(qdio_allocate_aob);
+
+void qdio_release_aob(struct qaob *aob)
+{
+	kmem_cache_free(qdio_aob_cache, aob);
+}
+EXPORT_SYMBOL_GPL(qdio_release_aob);
+
+/**
+ * qdio_free_buffers() - free qdio buffers
+ * @buf: array of pointers to qdio buffers
+ * @count: number of qdio buffers to free
+ */
+void qdio_free_buffers(struct qdio_buffer **buf, unsigned int count)
+{
+	int pos;
+
+	for (pos = 0; pos < count; pos += QBUFF_PER_PAGE)
+		free_page((unsigned long) buf[pos]);
+}
+EXPORT_SYMBOL_GPL(qdio_free_buffers);
+
+/**
+ * qdio_alloc_buffers() - allocate qdio buffers
+ * @buf: array of pointers to qdio buffers
+ * @count: number of qdio buffers to allocate
+ */
+int qdio_alloc_buffers(struct qdio_buffer **buf, unsigned int count)
+{
+	int pos;
+
+	for (pos = 0; pos < count; pos += QBUFF_PER_PAGE) {
+		buf[pos] = (void *) get_zeroed_page(GFP_KERNEL);
+		if (!buf[pos]) {
+			qdio_free_buffers(buf, count);
+			return -ENOMEM;
+		}
+	}
+	for (pos = 0; pos < count; pos++)
+		if (pos % QBUFF_PER_PAGE)
+			buf[pos] = buf[pos - 1] + 1;
+	return 0;
+}
+EXPORT_SYMBOL_GPL(qdio_alloc_buffers);
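+
+/*
+ * Usage sketch (hypothetical): buffers are allocated page-wise, so the
+ * array is typically sized QDIO_MAX_BUFFERS_PER_Q and freed with the
+ * same count:
+ *
+ *	struct qdio_buffer *bufs[QDIO_MAX_BUFFERS_PER_Q];
+ *
+ *	rc = qdio_alloc_buffers(bufs, QDIO_MAX_BUFFERS_PER_Q);
+ *	...
+ *	qdio_free_buffers(bufs, QDIO_MAX_BUFFERS_PER_Q);
+ */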
+
+/**
+ * qdio_reset_buffers() - reset qdio buffers
+ * @buf: array of pointers to qdio buffers
+ * @count: number of qdio buffers that will be zeroed
+ */
+void qdio_reset_buffers(struct qdio_buffer **buf, unsigned int count)
+{
+	int pos;
+
+	for (pos = 0; pos < count; pos++)
+		memset(buf[pos], 0, sizeof(struct qdio_buffer));
+}
+EXPORT_SYMBOL_GPL(qdio_reset_buffers);
+
+/*
+ * qebsm is only available under 64bit but the adapter sets the feature
+ * flag anyway, so we manually override it.
+ */
+static inline int qebsm_possible(void)
+{
+	return css_general_characteristics.qebsm;
+}
+
+/*
+ * qib_param_field: pointer to 128 bytes or NULL, if no param field
+ * input/output_slib_elements: pointer to no_{input,output}_qs * 128 words
+ * of data or NULL
+ */
+static void set_impl_params(struct qdio_irq *irq_ptr,
+			    unsigned int qib_param_field_format,
+			    unsigned char *qib_param_field,
+			    unsigned long *input_slib_elements,
+			    unsigned long *output_slib_elements)
+{
+	struct qdio_q *q;
+	int i, j;
+
+	if (!irq_ptr)
+		return;
+
+	irq_ptr->qib.pfmt = qib_param_field_format;
+	if (qib_param_field)
+		memcpy(irq_ptr->qib.parm, qib_param_field,
+		       QDIO_MAX_BUFFERS_PER_Q);
+
+	if (!input_slib_elements)
+		goto output;
+
+	for_each_input_queue(irq_ptr, q, i) {
+		for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; j++)
+			q->slib->slibe[j].parms =
+				input_slib_elements[i * QDIO_MAX_BUFFERS_PER_Q + j];
+	}
+output:
+	if (!output_slib_elements)
+		return;
+
+	for_each_output_queue(irq_ptr, q, i) {
+		for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; j++)
+			q->slib->slibe[j].parms =
+				output_slib_elements[i * QDIO_MAX_BUFFERS_PER_Q + j];
+	}
+}
+
+static int __qdio_allocate_qs(struct qdio_q **irq_ptr_qs, int nr_queues)
+{
+	struct qdio_q *q;
+	int i;
+
+	for (i = 0; i < nr_queues; i++) {
+		q = kmem_cache_zalloc(qdio_q_cache, GFP_KERNEL);
+		if (!q)
+			return -ENOMEM;
+
+		q->slib = (struct slib *) __get_free_page(GFP_KERNEL);
+		if (!q->slib) {
+			kmem_cache_free(qdio_q_cache, q);
+			return -ENOMEM;
+		}
+		irq_ptr_qs[i] = q;
+	}
+	return 0;
+}
+
+int qdio_allocate_qs(struct qdio_irq *irq_ptr, int nr_input_qs, int nr_output_qs)
+{
+	int rc;
+
+	rc = __qdio_allocate_qs(irq_ptr->input_qs, nr_input_qs);
+	if (rc)
+		return rc;
+	rc = __qdio_allocate_qs(irq_ptr->output_qs, nr_output_qs);
+	return rc;
+}
+
+static void setup_queues_misc(struct qdio_q *q, struct qdio_irq *irq_ptr,
+			      qdio_handler_t *handler, int i)
+{
+	struct slib *slib = q->slib;
+
+	/* queue must be cleared for qdio_establish */
+	memset(q, 0, sizeof(*q));
+	memset(slib, 0, PAGE_SIZE);
+	q->slib = slib;
+	q->irq_ptr = irq_ptr;
+	q->mask = 1 << (31 - i);
+	q->nr = i;
+	q->handler = handler;
+}
+
+static void setup_storage_lists(struct qdio_q *q, struct qdio_irq *irq_ptr,
+				void **sbals_array, int i)
+{
+	struct qdio_q *prev;
+	int j;
+
+	DBF_HEX(&q, sizeof(void *));
+	q->sl = (struct sl *)((char *)q->slib + PAGE_SIZE / 2);
+
+	/* fill in sbal */
+	for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; j++)
+		q->sbal[j] = *sbals_array++;
+
+	/* fill in slib */
+	if (i > 0) {
+		prev = (q->is_input_q) ? irq_ptr->input_qs[i - 1]
+			: irq_ptr->output_qs[i - 1];
+		prev->slib->nsliba = (unsigned long)q->slib;
+	}
+
+	q->slib->sla = (unsigned long)q->sl;
+	q->slib->slsba = (unsigned long)&q->slsb.val[0];
+
+	/* fill in sl */
+	for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; j++)
+		q->sl->element[j].sbal = (unsigned long)q->sbal[j];
+}
+
+static void setup_queues(struct qdio_irq *irq_ptr,
+			 struct qdio_initialize *qdio_init)
+{
+	struct qdio_q *q;
+	void **input_sbal_array = qdio_init->input_sbal_addr_array;
+	void **output_sbal_array = qdio_init->output_sbal_addr_array;
+	struct qdio_outbuf_state *output_sbal_state_array =
+				  qdio_init->output_sbal_state_array;
+	int i;
+
+	for_each_input_queue(irq_ptr, q, i) {
+		DBF_EVENT("inq:%1d", i);
+		setup_queues_misc(q, irq_ptr, qdio_init->input_handler, i);
+
+		q->is_input_q = 1;
+		q->u.in.queue_start_poll = qdio_init->queue_start_poll_array ?
+				qdio_init->queue_start_poll_array[i] : NULL;
+
+		setup_storage_lists(q, irq_ptr, input_sbal_array, i);
+		input_sbal_array += QDIO_MAX_BUFFERS_PER_Q;
+
+		if (is_thinint_irq(irq_ptr)) {
+			tasklet_init(&q->tasklet, tiqdio_inbound_processing,
+				     (unsigned long) q);
+		} else {
+			tasklet_init(&q->tasklet, qdio_inbound_processing,
+				     (unsigned long) q);
+		}
+	}
+
+	for_each_output_queue(irq_ptr, q, i) {
+		DBF_EVENT("outq:%1d", i);
+		setup_queues_misc(q, irq_ptr, qdio_init->output_handler, i);
+
+		q->u.out.sbal_state = output_sbal_state_array;
+		output_sbal_state_array += QDIO_MAX_BUFFERS_PER_Q;
+
+		q->is_input_q = 0;
+		q->u.out.scan_threshold = qdio_init->scan_threshold;
+		setup_storage_lists(q, irq_ptr, output_sbal_array, i);
+		output_sbal_array += QDIO_MAX_BUFFERS_PER_Q;
+
+		tasklet_init(&q->tasklet, qdio_outbound_processing,
+			     (unsigned long) q);
+		timer_setup(&q->u.out.timer, qdio_outbound_timer, 0);
+	}
+}
+
+static void process_ac_flags(struct qdio_irq *irq_ptr, unsigned char qdioac)
+{
+	if (qdioac & AC1_SIGA_INPUT_NEEDED)
+		irq_ptr->siga_flag.input = 1;
+	if (qdioac & AC1_SIGA_OUTPUT_NEEDED)
+		irq_ptr->siga_flag.output = 1;
+	if (qdioac & AC1_SIGA_SYNC_NEEDED)
+		irq_ptr->siga_flag.sync = 1;
+	if (!(qdioac & AC1_AUTOMATIC_SYNC_ON_THININT))
+		irq_ptr->siga_flag.sync_after_ai = 1;
+	if (!(qdioac & AC1_AUTOMATIC_SYNC_ON_OUT_PCI))
+		irq_ptr->siga_flag.sync_out_after_pci = 1;
+}
+
+static void check_and_setup_qebsm(struct qdio_irq *irq_ptr,
+				  unsigned char qdioac, unsigned long token)
+{
+	if (!(irq_ptr->qib.rflags & QIB_RFLAGS_ENABLE_QEBSM))
+		goto no_qebsm;
+	if (!(qdioac & AC1_SC_QEBSM_AVAILABLE) ||
+	    (!(qdioac & AC1_SC_QEBSM_ENABLED)))
+		goto no_qebsm;
+
+	irq_ptr->sch_token = token;
+
+	DBF_EVENT("V=V:1");
+	DBF_EVENT("%8lx", irq_ptr->sch_token);
+	return;
+
+no_qebsm:
+	irq_ptr->sch_token = 0;
+	irq_ptr->qib.rflags &= ~QIB_RFLAGS_ENABLE_QEBSM;
+	DBF_EVENT("noV=V");
+}
+
+/*
+ * If there is a qdio_irq we use the chsc_page and store the information
+ * in the qdio_irq, otherwise we copy it to the specified structure.
+ */
+int qdio_setup_get_ssqd(struct qdio_irq *irq_ptr,
+			struct subchannel_id *schid,
+			struct qdio_ssqd_desc *data)
+{
+	struct chsc_ssqd_area *ssqd;
+	int rc;
+
+	DBF_EVENT("getssqd:%4x", schid->sch_no);
+	if (!irq_ptr) {
+		ssqd = (struct chsc_ssqd_area *)__get_free_page(GFP_KERNEL);
+		if (!ssqd)
+			return -ENOMEM;
+	} else {
+		ssqd = (struct chsc_ssqd_area *)irq_ptr->chsc_page;
+	}
+
+	rc = chsc_ssqd(*schid, ssqd);
+	if (rc)
+		goto out;
+
+	if (!(ssqd->qdio_ssqd.flags & CHSC_FLAG_QDIO_CAPABILITY) ||
+	    !(ssqd->qdio_ssqd.flags & CHSC_FLAG_VALIDITY) ||
+	    (ssqd->qdio_ssqd.sch != schid->sch_no))
+		rc = -EINVAL;
+
+	if (!rc)
+		memcpy(data, &ssqd->qdio_ssqd, sizeof(*data));
+
+out:
+	if (!irq_ptr)
+		free_page((unsigned long)ssqd);
+
+	return rc;
+}
+
+void qdio_setup_ssqd_info(struct qdio_irq *irq_ptr)
+{
+	unsigned char qdioac;
+	int rc;
+
+	rc = qdio_setup_get_ssqd(irq_ptr, &irq_ptr->schid, &irq_ptr->ssqd_desc);
+	if (rc) {
+		DBF_ERROR("%4x ssqd ERR", irq_ptr->schid.sch_no);
+		DBF_ERROR("rc:%x", rc);
+		/* all flags set, worst case */
+		qdioac = AC1_SIGA_INPUT_NEEDED | AC1_SIGA_OUTPUT_NEEDED |
+			 AC1_SIGA_SYNC_NEEDED;
+	} else
+		qdioac = irq_ptr->ssqd_desc.qdioac1;
+
+	check_and_setup_qebsm(irq_ptr, qdioac, irq_ptr->ssqd_desc.sch_token);
+	process_ac_flags(irq_ptr, qdioac);
+	DBF_EVENT("ac 1:%2x 2:%4x", qdioac, irq_ptr->ssqd_desc.qdioac2);
+	DBF_EVENT("3:%4x qib:%4x", irq_ptr->ssqd_desc.qdioac3, irq_ptr->qib.ac);
+}
+
+void qdio_release_memory(struct qdio_irq *irq_ptr)
+{
+	struct qdio_q *q;
+	int i;
+
+	/*
+	 * Must check queue array manually since irq_ptr->nr_input_qs /
+	 * irq_ptr->nr_output_qs may not yet be set.
+	 */
+	for (i = 0; i < QDIO_MAX_QUEUES_PER_IRQ; i++) {
+		q = irq_ptr->input_qs[i];
+		if (q) {
+			free_page((unsigned long) q->slib);
+			kmem_cache_free(qdio_q_cache, q);
+		}
+	}
+	for (i = 0; i < QDIO_MAX_QUEUES_PER_IRQ; i++) {
+		q = irq_ptr->output_qs[i];
+		if (q) {
+			if (q->u.out.use_cq) {
+				int n;
+
+				for (n = 0; n < QDIO_MAX_BUFFERS_PER_Q; ++n) {
+					struct qaob *aob = q->u.out.aobs[n];
+					if (aob) {
+						qdio_release_aob(aob);
+						q->u.out.aobs[n] = NULL;
+					}
+				}
+
+				qdio_disable_async_operation(&q->u.out);
+			}
+			free_page((unsigned long) q->slib);
+			kmem_cache_free(qdio_q_cache, q);
+		}
+	}
+	free_page((unsigned long) irq_ptr->qdr);
+	free_page(irq_ptr->chsc_page);
+	free_page((unsigned long) irq_ptr);
+}
+
+static void __qdio_allocate_fill_qdr(struct qdio_irq *irq_ptr,
+				     struct qdio_q **irq_ptr_qs,
+				     int i, int nr)
+{
+	irq_ptr->qdr->qdf0[i + nr].sliba =
+		(unsigned long)irq_ptr_qs[i]->slib;
+
+	irq_ptr->qdr->qdf0[i + nr].sla =
+		(unsigned long)irq_ptr_qs[i]->sl;
+
+	irq_ptr->qdr->qdf0[i + nr].slsba =
+		(unsigned long)&irq_ptr_qs[i]->slsb.val[0];
+
+	irq_ptr->qdr->qdf0[i + nr].akey = PAGE_DEFAULT_KEY >> 4;
+	irq_ptr->qdr->qdf0[i + nr].bkey = PAGE_DEFAULT_KEY >> 4;
+	irq_ptr->qdr->qdf0[i + nr].ckey = PAGE_DEFAULT_KEY >> 4;
+	irq_ptr->qdr->qdf0[i + nr].dkey = PAGE_DEFAULT_KEY >> 4;
+}
+
+static void setup_qdr(struct qdio_irq *irq_ptr,
+		      struct qdio_initialize *qdio_init)
+{
+	int i;
+
+	irq_ptr->qdr->qfmt = qdio_init->q_format;
+	irq_ptr->qdr->ac = qdio_init->qdr_ac;
+	irq_ptr->qdr->iqdcnt = qdio_init->no_input_qs;
+	irq_ptr->qdr->oqdcnt = qdio_init->no_output_qs;
+	irq_ptr->qdr->iqdsz = sizeof(struct qdesfmt0) / 4; /* size in words */
+	irq_ptr->qdr->oqdsz = sizeof(struct qdesfmt0) / 4;
+	irq_ptr->qdr->qiba = (unsigned long)&irq_ptr->qib;
+	irq_ptr->qdr->qkey = PAGE_DEFAULT_KEY >> 4;
+
+	for (i = 0; i < qdio_init->no_input_qs; i++)
+		__qdio_allocate_fill_qdr(irq_ptr, irq_ptr->input_qs, i, 0);
+
+	for (i = 0; i < qdio_init->no_output_qs; i++)
+		__qdio_allocate_fill_qdr(irq_ptr, irq_ptr->output_qs, i,
+					 qdio_init->no_input_qs);
+}
+
+static void setup_qib(struct qdio_irq *irq_ptr,
+		      struct qdio_initialize *init_data)
+{
+	if (qebsm_possible())
+		irq_ptr->qib.rflags |= QIB_RFLAGS_ENABLE_QEBSM;
+
+	irq_ptr->qib.rflags |= init_data->qib_rflags;
+
+	irq_ptr->qib.qfmt = init_data->q_format;
+	if (init_data->no_input_qs)
+		irq_ptr->qib.isliba =
+			(unsigned long)(irq_ptr->input_qs[0]->slib);
+	if (init_data->no_output_qs)
+		irq_ptr->qib.osliba =
+			(unsigned long)(irq_ptr->output_qs[0]->slib);
+	memcpy(irq_ptr->qib.ebcnam, init_data->adapter_name, 8);
+}
+
+int qdio_setup_irq(struct qdio_initialize *init_data)
+{
+	struct ciw *ciw;
+	struct qdio_irq *irq_ptr = init_data->cdev->private->qdio_data;
+
+	memset(&irq_ptr->qib, 0, sizeof(irq_ptr->qib));
+	memset(&irq_ptr->siga_flag, 0, sizeof(irq_ptr->siga_flag));
+	memset(&irq_ptr->ccw, 0, sizeof(irq_ptr->ccw));
+	memset(&irq_ptr->ssqd_desc, 0, sizeof(irq_ptr->ssqd_desc));
+	memset(&irq_ptr->perf_stat, 0, sizeof(irq_ptr->perf_stat));
+
+	irq_ptr->debugfs_dev = irq_ptr->debugfs_perf = NULL;
+	irq_ptr->sch_token = irq_ptr->state = irq_ptr->perf_stat_enabled = 0;
+
+	/* wipes qib.ac, required by ar7063 */
+	memset(irq_ptr->qdr, 0, sizeof(struct qdr));
+
+	irq_ptr->int_parm = init_data->int_parm;
+	irq_ptr->nr_input_qs = init_data->no_input_qs;
+	irq_ptr->nr_output_qs = init_data->no_output_qs;
+	irq_ptr->cdev = init_data->cdev;
+	ccw_device_get_schid(irq_ptr->cdev, &irq_ptr->schid);
+	setup_queues(irq_ptr, init_data);
+
+	setup_qib(irq_ptr, init_data);
+	qdio_setup_thinint(irq_ptr);
+	set_impl_params(irq_ptr, init_data->qib_param_field_format,
+			init_data->qib_param_field,
+			init_data->input_slib_elements,
+			init_data->output_slib_elements);
+
+	/* fill input and output descriptors */
+	setup_qdr(irq_ptr, init_data);
+
+	/* qdr, qib, sls, slsbs, slibs, sbales are filled now */
+
+	/* get qdio commands */
+	ciw = ccw_device_get_ciw(init_data->cdev, CIW_TYPE_EQUEUE);
+	if (!ciw) {
+		DBF_ERROR("%4x NO EQ", irq_ptr->schid.sch_no);
+		return -EINVAL;
+	}
+	irq_ptr->equeue = *ciw;
+
+	ciw = ccw_device_get_ciw(init_data->cdev, CIW_TYPE_AQUEUE);
+	if (!ciw) {
+		DBF_ERROR("%4x NO AQ", irq_ptr->schid.sch_no);
+		return -EINVAL;
+	}
+	irq_ptr->aqueue = *ciw;
+
+	/* set new interrupt handler */
+	spin_lock_irq(get_ccwdev_lock(irq_ptr->cdev));
+	irq_ptr->orig_handler = init_data->cdev->handler;
+	init_data->cdev->handler = qdio_int_handler;
+	spin_unlock_irq(get_ccwdev_lock(irq_ptr->cdev));
+	return 0;
+}
+
+void qdio_print_subchannel_info(struct qdio_irq *irq_ptr,
+				struct ccw_device *cdev)
+{
+	char s[80];
+
+	snprintf(s, 80, "qdio: %s %s on SC %x using "
+		 "AI:%d QEBSM:%d PRI:%d TDD:%d SIGA:%s%s%s%s%s\n",
+		 dev_name(&cdev->dev),
+		 (irq_ptr->qib.qfmt == QDIO_QETH_QFMT) ? "OSA" :
+			((irq_ptr->qib.qfmt == QDIO_ZFCP_QFMT) ? "ZFCP" : "HS"),
+		 irq_ptr->schid.sch_no,
+		 is_thinint_irq(irq_ptr),
+		 (irq_ptr->sch_token) ? 1 : 0,
+		 (irq_ptr->qib.ac & QIB_AC_OUTBOUND_PCI_SUPPORTED) ? 1 : 0,
+		 css_general_characteristics.aif_tdd,
+		 (irq_ptr->siga_flag.input) ? "R" : " ",
+		 (irq_ptr->siga_flag.output) ? "W" : " ",
+		 (irq_ptr->siga_flag.sync) ? "S" : " ",
+		 (irq_ptr->siga_flag.sync_after_ai) ? "A" : " ",
+		 (irq_ptr->siga_flag.sync_out_after_pci) ? "P" : " ");
+	printk(KERN_INFO "%s", s);
+}
+
+int qdio_enable_async_operation(struct qdio_output_q *outq)
+{
+	outq->aobs = kcalloc(QDIO_MAX_BUFFERS_PER_Q, sizeof(struct qaob *),
+			     GFP_ATOMIC);
+	if (!outq->aobs) {
+		outq->use_cq = 0;
+		return -ENOMEM;
+	}
+	outq->use_cq = 1;
+	return 0;
+}
+
+void qdio_disable_async_operation(struct qdio_output_q *q)
+{
+	kfree(q->aobs);
+	q->aobs = NULL;
+	q->use_cq = 0;
+}
+
+int __init qdio_setup_init(void)
+{
+	int rc;
+
+	qdio_q_cache = kmem_cache_create("qdio_q", sizeof(struct qdio_q),
+					 256, 0, NULL);
+	if (!qdio_q_cache)
+		return -ENOMEM;
+
+	qdio_aob_cache = kmem_cache_create("qdio_aob",
+					sizeof(struct qaob),
+					sizeof(struct qaob),
+					0,
+					NULL);
+	if (!qdio_aob_cache) {
+		rc = -ENOMEM;
+		goto free_qdio_q_cache;
+	}
+
+	/* Check for OSA/FCP thin interrupts (bit 67). */
+	DBF_EVENT("thinint:%1d",
+		  (css_general_characteristics.aif_osa) ? 1 : 0);
+
+	/* Check for QEBSM support in general (bit 58). */
+	DBF_EVENT("cssQEBSM:%1d", (qebsm_possible()) ? 1 : 0);
+	rc = 0;
+out:
+	return rc;
+free_qdio_q_cache:
+	kmem_cache_destroy(qdio_q_cache);
+	goto out;
+}
+
+void qdio_setup_exit(void)
+{
+	kmem_cache_destroy(qdio_aob_cache);
+	kmem_cache_destroy(qdio_q_cache);
+}
diff --git a/drivers/s390/cio/qdio_thinint.c b/drivers/s390/cio/qdio_thinint.c
new file mode 100644
index 0000000..07dea60
--- /dev/null
+++ b/drivers/s390/cio/qdio_thinint.c
@@ -0,0 +1,298 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright IBM Corp. 2000, 2009
+ * Author(s): Utz Bacher <utz.bacher@de.ibm.com>
+ *	      Cornelia Huck <cornelia.huck@de.ibm.com>
+ *	      Jan Glauber <jang@linux.vnet.ibm.com>
+ */
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <linux/kernel_stat.h>
+#include <linux/atomic.h>
+#include <linux/rculist.h>
+
+#include <asm/debug.h>
+#include <asm/qdio.h>
+#include <asm/airq.h>
+#include <asm/isc.h>
+
+#include "cio.h"
+#include "ioasm.h"
+#include "qdio.h"
+#include "qdio_debug.h"
+
+/*
+ * Restriction: only 63 iqdio subchannels can have their own indicator;
+ * all subsequent subchannels share one indicator.
+ */
+#define TIQDIO_NR_NONSHARED_IND		63
+#define TIQDIO_NR_INDICATORS		(TIQDIO_NR_NONSHARED_IND + 1)
+#define TIQDIO_SHARED_IND		63
+
+/* device state change indicators */
+struct indicator_t {
+	u32 ind;	/* u32 because of compare-and-swap performance */
+	atomic_t count; /* use count, 0 or 1 for non-shared indicators */
+};
+
+/* list of thin interrupt input queues */
+static LIST_HEAD(tiq_list);
+static DEFINE_MUTEX(tiq_list_lock);
+
+/* Adapter interrupt definitions */
+static void tiqdio_thinint_handler(struct airq_struct *airq);
+
+static struct airq_struct tiqdio_airq = {
+	.handler = tiqdio_thinint_handler,
+	.isc = QDIO_AIRQ_ISC,
+};
+
+static struct indicator_t *q_indicators;
+
+u64 last_ai_time;
+
+/* returns addr for the device state change indicator */
+static u32 *get_indicator(void)
+{
+	int i;
+
+	for (i = 0; i < TIQDIO_NR_NONSHARED_IND; i++)
+		if (!atomic_cmpxchg(&q_indicators[i].count, 0, 1))
+			return &q_indicators[i].ind;
+
+	/* use the shared indicator */
+	atomic_inc(&q_indicators[TIQDIO_SHARED_IND].count);
+	return &q_indicators[TIQDIO_SHARED_IND].ind;
+}
+
+static void put_indicator(u32 *addr)
+{
+	struct indicator_t *ind = container_of(addr, struct indicator_t, ind);
+
+	if (!addr)
+		return;
+	atomic_dec(&ind->count);
+}
+
+void tiqdio_add_input_queues(struct qdio_irq *irq_ptr)
+{
+	mutex_lock(&tiq_list_lock);
+	list_add_rcu(&irq_ptr->input_qs[0]->entry, &tiq_list);
+	mutex_unlock(&tiq_list_lock);
+	xchg(irq_ptr->dsci, 1 << 7);
+}
+
+void tiqdio_remove_input_queues(struct qdio_irq *irq_ptr)
+{
+	struct qdio_q *q;
+
+	q = irq_ptr->input_qs[0];
+	/* if establish triggered an error */
+	if (!q || !q->entry.prev || !q->entry.next)
+		return;
+
+	mutex_lock(&tiq_list_lock);
+	list_del_rcu(&q->entry);
+	mutex_unlock(&tiq_list_lock);
+	synchronize_rcu();
+}
+
+static inline int has_multiple_inq_on_dsci(struct qdio_irq *irq_ptr)
+{
+	return irq_ptr->nr_input_qs > 1;
+}
+
+static inline int references_shared_dsci(struct qdio_irq *irq_ptr)
+{
+	return irq_ptr->dsci == &q_indicators[TIQDIO_SHARED_IND].ind;
+}
+
+static inline int shared_ind(struct qdio_irq *irq_ptr)
+{
+	return references_shared_dsci(irq_ptr) ||
+		has_multiple_inq_on_dsci(irq_ptr);
+}
+
+void clear_nonshared_ind(struct qdio_irq *irq_ptr)
+{
+	if (!is_thinint_irq(irq_ptr))
+		return;
+	if (shared_ind(irq_ptr))
+		return;
+	xchg(irq_ptr->dsci, 0);
+}
+
+int test_nonshared_ind(struct qdio_irq *irq_ptr)
+{
+	if (!is_thinint_irq(irq_ptr))
+		return 0;
+	if (shared_ind(irq_ptr))
+		return 0;
+	if (*irq_ptr->dsci)
+		return 1;
+	else
+		return 0;
+}
+
+static inline u32 clear_shared_ind(void)
+{
+	if (!atomic_read(&q_indicators[TIQDIO_SHARED_IND].count))
+		return 0;
+	return xchg(&q_indicators[TIQDIO_SHARED_IND].ind, 0);
+}
+
+static inline void tiqdio_call_inq_handlers(struct qdio_irq *irq)
+{
+	struct qdio_q *q;
+	int i;
+
+	if (!references_shared_dsci(irq) &&
+	    has_multiple_inq_on_dsci(irq))
+		xchg(irq->dsci, 0);
+
+	for_each_input_queue(irq, q, i) {
+		if (q->u.in.queue_start_poll) {
+			/* skip if polling is enabled or already in work */
+			if (test_and_set_bit(QDIO_QUEUE_IRQS_DISABLED,
+					     &q->u.in.queue_irq_state)) {
+				qperf_inc(q, int_discarded);
+				continue;
+			}
+
+			/* avoid dsci clear here, done after processing */
+			q->u.in.queue_start_poll(irq->cdev, q->nr,
+						 irq->int_parm);
+		} else {
+			if (!shared_ind(irq))
+				xchg(irq->dsci, 0);
+
+			/*
+			 * Call inbound processing but not directly
+			 * since that could starve other thinint queues.
+			 */
+			tasklet_schedule(&q->tasklet);
+		}
+	}
+}
+
+/**
+ * tiqdio_thinint_handler - thin interrupt handler for qdio
+ * @airq: pointer to adapter interrupt descriptor
+ */
+static void tiqdio_thinint_handler(struct airq_struct *airq)
+{
+	u32 si_used = clear_shared_ind();
+	struct qdio_q *q;
+
+	last_ai_time = S390_lowcore.int_clock;
+	inc_irq_stat(IRQIO_QAI);
+
+	/* protect tiq_list entries, only changed in activate or shutdown */
+	rcu_read_lock();
+
+	/* check for work on all inbound thinint queues */
+	list_for_each_entry_rcu(q, &tiq_list, entry) {
+		struct qdio_irq *irq;
+
+		/* only process queues from changed sets */
+		irq = q->irq_ptr;
+		if (unlikely(references_shared_dsci(irq))) {
+			if (!si_used)
+				continue;
+		} else if (!*irq->dsci)
+			continue;
+
+		tiqdio_call_inq_handlers(irq);
+
+		qperf_inc(q, adapter_int);
+	}
+	rcu_read_unlock();
+}
+
+static int set_subchannel_ind(struct qdio_irq *irq_ptr, int reset)
+{
+	struct chsc_scssc_area *scssc = (void *)irq_ptr->chsc_page;
+	u64 summary_indicator_addr, subchannel_indicator_addr;
+	int rc;
+
+	if (reset) {
+		summary_indicator_addr = 0;
+		subchannel_indicator_addr = 0;
+	} else {
+		summary_indicator_addr = virt_to_phys(tiqdio_airq.lsi_ptr);
+		subchannel_indicator_addr = virt_to_phys(irq_ptr->dsci);
+	}
+
+	rc = chsc_sadc(irq_ptr->schid, scssc, summary_indicator_addr,
+		       subchannel_indicator_addr);
+	if (rc) {
+		DBF_ERROR("%4x SSI r:%4x", irq_ptr->schid.sch_no,
+			  scssc->response.code);
+		goto out;
+	}
+
+	DBF_EVENT("setscind");
+	DBF_HEX(&summary_indicator_addr, sizeof(summary_indicator_addr));
+	DBF_HEX(&subchannel_indicator_addr, sizeof(subchannel_indicator_addr));
+out:
+	return rc;
+}
+
+/* allocate non-shared indicators and shared indicator */
+int __init tiqdio_allocate_memory(void)
+{
+	q_indicators = kcalloc(TIQDIO_NR_INDICATORS,
+			       sizeof(struct indicator_t),
+			       GFP_KERNEL);
+	if (!q_indicators)
+		return -ENOMEM;
+	return 0;
+}
+
+void tiqdio_free_memory(void)
+{
+	kfree(q_indicators);
+}
+
+int __init tiqdio_register_thinints(void)
+{
+	int rc;
+
+	rc = register_adapter_interrupt(&tiqdio_airq);
+	if (rc) {
+		DBF_EVENT("RTI:%x", rc);
+		return rc;
+	}
+	return 0;
+}
+
+int qdio_establish_thinint(struct qdio_irq *irq_ptr)
+{
+	if (!is_thinint_irq(irq_ptr))
+		return 0;
+	return set_subchannel_ind(irq_ptr, 0);
+}
+
+void qdio_setup_thinint(struct qdio_irq *irq_ptr)
+{
+	if (!is_thinint_irq(irq_ptr))
+		return;
+	irq_ptr->dsci = get_indicator();
+	DBF_HEX(&irq_ptr->dsci, sizeof(void *));
+}
+
+void qdio_shutdown_thinint(struct qdio_irq *irq_ptr)
+{
+	if (!is_thinint_irq(irq_ptr))
+		return;
+
+	/* reset adapter interrupt indicators */
+	set_subchannel_ind(irq_ptr, 1);
+	put_indicator(irq_ptr->dsci);
+}
+
+void __exit tiqdio_unregister_thinints(void)
+{
+	WARN_ON(!list_empty(&tiq_list));
+	unregister_adapter_interrupt(&tiqdio_airq);
+}
diff --git a/drivers/s390/cio/scm.c b/drivers/s390/cio/scm.c
new file mode 100644
index 0000000..6bca1d5
--- /dev/null
+++ b/drivers/s390/cio/scm.c
@@ -0,0 +1,289 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Recognize and maintain s390 storage class memory.
+ *
+ * Copyright IBM Corp. 2012
+ * Author(s): Sebastian Ott <sebott@linux.vnet.ibm.com>
+ */
+
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/err.h>
+#include <asm/eadm.h>
+#include "chsc.h"
+
+static struct device *scm_root;
+
+#define to_scm_dev(n) container_of(n, struct scm_device, dev)
+#define	to_scm_drv(d) container_of(d, struct scm_driver, drv)
+
+static int scmdev_probe(struct device *dev)
+{
+	struct scm_device *scmdev = to_scm_dev(dev);
+	struct scm_driver *scmdrv = to_scm_drv(dev->driver);
+
+	return scmdrv->probe ? scmdrv->probe(scmdev) : -ENODEV;
+}
+
+static int scmdev_remove(struct device *dev)
+{
+	struct scm_device *scmdev = to_scm_dev(dev);
+	struct scm_driver *scmdrv = to_scm_drv(dev->driver);
+
+	return scmdrv->remove ? scmdrv->remove(scmdev) : -ENODEV;
+}
+
+static int scmdev_uevent(struct device *dev, struct kobj_uevent_env *env)
+{
+	return add_uevent_var(env, "MODALIAS=scm:scmdev");
+}
+
+static struct bus_type scm_bus_type = {
+	.name  = "scm",
+	.probe = scmdev_probe,
+	.remove = scmdev_remove,
+	.uevent = scmdev_uevent,
+};
+
+/**
+ * scm_driver_register() - register a scm driver
+ * @scmdrv: driver to be registered
+ */
+int scm_driver_register(struct scm_driver *scmdrv)
+{
+	struct device_driver *drv = &scmdrv->drv;
+
+	drv->bus = &scm_bus_type;
+
+	return driver_register(drv);
+}
+EXPORT_SYMBOL_GPL(scm_driver_register);
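+
+/*
+ * Registration sketch (hypothetical driver; all names are placeholders):
+ *
+ *	static struct scm_driver my_scm_driver = {
+ *		.drv = {
+ *			.name	= "my_scm",
+ *			.owner	= THIS_MODULE,
+ *		},
+ *		.probe	= my_probe,
+ *		.remove	= my_remove,
+ *	};
+ *
+ *	rc = scm_driver_register(&my_scm_driver);
+ */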
+
+/**
+ * scm_driver_unregister() - deregister a scm driver
+ * @scmdrv: driver to be deregistered
+ */
+void scm_driver_unregister(struct scm_driver *scmdrv)
+{
+	driver_unregister(&scmdrv->drv);
+}
+EXPORT_SYMBOL_GPL(scm_driver_unregister);
+
+void scm_irq_handler(struct aob *aob, blk_status_t error)
+{
+	struct aob_rq_header *aobrq = (void *) aob->request.data;
+	struct scm_device *scmdev = aobrq->scmdev;
+	struct scm_driver *scmdrv = to_scm_drv(scmdev->dev.driver);
+
+	scmdrv->handler(scmdev, aobrq->data, error);
+}
+EXPORT_SYMBOL_GPL(scm_irq_handler);
+
+#define scm_attr(name)							\
+static ssize_t show_##name(struct device *dev,				\
+	       struct device_attribute *attr, char *buf)		\
+{									\
+	struct scm_device *scmdev = to_scm_dev(dev);			\
+	int ret;							\
+									\
+	device_lock(dev);						\
+	ret = sprintf(buf, "%u\n", scmdev->attrs.name);			\
+	device_unlock(dev);						\
+									\
+	return ret;							\
+}									\
+static DEVICE_ATTR(name, S_IRUGO, show_##name, NULL);
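+
+/*
+ * scm_attr(rank), for example, expands to a show_rank() helper that
+ * prints scmdev->attrs.rank under the device lock, plus a read-only
+ * dev_attr_rank device attribute referenced in the list below.
+ */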
+
+scm_attr(persistence);
+scm_attr(oper_state);
+scm_attr(data_state);
+scm_attr(rank);
+scm_attr(release);
+scm_attr(res_id);
+
+static struct attribute *scmdev_attrs[] = {
+	&dev_attr_persistence.attr,
+	&dev_attr_oper_state.attr,
+	&dev_attr_data_state.attr,
+	&dev_attr_rank.attr,
+	&dev_attr_release.attr,
+	&dev_attr_res_id.attr,
+	NULL,
+};
+
+static struct attribute_group scmdev_attr_group = {
+	.attrs = scmdev_attrs,
+};
+
+static const struct attribute_group *scmdev_attr_groups[] = {
+	&scmdev_attr_group,
+	NULL,
+};
+
+static void scmdev_release(struct device *dev)
+{
+	struct scm_device *scmdev = to_scm_dev(dev);
+
+	kfree(scmdev);
+}
+
+static void scmdev_setup(struct scm_device *scmdev, struct sale *sale,
+			 unsigned int size, unsigned int max_blk_count)
+{
+	dev_set_name(&scmdev->dev, "%016llx", (unsigned long long) sale->sa);
+	scmdev->nr_max_block = max_blk_count;
+	scmdev->address = sale->sa;
+	scmdev->size = 1UL << size;
+	scmdev->attrs.rank = sale->rank;
+	scmdev->attrs.persistence = sale->p;
+	scmdev->attrs.oper_state = sale->op_state;
+	scmdev->attrs.data_state = sale->data_state;
+	scmdev->attrs.release = sale->r;
+	scmdev->attrs.res_id = sale->rid;
+	scmdev->dev.parent = scm_root;
+	scmdev->dev.bus = &scm_bus_type;
+	scmdev->dev.release = scmdev_release;
+	scmdev->dev.groups = scmdev_attr_groups;
+}
+
+/*
+ * Check for state-changes, notify the driver and userspace.
+ */
+static void scmdev_update(struct scm_device *scmdev, struct sale *sale)
+{
+	struct scm_driver *scmdrv;
+	bool changed;
+
+	device_lock(&scmdev->dev);
+	changed = scmdev->attrs.rank != sale->rank ||
+		  scmdev->attrs.oper_state != sale->op_state;
+	scmdev->attrs.rank = sale->rank;
+	scmdev->attrs.oper_state = sale->op_state;
+	if (!scmdev->dev.driver)
+		goto out;
+	scmdrv = to_scm_drv(scmdev->dev.driver);
+	if (changed && scmdrv->notify)
+		scmdrv->notify(scmdev, SCM_CHANGE);
+out:
+	device_unlock(&scmdev->dev);
+	if (changed)
+		kobject_uevent(&scmdev->dev.kobj, KOBJ_CHANGE);
+}
+
+static int check_address(struct device *dev, void *data)
+{
+	struct scm_device *scmdev = to_scm_dev(dev);
+	struct sale *sale = data;
+
+	return scmdev->address == sale->sa;
+}
+
+static struct scm_device *scmdev_find(struct sale *sale)
+{
+	struct device *dev;
+
+	dev = bus_find_device(&scm_bus_type, NULL, sale, check_address);
+
+	return dev ? to_scm_dev(dev) : NULL;
+}
+
+static int scm_add(struct chsc_scm_info *scm_info, size_t num)
+{
+	struct sale *sale, *scmal = scm_info->scmal;
+	struct scm_device *scmdev;
+	int ret;
+
+	for (sale = scmal; sale < scmal + num; sale++) {
+		scmdev = scmdev_find(sale);
+		if (scmdev) {
+			scmdev_update(scmdev, sale);
+			/* Release reference from scmdev_find(). */
+			put_device(&scmdev->dev);
+			continue;
+		}
+		scmdev = kzalloc(sizeof(*scmdev), GFP_KERNEL);
+		if (!scmdev)
+			return -ENOMEM;
+		scmdev_setup(scmdev, sale, scm_info->is, scm_info->mbc);
+		ret = device_register(&scmdev->dev);
+		if (ret) {
+			/* Release reference from device_initialize(). */
+			put_device(&scmdev->dev);
+			return ret;
+		}
+	}
+
+	return 0;
+}
+
+int scm_update_information(void)
+{
+	struct chsc_scm_info *scm_info;
+	u64 token = 0;
+	size_t num;
+	int ret;
+
+	scm_info = (void *)__get_free_page(GFP_KERNEL | GFP_DMA);
+	if (!scm_info)
+		return -ENOMEM;
+
+	do {
+		ret = chsc_scm_info(scm_info, token);
+		if (ret)
+			break;
+
+		num = (scm_info->response.length -
+		       (offsetof(struct chsc_scm_info, scmal) -
+			offsetof(struct chsc_scm_info, response))
+		      ) / sizeof(struct sale);
+
+		ret = scm_add(scm_info, num);
+		if (ret)
+			break;
+
+		token = scm_info->restok;
+	} while (token);
+
+	free_page((unsigned long)scm_info);
+
+	return ret;
+}
+
+static int scm_dev_avail(struct device *dev, void *unused)
+{
+	struct scm_driver *scmdrv = to_scm_drv(dev->driver);
+	struct scm_device *scmdev = to_scm_dev(dev);
+
+	if (dev->driver && scmdrv->notify)
+		scmdrv->notify(scmdev, SCM_AVAIL);
+
+	return 0;
+}
+
+int scm_process_availability_information(void)
+{
+	return bus_for_each_dev(&scm_bus_type, NULL, NULL, scm_dev_avail);
+}
+
+static int __init scm_init(void)
+{
+	int ret;
+
+	ret = bus_register(&scm_bus_type);
+	if (ret)
+		return ret;
+
+	scm_root = root_device_register("scm");
+	if (IS_ERR(scm_root)) {
+		bus_unregister(&scm_bus_type);
+		return PTR_ERR(scm_root);
+	}
+
+	scm_update_information();
+	return 0;
+}
+subsys_initcall_sync(scm_init);
diff --git a/drivers/s390/cio/trace.c b/drivers/s390/cio/trace.c
new file mode 100644
index 0000000..e331cd9
--- /dev/null
+++ b/drivers/s390/cio/trace.c
@@ -0,0 +1,25 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Tracepoint definitions for s390_cio
+ *
+ * Copyright IBM Corp. 2015
+ * Author(s): Peter Oberparleiter <oberpar@linux.vnet.ibm.com>
+ */
+
+#include <asm/crw.h>
+#include "cio.h"
+
+#define CREATE_TRACE_POINTS
+#include "trace.h"
+
+EXPORT_TRACEPOINT_SYMBOL(s390_cio_stsch);
+EXPORT_TRACEPOINT_SYMBOL(s390_cio_msch);
+EXPORT_TRACEPOINT_SYMBOL(s390_cio_tsch);
+EXPORT_TRACEPOINT_SYMBOL(s390_cio_tpi);
+EXPORT_TRACEPOINT_SYMBOL(s390_cio_ssch);
+EXPORT_TRACEPOINT_SYMBOL(s390_cio_csch);
+EXPORT_TRACEPOINT_SYMBOL(s390_cio_hsch);
+EXPORT_TRACEPOINT_SYMBOL(s390_cio_xsch);
+EXPORT_TRACEPOINT_SYMBOL(s390_cio_rsch);
+EXPORT_TRACEPOINT_SYMBOL(s390_cio_rchp);
+EXPORT_TRACEPOINT_SYMBOL(s390_cio_chsc);
diff --git a/drivers/s390/cio/trace.h b/drivers/s390/cio/trace.h
new file mode 100644
index 0000000..0ebb29b
--- /dev/null
+++ b/drivers/s390/cio/trace.h
@@ -0,0 +1,426 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Tracepoint header for the s390 Common I/O layer (CIO)
+ *
+ * Copyright IBM Corp. 2015
+ * Author(s): Peter Oberparleiter <oberpar@linux.vnet.ibm.com>
+ */
+
+#include <linux/kernel.h>
+#include <asm/crw.h>
+#include <uapi/asm/chpid.h>
+#include <uapi/asm/schid.h>
+#include "cio.h"
+#include "orb.h"
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM s390
+
+#if !defined(_TRACE_S390_CIO_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_S390_CIO_H
+
+#include <linux/tracepoint.h>
+
+DECLARE_EVENT_CLASS(s390_class_schib,
+	TP_PROTO(struct subchannel_id schid, struct schib *schib, int cc),
+	TP_ARGS(schid, schib, cc),
+	TP_STRUCT__entry(
+		__field(u8, cssid)
+		__field(u8, ssid)
+		__field(u16, schno)
+		__field(u16, devno)
+		__field_struct(struct schib, schib)
+		__field(u8, pmcw_ena)
+		__field(u8, pmcw_st)
+		__field(u8, pmcw_dnv)
+		__field(u16, pmcw_dev)
+		__field(u8, pmcw_lpm)
+		__field(u8, pmcw_pnom)
+		__field(u8, pmcw_lpum)
+		__field(u8, pmcw_pim)
+		__field(u8, pmcw_pam)
+		__field(u8, pmcw_pom)
+		__field(u64, pmcw_chpid)
+		__field(int, cc)
+	),
+	TP_fast_assign(
+		__entry->cssid = schid.cssid;
+		__entry->ssid = schid.ssid;
+		__entry->schno = schid.sch_no;
+		__entry->devno = schib->pmcw.dev;
+		__entry->schib = *schib;
+		__entry->pmcw_ena = schib->pmcw.ena;
+		__entry->pmcw_st = schib->pmcw.st;
+		__entry->pmcw_dnv = schib->pmcw.dnv;
+		__entry->pmcw_dev = schib->pmcw.dev;
+		__entry->pmcw_lpm = schib->pmcw.lpm;
+		__entry->pmcw_pnom = schib->pmcw.pnom;
+		__entry->pmcw_lpum = schib->pmcw.lpum;
+		__entry->pmcw_pim = schib->pmcw.pim;
+		__entry->pmcw_pam = schib->pmcw.pam;
+		__entry->pmcw_pom = schib->pmcw.pom;
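+		/* Pack the eight one-byte CHPIDs into one u64 for printing. */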
+		memcpy(&__entry->pmcw_chpid, &schib->pmcw.chpid, 8);
+		__entry->cc = cc;
+	),
+	TP_printk("schid=%x.%x.%04x cc=%d ena=%d st=%d dnv=%d dev=%04x "
+		  "lpm=0x%02x pnom=0x%02x lpum=0x%02x pim=0x%02x pam=0x%02x "
+		  "pom=0x%02x chpids=%016llx",
+		  __entry->cssid, __entry->ssid, __entry->schno, __entry->cc,
+		  __entry->pmcw_ena, __entry->pmcw_st,
+		  __entry->pmcw_dnv, __entry->pmcw_dev,
+		  __entry->pmcw_lpm, __entry->pmcw_pnom,
+		  __entry->pmcw_lpum, __entry->pmcw_pim,
+		  __entry->pmcw_pam, __entry->pmcw_pom,
+		  __entry->pmcw_chpid
+	)
+);
+
+/**
+ * s390_cio_stsch - Store Subchannel instruction (STSCH) was performed
+ * @schid: Subchannel ID
+ * @schib: Subchannel-Information block
+ * @cc: Condition code
+ */
+DEFINE_EVENT(s390_class_schib, s390_cio_stsch,
+	TP_PROTO(struct subchannel_id schid, struct schib *schib, int cc),
+	TP_ARGS(schid, schib, cc)
+);
+
+/**
+ * s390_cio_msch - Modify Subchannel instruction (MSCH) was performed
+ * @schid: Subchannel ID
+ * @schib: Subchannel-Information block
+ * @cc: Condition code
+ */
+DEFINE_EVENT(s390_class_schib, s390_cio_msch,
+	TP_PROTO(struct subchannel_id schid, struct schib *schib, int cc),
+	TP_ARGS(schid, schib, cc)
+);
+
+/**
+ * s390_cio_tsch - Test Subchannel instruction (TSCH) was performed
+ * @schid: Subchannel ID
+ * @irb: Interruption-Response Block
+ * @cc: Condition code
+ */
+TRACE_EVENT(s390_cio_tsch,
+	TP_PROTO(struct subchannel_id schid, struct irb *irb, int cc),
+	TP_ARGS(schid, irb, cc),
+	TP_STRUCT__entry(
+		__field(u8, cssid)
+		__field(u8, ssid)
+		__field(u16, schno)
+		__field_struct(struct irb, irb)
+		__field(u8, scsw_dcc)
+		__field(u8, scsw_pno)
+		__field(u8, scsw_fctl)
+		__field(u8, scsw_actl)
+		__field(u8, scsw_stctl)
+		__field(u8, scsw_dstat)
+		__field(u8, scsw_cstat)
+		__field(int, cc)
+	),
+	TP_fast_assign(
+		__entry->cssid = schid.cssid;
+		__entry->ssid = schid.ssid;
+		__entry->schno = schid.sch_no;
+		__entry->irb = *irb;
+		__entry->scsw_dcc = scsw_cc(&irb->scsw);
+		__entry->scsw_pno = scsw_pno(&irb->scsw);
+		__entry->scsw_fctl = scsw_fctl(&irb->scsw);
+		__entry->scsw_actl = scsw_actl(&irb->scsw);
+		__entry->scsw_stctl = scsw_stctl(&irb->scsw);
+		__entry->scsw_dstat = scsw_dstat(&irb->scsw);
+		__entry->scsw_cstat = scsw_cstat(&irb->scsw);
+		__entry->cc = cc;
+	),
+	TP_printk("schid=%x.%x.%04x cc=%d dcc=%d pno=%d fctl=0x%x actl=0x%x "
+		  "stctl=0x%x dstat=0x%x cstat=0x%x",
+		  __entry->cssid, __entry->ssid, __entry->schno, __entry->cc,
+		  __entry->scsw_dcc, __entry->scsw_pno,
+		  __entry->scsw_fctl, __entry->scsw_actl,
+		  __entry->scsw_stctl,
+		  __entry->scsw_dstat, __entry->scsw_cstat
+	)
+);
+
+/**
+ * s390_cio_tpi - Test Pending Interruption instruction (TPI) was performed
+ * @addr: Address of the I/O interruption code or %NULL
+ * @cc: Condition code
+ */
+TRACE_EVENT(s390_cio_tpi,
+	TP_PROTO(struct tpi_info *addr, int cc),
+	TP_ARGS(addr, cc),
+	TP_STRUCT__entry(
+		__field(int, cc)
+		__field_struct(struct tpi_info, tpi_info)
+		__field(u8, cssid)
+		__field(u8, ssid)
+		__field(u16, schno)
+		__field(u8, adapter_IO)
+		__field(u8, isc)
+		__field(u8, type)
+	),
+	TP_fast_assign(
+		__entry->cc = cc;
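+		/*
+		 * cc != 0: no interruption information was stored.
+		 * addr == NULL: TPI stored the information in the lowcore
+		 * rather than in a caller-supplied buffer.
+		 */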
+		if (cc != 0)
+			memset(&__entry->tpi_info, 0, sizeof(struct tpi_info));
+		else if (addr)
+			__entry->tpi_info = *addr;
+		else {
+			memcpy(&__entry->tpi_info, &S390_lowcore.subchannel_id,
+			       sizeof(struct tpi_info));
+		}
+		__entry->cssid = __entry->tpi_info.schid.cssid;
+		__entry->ssid = __entry->tpi_info.schid.ssid;
+		__entry->schno = __entry->tpi_info.schid.sch_no;
+		__entry->adapter_IO = __entry->tpi_info.adapter_IO;
+		__entry->isc = __entry->tpi_info.isc;
+		__entry->type = __entry->tpi_info.type;
+	),
+	TP_printk("schid=%x.%x.%04x cc=%d a=%d isc=%d type=%d",
+		  __entry->cssid, __entry->ssid, __entry->schno, __entry->cc,
+		  __entry->adapter_IO, __entry->isc,
+		  __entry->type
+	)
+);
+
+/**
+ * s390_cio_ssch - Start Subchannel instruction (SSCH) was performed
+ * @schid: Subchannel ID
+ * @orb: Operation-Request Block
+ * @cc: Condition code
+ */
+TRACE_EVENT(s390_cio_ssch,
+	TP_PROTO(struct subchannel_id schid, union orb *orb, int cc),
+	TP_ARGS(schid, orb, cc),
+	TP_STRUCT__entry(
+		__field(u8, cssid)
+		__field(u8, ssid)
+		__field(u16, schno)
+		__field_struct(union orb, orb)
+		__field(int, cc)
+	),
+	TP_fast_assign(
+		__entry->cssid = schid.cssid;
+		__entry->ssid = schid.ssid;
+		__entry->schno = schid.sch_no;
+		__entry->orb = *orb;
+		__entry->cc = cc;
+	),
+	TP_printk("schid=%x.%x.%04x cc=%d", __entry->cssid, __entry->ssid,
+		  __entry->schno, __entry->cc
+	)
+);
+
+DECLARE_EVENT_CLASS(s390_class_schid,
+	TP_PROTO(struct subchannel_id schid, int cc),
+	TP_ARGS(schid, cc),
+	TP_STRUCT__entry(
+		__field(u8, cssid)
+		__field(u8, ssid)
+		__field(u16, schno)
+		__field(int, cc)
+	),
+	TP_fast_assign(
+		__entry->cssid = schid.cssid;
+		__entry->ssid = schid.ssid;
+		__entry->schno = schid.sch_no;
+		__entry->cc = cc;
+	),
+	TP_printk("schid=%x.%x.%04x cc=%d", __entry->cssid, __entry->ssid,
+		  __entry->schno, __entry->cc
+	)
+);
+
+/**
+ * s390_cio_csch - Clear Subchannel instruction (CSCH) was performed
+ * @schid: Subchannel ID
+ * @cc: Condition code
+ */
+DEFINE_EVENT(s390_class_schid, s390_cio_csch,
+	TP_PROTO(struct subchannel_id schid, int cc),
+	TP_ARGS(schid, cc)
+);
+
+/**
+ * s390_cio_hsch - Halt Subchannel instruction (HSCH) was performed
+ * @schid: Subchannel ID
+ * @cc: Condition code
+ */
+DEFINE_EVENT(s390_class_schid, s390_cio_hsch,
+	TP_PROTO(struct subchannel_id schid, int cc),
+	TP_ARGS(schid, cc)
+);
+
+/**
+ * s390_cio_xsch - Cancel Subchannel instruction (XSCH) was performed
+ * @schid: Subchannel ID
+ * @cc: Condition code
+ */
+DEFINE_EVENT(s390_class_schid, s390_cio_xsch,
+	TP_PROTO(struct subchannel_id schid, int cc),
+	TP_ARGS(schid, cc)
+);
+
+/**
+ * s390_cio_rsch - Resume Subchannel instruction (RSCH) was performed
+ * @schid: Subchannel ID
+ * @cc: Condition code
+ */
+DEFINE_EVENT(s390_class_schid, s390_cio_rsch,
+	TP_PROTO(struct subchannel_id schid, int cc),
+	TP_ARGS(schid, cc)
+);
+
+/**
+ * s390_cio_rchp - Reset Channel Path (RCHP) instruction was performed
+ * @chpid: Channel-Path Identifier
+ * @cc: Condition code
+ */
+TRACE_EVENT(s390_cio_rchp,
+	TP_PROTO(struct chp_id chpid, int cc),
+	TP_ARGS(chpid, cc),
+	TP_STRUCT__entry(
+		__field(u8, cssid)
+		__field(u8, id)
+		__field(int, cc)
+	),
+	TP_fast_assign(
+		__entry->cssid = chpid.cssid;
+		__entry->id = chpid.id;
+		__entry->cc = cc;
+	),
+	TP_printk("chpid=%x.%02x cc=%d", __entry->cssid, __entry->id,
+		  __entry->cc
+	)
+);
+
+#define CHSC_MAX_REQUEST_LEN		64
+#define CHSC_MAX_RESPONSE_LEN		64
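+/* Only the first 64 bytes of the CHSC request and response are recorded. */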
+
+/**
+ * s390_cio_chsc - Channel Subsystem Call (CHSC) instruction was performed
+ * @chsc: CHSC block
+ * @cc: Condition code
+ */
+TRACE_EVENT(s390_cio_chsc,
+	TP_PROTO(struct chsc_header *chsc, int cc),
+	TP_ARGS(chsc, cc),
+	TP_STRUCT__entry(
+		__field(int, cc)
+		__field(u16, code)
+		__field(u16, rcode)
+		__array(u8, request, CHSC_MAX_REQUEST_LEN)
+		__array(u8, response, CHSC_MAX_RESPONSE_LEN)
+	),
+	TP_fast_assign(
+		__entry->cc = cc;
+		__entry->code = chsc->code;
+		memcpy(&__entry->request, chsc,
+		       min_t(u16, chsc->length, CHSC_MAX_REQUEST_LEN));
+		chsc = (struct chsc_header *) ((char *) chsc + chsc->length);
+		__entry->rcode = chsc->code;
+		memcpy(&__entry->response, chsc,
+		       min_t(u16, chsc->length, CHSC_MAX_RESPONSE_LEN));
+	),
+	TP_printk("code=0x%04x cc=%d rcode=0x%04x", __entry->code,
+		  __entry->cc, __entry->rcode)
+);
+
+/**
+ * s390_cio_interrupt - An I/O interrupt occurred
+ * @tpi_info: Address of the I/O interruption code
+ */
+TRACE_EVENT(s390_cio_interrupt,
+	TP_PROTO(struct tpi_info *tpi_info),
+	TP_ARGS(tpi_info),
+	TP_STRUCT__entry(
+		__field_struct(struct tpi_info, tpi_info)
+		__field(u8, cssid)
+		__field(u8, ssid)
+		__field(u16, schno)
+		__field(u8, isc)
+		__field(u8, type)
+	),
+	TP_fast_assign(
+		__entry->tpi_info = *tpi_info;
+		__entry->cssid = tpi_info->schid.cssid;
+		__entry->ssid = tpi_info->schid.ssid;
+		__entry->schno = tpi_info->schid.sch_no;
+		__entry->isc = tpi_info->isc;
+		__entry->type = tpi_info->type;
+	),
+	TP_printk("schid=%x.%x.%04x isc=%d type=%d",
+		  __entry->cssid, __entry->ssid, __entry->schno,
+		  __entry->isc, __entry->type
+	)
+);
+
+/**
+ * s390_cio_adapter_int - An adapter interrupt occurred
+ * @tpi_info: Address of the I/O interruption code
+ */
+TRACE_EVENT(s390_cio_adapter_int,
+	TP_PROTO(struct tpi_info *tpi_info),
+	TP_ARGS(tpi_info),
+	TP_STRUCT__entry(
+		__field_struct(struct tpi_info, tpi_info)
+		__field(u8, isc)
+	),
+	TP_fast_assign(
+		__entry->tpi_info = *tpi_info;
+		__entry->isc = tpi_info->isc;
+	),
+	TP_printk("isc=%d", __entry->isc)
+);
+
+/**
+ * s390_cio_stcrw - Store Channel Report Word (STCRW) was performed
+ * @crw: Channel Report Word
+ * @cc: Condition code
+ */
+TRACE_EVENT(s390_cio_stcrw,
+	TP_PROTO(struct crw *crw, int cc),
+	TP_ARGS(crw, cc),
+	TP_STRUCT__entry(
+		__field_struct(struct crw, crw)
+		__field(int, cc)
+		__field(u8, slct)
+		__field(u8, oflw)
+		__field(u8, chn)
+		__field(u8, rsc)
+		__field(u8, anc)
+		__field(u8, erc)
+		__field(u16, rsid)
+	),
+	TP_fast_assign(
+		__entry->crw = *crw;
+		__entry->cc = cc;
+		__entry->slct = crw->slct;
+		__entry->oflw = crw->oflw;
+		__entry->chn = crw->chn;
+		__entry->rsc = crw->rsc;
+		__entry->anc = crw->anc;
+		__entry->erc = crw->erc;
+		__entry->rsid = crw->rsid;
+	),
+	TP_printk("cc=%d slct=%d oflw=%d chn=%d rsc=%d anc=%d erc=0x%x "
+		  "rsid=0x%x",
+		  __entry->cc, __entry->slct, __entry->oflw,
+		  __entry->chn, __entry->rsc,  __entry->anc,
+		  __entry->erc, __entry->rsid
+	)
+);
+
+#endif /* _TRACE_S390_CIO_H */
+
+/* This part must be outside protection */
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE trace
+
+#include <trace/define_trace.h>
diff --git a/drivers/s390/cio/vfio_ccw_cp.c b/drivers/s390/cio/vfio_ccw_cp.c
new file mode 100644
index 0000000..70a006b
--- /dev/null
+++ b/drivers/s390/cio/vfio_ccw_cp.c
@@ -0,0 +1,880 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * channel program interfaces
+ *
+ * Copyright IBM Corp. 2017
+ *
+ * Author(s): Dong Jia Shi <bjsdjshi@linux.vnet.ibm.com>
+ *            Xiao Feng Ren <renxiaof@linux.vnet.ibm.com>
+ */
+
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <linux/iommu.h>
+#include <linux/vfio.h>
+#include <asm/idals.h>
+
+#include "vfio_ccw_cp.h"
+
+/*
+ * Max length for ccw chain.
+ * XXX: Limit to 256, need to check more?
+ */
+#define CCWCHAIN_LEN_MAX	256
+
+struct pfn_array {
+	/* Starting guest physical I/O address. */
+	unsigned long		pa_iova;
+	/* Array that stores PFNs of the pages that need to be pinned. */
+	unsigned long		*pa_iova_pfn;
+	/* Array that receives PFNs of the pages pinned. */
+	unsigned long		*pa_pfn;
+	/* Number of pages pinned from @pa_iova. */
+	int			pa_nr;
+};
+
+struct pfn_array_table {
+	struct pfn_array	*pat_pa;
+	int			pat_nr;
+};
+
+struct ccwchain {
+	struct list_head	next;
+	struct ccw1		*ch_ccw;
+	/* Guest physical address of the current chain. */
+	u64			ch_iova;
+	/* Count of the valid ccws in chain. */
+	int			ch_len;
+	/* Pinned PAGEs for the original data. */
+	struct pfn_array_table	*ch_pat;
+};
+
+/*
+ * pfn_array_alloc_pin() - alloc memory for PFNs, then pin user pages in memory
+ * @pa: pfn_array on which to perform the operation
+ * @mdev: the mediated device to perform pin/unpin operations
+ * @iova: target guest physical address
+ * @len: number of bytes that should be pinned from @iova
+ *
+ * Attempt to allocate memory for PFNs, and pin user pages in memory.
+ *
+ * Usage of pfn_array:
+ * We expect (pa_nr == 0) and (pa_iova_pfn == NULL); all fields in
+ * this structure will be filled in by this function.
+ *
+ * Returns:
+ *   Number of pages pinned on success.
+ *   If @pa->pa_nr is not 0, or @pa->pa_iova_pfn is not NULL initially,
+ *   returns -EINVAL.
+ *   If no pages were pinned, returns -errno.
+ */
+static int pfn_array_alloc_pin(struct pfn_array *pa, struct device *mdev,
+			       u64 iova, unsigned int len)
+{
+	int i, ret = 0;
+
+	if (!len)
+		return 0;
+
+	if (pa->pa_nr || pa->pa_iova_pfn)
+		return -EINVAL;
+
+	pa->pa_iova = iova;
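+	/*
+	 * Number of pages spanned by [iova, iova + len): account for the
+	 * sub-page offset of iova before rounding up. E.g. an offset of
+	 * 0xe00 with len 0x400 spans two pages although len < PAGE_SIZE.
+	 */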
+
+	pa->pa_nr = ((iova & ~PAGE_MASK) + len + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
+	if (!pa->pa_nr)
+		return -EINVAL;
+
+	pa->pa_iova_pfn = kcalloc(pa->pa_nr,
+				  sizeof(*pa->pa_iova_pfn) +
+				  sizeof(*pa->pa_pfn),
+				  GFP_KERNEL);
+	if (unlikely(!pa->pa_iova_pfn))
+		return -ENOMEM;
+	pa->pa_pfn = pa->pa_iova_pfn + pa->pa_nr;
+
+	pa->pa_iova_pfn[0] = pa->pa_iova >> PAGE_SHIFT;
+	for (i = 1; i < pa->pa_nr; i++)
+		pa->pa_iova_pfn[i] = pa->pa_iova_pfn[i - 1] + 1;
+
+	ret = vfio_pin_pages(mdev, pa->pa_iova_pfn, pa->pa_nr,
+			     IOMMU_READ | IOMMU_WRITE, pa->pa_pfn);
+
+	if (ret < 0) {
+		goto err_out;
+	} else if (ret > 0 && ret != pa->pa_nr) {
+		vfio_unpin_pages(mdev, pa->pa_iova_pfn, ret);
+		ret = -EINVAL;
+		goto err_out;
+	}
+
+	return ret;
+
+err_out:
+	pa->pa_nr = 0;
+	kfree(pa->pa_iova_pfn);
+	pa->pa_iova_pfn = NULL;
+
+	return ret;
+}
+
+/* Unpin the pages before releasing the memory. */
+static void pfn_array_unpin_free(struct pfn_array *pa, struct device *mdev)
+{
+	vfio_unpin_pages(mdev, pa->pa_iova_pfn, pa->pa_nr);
+	pa->pa_nr = 0;
+	kfree(pa->pa_iova_pfn);
+}
+
+static int pfn_array_table_init(struct pfn_array_table *pat, int nr)
+{
+	pat->pat_pa = kcalloc(nr, sizeof(*pat->pat_pa), GFP_KERNEL);
+	if (unlikely(ZERO_OR_NULL_PTR(pat->pat_pa))) {
+		pat->pat_nr = 0;
+		return -ENOMEM;
+	}
+
+	pat->pat_nr = nr;
+
+	return 0;
+}
+
+static void pfn_array_table_unpin_free(struct pfn_array_table *pat,
+				       struct device *mdev)
+{
+	int i;
+
+	for (i = 0; i < pat->pat_nr; i++)
+		pfn_array_unpin_free(pat->pat_pa + i, mdev);
+
+	if (pat->pat_nr) {
+		kfree(pat->pat_pa);
+		pat->pat_pa = NULL;
+		pat->pat_nr = 0;
+	}
+}
+
+static bool pfn_array_table_iova_pinned(struct pfn_array_table *pat,
+					unsigned long iova)
+{
+	struct pfn_array *pa = pat->pat_pa;
+	unsigned long iova_pfn = iova >> PAGE_SHIFT;
+	int i, j;
+
+	for (i = 0; i < pat->pat_nr; i++, pa++)
+		for (j = 0; j < pa->pa_nr; j++)
+			if (pa->pa_iova_pfn[j] == iova_pfn)
+				return true;
+
+	return false;
+}
+
+/* Create the list of IDAL words for a pfn_array_table. */
+static inline void pfn_array_table_idal_create_words(
+	struct pfn_array_table *pat,
+	unsigned long *idaws)
+{
+	struct pfn_array *pa;
+	int i, j, k;
+
+	/*
+	 * Idal words (except the first one) rely on the memory being 4k
+	 * aligned. If a user virtual address is 4K aligned, then its
+	 * corresponding kernel physical address will also be 4K aligned. Thus
+	 * there is no problem in simply using the phys to create an
+	 * idaw.
+	 */
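+	/*
+	 * Only the first IDAW may point into the middle of a page; it
+	 * carries the sub-page offset of the I/O start address.
+	 */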
+	k = 0;
+	for (i = 0; i < pat->pat_nr; i++) {
+		pa = pat->pat_pa + i;
+		for (j = 0; j < pa->pa_nr; j++) {
+			idaws[k] = pa->pa_pfn[j] << PAGE_SHIFT;
+			if (k == 0)
+				idaws[k] += pa->pa_iova & (PAGE_SIZE - 1);
+			k++;
+		}
+	}
+}
+
+/*
+ * Within the domain (@mdev), copy @n bytes from a guest physical
+ * address (@iova) to a host kernel address (@to).
+ *
+ * Returns a negative errno if pinning fails, otherwise the number of
+ * bytes that could not be copied (i.e. 0 on complete success).
+ */
+static long copy_from_iova(struct device *mdev,
+			   void *to, u64 iova,
+			   unsigned long n)
+{
+	struct pfn_array pa = {0};
+	u64 from;
+	int i, ret;
+	unsigned long l, m;
+
+	ret = pfn_array_alloc_pin(&pa, mdev, iova, n);
+	if (ret <= 0)
+		return ret;
+
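+	/*
+	 * Copy page by page; l tracks the bytes still to copy. The first
+	 * page may start at a sub-page offset within iova's page.
+	 */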
+	l = n;
+	for (i = 0; i < pa.pa_nr; i++) {
+		from = pa.pa_pfn[i] << PAGE_SHIFT;
+		m = PAGE_SIZE;
+		if (i == 0) {
+			from += iova & (PAGE_SIZE - 1);
+			m -= iova & (PAGE_SIZE - 1);
+		}
+
+		m = min(l, m);
+		memcpy(to + (n - l), (void *)from, m);
+
+		l -= m;
+		if (l == 0)
+			break;
+	}
+
+	pfn_array_unpin_free(&pa, mdev);
+
+	return l;
+}
+
+static long copy_ccw_from_iova(struct channel_program *cp,
+			       struct ccw1 *to, u64 iova,
+			       unsigned long len)
+{
+	struct ccw0 ccw0;
+	struct ccw1 *pccw1;
+	int ret;
+	int i;
+
+	ret = copy_from_iova(cp->mdev, to, iova, len * sizeof(struct ccw1));
+	if (ret)
+		return ret;
+
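+	/*
+	 * A format-0 ORB means the guest wrote format-0 (struct ccw0) CCWs;
+	 * rewrite them in place as format-1 (struct ccw1). For a TIC only
+	 * the command code and data address are meaningful, so the other
+	 * fields are cleared.
+	 */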
+	if (!cp->orb.cmd.fmt) {
+		pccw1 = to;
+		for (i = 0; i < len; i++) {
+			ccw0 = *(struct ccw0 *)pccw1;
+			if ((pccw1->cmd_code & 0x0f) == CCW_CMD_TIC) {
+				pccw1->cmd_code = CCW_CMD_TIC;
+				pccw1->flags = 0;
+				pccw1->count = 0;
+			} else {
+				pccw1->cmd_code = ccw0.cmd_code;
+				pccw1->flags = ccw0.flags;
+				pccw1->count = ccw0.count;
+			}
+			pccw1->cda = ccw0.cda;
+			pccw1++;
+		}
+	}
+
+	return ret;
+}
+
+/*
+ * Helpers to operate ccwchain.
+ */
+#define ccw_is_test(_ccw) (((_ccw)->cmd_code & 0x0F) == 0)
+
+#define ccw_is_noop(_ccw) ((_ccw)->cmd_code == CCW_CMD_NOOP)
+
+#define ccw_is_tic(_ccw) ((_ccw)->cmd_code == CCW_CMD_TIC)
+
+#define ccw_is_idal(_ccw) ((_ccw)->flags & CCW_FLAG_IDA)
+
+#define ccw_is_chain(_ccw) ((_ccw)->flags & (CCW_FLAG_CC | CCW_FLAG_DC))
+
+static struct ccwchain *ccwchain_alloc(struct channel_program *cp, int len)
+{
+	struct ccwchain *chain;
+	void *data;
+	size_t size;
+
+	/* Make ccw address aligned to 8. */
+	size = ((sizeof(*chain) + 7L) & -8L) +
+		sizeof(*chain->ch_ccw) * len +
+		sizeof(*chain->ch_pat) * len;
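+	/*
+	 * Single allocation, laid out as:
+	 * | struct ccwchain (padded to 8) | ch_ccw[len] | ch_pat[len] |
+	 */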
+	chain = kzalloc(size, GFP_DMA | GFP_KERNEL);
+	if (!chain)
+		return NULL;
+
+	data = (u8 *)chain + ((sizeof(*chain) + 7L) & -8L);
+	chain->ch_ccw = (struct ccw1 *)data;
+
+	data = (u8 *)(chain->ch_ccw) + sizeof(*chain->ch_ccw) * len;
+	chain->ch_pat = (struct pfn_array_table *)data;
+
+	chain->ch_len = len;
+
+	list_add_tail(&chain->next, &cp->ccwchain_list);
+
+	return chain;
+}
+
+static void ccwchain_free(struct ccwchain *chain)
+{
+	list_del(&chain->next);
+	kfree(chain);
+}
+
+/* Free resource for a ccw that allocated memory for its cda. */
+static void ccwchain_cda_free(struct ccwchain *chain, int idx)
+{
+	struct ccw1 *ccw = chain->ch_ccw + idx;
+
+	if (ccw_is_test(ccw) || ccw_is_noop(ccw) || ccw_is_tic(ccw))
+		return;
+	if (!ccw->count)
+		return;
+
+	kfree((void *)(u64)ccw->cda);
+}
+
+/* Unpin the pages then free the memory resources. */
+static void cp_unpin_free(struct channel_program *cp)
+{
+	struct ccwchain *chain, *temp;
+	int i;
+
+	list_for_each_entry_safe(chain, temp, &cp->ccwchain_list, next) {
+		for (i = 0; i < chain->ch_len; i++) {
+			pfn_array_table_unpin_free(chain->ch_pat + i,
+						   cp->mdev);
+			ccwchain_cda_free(chain, i);
+		}
+		ccwchain_free(chain);
+	}
+}
+
+/**
+ * ccwchain_calc_length - calculate the length of the ccw chain.
+ * @iova: guest physical address of the target ccw chain
+ * @cp: channel_program on which to perform the operation
+ *
+ * This is the chain length without following any TICs;
+ * a new round is needed for each TIC target.
+ *
+ * The program is also validated for the absence of indirect data
+ * addressing scenarios that are not yet supported.
+ *
+ * Returns: the length of the ccw chain or -errno.
+ */
+static int ccwchain_calc_length(u64 iova, struct channel_program *cp)
+{
+	struct ccw1 *ccw, *p;
+	int cnt;
+
+	/*
+	 * Copy current chain from guest to host kernel.
+	 * Currently the chain length is limited to CCWCHAIN_LEN_MAX (256).
+	 * So copying 2K is enough (safe).
+	 */
+	p = ccw = kcalloc(CCWCHAIN_LEN_MAX, sizeof(*ccw), GFP_KERNEL);
+	if (!ccw)
+		return -ENOMEM;
+
+	cnt = copy_ccw_from_iova(cp, ccw, iova, CCWCHAIN_LEN_MAX);
+	if (cnt) {
+		kfree(ccw);
+		return cnt;
+	}
+
+	cnt = 0;
+	do {
+		cnt++;
+
+		/*
+		 * As we don't want to fail direct addressing even if the
+		 * orb specified one of the unsupported formats, we defer
+		 * checking for IDAWs in unsupported formats to here.
+		 */
+		if ((!cp->orb.cmd.c64 || cp->orb.cmd.i2k) && ccw_is_idal(ccw)) {
+			kfree(p);
+			return -EOPNOTSUPP;
+		}
+
+		if ((!ccw_is_chain(ccw)) && (!ccw_is_tic(ccw)))
+			break;
+
+		ccw++;
+	} while (cnt < CCWCHAIN_LEN_MAX + 1);
+
+	if (cnt == CCWCHAIN_LEN_MAX + 1)
+		cnt = -EINVAL;
+
+	kfree(p);
+	return cnt;
+}
+
+static int tic_target_chain_exists(struct ccw1 *tic, struct channel_program *cp)
+{
+	struct ccwchain *chain;
+	u32 ccw_head, ccw_tail;
+
+	list_for_each_entry(chain, &cp->ccwchain_list, next) {
+		ccw_head = chain->ch_iova;
+		ccw_tail = ccw_head + (chain->ch_len - 1) * sizeof(struct ccw1);
+
+		if ((ccw_head <= tic->cda) && (tic->cda <= ccw_tail))
+			return 1;
+	}
+
+	return 0;
+}
+
+static int ccwchain_loop_tic(struct ccwchain *chain,
+			     struct channel_program *cp);
+
+static int ccwchain_handle_tic(struct ccw1 *tic, struct channel_program *cp)
+{
+	struct ccwchain *chain;
+	int len, ret;
+
+	/* May transfer to an existing chain. */
+	if (tic_target_chain_exists(tic, cp))
+		return 0;
+
+	/* Get chain length. */
+	len = ccwchain_calc_length(tic->cda, cp);
+	if (len < 0)
+		return len;
+
+	/* Need to allocate a new chain for this one. */
+	chain = ccwchain_alloc(cp, len);
+	if (!chain)
+		return -ENOMEM;
+	chain->ch_iova = tic->cda;
+
+	/* Copy the new chain from user. */
+	ret = copy_ccw_from_iova(cp, chain->ch_ccw, tic->cda, len);
+	if (ret) {
+		ccwchain_free(chain);
+		return ret;
+	}
+
+	/* Loop for tics on this new chain. */
+	return ccwchain_loop_tic(chain, cp);
+}
+
+/* Loop for TICs. */
+static int ccwchain_loop_tic(struct ccwchain *chain, struct channel_program *cp)
+{
+	struct ccw1 *tic;
+	int i, ret;
+
+	for (i = 0; i < chain->ch_len; i++) {
+		tic = chain->ch_ccw + i;
+
+		if (!ccw_is_tic(tic))
+			continue;
+
+		ret = ccwchain_handle_tic(tic, cp);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+
+static int ccwchain_fetch_tic(struct ccwchain *chain,
+			      int idx,
+			      struct channel_program *cp)
+{
+	struct ccw1 *ccw = chain->ch_ccw + idx;
+	struct ccwchain *iter;
+	u32 ccw_head, ccw_tail;
+
+	list_for_each_entry(iter, &cp->ccwchain_list, next) {
+		ccw_head = iter->ch_iova;
+		ccw_tail = ccw_head + (iter->ch_len - 1) * sizeof(struct ccw1);
+
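+		/*
+		 * If the TIC targets a chain that was already copied, point
+		 * its cda at the host copy: same offset from the chain head.
+		 */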
+		if ((ccw_head <= ccw->cda) && (ccw->cda <= ccw_tail)) {
+			ccw->cda = (__u32) (addr_t) (((char *)iter->ch_ccw) +
+						     (ccw->cda - ccw_head));
+			return 0;
+		}
+	}
+
+	return -EFAULT;
+}
+
+static int ccwchain_fetch_direct(struct ccwchain *chain,
+				 int idx,
+				 struct channel_program *cp)
+{
+	struct ccw1 *ccw;
+	struct pfn_array_table *pat;
+	unsigned long *idaws;
+	int ret;
+
+	ccw = chain->ch_ccw + idx;
+
+	if (!ccw->count) {
+		/*
+		 * We just want the translation result of any direct ccw
+		 * to be an IDA ccw, so let's add the IDA flag for it,
+		 * even though the flag will be ignored by the firmware.
+		 */
+		ccw->flags |= CCW_FLAG_IDA;
+		return 0;
+	}
+
+	/*
+	 * Pin data page(s) in memory.
+	 * The number of pages is actually the count of the idaws that will be
+	 * needed when translating a direct ccw to an idal ccw.
+	 */
+	pat = chain->ch_pat + idx;
+	ret = pfn_array_table_init(pat, 1);
+	if (ret)
+		goto out_init;
+
+	ret = pfn_array_alloc_pin(pat->pat_pa, cp->mdev, ccw->cda, ccw->count);
+	if (ret < 0)
+		goto out_unpin;
+
+	/* Translate this direct ccw to a idal ccw. */
+	idaws = kcalloc(ret, sizeof(*idaws), GFP_DMA | GFP_KERNEL);
+	if (!idaws) {
+		ret = -ENOMEM;
+		goto out_unpin;
+	}
+	ccw->cda = (__u32) virt_to_phys(idaws);
+	ccw->flags |= CCW_FLAG_IDA;
+
+	pfn_array_table_idal_create_words(pat, idaws);
+
+	return 0;
+
+out_unpin:
+	pfn_array_table_unpin_free(pat, cp->mdev);
+out_init:
+	ccw->cda = 0;
+	return ret;
+}
+
+static int ccwchain_fetch_idal(struct ccwchain *chain,
+			       int idx,
+			       struct channel_program *cp)
+{
+	struct ccw1 *ccw;
+	struct pfn_array_table *pat;
+	unsigned long *idaws;
+	u64 idaw_iova;
+	unsigned int idaw_nr, idaw_len;
+	int i, ret;
+
+	ccw = chain->ch_ccw + idx;
+
+	if (!ccw->count)
+		return 0;
+
+	/* Calculate size of idaws. */
+	ret = copy_from_iova(cp->mdev, &idaw_iova, ccw->cda, sizeof(idaw_iova));
+	if (ret)
+		return ret;
+	idaw_nr = idal_nr_words((void *)(idaw_iova), ccw->count);
+	idaw_len = idaw_nr * sizeof(*idaws);
+
+	/* Pin data page(s) in memory. */
+	pat = chain->ch_pat + idx;
+	ret = pfn_array_table_init(pat, idaw_nr);
+	if (ret)
+		goto out_init;
+
+	/* Translate idal ccw to use new allocated idaws. */
+	idaws = kzalloc(idaw_len, GFP_DMA | GFP_KERNEL);
+	if (!idaws) {
+		ret = -ENOMEM;
+		goto out_unpin;
+	}
+
+	ret = copy_from_iova(cp->mdev, idaws, ccw->cda, idaw_len);
+	if (ret)
+		goto out_free_idaws;
+
+	ccw->cda = virt_to_phys(idaws);
+
+	for (i = 0; i < idaw_nr; i++) {
+		idaw_iova = *(idaws + i);
+
+		ret = pfn_array_alloc_pin(pat->pat_pa + i, cp->mdev,
+					  idaw_iova, 1);
+		if (ret < 0)
+			goto out_free_idaws;
+	}
+
+	pfn_array_table_idal_create_words(pat, idaws);
+
+	return 0;
+
+out_free_idaws:
+	kfree(idaws);
+out_unpin:
+	pfn_array_table_unpin_free(pat, cp->mdev);
+out_init:
+	ccw->cda = 0;
+	return ret;
+}
+
+/*
+ * Fetch one ccw.
+ * To reduce memory copy, we'll pin the cda page in memory,
+ * and to get rid of the cda 2G limitation of ccw1, we'll translate
+ * direct ccws to idal ccws.
+ */
+static int ccwchain_fetch_one(struct ccwchain *chain,
+			      int idx,
+			      struct channel_program *cp)
+{
+	struct ccw1 *ccw = chain->ch_ccw + idx;
+
+	if (ccw_is_test(ccw) || ccw_is_noop(ccw))
+		return 0;
+
+	if (ccw_is_tic(ccw))
+		return ccwchain_fetch_tic(chain, idx, cp);
+
+	if (ccw_is_idal(ccw))
+		return ccwchain_fetch_idal(chain, idx, cp);
+
+	return ccwchain_fetch_direct(chain, idx, cp);
+}
+
+/**
+ * cp_init() - allocate ccwchains for a channel program.
+ * @cp: channel_program on which to perform the operation
+ * @mdev: the mediated device to perform pin/unpin operations
+ * @orb: control block for the channel program from the guest
+ *
+ * This creates one or more ccwchain(s), and copies the raw data of
+ * the target channel program from @orb->cmd.iova to the new ccwchain(s).
+ *
+ * Limitations:
+ * 1. Supports only prefetch enabled mode.
+ * 2. Supports idal(c64) ccw chaining.
+ * 3. Supports 4k idaw.
+ *
+ * Returns:
+ *   %0 on success and a negative error value on failure.
+ */
+int cp_init(struct channel_program *cp, struct device *mdev, union orb *orb)
+{
+	u64 iova = orb->cmd.cpa;
+	struct ccwchain *chain;
+	int len, ret;
+
+	/*
+	 * XXX:
+	 * Only prefetch enabled mode is supported for now.
+	 */
+	if (!orb->cmd.pfch)
+		return -EOPNOTSUPP;
+
+	INIT_LIST_HEAD(&cp->ccwchain_list);
+	memcpy(&cp->orb, orb, sizeof(*orb));
+	cp->mdev = mdev;
+
+	/* Get chain length. */
+	len = ccwchain_calc_length(iova, cp);
+	if (len < 0)
+		return len;
+
+	/* Alloc mem for the head chain. */
+	chain = ccwchain_alloc(cp, len);
+	if (!chain)
+		return -ENOMEM;
+	chain->ch_iova = iova;
+
+	/* Copy the head chain from guest. */
+	ret = copy_ccw_from_iova(cp, chain->ch_ccw, iova, len);
+	if (ret) {
+		ccwchain_free(chain);
+		return ret;
+	}
+
+	/* Now loop for its TICs. */
+	ret = ccwchain_loop_tic(chain, cp);
+	if (ret)
+		cp_unpin_free(cp);
+	/* It is safe to force c64 on: if it was not set but idals were
+	 * used, ccwchain_calc_length() has already returned an error.
+	 */
+	cp->orb.cmd.c64 = 1;
+
+	return ret;
+}
+
+/**
+ * cp_free() - free resources for channel program.
+ * @cp: channel_program on which to perform the operation
+ *
+ * This unpins the memory pages and frees the memory space occupied by
+ * @cp, which must have been returned by a previous call to cp_init().
+ * Otherwise, undefined behavior occurs.
+ */
+void cp_free(struct channel_program *cp)
+{
+	cp_unpin_free(cp);
+}
+
+/**
+ * cp_prefetch() - translate a guest physical address channel program to
+ *                 a real-device runnable channel program.
+ * @cp: channel_program on which to perform the operation
+ *
+ * This function translates the guest-physical-address channel program
+ * and stores the result to ccwchain list. @cp must have been
+ * initialized by a previous call with cp_init(). Otherwise, undefined
+ * behavior occurs.
+ * For each chain composing the channel program:
+ * - On entry ch_len holds the count of CCWs to be translated.
+ * - On exit ch_len is adjusted to the count of successfully translated CCWs.
+ * This allows cp_free to find in ch_len the count of CCWs to free in a chain.
+ *
+ * The S/390 CCW Translation APIs (prefixed by 'cp_') are introduced
+ * as helpers to do ccw chain translation inside the kernel. Basically
+ * they accept a channel program issued by a virtual machine, and
+ * translate the channel program to a real-device runnable channel
+ * program.
+ *
+ * These APIs will copy the ccws into kernel-space buffers, and update
+ * the guest physical addresses with their corresponding host physical
+ * addresses.  Then channel I/O device drivers could issue the
+ * translated channel program to real devices to perform an I/O
+ * operation.
+ *
+ * These interfaces are designed to support translation only for
+ * channel programs, which are generated and formatted by a
+ * guest. Thus this will make it possible for things like VFIO to
+ * leverage the interfaces to passthrough a channel I/O mediated
+ * device in QEMU.
+ *
+ * We support direct ccw chaining by translating them to idal ccws.
+ *
+ * Returns:
+ *   %0 on success and a negative error value on failure.
+ */
+int cp_prefetch(struct channel_program *cp)
+{
+	struct ccwchain *chain;
+	int len, idx, ret;
+
+	list_for_each_entry(chain, &cp->ccwchain_list, next) {
+		len = chain->ch_len;
+		for (idx = 0; idx < len; idx++) {
+			ret = ccwchain_fetch_one(chain, idx, cp);
+			if (ret)
+				goto out_err;
+		}
+	}
+
+	return 0;
+out_err:
+	/* Only cleanup the chain elements that were actually translated. */
+	chain->ch_len = idx;
+	list_for_each_entry_continue(chain, &cp->ccwchain_list, next) {
+		chain->ch_len = 0;
+	}
+	return ret;
+}
+
+/**
+ * cp_get_orb() - get the orb of the channel program
+ * @cp: channel_program on which to perform the operation
+ * @intparm: new intparm for the returned orb
+ * @lpm: candidate value of the logical-path mask for the returned orb
+ *
+ * This function returns the address of the updated orb of the channel
+ * program. Channel I/O device drivers could use this orb to issue a
+ * ssch.
+ */
+union orb *cp_get_orb(struct channel_program *cp, u32 intparm, u8 lpm)
+{
+	union orb *orb;
+	struct ccwchain *chain;
+	struct ccw1 *cpa;
+
+	orb = &cp->orb;
+
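+	/* The translated program uses format-1 CCWs and the default key. */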
+	orb->cmd.intparm = intparm;
+	orb->cmd.fmt = 1;
+	orb->cmd.key = PAGE_DEFAULT_KEY >> 4;
+
+	if (orb->cmd.lpm == 0)
+		orb->cmd.lpm = lpm;
+
+	chain = list_first_entry(&cp->ccwchain_list, struct ccwchain, next);
+	cpa = chain->ch_ccw;
+	orb->cmd.cpa = (__u32) __pa(cpa);
+
+	return orb;
+}
+
+/**
+ * cp_update_scsw() - update scsw for a channel program.
+ * @cp: channel_program on which to perform the operation
+ * @scsw: I/O results of the channel program and also the target to be
+ *        updated
+ *
 * @scsw contains the I/O results of the channel program pointed
 * to by @cp. However, what @scsw->cpa stores is a host physical
 * address, which is meaningless to the guest that is waiting for
 * the I/O results.
 *
 * This function updates @scsw->cpa to its corresponding guest physical
+ * address.
+ */
+void cp_update_scsw(struct channel_program *cp, union scsw *scsw)
+{
+	struct ccwchain *chain;
+	u32 cpa = scsw->cmd.cpa;
+	u32 ccw_head, ccw_tail;
+
+	/*
+	 * LATER:
+	 * For now, only update the cmd.cpa part. We may need to deal with
+	 * other portions of the schib as well, even if we don't return them
+	 * in the ioctl directly. Path status changes etc.
+	 */
+	list_for_each_entry(chain, &cp->ccwchain_list, next) {
+		ccw_head = (u32)(u64)chain->ch_ccw;
+		ccw_tail = (u32)(u64)(chain->ch_ccw + chain->ch_len - 1);
+
+		if ((ccw_head <= cpa) && (cpa <= ccw_tail)) {
+			/*
+			 * (cpa - ccw_head) is the offset value of the host
+			 * physical ccw to its chain head.
+			 * Adding this value to the guest physical ccw chain
+			 * head gets us the guest cpa.
+			 */
+			cpa = chain->ch_iova + (cpa - ccw_head);
+			break;
+		}
+	}
+
+	scsw->cmd.cpa = cpa;
+}
+
+/**
+ * cp_iova_pinned() - check if an iova is pinned for a ccw chain.
+ * @cp: channel_program on which to perform the operation
+ * @iova: the iova to check
+ *
+ * If the @iova is currently pinned for the ccw chain, return true;
+ * else return false.
+ */
+bool cp_iova_pinned(struct channel_program *cp, u64 iova)
+{
+	struct ccwchain *chain;
+	int i;
+
+	list_for_each_entry(chain, &cp->ccwchain_list, next) {
+		for (i = 0; i < chain->ch_len; i++)
+			if (pfn_array_table_iova_pinned(chain->ch_pat + i,
+							iova))
+				return true;
+	}
+
+	return false;
+}
diff --git a/drivers/s390/cio/vfio_ccw_cp.h b/drivers/s390/cio/vfio_ccw_cp.h
new file mode 100644
index 0000000..a4b74fb
--- /dev/null
+++ b/drivers/s390/cio/vfio_ccw_cp.h
@@ -0,0 +1,43 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * channel program interfaces
+ *
+ * Copyright IBM Corp. 2017
+ *
+ * Author(s): Dong Jia Shi <bjsdjshi@linux.vnet.ibm.com>
+ *            Xiao Feng Ren <renxiaof@linux.vnet.ibm.com>
+ */
+
+#ifndef _VFIO_CCW_CP_H_
+#define _VFIO_CCW_CP_H_
+
+#include <asm/cio.h>
+#include <asm/scsw.h>
+
+#include "orb.h"
+
+/**
+ * struct channel_program - manage information for channel program
+ * @ccwchain_list: list head of ccwchains
+ * @orb: orb for the currently processed ssch request
+ * @mdev: the mediated device to perform page pinning/unpinning
+ *
+ * @ccwchain_list is the head of a ccwchain list that contains the
+ * translated result of the guest channel program pointed to by
+ * the iova parameter passed to cp_init().
+ */
+struct channel_program {
+	struct list_head ccwchain_list;
+	union orb orb;
+	struct device *mdev;
+};
+
+extern int cp_init(struct channel_program *cp, struct device *mdev,
+		   union orb *orb);
+extern void cp_free(struct channel_program *cp);
+extern int cp_prefetch(struct channel_program *cp);
+extern union orb *cp_get_orb(struct channel_program *cp, u32 intparm, u8 lpm);
+extern void cp_update_scsw(struct channel_program *cp, union scsw *scsw);
+extern bool cp_iova_pinned(struct channel_program *cp, u64 iova);
+
+#endif
diff --git a/drivers/s390/cio/vfio_ccw_drv.c b/drivers/s390/cio/vfio_ccw_drv.c
new file mode 100644
index 0000000..f47d16b
--- /dev/null
+++ b/drivers/s390/cio/vfio_ccw_drv.c
@@ -0,0 +1,276 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * VFIO based Physical Subchannel device driver
+ *
+ * Copyright IBM Corp. 2017
+ *
+ * Author(s): Dong Jia Shi <bjsdjshi@linux.vnet.ibm.com>
+ *            Xiao Feng Ren <renxiaof@linux.vnet.ibm.com>
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/slab.h>
+#include <linux/uuid.h>
+#include <linux/mdev.h>
+
+#include <asm/isc.h>
+
+#include "ioasm.h"
+#include "css.h"
+#include "vfio_ccw_private.h"
+
+struct workqueue_struct *vfio_ccw_work_q;
+struct kmem_cache *vfio_ccw_io_region;
+
+/*
+ * Helpers
+ */
+int vfio_ccw_sch_quiesce(struct subchannel *sch)
+{
+	struct vfio_ccw_private *private = dev_get_drvdata(&sch->dev);
+	DECLARE_COMPLETION_ONSTACK(completion);
+	int iretry, ret = 0;
+
+	spin_lock_irq(sch->lock);
+	if (!sch->schib.pmcw.ena)
+		goto out_unlock;
+	ret = cio_disable_subchannel(sch);
+	if (ret != -EBUSY)
+		goto out_unlock;
+
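+	/*
+	 * cio_cancel_halt_clear() escalates from cancel to halt to clear
+	 * and returns -EBUSY while the operation is still in progress;
+	 * iretry bounds the number of retries.
+	 */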
+	do {
+		iretry = 255;
+
+		ret = cio_cancel_halt_clear(sch, &iretry);
+		while (ret == -EBUSY) {
+			/*
+			 * Flush all I/O and wait for
+			 * cancel/halt/clear completion.
+			 */
+			private->completion = &completion;
+			spin_unlock_irq(sch->lock);
+
+			wait_for_completion_timeout(&completion, 3*HZ);
+
+			spin_lock_irq(sch->lock);
+			private->completion = NULL;
+			flush_workqueue(vfio_ccw_work_q);
+			ret = cio_cancel_halt_clear(sch, &iretry);
+		}
+
+		ret = cio_disable_subchannel(sch);
+	} while (ret == -EBUSY);
+out_unlock:
+	private->state = VFIO_CCW_STATE_NOT_OPER;
+	spin_unlock_irq(sch->lock);
+	return ret;
+}
+
+static void vfio_ccw_sch_io_todo(struct work_struct *work)
+{
+	struct vfio_ccw_private *private;
+	struct irb *irb;
+
+	private = container_of(work, struct vfio_ccw_private, io_work);
+	irb = &private->irb;
+
+	if (scsw_is_solicited(&irb->scsw)) {
+		cp_update_scsw(&private->cp, &irb->scsw);
+		cp_free(&private->cp);
+	}
+	memcpy(private->io_region->irb_area, irb, sizeof(*irb));
+
+	if (private->io_trigger)
+		eventfd_signal(private->io_trigger, 1);
+
+	if (private->mdev)
+		private->state = VFIO_CCW_STATE_IDLE;
+}
+
+/*
+ * Css driver callbacks
+ */
+static void vfio_ccw_sch_irq(struct subchannel *sch)
+{
+	struct vfio_ccw_private *private = dev_get_drvdata(&sch->dev);
+
+	inc_irq_stat(IRQIO_CIO);
+	vfio_ccw_fsm_event(private, VFIO_CCW_EVENT_INTERRUPT);
+}
+
+static int vfio_ccw_sch_probe(struct subchannel *sch)
+{
+	struct pmcw *pmcw = &sch->schib.pmcw;
+	struct vfio_ccw_private *private;
+	int ret;
+
+	if (pmcw->qf) {
+		dev_warn(&sch->dev, "vfio: ccw: does not support QDIO: %s\n",
+			 dev_name(&sch->dev));
+		return -ENODEV;
+	}
+
+	private = kzalloc(sizeof(*private), GFP_KERNEL | GFP_DMA);
+	if (!private)
+		return -ENOMEM;
+
+	private->io_region = kmem_cache_zalloc(vfio_ccw_io_region,
+					       GFP_KERNEL | GFP_DMA);
+	if (!private->io_region) {
+		kfree(private);
+		return -ENOMEM;
+	}
+
+	private->sch = sch;
+	dev_set_drvdata(&sch->dev, private);
+
+	spin_lock_irq(sch->lock);
+	private->state = VFIO_CCW_STATE_NOT_OPER;
+	sch->isc = VFIO_CCW_ISC;
+	ret = cio_enable_subchannel(sch, (u32)(unsigned long)sch);
+	spin_unlock_irq(sch->lock);
+	if (ret)
+		goto out_free;
+
+	ret = vfio_ccw_mdev_reg(sch);
+	if (ret)
+		goto out_disable;
+
+	INIT_WORK(&private->io_work, vfio_ccw_sch_io_todo);
+	atomic_set(&private->avail, 1);
+	private->state = VFIO_CCW_STATE_STANDBY;
+
+	return 0;
+
+out_disable:
+	cio_disable_subchannel(sch);
+out_free:
+	dev_set_drvdata(&sch->dev, NULL);
+	kmem_cache_free(vfio_ccw_io_region, private->io_region);
+	kfree(private);
+	return ret;
+}
+
+static int vfio_ccw_sch_remove(struct subchannel *sch)
+{
+	struct vfio_ccw_private *private = dev_get_drvdata(&sch->dev);
+
+	vfio_ccw_sch_quiesce(sch);
+
+	vfio_ccw_mdev_unreg(sch);
+
+	dev_set_drvdata(&sch->dev, NULL);
+
+	kmem_cache_free(vfio_ccw_io_region, private->io_region);
+	kfree(private);
+
+	return 0;
+}
+
+static void vfio_ccw_sch_shutdown(struct subchannel *sch)
+{
+	vfio_ccw_sch_quiesce(sch);
+}
+
+/**
+ * vfio_ccw_sch_event - process subchannel event
+ * @sch: subchannel
+ * @process: non-zero if function is called in process context
+ *
+ * An unspecified event occurred for this subchannel. Adjust data according
+ * to the current operational state of the subchannel. Return zero when the
+ * event has been handled sufficiently or -EAGAIN when this function should
+ * be called again in process context.
+ */
+static int vfio_ccw_sch_event(struct subchannel *sch, int process)
+{
+	struct vfio_ccw_private *private = dev_get_drvdata(&sch->dev);
+	unsigned long flags;
+	int rc = -EAGAIN;
+
+	spin_lock_irqsave(sch->lock, flags);
+	if (!device_is_registered(&sch->dev))
+		goto out_unlock;
+
+	if (work_pending(&sch->todo_work))
+		goto out_unlock;
+
+	if (cio_update_schib(sch)) {
+		vfio_ccw_fsm_event(private, VFIO_CCW_EVENT_NOT_OPER);
+		rc = 0;
+		goto out_unlock;
+	}
+
+	private = dev_get_drvdata(&sch->dev);
+	if (private->state == VFIO_CCW_STATE_NOT_OPER) {
+		private->state = private->mdev ? VFIO_CCW_STATE_IDLE :
+				 VFIO_CCW_STATE_STANDBY;
+	}
+	rc = 0;
+
+out_unlock:
+	spin_unlock_irqrestore(sch->lock, flags);
+
+	return rc;
+}
+
+static struct css_device_id vfio_ccw_sch_ids[] = {
+	{ .match_flags = 0x1, .type = SUBCHANNEL_TYPE_IO, },
+	{ /* end of list */ },
+};
+MODULE_DEVICE_TABLE(css, vfio_ccw_sch_ids);
+
+static struct css_driver vfio_ccw_sch_driver = {
+	.drv = {
+		.name = "vfio_ccw",
+		.owner = THIS_MODULE,
+	},
+	.subchannel_type = vfio_ccw_sch_ids,
+	.irq = vfio_ccw_sch_irq,
+	.probe = vfio_ccw_sch_probe,
+	.remove = vfio_ccw_sch_remove,
+	.shutdown = vfio_ccw_sch_shutdown,
+	.sch_event = vfio_ccw_sch_event,
+};
+
+static int __init vfio_ccw_sch_init(void)
+{
+	int ret;
+
+	vfio_ccw_work_q = create_singlethread_workqueue("vfio-ccw");
+	if (!vfio_ccw_work_q)
+		return -ENOMEM;
+
+	vfio_ccw_io_region = kmem_cache_create_usercopy("vfio_ccw_io_region",
+					sizeof(struct ccw_io_region), 0,
+					SLAB_ACCOUNT, 0,
+					sizeof(struct ccw_io_region), NULL);
+	if (!vfio_ccw_io_region) {
+		destroy_workqueue(vfio_ccw_work_q);
+		return -ENOMEM;
+	}
+
+	isc_register(VFIO_CCW_ISC);
+	ret = css_driver_register(&vfio_ccw_sch_driver);
+	if (ret) {
+		isc_unregister(VFIO_CCW_ISC);
+		kmem_cache_destroy(vfio_ccw_io_region);
+		destroy_workqueue(vfio_ccw_work_q);
+	}
+
+	return ret;
+}
+
+static void __exit vfio_ccw_sch_exit(void)
+{
+	css_driver_unregister(&vfio_ccw_sch_driver);
+	isc_unregister(VFIO_CCW_ISC);
+	kmem_cache_destroy(vfio_ccw_io_region);
+	destroy_workqueue(vfio_ccw_work_q);
+}
+module_init(vfio_ccw_sch_init);
+module_exit(vfio_ccw_sch_exit);
+
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/s390/cio/vfio_ccw_fsm.c b/drivers/s390/cio/vfio_ccw_fsm.c
new file mode 100644
index 0000000..f94aa01
--- /dev/null
+++ b/drivers/s390/cio/vfio_ccw_fsm.c
@@ -0,0 +1,229 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Finite state machine for vfio-ccw device handling
+ *
+ * Copyright IBM Corp. 2017
+ *
+ * Author(s): Dong Jia Shi <bjsdjshi@linux.vnet.ibm.com>
+ */
+
+#include <linux/vfio.h>
+#include <linux/mdev.h>
+
+#include "ioasm.h"
+#include "vfio_ccw_private.h"
+
+#define CREATE_TRACE_POINTS
+#include "vfio_ccw_trace.h"
+
+static int fsm_io_helper(struct vfio_ccw_private *private)
+{
+	struct subchannel *sch;
+	union orb *orb;
+	int ccode;
+	__u8 lpm;
+	unsigned long flags;
+	int ret;
+
+	sch = private->sch;
+
+	spin_lock_irqsave(sch->lock, flags);
+	private->state = VFIO_CCW_STATE_BUSY;
+
+	orb = cp_get_orb(&private->cp, (u32)(addr_t)sch, sch->lpm);
+
+	/* Issue "Start Subchannel" */
+	ccode = ssch(sch->schid, orb);
+
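+	/* Condition code: 0 started, 1 status pending, 2 busy, 3 not oper. */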
+	switch (ccode) {
+	case 0:
+		/*
+		 * Initialize device status information
+		 */
+		sch->schib.scsw.cmd.actl |= SCSW_ACTL_START_PEND;
+		ret = 0;
+		break;
+	case 1:		/* Status pending */
+	case 2:		/* Busy */
+		ret = -EBUSY;
+		break;
+	case 3:		/* Device/path not operational */
+	{
+		lpm = orb->cmd.lpm;
+		if (lpm != 0)
+			sch->lpm &= ~lpm;
+		else
+			sch->lpm = 0;
+
+		if (cio_update_schib(sch))
+			ret = -ENODEV;
+		else
+			ret = sch->lpm ? -EACCES : -ENODEV;
+		break;
+	}
+	default:
+		ret = ccode;
+	}
+	spin_unlock_irqrestore(sch->lock, flags);
+	return ret;
+}
+
+static void fsm_notoper(struct vfio_ccw_private *private,
+			enum vfio_ccw_event event)
+{
+	struct subchannel *sch = private->sch;
+
+	/*
+	 * TODO:
+	 * Probably we should send the machine check to the guest.
+	 */
+	css_sched_sch_todo(sch, SCH_TODO_UNREG);
+	private->state = VFIO_CCW_STATE_NOT_OPER;
+}
+
+/*
+ * No operation action.
+ */
+static void fsm_nop(struct vfio_ccw_private *private,
+		    enum vfio_ccw_event event)
+{
+}
+
+static void fsm_io_error(struct vfio_ccw_private *private,
+			 enum vfio_ccw_event event)
+{
+	pr_err("vfio-ccw: FSM: I/O request from state:%d\n", private->state);
+	private->io_region->ret_code = -EIO;
+}
+
+static void fsm_io_busy(struct vfio_ccw_private *private,
+			enum vfio_ccw_event event)
+{
+	private->io_region->ret_code = -EBUSY;
+}
+
+static void fsm_disabled_irq(struct vfio_ccw_private *private,
+			     enum vfio_ccw_event event)
+{
+	struct subchannel *sch = private->sch;
+
+	/*
+	 * An interrupt in a disabled state means a previous disable was not
+	 * successful - should not happen, but we try to disable again.
+	 */
+	cio_disable_subchannel(sch);
+}
+
+inline struct subchannel_id get_schid(struct vfio_ccw_private *p)
+{
+	return p->sch->schid;
+}
+
+/*
+ * Deal with the ccw command request from userspace.
+ */
+static void fsm_io_request(struct vfio_ccw_private *private,
+			   enum vfio_ccw_event event)
+{
+	union orb *orb;
+	union scsw *scsw = &private->scsw;
+	struct ccw_io_region *io_region = private->io_region;
+	struct mdev_device *mdev = private->mdev;
+	char *errstr = "request";
+
+	private->state = VFIO_CCW_STATE_BOXED;
+
+	memcpy(scsw, io_region->scsw_area, sizeof(*scsw));
+
+	if (scsw->cmd.fctl & SCSW_FCTL_START_FUNC) {
+		orb = (union orb *)io_region->orb_area;
+
+		/* Don't try to build a cp if transport mode is specified. */
+		if (orb->tm.b) {
+			io_region->ret_code = -EOPNOTSUPP;
+			errstr = "transport mode";
+			goto err_out;
+		}
+		io_region->ret_code = cp_init(&private->cp, mdev_dev(mdev),
+					      orb);
+		if (io_region->ret_code) {
+			errstr = "cp init";
+			goto err_out;
+		}
+
+		io_region->ret_code = cp_prefetch(&private->cp);
+		if (io_region->ret_code) {
+			errstr = "cp prefetch";
+			cp_free(&private->cp);
+			goto err_out;
+		}
+
+		/* Start channel program and wait for I/O interrupt. */
+		io_region->ret_code = fsm_io_helper(private);
+		if (io_region->ret_code) {
+			errstr = "cp fsm_io_helper";
+			cp_free(&private->cp);
+			goto err_out;
+		}
+		return;
+	} else if (scsw->cmd.fctl & SCSW_FCTL_HALT_FUNC) {
+		/* XXX: Handle halt. */
+		io_region->ret_code = -EOPNOTSUPP;
+		goto err_out;
+	} else if (scsw->cmd.fctl & SCSW_FCTL_CLEAR_FUNC) {
+		/* XXX: Handle clear. */
+		io_region->ret_code = -EOPNOTSUPP;
+		goto err_out;
+	}
+
+err_out:
+	private->state = VFIO_CCW_STATE_IDLE;
+	trace_vfio_ccw_io_fctl(scsw->cmd.fctl, get_schid(private),
+			       io_region->ret_code, errstr);
+}
+
+/*
+ * Got an interrupt for a normal io (state busy).
+ */
+static void fsm_irq(struct vfio_ccw_private *private,
+		    enum vfio_ccw_event event)
+{
+	struct irb *irb = this_cpu_ptr(&cio_irb);
+
+	memcpy(&private->irb, irb, sizeof(*irb));
+
+	queue_work(vfio_ccw_work_q, &private->io_work);
+
+	if (private->completion)
+		complete(private->completion);
+}
+
+/*
+ * Device statemachine
+ */
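+/* Indexed as [state][event]; vfio_ccw_fsm_event() dispatches through it. */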
+fsm_func_t *vfio_ccw_jumptable[NR_VFIO_CCW_STATES][NR_VFIO_CCW_EVENTS] = {
+	[VFIO_CCW_STATE_NOT_OPER] = {
+		[VFIO_CCW_EVENT_NOT_OPER]	= fsm_nop,
+		[VFIO_CCW_EVENT_IO_REQ]		= fsm_io_error,
+		[VFIO_CCW_EVENT_INTERRUPT]	= fsm_disabled_irq,
+	},
+	[VFIO_CCW_STATE_STANDBY] = {
+		[VFIO_CCW_EVENT_NOT_OPER]	= fsm_notoper,
+		[VFIO_CCW_EVENT_IO_REQ]		= fsm_io_error,
+		[VFIO_CCW_EVENT_INTERRUPT]	= fsm_irq,
+	},
+	[VFIO_CCW_STATE_IDLE] = {
+		[VFIO_CCW_EVENT_NOT_OPER]	= fsm_notoper,
+		[VFIO_CCW_EVENT_IO_REQ]		= fsm_io_request,
+		[VFIO_CCW_EVENT_INTERRUPT]	= fsm_irq,
+	},
+	[VFIO_CCW_STATE_BOXED] = {
+		[VFIO_CCW_EVENT_NOT_OPER]	= fsm_notoper,
+		[VFIO_CCW_EVENT_IO_REQ]		= fsm_io_busy,
+		[VFIO_CCW_EVENT_INTERRUPT]	= fsm_irq,
+	},
+	[VFIO_CCW_STATE_BUSY] = {
+		[VFIO_CCW_EVENT_NOT_OPER]	= fsm_notoper,
+		[VFIO_CCW_EVENT_IO_REQ]		= fsm_io_busy,
+		[VFIO_CCW_EVENT_INTERRUPT]	= fsm_irq,
+	},
+};
diff --git a/drivers/s390/cio/vfio_ccw_ops.c b/drivers/s390/cio/vfio_ccw_ops.c
new file mode 100644
index 0000000..f673e10
--- /dev/null
+++ b/drivers/s390/cio/vfio_ccw_ops.c
@@ -0,0 +1,426 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Physical device callbacks for vfio_ccw
+ *
+ * Copyright IBM Corp. 2017
+ *
+ * Author(s): Dong Jia Shi <bjsdjshi@linux.vnet.ibm.com>
+ *            Xiao Feng Ren <renxiaof@linux.vnet.ibm.com>
+ */
+
+#include <linux/vfio.h>
+#include <linux/mdev.h>
+
+#include "vfio_ccw_private.h"
+
+static int vfio_ccw_mdev_reset(struct mdev_device *mdev)
+{
+	struct vfio_ccw_private *private;
+	struct subchannel *sch;
+	int ret;
+
+	private = dev_get_drvdata(mdev_parent_dev(mdev));
+	sch = private->sch;
+	/*
+	 * TODO:
+	 * At the current stage, some things like "no I/O running" and "no
+	 * interrupt pending" are clear, but we are not sure what other state
+	 * we need to care about.
+	 * There are still a lot more instructions that need to be handled. We
+	 * should come back here later.
+	 */
+	ret = vfio_ccw_sch_quiesce(sch);
+	if (ret)
+		return ret;
+
+	ret = cio_enable_subchannel(sch, (u32)(unsigned long)sch);
+	if (!ret)
+		private->state = VFIO_CCW_STATE_IDLE;
+
+	return ret;
+}
+
+static int vfio_ccw_mdev_notifier(struct notifier_block *nb,
+				  unsigned long action,
+				  void *data)
+{
+	struct vfio_ccw_private *private =
+		container_of(nb, struct vfio_ccw_private, nb);
+
+	/*
+	 * Vendor drivers MUST unpin pages in response to an
+	 * invalidation.
+	 */
+	if (action == VFIO_IOMMU_NOTIFY_DMA_UNMAP) {
+		struct vfio_iommu_type1_dma_unmap *unmap = data;
+
+		if (!cp_iova_pinned(&private->cp, unmap->iova))
+			return NOTIFY_OK;
+
+		if (vfio_ccw_mdev_reset(private->mdev))
+			return NOTIFY_BAD;
+
+		cp_free(&private->cp);
+		return NOTIFY_OK;
+	}
+
+	return NOTIFY_DONE;
+}
+
+static ssize_t name_show(struct kobject *kobj, struct device *dev, char *buf)
+{
+	return sprintf(buf, "I/O subchannel (Non-QDIO)\n");
+}
+static MDEV_TYPE_ATTR_RO(name);
+
+static ssize_t device_api_show(struct kobject *kobj, struct device *dev,
+			       char *buf)
+{
+	return sprintf(buf, "%s\n", VFIO_DEVICE_API_CCW_STRING);
+}
+static MDEV_TYPE_ATTR_RO(device_api);
+
+static ssize_t available_instances_show(struct kobject *kobj,
+					struct device *dev, char *buf)
+{
+	struct vfio_ccw_private *private = dev_get_drvdata(dev);
+
+	return sprintf(buf, "%d\n", atomic_read(&private->avail));
+}
+static MDEV_TYPE_ATTR_RO(available_instances);
+
+static struct attribute *mdev_types_attrs[] = {
+	&mdev_type_attr_name.attr,
+	&mdev_type_attr_device_api.attr,
+	&mdev_type_attr_available_instances.attr,
+	NULL,
+};
+
+static struct attribute_group mdev_type_group = {
+	.name  = "io",
+	.attrs = mdev_types_attrs,
+};
+
+static struct attribute_group *mdev_type_groups[] = {
+	&mdev_type_group,
+	NULL,
+};
+
+static int vfio_ccw_mdev_create(struct kobject *kobj, struct mdev_device *mdev)
+{
+	struct vfio_ccw_private *private =
+		dev_get_drvdata(mdev_parent_dev(mdev));
+
+	if (private->state == VFIO_CCW_STATE_NOT_OPER)
+		return -ENODEV;
+
+	if (atomic_dec_if_positive(&private->avail) < 0)
+		return -EPERM;
+
+	private->mdev = mdev;
+	private->state = VFIO_CCW_STATE_IDLE;
+
+	return 0;
+}
+
+static int vfio_ccw_mdev_remove(struct mdev_device *mdev)
+{
+	struct vfio_ccw_private *private =
+		dev_get_drvdata(mdev_parent_dev(mdev));
+
+	if ((private->state != VFIO_CCW_STATE_NOT_OPER) &&
+	    (private->state != VFIO_CCW_STATE_STANDBY)) {
+		if (!vfio_ccw_mdev_reset(mdev))
+			private->state = VFIO_CCW_STATE_STANDBY;
+		/* The state will be NOT_OPER on error. */
+	}
+
+	private->mdev = NULL;
+	atomic_inc(&private->avail);
+
+	return 0;
+}
+
+static int vfio_ccw_mdev_open(struct mdev_device *mdev)
+{
+	struct vfio_ccw_private *private =
+		dev_get_drvdata(mdev_parent_dev(mdev));
+	unsigned long events = VFIO_IOMMU_NOTIFY_DMA_UNMAP;
+
+	private->nb.notifier_call = vfio_ccw_mdev_notifier;
+
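+	/* Get notified on DMA unmaps so that pinned pages can be released. */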
+	return vfio_register_notifier(mdev_dev(mdev), VFIO_IOMMU_NOTIFY,
+				      &events, &private->nb);
+}
+
+static void vfio_ccw_mdev_release(struct mdev_device *mdev)
+{
+	struct vfio_ccw_private *private =
+		dev_get_drvdata(mdev_parent_dev(mdev));
+
+	vfio_unregister_notifier(mdev_dev(mdev), VFIO_IOMMU_NOTIFY,
+				 &private->nb);
+}
+
+static ssize_t vfio_ccw_mdev_read(struct mdev_device *mdev,
+				  char __user *buf,
+				  size_t count,
+				  loff_t *ppos)
+{
+	struct vfio_ccw_private *private;
+	struct ccw_io_region *region;
+
+	if (*ppos + count > sizeof(*region))
+		return -EINVAL;
+
+	private = dev_get_drvdata(mdev_parent_dev(mdev));
+	region = private->io_region;
+	if (copy_to_user(buf, (void *)region + *ppos, count))
+		return -EFAULT;
+
+	return count;
+}
+
+static ssize_t vfio_ccw_mdev_write(struct mdev_device *mdev,
+				   const char __user *buf,
+				   size_t count,
+				   loff_t *ppos)
+{
+	struct vfio_ccw_private *private;
+	struct ccw_io_region *region;
+
+	if (*ppos + count > sizeof(*region))
+		return -EINVAL;
+
+	private = dev_get_drvdata(mdev_parent_dev(mdev));
+	if (private->state != VFIO_CCW_STATE_IDLE)
+		return -EACCES;
+
+	region = private->io_region;
+	if (copy_from_user((void *)region + *ppos, buf, count))
+		return -EFAULT;
+
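+	/*
+	 * A write of the region is the I/O request: hand it to the state
+	 * machine, which leaves its result in ret_code.
+	 */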
+	vfio_ccw_fsm_event(private, VFIO_CCW_EVENT_IO_REQ);
+	if (region->ret_code != 0) {
+		private->state = VFIO_CCW_STATE_IDLE;
+		return region->ret_code;
+	}
+
+	return count;
+}
+
+static int vfio_ccw_mdev_get_device_info(struct vfio_device_info *info)
+{
+	info->flags = VFIO_DEVICE_FLAGS_CCW | VFIO_DEVICE_FLAGS_RESET;
+	info->num_regions = VFIO_CCW_NUM_REGIONS;
+	info->num_irqs = VFIO_CCW_NUM_IRQS;
+
+	return 0;
+}
+
+static int vfio_ccw_mdev_get_region_info(struct vfio_region_info *info,
+					 u16 *cap_type_id,
+					 void **cap_type)
+{
+	switch (info->index) {
+	case VFIO_CCW_CONFIG_REGION_INDEX:
+		info->offset = 0;
+		info->size = sizeof(struct ccw_io_region);
+		info->flags = VFIO_REGION_INFO_FLAG_READ
+			      | VFIO_REGION_INFO_FLAG_WRITE;
+		return 0;
+	default:
+		return -EINVAL;
+	}
+}
+
+static int vfio_ccw_mdev_get_irq_info(struct vfio_irq_info *info)
+{
+	if (info->index != VFIO_CCW_IO_IRQ_INDEX)
+		return -EINVAL;
+
+	info->count = 1;
+	info->flags = VFIO_IRQ_INFO_EVENTFD;
+
+	return 0;
+}
+
+static int vfio_ccw_mdev_set_irqs(struct mdev_device *mdev,
+				  uint32_t flags,
+				  void __user *data)
+{
+	struct vfio_ccw_private *private;
+	struct eventfd_ctx **ctx;
+
+	if (!(flags & VFIO_IRQ_SET_ACTION_TRIGGER))
+		return -EINVAL;
+
+	private = dev_get_drvdata(mdev_parent_dev(mdev));
+	ctx = &private->io_trigger;
+
+	switch (flags & VFIO_IRQ_SET_DATA_TYPE_MASK) {
+	case VFIO_IRQ_SET_DATA_NONE:
+	{
+		if (*ctx)
+			eventfd_signal(*ctx, 1);
+		return 0;
+	}
+	case VFIO_IRQ_SET_DATA_BOOL:
+	{
+		uint8_t trigger;
+
+		if (get_user(trigger, (uint8_t __user *)data))
+			return -EFAULT;
+
+		if (trigger && *ctx)
+			eventfd_signal(*ctx, 1);
+		return 0;
+	}
+	case VFIO_IRQ_SET_DATA_EVENTFD:
+	{
+		int32_t fd;
+
+		if (get_user(fd, (int32_t __user *)data))
+			return -EFAULT;
+
+		if (fd == -1) {
+			if (*ctx)
+				eventfd_ctx_put(*ctx);
+			*ctx = NULL;
+		} else if (fd >= 0) {
+			struct eventfd_ctx *efdctx;
+
+			efdctx = eventfd_ctx_fdget(fd);
+			if (IS_ERR(efdctx))
+				return PTR_ERR(efdctx);
+
+			if (*ctx)
+				eventfd_ctx_put(*ctx);
+
+			*ctx = efdctx;
+		} else
+			return -EINVAL;
+
+		return 0;
+	}
+	default:
+		return -EINVAL;
+	}
+}
+
+static ssize_t vfio_ccw_mdev_ioctl(struct mdev_device *mdev,
+				   unsigned int cmd,
+				   unsigned long arg)
+{
+	int ret = 0;
+	unsigned long minsz;
+
+	switch (cmd) {
+	case VFIO_DEVICE_GET_INFO:
+	{
+		struct vfio_device_info info;
+
+		minsz = offsetofend(struct vfio_device_info, num_irqs);
+
+		if (copy_from_user(&info, (void __user *)arg, minsz))
+			return -EFAULT;
+
+		if (info.argsz < minsz)
+			return -EINVAL;
+
+		ret = vfio_ccw_mdev_get_device_info(&info);
+		if (ret)
+			return ret;
+
+		return copy_to_user((void __user *)arg, &info, minsz);
+	}
+	case VFIO_DEVICE_GET_REGION_INFO:
+	{
+		struct vfio_region_info info;
+		u16 cap_type_id = 0;
+		void *cap_type = NULL;
+
+		minsz = offsetofend(struct vfio_region_info, offset);
+
+		if (copy_from_user(&info, (void __user *)arg, minsz))
+			return -EFAULT;
+
+		if (info.argsz < minsz)
+			return -EINVAL;
+
+		ret = vfio_ccw_mdev_get_region_info(&info, &cap_type_id,
+						    &cap_type);
+		if (ret)
+			return ret;
+
+		return copy_to_user((void __user *)arg, &info, minsz);
+	}
+	case VFIO_DEVICE_GET_IRQ_INFO:
+	{
+		struct vfio_irq_info info;
+
+		minsz = offsetofend(struct vfio_irq_info, count);
+
+		if (copy_from_user(&info, (void __user *)arg, minsz))
+			return -EFAULT;
+
+		if (info.argsz < minsz || info.index >= VFIO_CCW_NUM_IRQS)
+			return -EINVAL;
+
+		ret = vfio_ccw_mdev_get_irq_info(&info);
+		if (ret)
+			return ret;
+
+		if (info.count == -1)
+			return -EINVAL;
+
+		return copy_to_user((void __user *)arg, &info, minsz);
+	}
+	case VFIO_DEVICE_SET_IRQS:
+	{
+		struct vfio_irq_set hdr;
+		size_t data_size;
+		void __user *data;
+
+		minsz = offsetofend(struct vfio_irq_set, count);
+
+		if (copy_from_user(&hdr, (void __user *)arg, minsz))
+			return -EFAULT;
+
+		ret = vfio_set_irqs_validate_and_prepare(&hdr, 1,
+							 VFIO_CCW_NUM_IRQS,
+							 &data_size);
+		if (ret)
+			return ret;
+
+		data = (void __user *)(arg + minsz);
+		return vfio_ccw_mdev_set_irqs(mdev, hdr.flags, data);
+	}
+	case VFIO_DEVICE_RESET:
+		return vfio_ccw_mdev_reset(mdev);
+	default:
+		return -ENOTTY;
+	}
+}
+
+static const struct mdev_parent_ops vfio_ccw_mdev_ops = {
+	.owner			= THIS_MODULE,
+	.supported_type_groups  = mdev_type_groups,
+	.create			= vfio_ccw_mdev_create,
+	.remove			= vfio_ccw_mdev_remove,
+	.open			= vfio_ccw_mdev_open,
+	.release		= vfio_ccw_mdev_release,
+	.read			= vfio_ccw_mdev_read,
+	.write			= vfio_ccw_mdev_write,
+	.ioctl			= vfio_ccw_mdev_ioctl,
+};
+
+int vfio_ccw_mdev_reg(struct subchannel *sch)
+{
+	return mdev_register_device(&sch->dev, &vfio_ccw_mdev_ops);
+}
+
+void vfio_ccw_mdev_unreg(struct subchannel *sch)
+{
+	mdev_unregister_device(&sch->dev);
+}
diff --git a/drivers/s390/cio/vfio_ccw_private.h b/drivers/s390/cio/vfio_ccw_private.h
new file mode 100644
index 0000000..078e46f
--- /dev/null
+++ b/drivers/s390/cio/vfio_ccw_private.h
@@ -0,0 +1,97 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Private definitions for the vfio_ccw driver
+ *
+ * Copyright IBM Corp. 2017
+ *
+ * Author(s): Dong Jia Shi <bjsdjshi@linux.vnet.ibm.com>
+ *            Xiao Feng Ren <renxiaof@linux.vnet.ibm.com>
+ */
+
+#ifndef _VFIO_CCW_PRIVATE_H_
+#define _VFIO_CCW_PRIVATE_H_
+
+#include <linux/completion.h>
+#include <linux/eventfd.h>
+#include <linux/workqueue.h>
+#include <linux/vfio_ccw.h>
+
+#include "css.h"
+#include "vfio_ccw_cp.h"
+
+/**
+ * struct vfio_ccw_private
+ * @sch: pointer to the subchannel
+ * @state: internal state of the device
+ * @completion: synchronization helper of the I/O completion
+ * @avail: available for creating a mediated device
+ * @mdev: pointer to the mediated device
+ * @nb: notifier for vfio events
+ * @io_region: MMIO region to input/output I/O arguments/results
+ * @cp: channel program for the current I/O operation
+ * @irb: irb info received from interrupt
+ * @scsw: scsw info
+ * @io_trigger: eventfd ctx for signaling userspace I/O results
+ * @io_work: work item for deferred processing of I/O handling
+ */
+struct vfio_ccw_private {
+	struct subchannel	*sch;
+	int			state;
+	struct completion	*completion;
+	atomic_t		avail;
+	struct mdev_device	*mdev;
+	struct notifier_block	nb;
+	struct ccw_io_region	*io_region;
+
+	struct channel_program	cp;
+	struct irb		irb;
+	union scsw		scsw;
+
+	struct eventfd_ctx	*io_trigger;
+	struct work_struct	io_work;
+} __aligned(8);
+
+extern int vfio_ccw_mdev_reg(struct subchannel *sch);
+extern void vfio_ccw_mdev_unreg(struct subchannel *sch);
+
+extern int vfio_ccw_sch_quiesce(struct subchannel *sch);
+
+/*
+ * States of the device state machine.
+ */
+enum vfio_ccw_state {
+	VFIO_CCW_STATE_NOT_OPER,
+	VFIO_CCW_STATE_STANDBY,
+	VFIO_CCW_STATE_IDLE,
+	VFIO_CCW_STATE_BOXED,
+	VFIO_CCW_STATE_BUSY,
+	/* last element! */
+	NR_VFIO_CCW_STATES
+};
+
+/*
+ * Asynchronous events of the device state machine.
+ */
+enum vfio_ccw_event {
+	VFIO_CCW_EVENT_NOT_OPER,
+	VFIO_CCW_EVENT_IO_REQ,
+	VFIO_CCW_EVENT_INTERRUPT,
+	/* last element! */
+	NR_VFIO_CCW_EVENTS
+};
+
+/*
+ * Actions are called through the jumptable.
+ */
+typedef void (fsm_func_t)(struct vfio_ccw_private *, enum vfio_ccw_event);
+extern fsm_func_t *vfio_ccw_jumptable[NR_VFIO_CCW_STATES][NR_VFIO_CCW_EVENTS];
+
+static inline void vfio_ccw_fsm_event(struct vfio_ccw_private *private,
+				     int event)
+{
+	vfio_ccw_jumptable[private->state][event](private, event);
+}
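+
+/*
+ * Dispatch example (illustrative; the jumptable itself is defined
+ * elsewhere in the driver): an I/O request arriving while the device
+ * is idle is handled as
+ *
+ *	vfio_ccw_fsm_event(private, VFIO_CCW_EVENT_IO_REQ);
+ *	  -> vfio_ccw_jumptable[VFIO_CCW_STATE_IDLE][VFIO_CCW_EVENT_IO_REQ]
+ *
+ * so every state/event pair maps to exactly one action function.
+ */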
+
+extern struct workqueue_struct *vfio_ccw_work_q;
+
+#endif
diff --git a/drivers/s390/cio/vfio_ccw_trace.h b/drivers/s390/cio/vfio_ccw_trace.h
new file mode 100644
index 0000000..b1da53d
--- /dev/null
+++ b/drivers/s390/cio/vfio_ccw_trace.h
@@ -0,0 +1,54 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Tracepoints for vfio_ccw driver
+ *
+ * Copyright IBM Corp. 2018
+ *
+ * Author(s): Dong Jia Shi <bjsdjshi@linux.vnet.ibm.com>
+ *            Halil Pasic <pasic@linux.vnet.ibm.com>
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM vfio_ccw
+
+#if !defined(_VFIO_CCW_TRACE_) || defined(TRACE_HEADER_MULTI_READ)
+#define _VFIO_CCW_TRACE_
+
+#include <linux/tracepoint.h>
+
+TRACE_EVENT(vfio_ccw_io_fctl,
+	TP_PROTO(int fctl, struct subchannel_id schid, int errno, char *errstr),
+	TP_ARGS(fctl, schid, errno, errstr),
+
+	TP_STRUCT__entry(
+		__field(int, fctl)
+		__field_struct(struct subchannel_id, schid)
+		__field(int, errno)
+		__field(char*, errstr)
+	),
+
+	TP_fast_assign(
+		__entry->fctl = fctl;
+		__entry->schid = schid;
+		__entry->errno = errno;
+		__entry->errstr = errstr;
+	),
+
+	TP_printk("schid=%x.%x.%04x fctl=%x errno=%d info=%s",
+		  __entry->schid.cssid,
+		  __entry->schid.ssid,
+		  __entry->schid.sch_no,
+		  __entry->fctl,
+		  __entry->errno,
+		  __entry->errstr)
+);
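+
+/*
+ * TRACE_EVENT() generates a trace_vfio_ccw_io_fctl() call for use by the
+ * driver; an illustrative invocation (the argument values are examples):
+ *
+ *	trace_vfio_ccw_io_fctl(scsw->cmd.fctl, schid,
+ *			       io_region->ret_code, errstr);
+ */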
+
+#endif /* _VFIO_CCW_TRACE_ */
+
+/* This part must be outside protection */
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE vfio_ccw_trace
+
+#include <trace/define_trace.h>
diff --git a/drivers/s390/crypto/Makefile b/drivers/s390/crypto/Makefile
new file mode 100644
index 0000000..b59af54
--- /dev/null
+++ b/drivers/s390/crypto/Makefile
@@ -0,0 +1,17 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# S/390 crypto devices
+#
+
+ap-objs := ap_bus.o ap_card.o ap_queue.o
+obj-$(subst m,y,$(CONFIG_ZCRYPT)) += ap.o
+# zcrypt_api.o and zcrypt_msgtype*.o depend on ap.o
+zcrypt-objs := zcrypt_api.o zcrypt_card.o zcrypt_queue.o
+zcrypt-objs += zcrypt_msgtype6.o zcrypt_msgtype50.o
+obj-$(CONFIG_ZCRYPT) += zcrypt.o
+# adapter drivers depend on ap.o and zcrypt.o
+obj-$(CONFIG_ZCRYPT) += zcrypt_pcixcc.o zcrypt_cex2a.o zcrypt_cex4.o
+
+# pkey kernel module
+pkey-objs := pkey_api.o
+obj-$(CONFIG_PKEY) += pkey.o
diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c
new file mode 100644
index 0000000..f039266
--- /dev/null
+++ b/drivers/s390/crypto/ap_bus.c
@@ -0,0 +1,1625 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright IBM Corp. 2006, 2012
+ * Author(s): Cornelia Huck <cornelia.huck@de.ibm.com>
+ *	      Martin Schwidefsky <schwidefsky@de.ibm.com>
+ *	      Ralph Wuerthner <rwuerthn@de.ibm.com>
+ *	      Felix Beck <felix.beck@de.ibm.com>
+ *	      Holger Dengler <hd@linux.vnet.ibm.com>
+ *
+ * Adjunct processor bus.
+ */
+
+#define KMSG_COMPONENT "ap"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <linux/kernel_stat.h>
+#include <linux/moduleparam.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/workqueue.h>
+#include <linux/slab.h>
+#include <linux/notifier.h>
+#include <linux/kthread.h>
+#include <linux/mutex.h>
+#include <linux/suspend.h>
+#include <asm/airq.h>
+#include <linux/atomic.h>
+#include <asm/isc.h>
+#include <linux/hrtimer.h>
+#include <linux/ktime.h>
+#include <asm/facility.h>
+#include <linux/crypto.h>
+#include <linux/mod_devicetable.h>
+#include <linux/debugfs.h>
+#include <linux/ctype.h>
+
+#include "ap_bus.h"
+#include "ap_debug.h"
+
+/*
+ * Module parameters; note though this file itself isn't modular.
+ */
+int ap_domain_index = -1;	/* Adjunct Processor Domain Index */
+static DEFINE_SPINLOCK(ap_domain_lock);
+module_param_named(domain, ap_domain_index, int, 0440);
+MODULE_PARM_DESC(domain, "domain index for ap devices");
+EXPORT_SYMBOL(ap_domain_index);
+
+static int ap_thread_flag;
+module_param_named(poll_thread, ap_thread_flag, int, 0440);
+MODULE_PARM_DESC(poll_thread, "Turn on/off poll thread, default is 0 (off).");
+
+static char *apm_str;
+module_param_named(apmask, apm_str, charp, 0440);
+MODULE_PARM_DESC(apmask, "AP bus adapter mask.");
+
+static char *aqm_str;
+module_param_named(aqmask, aqm_str, charp, 0440);
+MODULE_PARM_DESC(aqmask, "AP bus domain mask.");
+
+static struct device *ap_root_device;
+
+DEFINE_SPINLOCK(ap_list_lock);
+LIST_HEAD(ap_card_list);
+
+/* Default permissions (card and domain masking) */
+static struct ap_perms {
+	DECLARE_BITMAP(apm, AP_DEVICES);
+	DECLARE_BITMAP(aqm, AP_DOMAINS);
+} ap_perms;
+static DEFINE_MUTEX(ap_perms_mutex);
+
+static struct ap_config_info *ap_configuration;
+static bool initialised;
+
+/*
+ * AP bus related debug feature things.
+ */
+debug_info_t *ap_dbf_info;
+
+/*
+ * Workqueue timer for bus rescan.
+ */
+static struct timer_list ap_config_timer;
+static int ap_config_time = AP_CONFIG_TIME;
+static void ap_scan_bus(struct work_struct *);
+static DECLARE_WORK(ap_scan_work, ap_scan_bus);
+
+/*
+ * Tasklet & timer for AP request polling and interrupts
+ */
+static void ap_tasklet_fn(unsigned long);
+static DECLARE_TASKLET(ap_tasklet, ap_tasklet_fn, 0);
+static DECLARE_WAIT_QUEUE_HEAD(ap_poll_wait);
+static struct task_struct *ap_poll_kthread;
+static DEFINE_MUTEX(ap_poll_thread_mutex);
+static DEFINE_SPINLOCK(ap_poll_timer_lock);
+static struct hrtimer ap_poll_timer;
+/*
+ * In LPAR poll with 4kHz frequency, i.e. every 250000 nanoseconds.
+ * Under z/VM this is changed to 1500000 nanoseconds to adjust to the
+ * z/VM polling rate.
+ */
+static unsigned long long poll_timeout = 250000;
+
+/* Suspend flag */
+static int ap_suspend_flag;
+/* Maximum domain id */
+static int ap_max_domain_id;
+/*
+ * Flag to check if the domain was set through the module parameter domain=.
+ * This is important when suspend and resume are done in a z/VM environment
+ * where the domain might change.
+ */
+static int user_set_domain;
+static struct bus_type ap_bus_type;
+
+/* Adapter interrupt definitions */
+static void ap_interrupt_handler(struct airq_struct *airq);
+
+static int ap_airq_flag;
+
+static struct airq_struct ap_airq = {
+	.handler = ap_interrupt_handler,
+	.isc = AP_ISC,
+};
+
+/**
+ * ap_using_interrupts() - Returns non-zero if interrupt support is
+ * available.
+ */
+static inline int ap_using_interrupts(void)
+{
+	return ap_airq_flag;
+}
+
+/**
+ * ap_airq_ptr() - Get the address of the adapter interrupt indicator
+ *
+ * Returns the address of the local-summary-indicator of the adapter
+ * interrupt handler for AP, or NULL if adapter interrupts are not
+ * available.
+ */
+void *ap_airq_ptr(void)
+{
+	if (ap_using_interrupts())
+		return ap_airq.lsi_ptr;
+	return NULL;
+}
+
+/**
+ * ap_interrupts_available(): Test if AP interrupts are available.
+ *
+ * Returns 1 if AP interrupts are available.
+ */
+static int ap_interrupts_available(void)
+{
+	return test_facility(65);
+}
+
+/**
+ * ap_configuration_available(): Test if AP configuration
+ * information is available.
+ *
+ * Returns 1 if AP configuration information is available.
+ */
+static int ap_configuration_available(void)
+{
+	return test_facility(12);
+}
+
+/**
+ * ap_apft_available(): Test if AP facilities test (APFT)
+ * facility is available.
+ *
+ * Returns 1 if APFT is available.
+ */
+static int ap_apft_available(void)
+{
+	return test_facility(15);
+}
+
+/*
+ * ap_qact_available(): Test if the PQAP(QACT) subfunction is available.
+ *
+ * Returns 1 if the QACT subfunction is available.
+ */
+static inline int ap_qact_available(void)
+{
+	if (ap_configuration)
+		return ap_configuration->qact;
+	return 0;
+}
+
+/*
+ * ap_query_configuration(): Fetch cryptographic config info
+ *
+ * Returns the ap configuration info fetched via PQAP(QCI).
+ * On success 0 is returned, on failure a negative errno
+ * is returned, e.g. if the PQAP(QCI) instruction is not
+ * available, the return value will be -EOPNOTSUPP.
+ */
+static inline int ap_query_configuration(struct ap_config_info *info)
+{
+	if (!ap_configuration_available())
+		return -EOPNOTSUPP;
+	if (!info)
+		return -EINVAL;
+	return ap_qci(info);
+}
+EXPORT_SYMBOL(ap_query_configuration);
+
+/**
+ * ap_init_configuration(): Allocate and query configuration array.
+ */
+static void ap_init_configuration(void)
+{
+	if (!ap_configuration_available())
+		return;
+
+	ap_configuration = kzalloc(sizeof(*ap_configuration), GFP_KERNEL);
+	if (!ap_configuration)
+		return;
+	if (ap_query_configuration(ap_configuration) != 0) {
+		kfree(ap_configuration);
+		ap_configuration = NULL;
+		return;
+	}
+}
+
+/*
+ * ap_test_config(): helper function to extract the nr-th bit
+ *		     within the unsigned int array field.
+ */
+static inline int ap_test_config(unsigned int *field, unsigned int nr)
+{
+	return ap_test_bit((field + (nr >> 5)), (nr & 0x1f));
+}
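+
+/*
+ * Worked example: for nr = 71, ap_test_config() inspects field[71 >> 5],
+ * i.e. field[2], and ap_test_bit() tests bit 71 & 0x1f = 7 counted from
+ * the leftmost (most significant) bit, matching the big endian masks
+ * returned by QCI.
+ */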
+
+/*
+ * ap_test_config_card_id(): Test whether an AP card ID is configured.
+ * @id: AP card ID
+ *
+ * Returns 0 if the card is not configured
+ *	   1 if the card is configured or
+ *	     if the configuration information is not available
+ */
+static inline int ap_test_config_card_id(unsigned int id)
+{
+	if (!ap_configuration)	/* QCI not supported */
+		return 1;
+	return ap_test_config(ap_configuration->apm, id);
+}
+
+/*
+ * ap_test_config_domain(): Test whether an AP usage domain is configured.
+ * @domain: AP usage domain ID
+ *
+ * Returns 0 if the usage domain is not configured
+ *	   1 if the usage domain is configured or
+ *	     if the configuration information is not available
+ */
+static inline int ap_test_config_domain(unsigned int domain)
+{
+	if (!ap_configuration)	/* QCI not supported */
+		return domain < 16;
+	return ap_test_config(ap_configuration->aqm, domain);
+}
+
+/**
+ * ap_query_queue(): Check if an AP queue is available.
+ * @qid: The AP queue number
+ * @queue_depth: Pointer to queue depth value
+ * @device_type: Pointer to device type value
+ * @facilities: Pointer to facility indicator
+ */
+static int ap_query_queue(ap_qid_t qid, int *queue_depth, int *device_type,
+			  unsigned int *facilities)
+{
+	struct ap_queue_status status;
+	unsigned long info;
+	int nd;
+
+	if (!ap_test_config_card_id(AP_QID_CARD(qid)))
+		return -ENODEV;
+
+	status = ap_test_queue(qid, ap_apft_available(), &info);
+	switch (status.response_code) {
+	case AP_RESPONSE_NORMAL:
+		*queue_depth = (int)(info & 0xff);
+		*device_type = (int)((info >> 24) & 0xff);
+		*facilities = (unsigned int)(info >> 32);
+		/* Update maximum domain id */
+		nd = (info >> 16) & 0xff;
+		/* if N bit is available, z13 and newer */
+		if ((info & (1UL << 57)) && nd > 0)
+			ap_max_domain_id = nd;
+		else /* older machine types */
+			ap_max_domain_id = 15;
+		switch (*device_type) {
+			/* For CEX2 and CEX3 the available functions
+			 * are not reflected by the facilities bits.
+			 * Instead they are coded into the type. So
+			 * here modify the function bits based on the
+			 * type.
+			 */
+		case AP_DEVICE_TYPE_CEX2A:
+		case AP_DEVICE_TYPE_CEX3A:
+			*facilities |= 0x08000000;
+			break;
+		case AP_DEVICE_TYPE_CEX2C:
+		case AP_DEVICE_TYPE_CEX3C:
+			*facilities |= 0x10000000;
+			break;
+		default:
+			break;
+		}
+		return 0;
+	case AP_RESPONSE_Q_NOT_AVAIL:
+	case AP_RESPONSE_DECONFIGURED:
+	case AP_RESPONSE_CHECKSTOPPED:
+	case AP_RESPONSE_INVALID_ADDRESS:
+		return -ENODEV;
+	case AP_RESPONSE_RESET_IN_PROGRESS:
+	case AP_RESPONSE_OTHERWISE_CHANGED:
+	case AP_RESPONSE_BUSY:
+		return -EBUSY;
+	default:
+		BUG();
+	}
+}
+
+void ap_wait(enum ap_wait wait)
+{
+	ktime_t hr_time;
+
+	switch (wait) {
+	case AP_WAIT_AGAIN:
+	case AP_WAIT_INTERRUPT:
+		if (ap_using_interrupts())
+			break;
+		if (ap_poll_kthread) {
+			wake_up(&ap_poll_wait);
+			break;
+		}
+		/* Fall through */
+	case AP_WAIT_TIMEOUT:
+		spin_lock_bh(&ap_poll_timer_lock);
+		if (!hrtimer_is_queued(&ap_poll_timer)) {
+			hr_time = poll_timeout;
+			hrtimer_forward_now(&ap_poll_timer, hr_time);
+			hrtimer_restart(&ap_poll_timer);
+		}
+		spin_unlock_bh(&ap_poll_timer_lock);
+		break;
+	case AP_WAIT_NONE:
+	default:
+		break;
+	}
+}
+
+/**
+ * ap_request_timeout(): Handling of request timeouts
+ * @t: timer making this callback
+ *
+ * Handles request timeouts.
+ */
+void ap_request_timeout(struct timer_list *t)
+{
+	struct ap_queue *aq = from_timer(aq, t, timeout);
+
+	if (ap_suspend_flag)
+		return;
+	spin_lock_bh(&aq->lock);
+	ap_wait(ap_sm_event(aq, AP_EVENT_TIMEOUT));
+	spin_unlock_bh(&aq->lock);
+}
+
+/**
+ * ap_poll_timeout(): AP receive polling for finished AP requests.
+ * @unused: Unused pointer.
+ *
+ * Schedules the AP tasklet using a high resolution timer.
+ */
+static enum hrtimer_restart ap_poll_timeout(struct hrtimer *unused)
+{
+	if (!ap_suspend_flag)
+		tasklet_schedule(&ap_tasklet);
+	return HRTIMER_NORESTART;
+}
+
+/**
+ * ap_interrupt_handler() - Schedule ap_tasklet on interrupt
+ * @airq: pointer to adapter interrupt descriptor
+ */
+static void ap_interrupt_handler(struct airq_struct *airq)
+{
+	inc_irq_stat(IRQIO_APB);
+	if (!ap_suspend_flag)
+		tasklet_schedule(&ap_tasklet);
+}
+
+/**
+ * ap_tasklet_fn(): Tasklet to poll all AP devices.
+ * @dummy: Unused variable
+ *
+ * Poll all AP devices on the bus.
+ */
+static void ap_tasklet_fn(unsigned long dummy)
+{
+	struct ap_card *ac;
+	struct ap_queue *aq;
+	enum ap_wait wait = AP_WAIT_NONE;
+
+	/* Reset the indicator if interrupts are used so that new interrupts
+	 * can be received. Doing this at the beginning of the tasklet is
+	 * therefore important so that no requests on any AP get lost.
+	 */
+	if (ap_using_interrupts())
+		xchg(ap_airq.lsi_ptr, 0);
+
+	spin_lock_bh(&ap_list_lock);
+	for_each_ap_card(ac) {
+		for_each_ap_queue(aq, ac) {
+			spin_lock_bh(&aq->lock);
+			wait = min(wait, ap_sm_event_loop(aq, AP_EVENT_POLL));
+			spin_unlock_bh(&aq->lock);
+		}
+	}
+	spin_unlock_bh(&ap_list_lock);
+
+	ap_wait(wait);
+}
+
+static int ap_pending_requests(void)
+{
+	struct ap_card *ac;
+	struct ap_queue *aq;
+
+	spin_lock_bh(&ap_list_lock);
+	for_each_ap_card(ac) {
+		for_each_ap_queue(aq, ac) {
+			if (aq->queue_count == 0)
+				continue;
+			spin_unlock_bh(&ap_list_lock);
+			return 1;
+		}
+	}
+	spin_unlock_bh(&ap_list_lock);
+	return 0;
+}
+
+/**
+ * ap_poll_thread(): Thread that polls for finished requests.
+ * @data: Unused pointer
+ *
+ * AP bus poll thread. The purpose of this thread is to poll for
+ * finished requests in a loop if there is a "free" cpu - that is
+ * a cpu that doesn't have anything better to do. The polling stops
+ * as soon as there is another task or if all messages have been
+ * delivered.
+ */
+static int ap_poll_thread(void *data)
+{
+	DECLARE_WAITQUEUE(wait, current);
+
+	set_user_nice(current, MAX_NICE);
+	set_freezable();
+	while (!kthread_should_stop()) {
+		add_wait_queue(&ap_poll_wait, &wait);
+		set_current_state(TASK_INTERRUPTIBLE);
+		if (ap_suspend_flag || !ap_pending_requests()) {
+			schedule();
+			try_to_freeze();
+		}
+		set_current_state(TASK_RUNNING);
+		remove_wait_queue(&ap_poll_wait, &wait);
+		if (need_resched()) {
+			schedule();
+			try_to_freeze();
+			continue;
+		}
+		ap_tasklet_fn(0);
+	}
+
+	return 0;
+}
+
+static int ap_poll_thread_start(void)
+{
+	int rc;
+
+	if (ap_using_interrupts() || ap_poll_kthread)
+		return 0;
+	mutex_lock(&ap_poll_thread_mutex);
+	ap_poll_kthread = kthread_run(ap_poll_thread, NULL, "appoll");
+	rc = PTR_ERR_OR_ZERO(ap_poll_kthread);
+	if (rc)
+		ap_poll_kthread = NULL;
+	mutex_unlock(&ap_poll_thread_mutex);
+	return rc;
+}
+
+static void ap_poll_thread_stop(void)
+{
+	if (!ap_poll_kthread)
+		return;
+	mutex_lock(&ap_poll_thread_mutex);
+	kthread_stop(ap_poll_kthread);
+	ap_poll_kthread = NULL;
+	mutex_unlock(&ap_poll_thread_mutex);
+}
+
+#define is_card_dev(x) ((x)->parent == ap_root_device)
+#define is_queue_dev(x) ((x)->parent != ap_root_device)
+
+/**
+ * ap_bus_match()
+ * @dev: Pointer to device
+ * @drv: Pointer to device_driver
+ *
+ * AP bus device/driver matching: returns 1 on a device type match.
+ */
+static int ap_bus_match(struct device *dev, struct device_driver *drv)
+{
+	struct ap_driver *ap_drv = to_ap_drv(drv);
+	struct ap_device_id *id;
+
+	/*
+	 * Compare device type of the device with the list of
+	 * supported types of the device_driver.
+	 */
+	for (id = ap_drv->ids; id->match_flags; id++) {
+		if (is_card_dev(dev) &&
+		    id->match_flags & AP_DEVICE_ID_MATCH_CARD_TYPE &&
+		    id->dev_type == to_ap_dev(dev)->device_type)
+			return 1;
+		if (is_queue_dev(dev) &&
+		    id->match_flags & AP_DEVICE_ID_MATCH_QUEUE_TYPE &&
+		    id->dev_type == to_ap_dev(dev)->device_type)
+			return 1;
+	}
+	return 0;
+}
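+
+/*
+ * An ap driver announces the device types it supports through its ids
+ * table; a minimal sketch (the table name is illustrative, the type and
+ * match macros are from ap_bus.h and mod_devicetable.h):
+ *
+ *	static struct ap_device_id example_ids[] = {
+ *		{ .dev_type = AP_DEVICE_TYPE_CEX6,
+ *		  .match_flags = AP_DEVICE_ID_MATCH_CARD_TYPE },
+ *		{ .dev_type = AP_DEVICE_TYPE_CEX6,
+ *		  .match_flags = AP_DEVICE_ID_MATCH_QUEUE_TYPE },
+ *		{ .match_flags = 0 },	list terminator (see loop above)
+ *	};
+ */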
+
+/**
+ * ap_uevent(): Uevent function for AP devices.
+ * @dev: Pointer to device
+ * @env: Pointer to kobj_uevent_env
+ *
+ * It sets up a single environment variable DEV_TYPE which contains the
+ * hardware device type.
+ */
+static int ap_uevent(struct device *dev, struct kobj_uevent_env *env)
+{
+	struct ap_device *ap_dev = to_ap_dev(dev);
+	int retval = 0;
+
+	if (!ap_dev)
+		return -ENODEV;
+
+	/* Set up DEV_TYPE environment variable. */
+	retval = add_uevent_var(env, "DEV_TYPE=%04X", ap_dev->device_type);
+	if (retval)
+		return retval;
+
+	/* Add MODALIAS= */
+	retval = add_uevent_var(env, "MODALIAS=ap:t%02X", ap_dev->device_type);
+
+	return retval;
+}
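+
+/*
+ * For a CEX6 device (device_type 12 = 0x0c) the generated uevent thus
+ * carries, for example:
+ *
+ *	DEV_TYPE=000C
+ *	MODALIAS=ap:t0C
+ *
+ * which udev can use to load a matching driver module.
+ */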
+
+static int ap_dev_suspend(struct device *dev)
+{
+	struct ap_device *ap_dev = to_ap_dev(dev);
+
+	if (ap_dev->drv && ap_dev->drv->suspend)
+		ap_dev->drv->suspend(ap_dev);
+	return 0;
+}
+
+static int ap_dev_resume(struct device *dev)
+{
+	struct ap_device *ap_dev = to_ap_dev(dev);
+
+	if (ap_dev->drv && ap_dev->drv->resume)
+		ap_dev->drv->resume(ap_dev);
+	return 0;
+}
+
+static void ap_bus_suspend(void)
+{
+	AP_DBF(DBF_DEBUG, "%s running\n", __func__);
+
+	ap_suspend_flag = 1;
+	/*
+	 * Disable scanning for devices, since we do not want to scan
+	 * for them again after they have been removed.
+	 */
+	flush_work(&ap_scan_work);
+	tasklet_disable(&ap_tasklet);
+}
+
+static int __ap_card_devices_unregister(struct device *dev, void *dummy)
+{
+	if (is_card_dev(dev))
+		device_unregister(dev);
+	return 0;
+}
+
+static int __ap_queue_devices_unregister(struct device *dev, void *dummy)
+{
+	if (is_queue_dev(dev))
+		device_unregister(dev);
+	return 0;
+}
+
+static int __ap_queue_devices_with_id_unregister(struct device *dev, void *data)
+{
+	if (is_queue_dev(dev) &&
+	    AP_QID_CARD(to_ap_queue(dev)->qid) == (int)(long) data)
+		device_unregister(dev);
+	return 0;
+}
+
+static void ap_bus_resume(void)
+{
+	int rc;
+
+	AP_DBF(DBF_DEBUG, "%s running\n", __func__);
+
+	/* remove all queue devices */
+	bus_for_each_dev(&ap_bus_type, NULL, NULL,
+			 __ap_queue_devices_unregister);
+	/* remove all card devices */
+	bus_for_each_dev(&ap_bus_type, NULL, NULL,
+			 __ap_card_devices_unregister);
+
+	/* Reset thin interrupt setting */
+	if (ap_interrupts_available() && !ap_using_interrupts()) {
+		rc = register_adapter_interrupt(&ap_airq);
+		ap_airq_flag = (rc == 0);
+	}
+	if (!ap_interrupts_available() && ap_using_interrupts()) {
+		unregister_adapter_interrupt(&ap_airq);
+		ap_airq_flag = 0;
+	}
+	/* Reset domain */
+	if (!user_set_domain)
+		ap_domain_index = -1;
+	/* Get things going again */
+	ap_suspend_flag = 0;
+	if (ap_airq_flag)
+		xchg(ap_airq.lsi_ptr, 0);
+	tasklet_enable(&ap_tasklet);
+	queue_work(system_long_wq, &ap_scan_work);
+}
+
+static int ap_power_event(struct notifier_block *this, unsigned long event,
+			  void *ptr)
+{
+	switch (event) {
+	case PM_HIBERNATION_PREPARE:
+	case PM_SUSPEND_PREPARE:
+		ap_bus_suspend();
+		break;
+	case PM_POST_HIBERNATION:
+	case PM_POST_SUSPEND:
+		ap_bus_resume();
+		break;
+	default:
+		break;
+	}
+	return NOTIFY_DONE;
+}
+static struct notifier_block ap_power_notifier = {
+	.notifier_call = ap_power_event,
+};
+
+static SIMPLE_DEV_PM_OPS(ap_bus_pm_ops, ap_dev_suspend, ap_dev_resume);
+
+static struct bus_type ap_bus_type = {
+	.name = "ap",
+	.match = &ap_bus_match,
+	.uevent = &ap_uevent,
+	.pm = &ap_bus_pm_ops,
+};
+
+static int __ap_revise_reserved(struct device *dev, void *dummy)
+{
+	int rc, card, queue, devres, drvres;
+
+	if (is_queue_dev(dev)) {
+		card = AP_QID_CARD(to_ap_queue(dev)->qid);
+		queue = AP_QID_QUEUE(to_ap_queue(dev)->qid);
+		mutex_lock(&ap_perms_mutex);
+		devres = test_bit_inv(card, ap_perms.apm)
+			&& test_bit_inv(queue, ap_perms.aqm);
+		mutex_unlock(&ap_perms_mutex);
+		drvres = to_ap_drv(dev->driver)->flags
+			& AP_DRIVER_FLAG_DEFAULT;
+		if (!!devres != !!drvres) {
+			AP_DBF(DBF_DEBUG, "reprobing queue=%02x.%04x\n",
+			       card, queue);
+			rc = device_reprobe(dev);
+		}
+	}
+
+	return 0;
+}
+
+static void ap_bus_revise_bindings(void)
+{
+	bus_for_each_dev(&ap_bus_type, NULL, NULL, __ap_revise_reserved);
+}
+
+int ap_owned_by_def_drv(int card, int queue)
+{
+	int rc = 0;
+
+	if (card < 0 || card >= AP_DEVICES || queue < 0 || queue >= AP_DOMAINS)
+		return -EINVAL;
+
+	mutex_lock(&ap_perms_mutex);
+
+	if (test_bit_inv(card, ap_perms.apm)
+	    && test_bit_inv(queue, ap_perms.aqm))
+		rc = 1;
+
+	mutex_unlock(&ap_perms_mutex);
+
+	return rc;
+}
+EXPORT_SYMBOL(ap_owned_by_def_drv);
+
+int ap_apqn_in_matrix_owned_by_def_drv(unsigned long *apm,
+				       unsigned long *aqm)
+{
+	int card, queue, rc = 0;
+
+	mutex_lock(&ap_perms_mutex);
+
+	for (card = 0; !rc && card < AP_DEVICES; card++)
+		if (test_bit_inv(card, apm) &&
+		    test_bit_inv(card, ap_perms.apm))
+			for (queue = 0; !rc && queue < AP_DOMAINS; queue++)
+				if (test_bit_inv(queue, aqm) &&
+				    test_bit_inv(queue, ap_perms.aqm))
+					rc = 1;
+
+	mutex_unlock(&ap_perms_mutex);
+
+	return rc;
+}
+EXPORT_SYMBOL(ap_apqn_in_matrix_owned_by_def_drv);
+
+static int ap_device_probe(struct device *dev)
+{
+	struct ap_device *ap_dev = to_ap_dev(dev);
+	struct ap_driver *ap_drv = to_ap_drv(dev->driver);
+	int card, queue, devres, drvres, rc;
+
+	if (is_queue_dev(dev)) {
+		/*
+		 * If the apqn is marked as reserved/used by ap bus and
+		 * default drivers, only probe with drivers with the default
+		 * flag set. If it is not marked, only probe with drivers
+		 * with the default flag not set.
+		 */
+		card = AP_QID_CARD(to_ap_queue(dev)->qid);
+		queue = AP_QID_QUEUE(to_ap_queue(dev)->qid);
+		mutex_lock(&ap_perms_mutex);
+		devres = test_bit_inv(card, ap_perms.apm)
+			&& test_bit_inv(queue, ap_perms.aqm);
+		mutex_unlock(&ap_perms_mutex);
+		drvres = ap_drv->flags & AP_DRIVER_FLAG_DEFAULT;
+		if (!!devres != !!drvres)
+			return -ENODEV;
+	}
+
+	/* Add queue/card to list of active queues/cards */
+	spin_lock_bh(&ap_list_lock);
+	if (is_card_dev(dev))
+		list_add(&to_ap_card(dev)->list, &ap_card_list);
+	else
+		list_add(&to_ap_queue(dev)->list,
+			 &to_ap_queue(dev)->card->queues);
+	spin_unlock_bh(&ap_list_lock);
+
+	ap_dev->drv = ap_drv;
+	rc = ap_drv->probe ? ap_drv->probe(ap_dev) : -ENODEV;
+
+	if (rc) {
+		spin_lock_bh(&ap_list_lock);
+		if (is_card_dev(dev))
+			list_del_init(&to_ap_card(dev)->list);
+		else
+			list_del_init(&to_ap_queue(dev)->list);
+		spin_unlock_bh(&ap_list_lock);
+		ap_dev->drv = NULL;
+	}
+
+	return rc;
+}
+
+static int ap_device_remove(struct device *dev)
+{
+	struct ap_device *ap_dev = to_ap_dev(dev);
+	struct ap_driver *ap_drv = ap_dev->drv;
+
+	if (ap_drv->remove)
+		ap_drv->remove(ap_dev);
+
+	/* Remove queue/card from list of active queues/cards */
+	spin_lock_bh(&ap_list_lock);
+	if (is_card_dev(dev))
+		list_del_init(&to_ap_card(dev)->list);
+	else
+		list_del_init(&to_ap_queue(dev)->list);
+	spin_unlock_bh(&ap_list_lock);
+
+	return 0;
+}
+
+int ap_driver_register(struct ap_driver *ap_drv, struct module *owner,
+		       char *name)
+{
+	struct device_driver *drv = &ap_drv->driver;
+
+	if (!initialised)
+		return -ENODEV;
+
+	drv->bus = &ap_bus_type;
+	drv->probe = ap_device_probe;
+	drv->remove = ap_device_remove;
+	drv->owner = owner;
+	drv->name = name;
+	return driver_register(drv);
+}
+EXPORT_SYMBOL(ap_driver_register);
+
+void ap_driver_unregister(struct ap_driver *ap_drv)
+{
+	driver_unregister(&ap_drv->driver);
+}
+EXPORT_SYMBOL(ap_driver_unregister);
+
+void ap_bus_force_rescan(void)
+{
+	if (ap_suspend_flag)
+		return;
+	/* process an asynchronous bus rescan */
+	del_timer(&ap_config_timer);
+	queue_work(system_long_wq, &ap_scan_work);
+	flush_work(&ap_scan_work);
+}
+EXPORT_SYMBOL(ap_bus_force_rescan);
+
+/*
+ * hex2bitmap() - parse hex mask string and set bitmap.
+ * Valid strings look like "0x012345678" with at least one valid hex digit.
+ * Rest of the bitmap to the right is padded with 0. No spaces allowed
+ * within the string, the leading 0x may be omitted.
+ * On success the bitmap contains exactly the bits given by the hex
+ * string (both in big endian order) and 0 is returned.
+ */
+static int hex2bitmap(const char *str, unsigned long *bitmap, int bits)
+{
+	int i, n, b;
+
+	/* bits needs to be a multiple of 8 */
+	if (bits & 0x07)
+		return -EINVAL;
+
+	if (str[0] == '0' && str[1] == 'x')
+		str++;
+	if (*str == 'x')
+		str++;
+
+	for (i = 0; isxdigit(*str) && i < bits; str++) {
+		b = hex_to_bin(*str);
+		for (n = 0; n < 4; n++)
+			if (b & (0x08 >> n))
+				set_bit_inv(i + n, bitmap);
+		i += 4;
+	}
+
+	if (*str == '\n')
+		str++;
+	if (*str)
+		return -EINVAL;
+	return 0;
+}
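+
+/*
+ * Example (illustrative): hex2bitmap("0xF8", bitmap, 8) sets bits 0-4
+ * and leaves bits 5-7 clear; the same string without the leading "0x"
+ * parses identically.
+ */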
+
+/*
+ * modify_bitmap() - parse bitmask argument and modify an existing
+ * bit mask accordingly. A concatenation (done with ',') of these
+ * terms is recognized:
+ *   +<bitnr>[-<bitnr>] or -<bitnr>[-<bitnr>]
+ * <bitnr> may be any valid number (hex, decimal or octal) in the range
+ * 0...bits-1; the leading + or - is required. Here are some examples:
+ *   +0-15,+32,-128,-0xFF
+ *   -0-255,+1-16,+0x128
+ *   +1,+2,+3,+4,-5,-7-10
+ * The bitmap is modified in place as the changes are applied. Every
+ * positive value in the string will set a bit and every negative value
+ * in the string will clear a bit. As a bit may be touched more than once,
+ * the last 'operation' wins:
+ * +0-255,-128 = first bits 0-255 will be set, then bit 128 will be
+ * cleared again. All other bits are unmodified.
+ */
+static int modify_bitmap(const char *str, unsigned long *bitmap, int bits)
+{
+	int a, i, z;
+	char *np, sign;
+
+	/* bits needs to be a multiple of 8 */
+	if (bits & 0x07)
+		return -EINVAL;
+
+	while (*str) {
+		sign = *str++;
+		if (sign != '+' && sign != '-')
+			return -EINVAL;
+		a = z = simple_strtoul(str, &np, 0);
+		if (str == np || a >= bits)
+			return -EINVAL;
+		str = np;
+		if (*str == '-') {
+			z = simple_strtoul(++str, &np, 0);
+			if (str == np || a > z || z >= bits)
+				return -EINVAL;
+			str = np;
+		}
+		for (i = a; i <= z; i++)
+			if (sign == '+')
+				set_bit_inv(i, bitmap);
+			else
+				clear_bit_inv(i, bitmap);
+		while (*str == ',' || *str == '\n')
+			str++;
+	}
+
+	return 0;
+}
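+
+/*
+ * Example (illustrative): starting from an all-zero 256 bit map,
+ *
+ *	modify_bitmap("+0-15,+32,-8", bitmap, 256);
+ *
+ * first sets bits 0-15, then sets bit 32, then clears bit 8 again,
+ * leaving bits 0-7, 9-15 and 32 set - the last operation on a bit wins.
+ */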
+
+/*
+ * process_mask_arg() - parse a bitmap string and clear/set the
+ * bits in the bitmap accordingly. The string may be given as an
+ * absolute value, a hex string like "0x1F2E3D4C5B6A" simply
+ * overwriting the current content of the bitmap, or as a relative
+ * string like "+1-16,-32,-0x40,+128" where only single bits or
+ * ranges of bits are cleared or set. The distinction is made based
+ * on the very first character, which may be '+' or '-' for a
+ * relative string and is otherwise assumed to start an absolute
+ * value string. If parsing fails, a negative errno value is
+ * returned. All arguments and bitmaps are in big endian order.
+ */
+static int process_mask_arg(const char *str,
+			    unsigned long *bitmap, int bits,
+			    struct mutex *lock)
+{
+	unsigned long *newmap, size;
+	int rc;
+
+	/* bits needs to be a multiple of 8 */
+	if (bits & 0x07)
+		return -EINVAL;
+
+	size = BITS_TO_LONGS(bits) * sizeof(unsigned long);
+	newmap = kmalloc(size, GFP_KERNEL);
+	if (!newmap)
+		return -ENOMEM;
+	if (mutex_lock_interruptible(lock)) {
+		kfree(newmap);
+		return -ERESTARTSYS;
+	}
+
+	if (*str == '+' || *str == '-') {
+		memcpy(newmap, bitmap, size);
+		rc = modify_bitmap(str, newmap, bits);
+	} else {
+		memset(newmap, 0, size);
+		rc = hex2bitmap(str, newmap, bits);
+	}
+	if (rc == 0)
+		memcpy(bitmap, newmap, size);
+	mutex_unlock(lock);
+	kfree(newmap);
+	return rc;
+}
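+
+/*
+ * Example (illustrative): process_mask_arg("0x80", map, 256, &lock)
+ * replaces the whole map so that only bit 0 is set, while
+ * process_mask_arg("+0", map, 256, &lock) merely adds bit 0 to the
+ * current map; the very first character alone selects the mode.
+ */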
+
+/*
+ * AP bus attributes.
+ */
+
+static ssize_t ap_domain_show(struct bus_type *bus, char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "%d\n", ap_domain_index);
+}
+
+static ssize_t ap_domain_store(struct bus_type *bus,
+			       const char *buf, size_t count)
+{
+	int domain;
+
+	if (sscanf(buf, "%i\n", &domain) != 1 ||
+	    domain < 0 || domain > ap_max_domain_id ||
+	    !test_bit_inv(domain, ap_perms.aqm))
+		return -EINVAL;
+	spin_lock_bh(&ap_domain_lock);
+	ap_domain_index = domain;
+	spin_unlock_bh(&ap_domain_lock);
+
+	AP_DBF(DBF_DEBUG, "stored new default domain=%d\n", domain);
+
+	return count;
+}
+
+static BUS_ATTR_RW(ap_domain);
+
+static ssize_t ap_control_domain_mask_show(struct bus_type *bus, char *buf)
+{
+	if (!ap_configuration)	/* QCI not supported */
+		return snprintf(buf, PAGE_SIZE, "not supported\n");
+
+	return snprintf(buf, PAGE_SIZE,
+			"0x%08x%08x%08x%08x%08x%08x%08x%08x\n",
+			ap_configuration->adm[0], ap_configuration->adm[1],
+			ap_configuration->adm[2], ap_configuration->adm[3],
+			ap_configuration->adm[4], ap_configuration->adm[5],
+			ap_configuration->adm[6], ap_configuration->adm[7]);
+}
+
+static BUS_ATTR_RO(ap_control_domain_mask);
+
+static ssize_t ap_usage_domain_mask_show(struct bus_type *bus, char *buf)
+{
+	if (!ap_configuration)	/* QCI not supported */
+		return snprintf(buf, PAGE_SIZE, "not supported\n");
+
+	return snprintf(buf, PAGE_SIZE,
+			"0x%08x%08x%08x%08x%08x%08x%08x%08x\n",
+			ap_configuration->aqm[0], ap_configuration->aqm[1],
+			ap_configuration->aqm[2], ap_configuration->aqm[3],
+			ap_configuration->aqm[4], ap_configuration->aqm[5],
+			ap_configuration->aqm[6], ap_configuration->aqm[7]);
+}
+
+static BUS_ATTR_RO(ap_usage_domain_mask);
+
+static ssize_t ap_interrupts_show(struct bus_type *bus, char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "%d\n",
+			ap_using_interrupts() ? 1 : 0);
+}
+
+static BUS_ATTR_RO(ap_interrupts);
+
+static ssize_t config_time_show(struct bus_type *bus, char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "%d\n", ap_config_time);
+}
+
+static ssize_t config_time_store(struct bus_type *bus,
+				 const char *buf, size_t count)
+{
+	int time;
+
+	if (sscanf(buf, "%d\n", &time) != 1 || time < 5 || time > 120)
+		return -EINVAL;
+	ap_config_time = time;
+	mod_timer(&ap_config_timer, jiffies + ap_config_time * HZ);
+	return count;
+}
+
+static BUS_ATTR_RW(config_time);
+
+static ssize_t poll_thread_show(struct bus_type *bus, char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "%d\n", ap_poll_kthread ? 1 : 0);
+}
+
+static ssize_t poll_thread_store(struct bus_type *bus,
+				 const char *buf, size_t count)
+{
+	int flag, rc;
+
+	if (sscanf(buf, "%d\n", &flag) != 1)
+		return -EINVAL;
+	if (flag) {
+		rc = ap_poll_thread_start();
+		if (rc)
+			count = rc;
+	} else
+		ap_poll_thread_stop();
+	return count;
+}
+
+static BUS_ATTR_RW(poll_thread);
+
+static ssize_t poll_timeout_show(struct bus_type *bus, char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "%llu\n", poll_timeout);
+}
+
+static ssize_t poll_timeout_store(struct bus_type *bus, const char *buf,
+				  size_t count)
+{
+	unsigned long long time;
+	ktime_t hr_time;
+
+	/* 120 seconds = maximum poll interval */
+	if (sscanf(buf, "%llu\n", &time) != 1 || time < 1 ||
+	    time > 120000000000ULL)
+		return -EINVAL;
+	poll_timeout = time;
+	hr_time = poll_timeout;
+
+	spin_lock_bh(&ap_poll_timer_lock);
+	hrtimer_cancel(&ap_poll_timer);
+	hrtimer_set_expires(&ap_poll_timer, hr_time);
+	hrtimer_start_expires(&ap_poll_timer, HRTIMER_MODE_ABS);
+	spin_unlock_bh(&ap_poll_timer_lock);
+
+	return count;
+}
+
+static BUS_ATTR_RW(poll_timeout);
+
+static ssize_t ap_max_domain_id_show(struct bus_type *bus, char *buf)
+{
+	int max_domain_id;
+
+	if (ap_configuration)
+		max_domain_id = ap_max_domain_id ? : -1;
+	else
+		max_domain_id = 15;
+	return snprintf(buf, PAGE_SIZE, "%d\n", max_domain_id);
+}
+
+static BUS_ATTR_RO(ap_max_domain_id);
+
+static ssize_t apmask_show(struct bus_type *bus, char *buf)
+{
+	int rc;
+
+	if (mutex_lock_interruptible(&ap_perms_mutex))
+		return -ERESTARTSYS;
+	rc = snprintf(buf, PAGE_SIZE,
+		      "0x%016lx%016lx%016lx%016lx\n",
+		      ap_perms.apm[0], ap_perms.apm[1],
+		      ap_perms.apm[2], ap_perms.apm[3]);
+	mutex_unlock(&ap_perms_mutex);
+
+	return rc;
+}
+
+static ssize_t apmask_store(struct bus_type *bus, const char *buf,
+			    size_t count)
+{
+	int rc;
+
+	rc = process_mask_arg(buf, ap_perms.apm, AP_DEVICES, &ap_perms_mutex);
+	if (rc)
+		return rc;
+
+	ap_bus_revise_bindings();
+
+	return count;
+}
+
+static BUS_ATTR_RW(apmask);
+
+static ssize_t aqmask_show(struct bus_type *bus, char *buf)
+{
+	int rc;
+
+	if (mutex_lock_interruptible(&ap_perms_mutex))
+		return -ERESTARTSYS;
+	rc = snprintf(buf, PAGE_SIZE,
+		      "0x%016lx%016lx%016lx%016lx\n",
+		      ap_perms.aqm[0], ap_perms.aqm[1],
+		      ap_perms.aqm[2], ap_perms.aqm[3]);
+	mutex_unlock(&ap_perms_mutex);
+
+	return rc;
+}
+
+static ssize_t aqmask_store(struct bus_type *bus, const char *buf,
+			    size_t count)
+{
+	int rc;
+
+	rc = process_mask_arg(buf, ap_perms.aqm, AP_DOMAINS, &ap_perms_mutex);
+	if (rc)
+		return rc;
+
+	ap_bus_revise_bindings();
+
+	return count;
+}
+
+static BUS_ATTR_RW(aqmask);
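+
+/*
+ * Illustrative shell usage of the two mask attributes (the sysfs paths
+ * follow from the bus name registered below):
+ *
+ *	echo 0xffff > /sys/bus/ap/apmask
+ *	echo +0x10-0x1f > /sys/bus/ap/aqmask
+ *
+ * The first form overwrites the adapter mask, the second adds domains
+ * 0x10-0x1f to the current queue mask; either store triggers
+ * ap_bus_revise_bindings().
+ */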
+
+static struct bus_attribute *const ap_bus_attrs[] = {
+	&bus_attr_ap_domain,
+	&bus_attr_ap_control_domain_mask,
+	&bus_attr_ap_usage_domain_mask,
+	&bus_attr_config_time,
+	&bus_attr_poll_thread,
+	&bus_attr_ap_interrupts,
+	&bus_attr_poll_timeout,
+	&bus_attr_ap_max_domain_id,
+	&bus_attr_apmask,
+	&bus_attr_aqmask,
+	NULL,
+};
+
+/**
+ * ap_select_domain(): Select an AP domain.
+ *
+ * Pick one of the available AP domains.
+ */
+static int ap_select_domain(void)
+{
+	int count, max_count, best_domain;
+	struct ap_queue_status status;
+	int i, j;
+
+	/*
+	 * We want to use a single domain. Either the one specified with
+	 * the "domain=" parameter or the domain with the maximum number
+	 * of devices.
+	 */
+	spin_lock_bh(&ap_domain_lock);
+	if (ap_domain_index >= 0) {
+		/* Domain has already been selected. */
+		spin_unlock_bh(&ap_domain_lock);
+		return 0;
+	}
+	best_domain = -1;
+	max_count = 0;
+	for (i = 0; i < AP_DOMAINS; i++) {
+		if (!ap_test_config_domain(i) ||
+		    !test_bit_inv(i, ap_perms.aqm))
+			continue;
+		count = 0;
+		for (j = 0; j < AP_DEVICES; j++) {
+			if (!ap_test_config_card_id(j))
+				continue;
+			status = ap_test_queue(AP_MKQID(j, i),
+					       ap_apft_available(),
+					       NULL);
+			if (status.response_code != AP_RESPONSE_NORMAL)
+				continue;
+			count++;
+		}
+		if (count > max_count) {
+			max_count = count;
+			best_domain = i;
+		}
+	}
+	if (best_domain >= 0) {
+		ap_domain_index = best_domain;
+		AP_DBF(DBF_DEBUG, "new ap_domain_index=%d\n", ap_domain_index);
+		spin_unlock_bh(&ap_domain_lock);
+		return 0;
+	}
+	spin_unlock_bh(&ap_domain_lock);
+	return -ENODEV;
+}
+
+/*
+ * This function checks the type and returns either 0 for not
+ * supported or the highest compatible type value (which may
+ * include the input type value).
+ */
+static int ap_get_compatible_type(ap_qid_t qid, int rawtype, unsigned int func)
+{
+	int comp_type = 0;
+
+	/* < CEX2A is not supported */
+	if (rawtype < AP_DEVICE_TYPE_CEX2A)
+		return 0;
+	/* up to CEX6 known and fully supported */
+	if (rawtype <= AP_DEVICE_TYPE_CEX6)
+		return rawtype;
+	/*
+	 * unknown new type > CEX6, check for compatibility
+	 * to the highest known and supported type which is
+	 * currently CEX6 with the help of the QACT function.
+	 */
+	if (ap_qact_available()) {
+		struct ap_queue_status status;
+		union ap_qact_ap_info apinfo = {0};
+
+		apinfo.mode = (func >> 26) & 0x07;
+		apinfo.cat = AP_DEVICE_TYPE_CEX6;
+		status = ap_qact(qid, 0, &apinfo);
+		if (status.response_code == AP_RESPONSE_NORMAL
+		    && apinfo.cat >= AP_DEVICE_TYPE_CEX2A
+		    && apinfo.cat <= AP_DEVICE_TYPE_CEX6)
+			comp_type = apinfo.cat;
+	}
+	if (!comp_type)
+		AP_DBF(DBF_WARN, "queue=%02x.%04x unable to map type %d\n",
+		       AP_QID_CARD(qid), AP_QID_QUEUE(qid), rawtype);
+	else if (comp_type != rawtype)
+		AP_DBF(DBF_INFO, "queue=%02x.%04x map type %d to %d\n",
+		       AP_QID_CARD(qid), AP_QID_QUEUE(qid), rawtype, comp_type);
+	return comp_type;
+}
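+
+/*
+ * Example (hypothetical): a future card reporting rawtype 13 would be
+ * probed via QACT against AP_DEVICE_TYPE_CEX6; if the returned category
+ * is CEX6, the queue is driven in CEX6 compatibility mode, otherwise it
+ * remains unsupported (comp_type 0).
+ */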
+
+/*
+ * helper function to be used with bus_find_device();
+ * matches the card device with the given id
+ */
+static int __match_card_device_with_id(struct device *dev, void *data)
+{
+	return is_card_dev(dev) && to_ap_card(dev)->id == (int)(long) data;
+}
+
+/*
+ * helper function to be used with bus_find_device();
+ * matches the queue device with a given qid
+ */
+static int __match_queue_device_with_qid(struct device *dev, void *data)
+{
+	return is_queue_dev(dev) && to_ap_queue(dev)->qid == (int)(long) data;
+}
+
+/**
+ * ap_scan_bus(): Scan the AP bus for new devices.
+ * Runs periodically from a workqueue, rearmed by a timer every
+ * ap_config_time seconds.
+ */
+static void ap_scan_bus(struct work_struct *unused)
+{
+	struct ap_queue *aq;
+	struct ap_card *ac;
+	struct device *dev;
+	ap_qid_t qid;
+	int comp_type, depth = 0, type = 0;
+	unsigned int func = 0;
+	int rc, id, dom, borked, domains, defdomdevs = 0;
+
+	AP_DBF(DBF_DEBUG, "%s running\n", __func__);
+
+	ap_query_configuration(ap_configuration);
+	if (ap_select_domain() != 0)
+		goto out;
+
+	for (id = 0; id < AP_DEVICES; id++) {
+		/* check if device is registered */
+		dev = bus_find_device(&ap_bus_type, NULL,
+				      (void *)(long) id,
+				      __match_card_device_with_id);
+		ac = dev ? to_ap_card(dev) : NULL;
+		if (!ap_test_config_card_id(id)) {
+			if (dev) {
+				/* Card device has been removed from the
+				 * configuration, remove the associated
+				 * queue devices.
+				 */
+				bus_for_each_dev(&ap_bus_type, NULL,
+					(void *)(long) id,
+					__ap_queue_devices_with_id_unregister);
+				/* now remove the card device */
+				device_unregister(dev);
+				put_device(dev);
+			}
+			continue;
+		}
+		/* According to the configuration there should be a card
+		 * device, so check if there is at least one valid queue
+		 * and maybe create queue devices and the card device.
+		 */
+		domains = 0;
+		for (dom = 0; dom < AP_DOMAINS; dom++) {
+			qid = AP_MKQID(id, dom);
+			dev = bus_find_device(&ap_bus_type, NULL,
+					      (void *)(long) qid,
+					      __match_queue_device_with_qid);
+			aq = dev ? to_ap_queue(dev) : NULL;
+			if (!ap_test_config_domain(dom)) {
+				if (dev) {
+					/* Queue device exists but has been
+					 * removed from configuration.
+					 */
+					device_unregister(dev);
+					put_device(dev);
+				}
+				continue;
+			}
+			rc = ap_query_queue(qid, &depth, &type, &func);
+			if (dev) {
+				spin_lock_bh(&aq->lock);
+				if (rc == -ENODEV ||
+				    /* adapter reconfiguration */
+				    (ac && ac->functions != func))
+					aq->state = AP_STATE_BORKED;
+				borked = aq->state == AP_STATE_BORKED;
+				spin_unlock_bh(&aq->lock);
+				if (borked)	/* Remove broken device */
+					device_unregister(dev);
+				put_device(dev);
+				if (!borked) {
+					domains++;
+					if (dom == ap_domain_index)
+						defdomdevs++;
+					continue;
+				}
+			}
+			if (rc)
+				continue;
+			/* a new queue device is needed, check the comp type */
+			comp_type = ap_get_compatible_type(qid, type, func);
+			if (!comp_type)
+				continue;
+			/* maybe a card device needs to be created first */
+			if (!ac) {
+				ac = ap_card_create(id, depth, type,
+						    comp_type, func);
+				if (!ac)
+					continue;
+				ac->ap_dev.device.bus = &ap_bus_type;
+				ac->ap_dev.device.parent = ap_root_device;
+				dev_set_name(&ac->ap_dev.device,
+					     "card%02x", id);
+				/* Register card with AP bus */
+				rc = device_register(&ac->ap_dev.device);
+				if (rc) {
+					put_device(&ac->ap_dev.device);
+					ac = NULL;
+					break;
+				}
+				/* get it and thus adjust reference counter */
+				get_device(&ac->ap_dev.device);
+			}
+			/* now create the new queue device */
+			aq = ap_queue_create(qid, comp_type);
+			if (!aq)
+				continue;
+			aq->card = ac;
+			aq->ap_dev.device.bus = &ap_bus_type;
+			aq->ap_dev.device.parent = &ac->ap_dev.device;
+			dev_set_name(&aq->ap_dev.device,
+				     "%02x.%04x", id, dom);
+			/* Start with a device reset */
+			spin_lock_bh(&aq->lock);
+			ap_wait(ap_sm_event(aq, AP_EVENT_POLL));
+			spin_unlock_bh(&aq->lock);
+			/* Register device */
+			rc = device_register(&aq->ap_dev.device);
+			if (rc) {
+				put_device(&aq->ap_dev.device);
+				continue;
+			}
+			domains++;
+			if (dom == ap_domain_index)
+				defdomdevs++;
+		} /* end domain loop */
+		if (ac) {
+			/* remove card dev if there are no queue devices */
+			if (!domains)
+				device_unregister(&ac->ap_dev.device);
+			put_device(&ac->ap_dev.device);
+		}
+	} /* end device loop */
+
+	if (defdomdevs < 1)
+		AP_DBF(DBF_INFO,
+		       "no queue device with default domain %d available\n",
+		       ap_domain_index);
+
+out:
+	mod_timer(&ap_config_timer, jiffies + ap_config_time * HZ);
+}
+
+static void ap_config_timeout(struct timer_list *unused)
+{
+	if (ap_suspend_flag)
+		return;
+	queue_work(system_long_wq, &ap_scan_work);
+}
+
+static int __init ap_debug_init(void)
+{
+	ap_dbf_info = debug_register("ap", 1, 1,
+				     DBF_MAX_SPRINTF_ARGS * sizeof(long));
+	debug_register_view(ap_dbf_info, &debug_sprintf_view);
+	debug_set_level(ap_dbf_info, DBF_ERR);
+
+	return 0;
+}
+
+static void __init ap_perms_init(void)
+{
+	/* all resources usable if no kernel parameter string given */
+	memset(&ap_perms.apm, 0xFF, sizeof(ap_perms.apm));
+	memset(&ap_perms.aqm, 0xFF, sizeof(ap_perms.aqm));
+
+	/* apm kernel parameter string */
+	if (apm_str) {
+		memset(&ap_perms.apm, 0, sizeof(ap_perms.apm));
+		process_mask_arg(apm_str, ap_perms.apm, AP_DEVICES,
+				 &ap_perms_mutex);
+	}
+
+	/* aqm kernel parameter string */
+	if (aqm_str) {
+		memset(&ap_perms.aqm, 0, sizeof(ap_perms.aqm));
+		process_mask_arg(aqm_str, ap_perms.aqm, AP_DOMAINS,
+				 &ap_perms_mutex);
+	}
+}
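+
+/*
+ * The masks can be preset on the kernel command line, e.g. (values are
+ * illustrative; the "ap." prefix is the module name of this built-in):
+ *
+ *	ap.apmask=0xffff ap.aqmask=+0-15
+ *
+ * APQNs outside both masks are left to non-default drivers.
+ */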
+
+/**
+ * ap_module_init(): The module initialization code.
+ *
+ * Initializes the module.
+ */
+static int __init ap_module_init(void)
+{
+	int max_domain_id;
+	int rc, i;
+
+	rc = ap_debug_init();
+	if (rc)
+		return rc;
+
+	if (!ap_instructions_available()) {
+		pr_warn("The hardware system does not support AP instructions\n");
+		return -ENODEV;
+	}
+
+	/* set up the AP permissions (ap and aq masks) */
+	ap_perms_init();
+
+	/* Get AP configuration data if available */
+	ap_init_configuration();
+
+	if (ap_configuration)
+		max_domain_id =
+			ap_max_domain_id ? ap_max_domain_id : AP_DOMAINS - 1;
+	else
+		max_domain_id = 15;
+	if (ap_domain_index < -1 || ap_domain_index > max_domain_id ||
+	    (ap_domain_index >= 0 &&
+	     !test_bit_inv(ap_domain_index, ap_perms.aqm))) {
+		pr_warn("%d is not a valid cryptographic domain\n",
+			ap_domain_index);
+		ap_domain_index = -1;
+	}
+	/* In resume callback we need to know if the user had set the domain.
+	 * If so, we can not just reset it.
+	 */
+	if (ap_domain_index >= 0)
+		user_set_domain = 1;
+
+	if (ap_interrupts_available()) {
+		rc = register_adapter_interrupt(&ap_airq);
+		ap_airq_flag = (rc == 0);
+	}
+
+	/* Create /sys/bus/ap. */
+	rc = bus_register(&ap_bus_type);
+	if (rc)
+		goto out;
+	for (i = 0; ap_bus_attrs[i]; i++) {
+		rc = bus_create_file(&ap_bus_type, ap_bus_attrs[i]);
+		if (rc)
+			goto out_bus;
+	}
+
+	/* Create /sys/devices/ap. */
+	ap_root_device = root_device_register("ap");
+	rc = PTR_ERR_OR_ZERO(ap_root_device);
+	if (rc)
+		goto out_bus;
+
+	/* Set up the AP bus rescan timer. */
+	timer_setup(&ap_config_timer, ap_config_timeout, 0);
+
+	/*
+	 * Set up the high resolution poll timer.
+	 * If we are running under z/VM, adjust polling to the z/VM polling
+	 * rate.
+	 */
+	if (MACHINE_IS_VM)
+		poll_timeout = 1500000;
+	spin_lock_init(&ap_poll_timer_lock);
+	hrtimer_init(&ap_poll_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
+	ap_poll_timer.function = ap_poll_timeout;
+
+	/* Start the low priority AP bus poll thread. */
+	if (ap_thread_flag) {
+		rc = ap_poll_thread_start();
+		if (rc)
+			goto out_work;
+	}
+
+	rc = register_pm_notifier(&ap_power_notifier);
+	if (rc)
+		goto out_pm;
+
+	queue_work(system_long_wq, &ap_scan_work);
+	initialised = true;
+
+	return 0;
+
+out_pm:
+	ap_poll_thread_stop();
+out_work:
+	hrtimer_cancel(&ap_poll_timer);
+	root_device_unregister(ap_root_device);
+out_bus:
+	while (i--)
+		bus_remove_file(&ap_bus_type, ap_bus_attrs[i]);
+	bus_unregister(&ap_bus_type);
+out:
+	if (ap_using_interrupts())
+		unregister_adapter_interrupt(&ap_airq);
+	kfree(ap_configuration);
+	return rc;
+}
+device_initcall(ap_module_init);
diff --git a/drivers/s390/crypto/ap_bus.h b/drivers/s390/crypto/ap_bus.h
new file mode 100644
index 0000000..5246cd8
--- /dev/null
+++ b/drivers/s390/crypto/ap_bus.h
@@ -0,0 +1,283 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright IBM Corp. 2006, 2012
+ * Author(s): Cornelia Huck <cornelia.huck@de.ibm.com>
+ *	      Martin Schwidefsky <schwidefsky@de.ibm.com>
+ *	      Ralph Wuerthner <rwuerthn@de.ibm.com>
+ *	      Felix Beck <felix.beck@de.ibm.com>
+ *	      Holger Dengler <hd@linux.vnet.ibm.com>
+ *
+ * Adjunct processor bus header file.
+ */
+
+#ifndef _AP_BUS_H_
+#define _AP_BUS_H_
+
+#include <linux/device.h>
+#include <linux/types.h>
+#include <asm/isc.h>
+#include <asm/ap.h>
+
+#define AP_DEVICES 256		/* Number of AP devices. */
+#define AP_DOMAINS 256		/* Number of AP domains. */
+#define AP_RESET_TIMEOUT (HZ*0.7)	/* Time in ticks for reset timeouts. */
+#define AP_CONFIG_TIME 30	/* Time in seconds between AP bus rescans. */
+#define AP_POLL_TIME 1		/* Time in ticks between receive polls. */
+
+extern int ap_domain_index;
+
+extern spinlock_t ap_list_lock;
+extern struct list_head ap_card_list;
+
+static inline int ap_test_bit(unsigned int *ptr, unsigned int nr)
+{
+	return (*ptr & (0x80000000u >> nr)) != 0;
+}
+
+#define AP_RESPONSE_NORMAL		0x00
+#define AP_RESPONSE_Q_NOT_AVAIL		0x01
+#define AP_RESPONSE_RESET_IN_PROGRESS	0x02
+#define AP_RESPONSE_DECONFIGURED	0x03
+#define AP_RESPONSE_CHECKSTOPPED	0x04
+#define AP_RESPONSE_BUSY		0x05
+#define AP_RESPONSE_INVALID_ADDRESS	0x06
+#define AP_RESPONSE_OTHERWISE_CHANGED	0x07
+#define AP_RESPONSE_Q_FULL		0x10
+#define AP_RESPONSE_NO_PENDING_REPLY	0x10
+#define AP_RESPONSE_INDEX_TOO_BIG	0x11
+#define AP_RESPONSE_NO_FIRST_PART	0x13
+#define AP_RESPONSE_MESSAGE_TOO_BIG	0x15
+#define AP_RESPONSE_REQ_FAC_NOT_INST	0x16
+
+/*
+ * Known device types
+ */
+#define AP_DEVICE_TYPE_PCICC	3
+#define AP_DEVICE_TYPE_PCICA	4
+#define AP_DEVICE_TYPE_PCIXCC	5
+#define AP_DEVICE_TYPE_CEX2A	6
+#define AP_DEVICE_TYPE_CEX2C	7
+#define AP_DEVICE_TYPE_CEX3A	8
+#define AP_DEVICE_TYPE_CEX3C	9
+#define AP_DEVICE_TYPE_CEX4	10
+#define AP_DEVICE_TYPE_CEX5	11
+#define AP_DEVICE_TYPE_CEX6	12
+
+/*
+ * Known function facilities
+ */
+#define AP_FUNC_MEX4K 1
+#define AP_FUNC_CRT4K 2
+#define AP_FUNC_COPRO 3
+#define AP_FUNC_ACCEL 4
+#define AP_FUNC_EP11  5
+#define AP_FUNC_APXA  6
+
+/*
+ * AP interrupt states
+ */
+#define AP_INTR_DISABLED	0	/* AP interrupt disabled */
+#define AP_INTR_ENABLED		1	/* AP interrupt enabled */
+
+/*
+ * AP device states
+ */
+enum ap_state {
+	AP_STATE_RESET_START,
+	AP_STATE_RESET_WAIT,
+	AP_STATE_SETIRQ_WAIT,
+	AP_STATE_IDLE,
+	AP_STATE_WORKING,
+	AP_STATE_QUEUE_FULL,
+	AP_STATE_SUSPEND_WAIT,
+	AP_STATE_BORKED,
+	NR_AP_STATES
+};
+
+/*
+ * AP device events
+ */
+enum ap_event {
+	AP_EVENT_POLL,
+	AP_EVENT_TIMEOUT,
+	NR_AP_EVENTS
+};
+
+/*
+ * AP wait behaviour
+ */
+enum ap_wait {
+	AP_WAIT_AGAIN,		/* retry immediately */
+	AP_WAIT_TIMEOUT,	/* wait for timeout */
+	AP_WAIT_INTERRUPT,	/* wait for thin interrupt (if available) */
+	AP_WAIT_NONE,		/* no wait */
+	NR_AP_WAIT
+};
+
+struct ap_device;
+struct ap_message;
+
+/*
+ * The ap driver struct includes a flags field which holds some info for
+ * the ap bus about the driver. Currently only one flag is supported and
+ * used: The DEFAULT flag marks an ap driver as a default driver which is
+ * used together with the apmask and aqmask whitelisting of the ap bus.
+ */
+#define AP_DRIVER_FLAG_DEFAULT 0x0001
+
+struct ap_driver {
+	struct device_driver driver;
+	struct ap_device_id *ids;
+	unsigned int flags;
+
+	int (*probe)(struct ap_device *);
+	void (*remove)(struct ap_device *);
+	void (*suspend)(struct ap_device *);
+	void (*resume)(struct ap_device *);
+};
+
+#define to_ap_drv(x) container_of((x), struct ap_driver, driver)
+
+int ap_driver_register(struct ap_driver *, struct module *, char *);
+void ap_driver_unregister(struct ap_driver *);
+
+struct ap_device {
+	struct device device;
+	struct ap_driver *drv;		/* Pointer to AP device driver. */
+	int device_type;		/* AP device type. */
+};
+
+#define to_ap_dev(x) container_of((x), struct ap_device, device)
+
+struct ap_card {
+	struct ap_device ap_dev;
+	struct list_head list;		/* Private list of AP cards. */
+	struct list_head queues;	/* List of assoc. AP queues */
+	void *private;			/* ap driver private pointer. */
+	int raw_hwtype;			/* AP raw hardware type. */
+	unsigned int functions;		/* AP device function bitfield. */
+	int queue_depth;		/* AP queue depth.*/
+	int id;				/* AP card number. */
+	atomic_t total_request_count;	/* # requests ever for this AP device.*/
+};
+
+#define to_ap_card(x) container_of((x), struct ap_card, ap_dev.device)
+
+struct ap_queue {
+	struct ap_device ap_dev;
+	struct list_head list;		/* Private list of AP queues. */
+	struct ap_card *card;		/* Ptr to assoc. AP card. */
+	spinlock_t lock;		/* Per device lock. */
+	void *private;			/* ap driver private pointer. */
+	ap_qid_t qid;			/* AP queue id. */
+	int interrupt;			/* indicate if interrupts are enabled */
+	int queue_count;		/* # messages currently on AP queue. */
+	enum ap_state state;		/* State of the AP device. */
+	int pendingq_count;		/* # requests on pendingq list. */
+	int requestq_count;		/* # requests on requestq list. */
+	int total_request_count;	/* # requests ever for this AP device.*/
+	int request_timeout;		/* Request timeout in jiffies. */
+	struct timer_list timeout;	/* Timer for request timeouts. */
+	struct list_head pendingq;	/* List of message sent to AP queue. */
+	struct list_head requestq;	/* List of message yet to be sent. */
+	struct ap_message *reply;	/* Per device reply message. */
+};
+
+#define to_ap_queue(x) container_of((x), struct ap_queue, ap_dev.device)
+
+typedef enum ap_wait (ap_func_t)(struct ap_queue *queue);
+
+struct ap_message {
+	struct list_head list;		/* Request queueing. */
+	unsigned long long psmid;	/* Message id. */
+	void *message;			/* Pointer to message buffer. */
+	size_t length;			/* Message length. */
+	int rc;				/* Return code for this message */
+
+	void *private;			/* ap driver private pointer. */
+	unsigned int special:1;		/* Used for special commands. */
+	/* receive is called from tasklet context */
+	void (*receive)(struct ap_queue *, struct ap_message *,
+			struct ap_message *);
+};
+
+/**
+ * ap_init_message() - Initialize ap_message.
+ * Initialize a message before using it. Otherwise this might result in
+ * unexpected behaviour.
+ */
+static inline void ap_init_message(struct ap_message *ap_msg)
+{
+	memset(ap_msg, 0, sizeof(*ap_msg));
+}
+
+/**
+ * ap_release_message() - Release ap_message.
+ * Releases all memory used internally within the ap_message struct.
+ * Currently this is the message buffer and the private field.
+ */
+static inline void ap_release_message(struct ap_message *ap_msg)
+{
+	kzfree(ap_msg->message);
+	kzfree(ap_msg->private);
+}
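+
+/*
+ * Typical message life cycle (sketch; the buffer size, psmid and the
+ * example_receive_cb callback are illustrative):
+ *
+ *	struct ap_message ap_msg;
+ *
+ *	ap_init_message(&ap_msg);
+ *	ap_msg.message = kmalloc(4096, GFP_KERNEL);
+ *	ap_msg.length = 4096;
+ *	ap_msg.psmid = 0x0102030405060708ULL;
+ *	ap_msg.receive = example_receive_cb;
+ *	ap_queue_message(aq, &ap_msg);
+ *	(wait until ->receive() has completed the request)
+ *	ap_release_message(&ap_msg);
+ */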
+
+#define for_each_ap_card(_ac) \
+	list_for_each_entry(_ac, &ap_card_list, list)
+
+#define for_each_ap_queue(_aq, _ac) \
+	list_for_each_entry(_aq, &(_ac)->queues, list)
+
+/*
+ * Note: don't use ap_send/ap_recv after using ap_queue_message
+ * for the first time. Otherwise the ap message queue will get
+ * confused.
+ */
+int ap_send(ap_qid_t, unsigned long long, void *, size_t);
+int ap_recv(ap_qid_t, unsigned long long *, void *, size_t);
+
+enum ap_wait ap_sm_event(struct ap_queue *aq, enum ap_event event);
+enum ap_wait ap_sm_event_loop(struct ap_queue *aq, enum ap_event event);
+
+void ap_queue_message(struct ap_queue *aq, struct ap_message *ap_msg);
+void ap_cancel_message(struct ap_queue *aq, struct ap_message *ap_msg);
+void ap_flush_queue(struct ap_queue *aq);
+
+void *ap_airq_ptr(void);
+void ap_wait(enum ap_wait wait);
+void ap_request_timeout(struct timer_list *t);
+void ap_bus_force_rescan(void);
+
+void ap_queue_init_reply(struct ap_queue *aq, struct ap_message *ap_msg);
+struct ap_queue *ap_queue_create(ap_qid_t qid, int device_type);
+void ap_queue_remove(struct ap_queue *aq);
+void ap_queue_suspend(struct ap_device *ap_dev);
+void ap_queue_resume(struct ap_device *ap_dev);
+
+struct ap_card *ap_card_create(int id, int queue_depth, int raw_device_type,
+			       int comp_device_type, unsigned int functions);
+
+/*
+ * Check whether an APQN is owned/reserved by the ap bus and the
+ * default driver(s), i.e. whether this APQN is or will be in use
+ * by the ap bus and the default set of drivers.
+ * If yes, returns 1, if not returns 0. On error a negative
+ * errno value is returned.
+ */
+int ap_owned_by_def_drv(int card, int queue);
+
+/*
+ * Check a 'matrix' of APQNs for being owned/reserved by the ap bus
+ * and the default driver(s).
+ * Checks if there is at least one APQN in the given 'matrix'
+ * marked as owned/reserved by the ap bus and default driver(s).
+ * If such an APQN is found the return value is 1, otherwise
+ * 0 is returned. On error a negative errno value is returned.
+ * The parameter apm is a bitmask which should be declared
+ * as DECLARE_BITMAP(apm, AP_DEVICES), the aqm parameter is
+ * similar, should be declared as DECLARE_BITMAP(aqm, AP_DOMAINS).
+ */
+int ap_apqn_in_matrix_owned_by_def_drv(unsigned long *apm,
+				       unsigned long *aqm);
+
+#endif /* _AP_BUS_H_ */
diff --git a/drivers/s390/crypto/ap_card.c b/drivers/s390/crypto/ap_card.c
new file mode 100644
index 0000000..63b4cc6
--- /dev/null
+++ b/drivers/s390/crypto/ap_card.c
@@ -0,0 +1,191 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright IBM Corp. 2016
+ * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
+ *
+ * Adjunct processor bus, card related code.
+ */
+
+#define KMSG_COMPONENT "ap"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <asm/facility.h>
+
+#include "ap_bus.h"
+
+/*
+ * AP card related attributes.
+ */
+static ssize_t hwtype_show(struct device *dev,
+			   struct device_attribute *attr, char *buf)
+{
+	struct ap_card *ac = to_ap_card(dev);
+
+	return snprintf(buf, PAGE_SIZE, "%d\n", ac->ap_dev.device_type);
+}
+
+static DEVICE_ATTR_RO(hwtype);
+
+static ssize_t raw_hwtype_show(struct device *dev,
+			       struct device_attribute *attr, char *buf)
+{
+	struct ap_card *ac = to_ap_card(dev);
+
+	return snprintf(buf, PAGE_SIZE, "%d\n", ac->raw_hwtype);
+}
+
+static DEVICE_ATTR_RO(raw_hwtype);
+
+static ssize_t depth_show(struct device *dev, struct device_attribute *attr,
+			  char *buf)
+{
+	struct ap_card *ac = to_ap_card(dev);
+
+	return snprintf(buf, PAGE_SIZE, "%d\n", ac->queue_depth);
+}
+
+static DEVICE_ATTR_RO(depth);
+
+static ssize_t ap_functions_show(struct device *dev,
+				 struct device_attribute *attr, char *buf)
+{
+	struct ap_card *ac = to_ap_card(dev);
+
+	return snprintf(buf, PAGE_SIZE, "0x%08X\n", ac->functions);
+}
+
+static DEVICE_ATTR_RO(ap_functions);
+
+static ssize_t request_count_show(struct device *dev,
+				  struct device_attribute *attr,
+				  char *buf)
+{
+	struct ap_card *ac = to_ap_card(dev);
+	unsigned int req_cnt;
+
+	spin_lock_bh(&ap_list_lock);
+	req_cnt = atomic_read(&ac->total_request_count);
+	spin_unlock_bh(&ap_list_lock);
+	return snprintf(buf, PAGE_SIZE, "%d\n", req_cnt);
+}
+
+static ssize_t request_count_store(struct device *dev,
+				   struct device_attribute *attr,
+				   const char *buf, size_t count)
+{
+	struct ap_card *ac = to_ap_card(dev);
+	struct ap_queue *aq;
+
+	spin_lock_bh(&ap_list_lock);
+	for_each_ap_queue(aq, ac)
+		aq->total_request_count = 0;
+	spin_unlock_bh(&ap_list_lock);
+	atomic_set(&ac->total_request_count, 0);
+
+	return count;
+}
+
+static DEVICE_ATTR_RW(request_count);
+
+static ssize_t requestq_count_show(struct device *dev,
+				   struct device_attribute *attr, char *buf)
+{
+	struct ap_card *ac = to_ap_card(dev);
+	struct ap_queue *aq;
+	unsigned int reqq_cnt;
+
+	reqq_cnt = 0;
+	spin_lock_bh(&ap_list_lock);
+	for_each_ap_queue(aq, ac)
+		reqq_cnt += aq->requestq_count;
+	spin_unlock_bh(&ap_list_lock);
+	return snprintf(buf, PAGE_SIZE, "%d\n", reqq_cnt);
+}
+
+static DEVICE_ATTR_RO(requestq_count);
+
+static ssize_t pendingq_count_show(struct device *dev,
+				   struct device_attribute *attr, char *buf)
+{
+	struct ap_card *ac = to_ap_card(dev);
+	struct ap_queue *aq;
+	unsigned int penq_cnt;
+
+	penq_cnt = 0;
+	spin_lock_bh(&ap_list_lock);
+	for_each_ap_queue(aq, ac)
+		penq_cnt += aq->pendingq_count;
+	spin_unlock_bh(&ap_list_lock);
+	return snprintf(buf, PAGE_SIZE, "%d\n", penq_cnt);
+}
+
+static DEVICE_ATTR_RO(pendingq_count);
+
+static ssize_t modalias_show(struct device *dev,
+			     struct device_attribute *attr, char *buf)
+{
+	return sprintf(buf, "ap:t%02X\n", to_ap_dev(dev)->device_type);
+}
+
+static DEVICE_ATTR_RO(modalias);
+
+static struct attribute *ap_card_dev_attrs[] = {
+	&dev_attr_hwtype.attr,
+	&dev_attr_raw_hwtype.attr,
+	&dev_attr_depth.attr,
+	&dev_attr_ap_functions.attr,
+	&dev_attr_request_count.attr,
+	&dev_attr_requestq_count.attr,
+	&dev_attr_pendingq_count.attr,
+	&dev_attr_modalias.attr,
+	NULL
+};
+
+static struct attribute_group ap_card_dev_attr_group = {
+	.attrs = ap_card_dev_attrs
+};
+
+static const struct attribute_group *ap_card_dev_attr_groups[] = {
+	&ap_card_dev_attr_group,
+	NULL
+};
+
+static struct device_type ap_card_type = {
+	.name = "ap_card",
+	.groups = ap_card_dev_attr_groups,
+};
+
+static void ap_card_device_release(struct device *dev)
+{
+	struct ap_card *ac = to_ap_card(dev);
+
+	if (!list_empty(&ac->list)) {
+		spin_lock_bh(&ap_list_lock);
+		list_del_init(&ac->list);
+		spin_unlock_bh(&ap_list_lock);
+	}
+	kfree(ac);
+}
+
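+/**
+ * ap_card_create(): Create a new AP card object.
+ * @id: The AP card id (adapter number)
+ * @queue_depth: The queue depth of the card
+ * @raw_type: The raw hardware type of the card
+ * @comp_type: The compatible device type used for driver matching
+ * @functions: The function facility mask of the card
+ *
+ * Allocates and initializes an ap_card struct including its device
+ * release function and device type. The device is not yet registered.
+ * Returns a pointer to the new card or NULL if the allocation failed.
+ */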
+struct ap_card *ap_card_create(int id, int queue_depth, int raw_type,
+			       int comp_type, unsigned int functions)
+{
+	struct ap_card *ac;
+
+	ac = kzalloc(sizeof(*ac), GFP_KERNEL);
+	if (!ac)
+		return NULL;
+	INIT_LIST_HEAD(&ac->list);
+	INIT_LIST_HEAD(&ac->queues);
+	ac->ap_dev.device.release = ap_card_device_release;
+	ac->ap_dev.device.type = &ap_card_type;
+	ac->ap_dev.device_type = comp_type;
+	ac->raw_hwtype = raw_type;
+	ac->queue_depth = queue_depth;
+	ac->functions = functions;
+	ac->id = id;
+	return ac;
+}
diff --git a/drivers/s390/crypto/ap_debug.h b/drivers/s390/crypto/ap_debug.h
new file mode 100644
index 0000000..dc675eb
--- /dev/null
+++ b/drivers/s390/crypto/ap_debug.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ *  Copyright IBM Corp. 2016
+ *  Author(s): Harald Freudenberger <freude@de.ibm.com>
+ */
+#ifndef AP_DEBUG_H
+#define AP_DEBUG_H
+
+#include <asm/debug.h>
+
+#define DBF_ERR		3	/* error conditions   */
+#define DBF_WARN	4	/* warning conditions */
+#define DBF_INFO	5	/* informational      */
+#define DBF_DEBUG	6	/* for debugging only */
+
+#define RC2ERR(rc) ((rc) ? DBF_ERR : DBF_INFO)
+#define RC2WARN(rc) ((rc) ? DBF_WARN : DBF_INFO)
+
+#define DBF_MAX_SPRINTF_ARGS 5
+
+#define AP_DBF(...)					\
+	debug_sprintf_event(ap_dbf_info, ##__VA_ARGS__)
+
+extern debug_info_t *ap_dbf_info;
+
+#endif /* AP_DEBUG_H */
diff --git a/drivers/s390/crypto/ap_queue.c b/drivers/s390/crypto/ap_queue.c
new file mode 100644
index 0000000..66f7334
--- /dev/null
+++ b/drivers/s390/crypto/ap_queue.c
@@ -0,0 +1,722 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright IBM Corp. 2016
+ * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
+ *
+ * Adjunct processor bus, queue related code.
+ */
+
+#define KMSG_COMPONENT "ap"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <asm/facility.h>
+
+#include "ap_bus.h"
+
+/**
+ * ap_queue_enable_interruption(): Enable interruption on an AP queue.
+ * @aq: The AP queue
+ * @ind: the notification indicator byte
+ *
+ * Enables interruption on an AP queue via ap_aqic() and translates
+ * the resulting AP queue status response code into an errno value.
+ */
+static int ap_queue_enable_interruption(struct ap_queue *aq, void *ind)
+{
+	struct ap_queue_status status;
+	struct ap_qirq_ctrl qirqctrl = { 0 };
+
+	qirqctrl.ir = 1;
+	qirqctrl.isc = AP_ISC;
+	status = ap_aqic(aq->qid, qirqctrl, ind);
+	switch (status.response_code) {
+	case AP_RESPONSE_NORMAL:
+	case AP_RESPONSE_OTHERWISE_CHANGED:
+		return 0;
+	case AP_RESPONSE_Q_NOT_AVAIL:
+	case AP_RESPONSE_DECONFIGURED:
+	case AP_RESPONSE_CHECKSTOPPED:
+	case AP_RESPONSE_INVALID_ADDRESS:
+		pr_err("Registering adapter interrupts for AP device %02x.%04x failed\n",
+		       AP_QID_CARD(aq->qid),
+		       AP_QID_QUEUE(aq->qid));
+		return -EOPNOTSUPP;
+	case AP_RESPONSE_RESET_IN_PROGRESS:
+	case AP_RESPONSE_BUSY:
+	default:
+		return -EBUSY;
+	}
+}
+
+/**
+ * __ap_send(): Send message to adjunct processor queue.
+ * @qid: The AP queue number
+ * @psmid: The program supplied message identifier
+ * @msg: The message text
+ * @length: The message length
+ * @special: Special Bit
+ *
+ * Returns AP queue status structure.
+ * Condition code 1 on NQAP can't happen because the L bit is 1.
+ * Condition code 2 on NQAP also means the send is incomplete,
+ * because a segment boundary was reached. The NQAP is repeated.
+ */
+static inline struct ap_queue_status
+__ap_send(ap_qid_t qid, unsigned long long psmid, void *msg, size_t length,
+	  unsigned int special)
+{
+	if (special == 1)
+		qid |= 0x400000UL;
+	return ap_nqap(qid, psmid, msg, length);
+}
+
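+/**
+ * ap_send(): Send a message to an adjunct processor queue.
+ * @qid: The AP queue number
+ * @psmid: The program supplied message identifier
+ * @msg: The message text
+ * @length: The message length
+ *
+ * Returns 0 on success, -EBUSY if the queue is full or a reset is
+ * in progress, -EINVAL if the required facility is not installed
+ * and -ENODEV if the device is gone.
+ */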
+int ap_send(ap_qid_t qid, unsigned long long psmid, void *msg, size_t length)
+{
+	struct ap_queue_status status;
+
+	status = __ap_send(qid, psmid, msg, length, 0);
+	switch (status.response_code) {
+	case AP_RESPONSE_NORMAL:
+		return 0;
+	case AP_RESPONSE_Q_FULL:
+	case AP_RESPONSE_RESET_IN_PROGRESS:
+		return -EBUSY;
+	case AP_RESPONSE_REQ_FAC_NOT_INST:
+		return -EINVAL;
+	default:	/* Device is gone. */
+		return -ENODEV;
+	}
+}
+EXPORT_SYMBOL(ap_send);
+
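+/**
+ * ap_recv(): Receive a message from an adjunct processor queue.
+ * @qid: The AP queue number
+ * @psmid: Pointer for the program supplied message identifier
+ * @msg: The message buffer
+ * @length: The message buffer length
+ *
+ * Returns 0 on success, -EINVAL if no message buffer is given,
+ * -ENOENT if the queue is empty, -EBUSY if no reply is pending yet
+ * or a reset is in progress and -ENODEV if the device is gone.
+ */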
+int ap_recv(ap_qid_t qid, unsigned long long *psmid, void *msg, size_t length)
+{
+	struct ap_queue_status status;
+
+	if (msg == NULL)
+		return -EINVAL;
+	status = ap_dqap(qid, psmid, msg, length);
+	switch (status.response_code) {
+	case AP_RESPONSE_NORMAL:
+		return 0;
+	case AP_RESPONSE_NO_PENDING_REPLY:
+		if (status.queue_empty)
+			return -ENOENT;
+		return -EBUSY;
+	case AP_RESPONSE_RESET_IN_PROGRESS:
+		return -EBUSY;
+	default:
+		return -ENODEV;
+	}
+}
+EXPORT_SYMBOL(ap_recv);
+
+/* State machine definitions and helpers */
+
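+/**
+ * ap_sm_nop(): Do nothing state machine function.
+ * @aq: pointer to the AP queue
+ *
+ * Returns AP_WAIT_NONE.
+ */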
+static enum ap_wait ap_sm_nop(struct ap_queue *aq)
+{
+	return AP_WAIT_NONE;
+}
+
+/**
+ * ap_sm_recv(): Receive pending reply messages from an AP queue but do
+ *	not change the state of the device.
+ * @aq: pointer to the AP queue
+ *
+ * Returns the AP queue status from the dequeue (DQAP) attempt.
+ */
+static struct ap_queue_status ap_sm_recv(struct ap_queue *aq)
+{
+	struct ap_queue_status status;
+	struct ap_message *ap_msg;
+
+	status = ap_dqap(aq->qid, &aq->reply->psmid,
+			 aq->reply->message, aq->reply->length);
+	switch (status.response_code) {
+	case AP_RESPONSE_NORMAL:
+		aq->queue_count--;
+		if (aq->queue_count > 0)
+			mod_timer(&aq->timeout,
+				  jiffies + aq->request_timeout);
+		list_for_each_entry(ap_msg, &aq->pendingq, list) {
+			if (ap_msg->psmid != aq->reply->psmid)
+				continue;
+			list_del_init(&ap_msg->list);
+			aq->pendingq_count--;
+			ap_msg->receive(aq, ap_msg, aq->reply);
+			break;
+		}
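+		/* fall through */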
+	case AP_RESPONSE_NO_PENDING_REPLY:
+		if (!status.queue_empty || aq->queue_count <= 0)
+			break;
+		/* The card shouldn't forget requests but who knows. */
+		aq->queue_count = 0;
+		list_splice_init(&aq->pendingq, &aq->requestq);
+		aq->requestq_count += aq->pendingq_count;
+		aq->pendingq_count = 0;
+		break;
+	default:
+		break;
+	}
+	return status;
+}
+
+/**
+ * ap_sm_read(): Receive pending reply messages from an AP queue.
+ * @aq: pointer to the AP queue
+ *
+ * Returns AP_WAIT_NONE, AP_WAIT_AGAIN, or AP_WAIT_INTERRUPT
+ */
+static enum ap_wait ap_sm_read(struct ap_queue *aq)
+{
+	struct ap_queue_status status;
+
+	if (!aq->reply)
+		return AP_WAIT_NONE;
+	status = ap_sm_recv(aq);
+	switch (status.response_code) {
+	case AP_RESPONSE_NORMAL:
+		if (aq->queue_count > 0) {
+			aq->state = AP_STATE_WORKING;
+			return AP_WAIT_AGAIN;
+		}
+		aq->state = AP_STATE_IDLE;
+		return AP_WAIT_NONE;
+	case AP_RESPONSE_NO_PENDING_REPLY:
+		if (aq->queue_count > 0)
+			return AP_WAIT_INTERRUPT;
+		aq->state = AP_STATE_IDLE;
+		return AP_WAIT_NONE;
+	default:
+		aq->state = AP_STATE_BORKED;
+		return AP_WAIT_NONE;
+	}
+}
+
+/**
+ * ap_sm_suspend_read(): Receive pending reply messages from an AP queue
+ * without changing the device state in between. In suspend mode we don't
+ * allow sending new requests, therefore just fetch pending replies.
+ * @aq: pointer to the AP queue
+ *
+ * Returns AP_WAIT_NONE or AP_WAIT_AGAIN
+ */
+static enum ap_wait ap_sm_suspend_read(struct ap_queue *aq)
+{
+	struct ap_queue_status status;
+
+	if (!aq->reply)
+		return AP_WAIT_NONE;
+	status = ap_sm_recv(aq);
+	switch (status.response_code) {
+	case AP_RESPONSE_NORMAL:
+		if (aq->queue_count > 0)
+			return AP_WAIT_AGAIN;
+		/* fall through */
+	default:
+		return AP_WAIT_NONE;
+	}
+}
+
+/**
+ * ap_sm_write(): Send messages from the request queue to an AP queue.
+ * @aq: pointer to the AP queue
+ *
+ * Returns AP_WAIT_NONE, AP_WAIT_AGAIN, AP_WAIT_TIMEOUT or
+ * AP_WAIT_INTERRUPT
+ */
+static enum ap_wait ap_sm_write(struct ap_queue *aq)
+{
+	struct ap_queue_status status;
+	struct ap_message *ap_msg;
+
+	if (aq->requestq_count <= 0)
+		return AP_WAIT_NONE;
+	/* Start the next request on the queue. */
+	ap_msg = list_entry(aq->requestq.next, struct ap_message, list);
+	status = __ap_send(aq->qid, ap_msg->psmid,
+			   ap_msg->message, ap_msg->length, ap_msg->special);
+	switch (status.response_code) {
+	case AP_RESPONSE_NORMAL:
+		aq->queue_count++;
+		if (aq->queue_count == 1)
+			mod_timer(&aq->timeout, jiffies + aq->request_timeout);
+		list_move_tail(&ap_msg->list, &aq->pendingq);
+		aq->requestq_count--;
+		aq->pendingq_count++;
+		if (aq->queue_count < aq->card->queue_depth) {
+			aq->state = AP_STATE_WORKING;
+			return AP_WAIT_AGAIN;
+		}
+		/* fall through */
+	case AP_RESPONSE_Q_FULL:
+		aq->state = AP_STATE_QUEUE_FULL;
+		return AP_WAIT_INTERRUPT;
+	case AP_RESPONSE_RESET_IN_PROGRESS:
+		aq->state = AP_STATE_RESET_WAIT;
+		return AP_WAIT_TIMEOUT;
+	case AP_RESPONSE_MESSAGE_TOO_BIG:
+	case AP_RESPONSE_REQ_FAC_NOT_INST:
+		list_del_init(&ap_msg->list);
+		aq->requestq_count--;
+		ap_msg->rc = -EINVAL;
+		ap_msg->receive(aq, ap_msg, NULL);
+		return AP_WAIT_AGAIN;
+	default:
+		aq->state = AP_STATE_BORKED;
+		return AP_WAIT_NONE;
+	}
+}
+
+/**
+ * ap_sm_read_write(): Send and receive messages to/from an AP queue.
+ * @aq: pointer to the AP queue
+ *
+ * Returns AP_WAIT_NONE, AP_WAIT_AGAIN, AP_WAIT_TIMEOUT or
+ * AP_WAIT_INTERRUPT
+ */
+static enum ap_wait ap_sm_read_write(struct ap_queue *aq)
+{
+	return min(ap_sm_read(aq), ap_sm_write(aq));
+}
+
+/**
+ * ap_sm_reset(): Reset an AP queue.
+ * @aq: pointer to the AP queue
+ *
+ * Submit the Reset command to an AP queue.
+ */
+static enum ap_wait ap_sm_reset(struct ap_queue *aq)
+{
+	struct ap_queue_status status;
+
+	status = ap_rapq(aq->qid);
+	switch (status.response_code) {
+	case AP_RESPONSE_NORMAL:
+	case AP_RESPONSE_RESET_IN_PROGRESS:
+		aq->state = AP_STATE_RESET_WAIT;
+		aq->interrupt = AP_INTR_DISABLED;
+		return AP_WAIT_TIMEOUT;
+	case AP_RESPONSE_BUSY:
+		return AP_WAIT_TIMEOUT;
+	case AP_RESPONSE_Q_NOT_AVAIL:
+	case AP_RESPONSE_DECONFIGURED:
+	case AP_RESPONSE_CHECKSTOPPED:
+	default:
+		aq->state = AP_STATE_BORKED;
+		return AP_WAIT_NONE;
+	}
+}
+
+/**
+ * ap_sm_reset_wait(): Test queue for completion of the reset operation
+ * @aq: pointer to the AP queue
+ *
+ * Returns AP_WAIT_NONE, AP_WAIT_TIMEOUT or AP_WAIT_AGAIN.
+ */
+static enum ap_wait ap_sm_reset_wait(struct ap_queue *aq)
+{
+	struct ap_queue_status status;
+	void *lsi_ptr;
+
+	if (aq->queue_count > 0 && aq->reply)
+		/* Try to read a completed message and get the status */
+		status = ap_sm_recv(aq);
+	else
+		/* Get the status with TAPQ */
+		status = ap_tapq(aq->qid, NULL);
+
+	switch (status.response_code) {
+	case AP_RESPONSE_NORMAL:
+		lsi_ptr = ap_airq_ptr();
+		if (lsi_ptr && ap_queue_enable_interruption(aq, lsi_ptr) == 0)
+			aq->state = AP_STATE_SETIRQ_WAIT;
+		else
+			aq->state = (aq->queue_count > 0) ?
+				AP_STATE_WORKING : AP_STATE_IDLE;
+		return AP_WAIT_AGAIN;
+	case AP_RESPONSE_BUSY:
+	case AP_RESPONSE_RESET_IN_PROGRESS:
+		return AP_WAIT_TIMEOUT;
+	case AP_RESPONSE_Q_NOT_AVAIL:
+	case AP_RESPONSE_DECONFIGURED:
+	case AP_RESPONSE_CHECKSTOPPED:
+	default:
+		aq->state = AP_STATE_BORKED;
+		return AP_WAIT_NONE;
+	}
+}
+
+/**
+ * ap_sm_setirq_wait(): Test queue for completion of the irq enablement
+ * @aq: pointer to the AP queue
+ *
+ * Returns AP_WAIT_NONE, AP_WAIT_TIMEOUT or AP_WAIT_AGAIN.
+ */
+static enum ap_wait ap_sm_setirq_wait(struct ap_queue *aq)
+{
+	struct ap_queue_status status;
+
+	if (aq->queue_count > 0 && aq->reply)
+		/* Try to read a completed message and get the status */
+		status = ap_sm_recv(aq);
+	else
+		/* Get the status with TAPQ */
+		status = ap_tapq(aq->qid, NULL);
+
+	if (status.irq_enabled == 1) {
+		/* Irqs are now enabled */
+		aq->interrupt = AP_INTR_ENABLED;
+		aq->state = (aq->queue_count > 0) ?
+			AP_STATE_WORKING : AP_STATE_IDLE;
+	}
+
+	switch (status.response_code) {
+	case AP_RESPONSE_NORMAL:
+		if (aq->queue_count > 0)
+			return AP_WAIT_AGAIN;
+		/* fallthrough */
+	case AP_RESPONSE_NO_PENDING_REPLY:
+		return AP_WAIT_TIMEOUT;
+	default:
+		aq->state = AP_STATE_BORKED;
+		return AP_WAIT_NONE;
+	}
+}
+
+/*
+ * AP state machine jump table
+ */
+static ap_func_t *ap_jumptable[NR_AP_STATES][NR_AP_EVENTS] = {
+	[AP_STATE_RESET_START] = {
+		[AP_EVENT_POLL] = ap_sm_reset,
+		[AP_EVENT_TIMEOUT] = ap_sm_nop,
+	},
+	[AP_STATE_RESET_WAIT] = {
+		[AP_EVENT_POLL] = ap_sm_reset_wait,
+		[AP_EVENT_TIMEOUT] = ap_sm_nop,
+	},
+	[AP_STATE_SETIRQ_WAIT] = {
+		[AP_EVENT_POLL] = ap_sm_setirq_wait,
+		[AP_EVENT_TIMEOUT] = ap_sm_nop,
+	},
+	[AP_STATE_IDLE] = {
+		[AP_EVENT_POLL] = ap_sm_write,
+		[AP_EVENT_TIMEOUT] = ap_sm_nop,
+	},
+	[AP_STATE_WORKING] = {
+		[AP_EVENT_POLL] = ap_sm_read_write,
+		[AP_EVENT_TIMEOUT] = ap_sm_reset,
+	},
+	[AP_STATE_QUEUE_FULL] = {
+		[AP_EVENT_POLL] = ap_sm_read,
+		[AP_EVENT_TIMEOUT] = ap_sm_reset,
+	},
+	[AP_STATE_SUSPEND_WAIT] = {
+		[AP_EVENT_POLL] = ap_sm_suspend_read,
+		[AP_EVENT_TIMEOUT] = ap_sm_nop,
+	},
+	[AP_STATE_BORKED] = {
+		[AP_EVENT_POLL] = ap_sm_nop,
+		[AP_EVENT_TIMEOUT] = ap_sm_nop,
+	},
+};
+
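+/**
+ * ap_sm_event(): Drive the AP queue state machine for one event.
+ * @aq: pointer to the AP queue
+ * @event: the event to process
+ *
+ * Looks up the function for the current state and the given event in
+ * the jump table above and returns its wait indication. Callers in
+ * this file invoke it with the queue lock held.
+ */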
+enum ap_wait ap_sm_event(struct ap_queue *aq, enum ap_event event)
+{
+	return ap_jumptable[aq->state][event](aq);
+}
+
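+/**
+ * ap_sm_event_loop(): Drive the AP queue state machine until it settles.
+ * @aq: pointer to the AP queue
+ * @event: the event to process
+ *
+ * Calls ap_sm_event() repeatedly as long as AP_WAIT_AGAIN is returned
+ * and passes the first other wait indication back to the caller.
+ */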
+enum ap_wait ap_sm_event_loop(struct ap_queue *aq, enum ap_event event)
+{
+	enum ap_wait wait;
+
+	while ((wait = ap_sm_event(aq, event)) == AP_WAIT_AGAIN)
+		;
+	return wait;
+}
+
+/*
+ * Power management for queue devices
+ */
+void ap_queue_suspend(struct ap_device *ap_dev)
+{
+	struct ap_queue *aq = to_ap_queue(&ap_dev->device);
+
+	/* Poll on the device until all requests are finished. */
+	spin_lock_bh(&aq->lock);
+	aq->state = AP_STATE_SUSPEND_WAIT;
+	while (ap_sm_event(aq, AP_EVENT_POLL) != AP_WAIT_NONE)
+		;
+	aq->state = AP_STATE_BORKED;
+	spin_unlock_bh(&aq->lock);
+}
+EXPORT_SYMBOL(ap_queue_suspend);
+
+void ap_queue_resume(struct ap_device *ap_dev)
+{
+}
+EXPORT_SYMBOL(ap_queue_resume);
+
+/*
+ * AP queue related attributes.
+ */
+static ssize_t request_count_show(struct device *dev,
+				  struct device_attribute *attr,
+				  char *buf)
+{
+	struct ap_queue *aq = to_ap_queue(dev);
+	unsigned int req_cnt;
+
+	spin_lock_bh(&aq->lock);
+	req_cnt = aq->total_request_count;
+	spin_unlock_bh(&aq->lock);
+	return snprintf(buf, PAGE_SIZE, "%d\n", req_cnt);
+}
+
+static ssize_t request_count_store(struct device *dev,
+				   struct device_attribute *attr,
+				   const char *buf, size_t count)
+{
+	struct ap_queue *aq = to_ap_queue(dev);
+
+	spin_lock_bh(&aq->lock);
+	aq->total_request_count = 0;
+	spin_unlock_bh(&aq->lock);
+
+	return count;
+}
+
+static DEVICE_ATTR_RW(request_count);
+
+static ssize_t requestq_count_show(struct device *dev,
+				   struct device_attribute *attr, char *buf)
+{
+	struct ap_queue *aq = to_ap_queue(dev);
+	unsigned int reqq_cnt = 0;
+
+	spin_lock_bh(&aq->lock);
+	reqq_cnt = aq->requestq_count;
+	spin_unlock_bh(&aq->lock);
+	return snprintf(buf, PAGE_SIZE, "%d\n", reqq_cnt);
+}
+
+static DEVICE_ATTR_RO(requestq_count);
+
+static ssize_t pendingq_count_show(struct device *dev,
+				   struct device_attribute *attr, char *buf)
+{
+	struct ap_queue *aq = to_ap_queue(dev);
+	unsigned int penq_cnt = 0;
+
+	spin_lock_bh(&aq->lock);
+	penq_cnt = aq->pendingq_count;
+	spin_unlock_bh(&aq->lock);
+	return snprintf(buf, PAGE_SIZE, "%d\n", penq_cnt);
+}
+
+static DEVICE_ATTR_RO(pendingq_count);
+
+static ssize_t reset_show(struct device *dev,
+			  struct device_attribute *attr, char *buf)
+{
+	struct ap_queue *aq = to_ap_queue(dev);
+	int rc = 0;
+
+	spin_lock_bh(&aq->lock);
+	switch (aq->state) {
+	case AP_STATE_RESET_START:
+	case AP_STATE_RESET_WAIT:
+		rc = snprintf(buf, PAGE_SIZE, "Reset in progress.\n");
+		break;
+	case AP_STATE_WORKING:
+	case AP_STATE_QUEUE_FULL:
+		rc = snprintf(buf, PAGE_SIZE, "Reset Timer armed.\n");
+		break;
+	default:
+		rc = snprintf(buf, PAGE_SIZE, "No Reset Timer set.\n");
+	}
+	spin_unlock_bh(&aq->lock);
+	return rc;
+}
+
+static DEVICE_ATTR_RO(reset);
+
+static ssize_t interrupt_show(struct device *dev,
+			      struct device_attribute *attr, char *buf)
+{
+	struct ap_queue *aq = to_ap_queue(dev);
+	int rc = 0;
+
+	spin_lock_bh(&aq->lock);
+	if (aq->state == AP_STATE_SETIRQ_WAIT)
+		rc = snprintf(buf, PAGE_SIZE, "Enable Interrupt pending.\n");
+	else if (aq->interrupt == AP_INTR_ENABLED)
+		rc = snprintf(buf, PAGE_SIZE, "Interrupts enabled.\n");
+	else
+		rc = snprintf(buf, PAGE_SIZE, "Interrupts disabled.\n");
+	spin_unlock_bh(&aq->lock);
+	return rc;
+}
+
+static DEVICE_ATTR_RO(interrupt);
+
+static struct attribute *ap_queue_dev_attrs[] = {
+	&dev_attr_request_count.attr,
+	&dev_attr_requestq_count.attr,
+	&dev_attr_pendingq_count.attr,
+	&dev_attr_reset.attr,
+	&dev_attr_interrupt.attr,
+	NULL
+};
+
+static struct attribute_group ap_queue_dev_attr_group = {
+	.attrs = ap_queue_dev_attrs
+};
+
+static const struct attribute_group *ap_queue_dev_attr_groups[] = {
+	&ap_queue_dev_attr_group,
+	NULL
+};
+
+static struct device_type ap_queue_type = {
+	.name = "ap_queue",
+	.groups = ap_queue_dev_attr_groups,
+};
+
+static void ap_queue_device_release(struct device *dev)
+{
+	struct ap_queue *aq = to_ap_queue(dev);
+
+	if (!list_empty(&aq->list)) {
+		spin_lock_bh(&ap_list_lock);
+		list_del_init(&aq->list);
+		spin_unlock_bh(&ap_list_lock);
+	}
+	kfree(aq);
+}
+
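+/**
+ * ap_queue_create(): Create a new AP queue object.
+ * @qid: The qualified AP queue number (card and domain)
+ * @device_type: The device type used for driver matching
+ *
+ * Allocates and initializes an ap_queue struct including its lock,
+ * message lists and request timeout timer. The state machine starts
+ * in AP_STATE_RESET_START. Returns a pointer to the new queue or
+ * NULL if the allocation failed.
+ */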
+struct ap_queue *ap_queue_create(ap_qid_t qid, int device_type)
+{
+	struct ap_queue *aq;
+
+	aq = kzalloc(sizeof(*aq), GFP_KERNEL);
+	if (!aq)
+		return NULL;
+	aq->ap_dev.device.release = ap_queue_device_release;
+	aq->ap_dev.device.type = &ap_queue_type;
+	aq->ap_dev.device_type = device_type;
+	aq->qid = qid;
+	aq->state = AP_STATE_RESET_START;
+	aq->interrupt = AP_INTR_DISABLED;
+	spin_lock_init(&aq->lock);
+	INIT_LIST_HEAD(&aq->list);
+	INIT_LIST_HEAD(&aq->pendingq);
+	INIT_LIST_HEAD(&aq->requestq);
+	timer_setup(&aq->timeout, ap_request_timeout, 0);
+
+	return aq;
+}
+
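+/**
+ * ap_queue_init_reply(): Set up the reply message for an AP queue.
+ * @aq: The AP queue
+ * @reply: The per device reply message buffer
+ *
+ * Attaches the reply buffer to the queue and kicks the state machine
+ * once so that pending work can proceed.
+ */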
+void ap_queue_init_reply(struct ap_queue *aq, struct ap_message *reply)
+{
+	aq->reply = reply;
+
+	spin_lock_bh(&aq->lock);
+	ap_wait(ap_sm_event(aq, AP_EVENT_POLL));
+	spin_unlock_bh(&aq->lock);
+}
+EXPORT_SYMBOL(ap_queue_init_reply);
+
+/**
+ * ap_queue_message(): Queue a request to an AP device.
+ * @aq: The AP device to queue the message to
+ * @ap_msg: The message that is to be added
+ */
+void ap_queue_message(struct ap_queue *aq, struct ap_message *ap_msg)
+{
+	/* For asynchronous message handling a valid receive-callback
+	 * is required.
+	 */
+	BUG_ON(!ap_msg->receive);
+
+	spin_lock_bh(&aq->lock);
+	/* Queue the message. */
+	list_add_tail(&ap_msg->list, &aq->requestq);
+	aq->requestq_count++;
+	aq->total_request_count++;
+	atomic_inc(&aq->card->total_request_count);
+	/* Send/receive as many request from the queue as possible. */
+	ap_wait(ap_sm_event_loop(aq, AP_EVENT_POLL));
+	spin_unlock_bh(&aq->lock);
+}
+EXPORT_SYMBOL(ap_queue_message);
+
+/**
+ * ap_cancel_message(): Cancel a crypto request.
+ * @aq: The AP device that has the message queued
+ * @ap_msg: The message that is to be removed
+ *
+ * Cancel a crypto request. This is done by removing the request
+ * from the device pending or request queue. Note that the
+ * request stays on the AP queue. When it finishes the message
+ * reply will be discarded because the psmid can't be found.
+ */
+void ap_cancel_message(struct ap_queue *aq, struct ap_message *ap_msg)
+{
+	struct ap_message *tmp;
+
+	spin_lock_bh(&aq->lock);
+	if (!list_empty(&ap_msg->list)) {
+		list_for_each_entry(tmp, &aq->pendingq, list)
+			if (tmp->psmid == ap_msg->psmid) {
+				aq->pendingq_count--;
+				goto found;
+			}
+		aq->requestq_count--;
+found:
+		list_del_init(&ap_msg->list);
+	}
+	spin_unlock_bh(&aq->lock);
+}
+EXPORT_SYMBOL(ap_cancel_message);
+
+/**
+ * __ap_flush_queue(): Flush requests.
+ * @aq: Pointer to the AP queue
+ *
+ * Flush all requests from the request/pending queue of an AP device.
+ */
+static void __ap_flush_queue(struct ap_queue *aq)
+{
+	struct ap_message *ap_msg, *next;
+
+	list_for_each_entry_safe(ap_msg, next, &aq->pendingq, list) {
+		list_del_init(&ap_msg->list);
+		aq->pendingq_count--;
+		ap_msg->rc = -EAGAIN;
+		ap_msg->receive(aq, ap_msg, NULL);
+	}
+	list_for_each_entry_safe(ap_msg, next, &aq->requestq, list) {
+		list_del_init(&ap_msg->list);
+		aq->requestq_count--;
+		ap_msg->rc = -EAGAIN;
+		ap_msg->receive(aq, ap_msg, NULL);
+	}
+}
+
+void ap_flush_queue(struct ap_queue *aq)
+{
+	spin_lock_bh(&aq->lock);
+	__ap_flush_queue(aq);
+	spin_unlock_bh(&aq->lock);
+}
+EXPORT_SYMBOL(ap_flush_queue);
+
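+/**
+ * ap_queue_remove(): Prepare an AP queue for removal.
+ * @aq: The AP queue
+ *
+ * Flushes all queued requests and stops the request timeout timer.
+ */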
+void ap_queue_remove(struct ap_queue *aq)
+{
+	ap_flush_queue(aq);
+	del_timer_sync(&aq->timeout);
+}
+EXPORT_SYMBOL(ap_queue_remove);
diff --git a/drivers/s390/crypto/pkey_api.c b/drivers/s390/crypto/pkey_api.c
new file mode 100644
index 0000000..1b4001e
--- /dev/null
+++ b/drivers/s390/crypto/pkey_api.c
@@ -0,0 +1,1226 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ *  pkey device driver
+ *
+ *  Copyright IBM Corp. 2017
+ *  Author(s): Harald Freudenberger
+ */
+
+#define KMSG_COMPONENT "pkey"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <linux/fs.h>
+#include <linux/init.h>
+#include <linux/miscdevice.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/kallsyms.h>
+#include <linux/debugfs.h>
+#include <asm/zcrypt.h>
+#include <asm/cpacf.h>
+#include <asm/pkey.h>
+
+#include "zcrypt_api.h"
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("IBM Corporation");
+MODULE_DESCRIPTION("s390 protected key interface");
+
+/* Size of parameter block used for all cca requests/replies */
+#define PARMBSIZE 512
+
+/* Size of vardata block used for some of the cca requests/replies */
+#define VARDATASIZE 4096
+
+/*
+ * debug feature data and functions
+ */
+
+static debug_info_t *debug_info;
+
+#define DEBUG_DBG(...)	debug_sprintf_event(debug_info, 6, ##__VA_ARGS__)
+#define DEBUG_INFO(...) debug_sprintf_event(debug_info, 5, ##__VA_ARGS__)
+#define DEBUG_WARN(...) debug_sprintf_event(debug_info, 4, ##__VA_ARGS__)
+#define DEBUG_ERR(...)	debug_sprintf_event(debug_info, 3, ##__VA_ARGS__)
+
+static void __init pkey_debug_init(void)
+{
+	debug_info = debug_register("pkey", 1, 1, 4 * sizeof(long));
+	debug_register_view(debug_info, &debug_sprintf_view);
+	debug_set_level(debug_info, 3);
+}
+
+static void __exit pkey_debug_exit(void)
+{
+	debug_unregister(debug_info);
+}
+
+/* inside view of a secure key token (only type 0x01 version 0x04) */
+struct secaeskeytoken {
+	u8  type;     /* 0x01 for internal key token */
+	u8  res0[3];
+	u8  version;  /* should be 0x04 */
+	u8  res1[1];
+	u8  flag;     /* key flags */
+	u8  res2[1];
+	u64 mkvp;     /* master key verification pattern */
+	u8  key[32];  /* key value (encrypted) */
+	u8  cv[8];    /* control vector */
+	u16 bitsize;  /* key bit size */
+	u16 keysize;  /* key byte size */
+	u8  tvv[4];   /* token validation value */
+} __packed;
+
+/*
+ * Simple check if the token is a valid CCA secure AES key
+ * token. If keybitsize is given, the bitsize of the key is
+ * also checked. Returns 0 on success or errno value on failure.
+ */
+static int check_secaeskeytoken(const u8 *token, int keybitsize)
+{
+	struct secaeskeytoken *t = (struct secaeskeytoken *) token;
+
+	if (t->type != 0x01) {
+		DEBUG_ERR(
+			"%s secure token check failed, type mismatch 0x%02x != 0x01\n",
+			__func__, (int) t->type);
+		return -EINVAL;
+	}
+	if (t->version != 0x04) {
+		DEBUG_ERR(
+			"%s secure token check failed, version mismatch 0x%02x != 0x04\n",
+			__func__, (int) t->version);
+		return -EINVAL;
+	}
+	if (keybitsize > 0 && t->bitsize != keybitsize) {
+		DEBUG_ERR(
+			"%s secure token check failed, bitsize mismatch %d != %d\n",
+			__func__, (int) t->bitsize, keybitsize);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/*
+ * Allocate consecutive memory for request CPRB, request param
+ * block, reply CPRB and reply param block and fill in values
+ * for the common fields. Returns 0 on success or errno value
+ * on failure.
+ */
+static int alloc_and_prep_cprbmem(size_t paramblen,
+				  u8 **pcprbmem,
+				  struct CPRBX **preqCPRB,
+				  struct CPRBX **prepCPRB)
+{
+	u8 *cprbmem;
+	size_t cprbplusparamblen = sizeof(struct CPRBX) + paramblen;
+	struct CPRBX *preqcblk, *prepcblk;
+
+	/*
+	 * allocate consecutive memory for request CPRB, request param
+	 * block, reply CPRB and reply param block
+	 */
+	cprbmem = kcalloc(2, cprbplusparamblen, GFP_KERNEL);
+	if (!cprbmem)
+		return -ENOMEM;
+
+	preqcblk = (struct CPRBX *) cprbmem;
+	prepcblk = (struct CPRBX *) (cprbmem + cprbplusparamblen);
+
+	/* fill request cprb struct */
+	preqcblk->cprb_len = sizeof(struct CPRBX);
+	preqcblk->cprb_ver_id = 0x02;
+	memcpy(preqcblk->func_id, "T2", 2);
+	preqcblk->rpl_msgbl = cprbplusparamblen;
+	if (paramblen) {
+		preqcblk->req_parmb =
+			((u8 *) preqcblk) + sizeof(struct CPRBX);
+		preqcblk->rpl_parmb =
+			((u8 *) prepcblk) + sizeof(struct CPRBX);
+	}
+
+	*pcprbmem = cprbmem;
+	*preqCPRB = preqcblk;
+	*prepCPRB = prepcblk;
+
+	return 0;
+}
+
+/*
+ * Free the cprb memory allocated with the function above.
+ * If the scrub value is not zero, the memory is filled
+ * with zeros before freeing (useful if there was some
+ * clear key material in there).
+ */
+static void free_cprbmem(void *mem, size_t paramblen, int scrub)
+{
+	if (scrub)
+		memzero_explicit(mem, 2 * (sizeof(struct CPRBX) + paramblen));
+	kfree(mem);
+}
+
+/*
+ * Helper function to prepare the xcrb struct
+ */
+static inline void prep_xcrb(struct ica_xcRB *pxcrb,
+			     u16 cardnr,
+			     struct CPRBX *preqcblk,
+			     struct CPRBX *prepcblk)
+{
+	memset(pxcrb, 0, sizeof(*pxcrb));
+	pxcrb->agent_ID = 0x4341; /* 'CA' */
+	pxcrb->user_defined = (cardnr == 0xFFFF ? AUTOSELECT : cardnr);
+	pxcrb->request_control_blk_length =
+		preqcblk->cprb_len + preqcblk->req_parml;
+	pxcrb->request_control_blk_addr = (void __user *) preqcblk;
+	pxcrb->reply_control_blk_length = preqcblk->rpl_msgbl;
+	pxcrb->reply_control_blk_addr = (void __user *) prepcblk;
+}
+
+/*
+ * Helper function which calls zcrypt_send_cprb with
+ * memory management segment adjusted to kernel space
+ * so that the copy_from_user calls done within that
+ * function in fact copy from kernel space.
+ */
+static inline int _zcrypt_send_cprb(struct ica_xcRB *xcrb)
+{
+	int rc;
+	mm_segment_t old_fs = get_fs();
+
+	set_fs(KERNEL_DS);
+	rc = zcrypt_send_cprb(xcrb);
+	set_fs(old_fs);
+
+	return rc;
+}
+
+/*
+ * Generate (random) AES secure key.
+ */
+int pkey_genseckey(u16 cardnr, u16 domain,
+		   u32 keytype, struct pkey_seckey *seckey)
+{
+	int i, rc, keysize;
+	int seckeysize;
+	u8 *mem;
+	struct CPRBX *preqcblk, *prepcblk;
+	struct ica_xcRB xcrb;
+	struct kgreqparm {
+		u8  subfunc_code[2];
+		u16 rule_array_len;
+		struct lv1 {
+			u16 len;
+			char  key_form[8];
+			char  key_length[8];
+			char  key_type1[8];
+			char  key_type2[8];
+		} lv1;
+		struct lv2 {
+			u16 len;
+			struct keyid {
+				u16 len;
+				u16 attr;
+				u8  data[SECKEYBLOBSIZE];
+			} keyid[6];
+		} lv2;
+	} *preqparm;
+	struct kgrepparm {
+		u8  subfunc_code[2];
+		u16 rule_array_len;
+		struct lv3 {
+			u16 len;
+			u16 keyblocklen;
+			struct {
+				u16 toklen;
+				u16 tokattr;
+				u8  tok[0];
+				/* ... some more data ... */
+			} keyblock;
+		} lv3;
+	} *prepparm;
+
+	/* get already prepared memory for 2 cprbs with param block each */
+	rc = alloc_and_prep_cprbmem(PARMBSIZE, &mem, &preqcblk, &prepcblk);
+	if (rc)
+		return rc;
+
+	/* fill request cprb struct */
+	preqcblk->domain = domain;
+
+	/* fill request cprb param block with KG request */
+	preqparm = (struct kgreqparm *) preqcblk->req_parmb;
+	memcpy(preqparm->subfunc_code, "KG", 2);
+	preqparm->rule_array_len = sizeof(preqparm->rule_array_len);
+	preqparm->lv1.len = sizeof(struct lv1);
+	memcpy(preqparm->lv1.key_form,	 "OP      ", 8);
+	switch (keytype) {
+	case PKEY_KEYTYPE_AES_128:
+		keysize = 16;
+		memcpy(preqparm->lv1.key_length, "KEYLN16 ", 8);
+		break;
+	case PKEY_KEYTYPE_AES_192:
+		keysize = 24;
+		memcpy(preqparm->lv1.key_length, "KEYLN24 ", 8);
+		break;
+	case PKEY_KEYTYPE_AES_256:
+		keysize = 32;
+		memcpy(preqparm->lv1.key_length, "KEYLN32 ", 8);
+		break;
+	default:
+		DEBUG_ERR(
+			"%s unknown/unsupported keytype %d\n",
+			__func__, keytype);
+		rc = -EINVAL;
+		goto out;
+	}
+	memcpy(preqparm->lv1.key_type1,  "AESDATA ", 8);
+	preqparm->lv2.len = sizeof(struct lv2);
+	for (i = 0; i < 6; i++) {
+		preqparm->lv2.keyid[i].len = sizeof(struct keyid);
+		preqparm->lv2.keyid[i].attr = (i == 2 ? 0x30 : 0x10);
+	}
+	preqcblk->req_parml = sizeof(struct kgreqparm);
+
+	/* fill xcrb struct */
+	prep_xcrb(&xcrb, cardnr, preqcblk, prepcblk);
+
+	/* forward xcrb with request CPRB and reply CPRB to zcrypt dd */
+	rc = _zcrypt_send_cprb(&xcrb);
+	if (rc) {
+		DEBUG_ERR(
+			"%s zcrypt_send_cprb (cardnr=%d domain=%d) failed with errno %d\n",
+			__func__, (int) cardnr, (int) domain, rc);
+		goto out;
+	}
+
+	/* check response returncode and reasoncode */
+	if (prepcblk->ccp_rtcode != 0) {
+		DEBUG_ERR(
+			"%s secure key generate failure, card response %d/%d\n",
+			__func__,
+			(int) prepcblk->ccp_rtcode,
+			(int) prepcblk->ccp_rscode);
+		rc = -EIO;
+		goto out;
+	}
+
+	/* process response cprb param block */
+	prepcblk->rpl_parmb = ((u8 *) prepcblk) + sizeof(struct CPRBX);
+	prepparm = (struct kgrepparm *) prepcblk->rpl_parmb;
+
+	/* check length of the returned secure key token */
+	seckeysize = prepparm->lv3.keyblock.toklen
+		- sizeof(prepparm->lv3.keyblock.toklen)
+		- sizeof(prepparm->lv3.keyblock.tokattr);
+	if (seckeysize != SECKEYBLOBSIZE) {
+		DEBUG_ERR(
+			"%s secure token size mismatch %d != %d bytes\n",
+			__func__, seckeysize, SECKEYBLOBSIZE);
+		rc = -EIO;
+		goto out;
+	}
+
+	/* check secure key token */
+	rc = check_secaeskeytoken(prepparm->lv3.keyblock.tok, 8*keysize);
+	if (rc) {
+		rc = -EIO;
+		goto out;
+	}
+
+	/* copy the generated secure key token */
+	memcpy(seckey->seckey, prepparm->lv3.keyblock.tok, SECKEYBLOBSIZE);
+
+out:
+	free_cprbmem(mem, PARMBSIZE, 0);
+	return rc;
+}
+EXPORT_SYMBOL(pkey_genseckey);
+
+/*
+ * Generate an AES secure key with a given clear key value.
+ */
+int pkey_clr2seckey(u16 cardnr, u16 domain, u32 keytype,
+		    const struct pkey_clrkey *clrkey,
+		    struct pkey_seckey *seckey)
+{
+	int rc, keysize, seckeysize;
+	u8 *mem;
+	struct CPRBX *preqcblk, *prepcblk;
+	struct ica_xcRB xcrb;
+	struct cmreqparm {
+		u8  subfunc_code[2];
+		u16 rule_array_len;
+		char  rule_array[8];
+		struct lv1 {
+			u16 len;
+			u8  clrkey[0];
+		} lv1;
+		struct lv2 {
+			u16 len;
+			struct keyid {
+				u16 len;
+				u16 attr;
+				u8  data[SECKEYBLOBSIZE];
+			} keyid;
+		} lv2;
+	} *preqparm;
+	struct lv2 *plv2;
+	struct cmrepparm {
+		u8  subfunc_code[2];
+		u16 rule_array_len;
+		struct lv3 {
+			u16 len;
+			u16 keyblocklen;
+			struct {
+				u16 toklen;
+				u16 tokattr;
+				u8  tok[0];
+				/* ... some more data ... */
+			} keyblock;
+		} lv3;
+	} *prepparm;
+
+	/* get already prepared memory for 2 cprbs with param block each */
+	rc = alloc_and_prep_cprbmem(PARMBSIZE, &mem, &preqcblk, &prepcblk);
+	if (rc)
+		return rc;
+
+	/* fill request cprb struct */
+	preqcblk->domain = domain;
+
+	/* fill request cprb param block with CM request */
+	preqparm = (struct cmreqparm *) preqcblk->req_parmb;
+	memcpy(preqparm->subfunc_code, "CM", 2);
+	memcpy(preqparm->rule_array, "AES     ", 8);
+	preqparm->rule_array_len =
+		sizeof(preqparm->rule_array_len) + sizeof(preqparm->rule_array);
+	switch (keytype) {
+	case PKEY_KEYTYPE_AES_128:
+		keysize = 16;
+		break;
+	case PKEY_KEYTYPE_AES_192:
+		keysize = 24;
+		break;
+	case PKEY_KEYTYPE_AES_256:
+		keysize = 32;
+		break;
+	default:
+		DEBUG_ERR(
+			"%s unknown/unsupported keytype %d\n",
+			__func__, keytype);
+		rc = -EINVAL;
+		goto out;
+	}
+	preqparm->lv1.len = sizeof(struct lv1) + keysize;
+	memcpy(preqparm->lv1.clrkey, clrkey->clrkey, keysize);
+	plv2 = (struct lv2 *) (((u8 *) &preqparm->lv2) + keysize);
+	plv2->len = sizeof(struct lv2);
+	plv2->keyid.len = sizeof(struct keyid);
+	plv2->keyid.attr = 0x30;
+	preqcblk->req_parml = sizeof(struct cmreqparm) + keysize;
+
+	/* fill xcrb struct */
+	prep_xcrb(&xcrb, cardnr, preqcblk, prepcblk);
+
+	/* forward xcrb with request CPRB and reply CPRB to zcrypt dd */
+	rc = _zcrypt_send_cprb(&xcrb);
+	if (rc) {
+		DEBUG_ERR(
+			"%s zcrypt_send_cprb (cardnr=%d domain=%d) failed with errno %d\n",
+			__func__, (int) cardnr, (int) domain, rc);
+		goto out;
+	}
+
+	/* check response returncode and reasoncode */
+	if (prepcblk->ccp_rtcode != 0) {
+		DEBUG_ERR(
+			"%s clear key import failure, card response %d/%d\n",
+			__func__,
+			(int) prepcblk->ccp_rtcode,
+			(int) prepcblk->ccp_rscode);
+		rc = -EIO;
+		goto out;
+	}
+
+	/* process response cprb param block */
+	prepcblk->rpl_parmb = ((u8 *) prepcblk) + sizeof(struct CPRBX);
+	prepparm = (struct cmrepparm *) prepcblk->rpl_parmb;
+
+	/* check length of the returned secure key token */
+	seckeysize = prepparm->lv3.keyblock.toklen
+		- sizeof(prepparm->lv3.keyblock.toklen)
+		- sizeof(prepparm->lv3.keyblock.tokattr);
+	if (seckeysize != SECKEYBLOBSIZE) {
+		DEBUG_ERR(
+			"%s secure token size mismatch %d != %d bytes\n",
+			__func__, seckeysize, SECKEYBLOBSIZE);
+		rc = -EIO;
+		goto out;
+	}
+
+	/* check secure key token */
+	rc = check_secaeskeytoken(prepparm->lv3.keyblock.tok, 8*keysize);
+	if (rc) {
+		rc = -EIO;
+		goto out;
+	}
+
+	/* copy the generated secure key token */
+	memcpy(seckey->seckey, prepparm->lv3.keyblock.tok, SECKEYBLOBSIZE);
+
+out:
+	free_cprbmem(mem, PARMBSIZE, 1);
+	return rc;
+}
+EXPORT_SYMBOL(pkey_clr2seckey);
+
+/*
+ * Derive a protected key from the secure key blob.
+ */
+int pkey_sec2protkey(u16 cardnr, u16 domain,
+		     const struct pkey_seckey *seckey,
+		     struct pkey_protkey *protkey)
+{
+	int rc;
+	u8 *mem;
+	struct CPRBX *preqcblk, *prepcblk;
+	struct ica_xcRB xcrb;
+	struct uskreqparm {
+		u8  subfunc_code[2];
+		u16 rule_array_len;
+		struct lv1 {
+			u16 len;
+			u16 attr_len;
+			u16 attr_flags;
+		} lv1;
+		struct lv2 {
+			u16 len;
+			u16 attr_len;
+			u16 attr_flags;
+			u8  token[0];	      /* cca secure key token */
+		} lv2 __packed;
+	} *preqparm;
+	struct uskrepparm {
+		u8  subfunc_code[2];
+		u16 rule_array_len;
+		struct lv3 {
+			u16 len;
+			u16 attr_len;
+			u16 attr_flags;
+			struct cpacfkeyblock {
+				u8  version;  /* version of this struct */
+				u8  flags[2];
+				u8  algo;
+				u8  form;
+				u8  pad1[3];
+				u16 keylen;
+				u8  key[64];  /* the key (keylen bytes) */
+				u16 keyattrlen;
+				u8  keyattr[32];
+				u8  pad2[1];
+				u8  vptype;
+				u8  vp[32];  /* verification pattern */
+			} keyblock;
+		} lv3 __packed;
+	} *prepparm;
+
+	/* get already prepared memory for 2 cprbs with param block each */
+	rc = alloc_and_prep_cprbmem(PARMBSIZE, &mem, &preqcblk, &prepcblk);
+	if (rc)
+		return rc;
+
+	/* fill request cprb struct */
+	preqcblk->domain = domain;
+
+	/* fill request cprb param block with USK request */
+	preqparm = (struct uskreqparm *) preqcblk->req_parmb;
+	memcpy(preqparm->subfunc_code, "US", 2);
+	preqparm->rule_array_len = sizeof(preqparm->rule_array_len);
+	preqparm->lv1.len = sizeof(struct lv1);
+	preqparm->lv1.attr_len = sizeof(struct lv1) - sizeof(preqparm->lv1.len);
+	preqparm->lv1.attr_flags = 0x0001;
+	preqparm->lv2.len = sizeof(struct lv2) + SECKEYBLOBSIZE;
+	preqparm->lv2.attr_len = sizeof(struct lv2)
+		- sizeof(preqparm->lv2.len) + SECKEYBLOBSIZE;
+	preqparm->lv2.attr_flags = 0x0000;
+	memcpy(preqparm->lv2.token, seckey->seckey, SECKEYBLOBSIZE);
+	preqcblk->req_parml = sizeof(struct uskreqparm) + SECKEYBLOBSIZE;
+
+	/* fill xcrb struct */
+	prep_xcrb(&xcrb, cardnr, preqcblk, prepcblk);
+
+	/* forward xcrb with request CPRB and reply CPRB to zcrypt dd */
+	rc = _zcrypt_send_cprb(&xcrb);
+	if (rc) {
+		DEBUG_ERR(
+			"%s zcrypt_send_cprb (cardnr=%d domain=%d) failed with errno %d\n",
+			__func__, (int) cardnr, (int) domain, rc);
+		goto out;
+	}
+
+	/* check response returncode and reasoncode */
+	if (prepcblk->ccp_rtcode != 0) {
+		DEBUG_ERR(
+			"%s unwrap secure key failure, card response %d/%d\n",
+			__func__,
+			(int) prepcblk->ccp_rtcode,
+			(int) prepcblk->ccp_rscode);
+		rc = -EIO;
+		goto out;
+	}
+	if (prepcblk->ccp_rscode != 0) {
+		DEBUG_WARN(
+			"%s unwrap secure key warning, card response %d/%d\n",
+			__func__,
+			(int) prepcblk->ccp_rtcode,
+			(int) prepcblk->ccp_rscode);
+	}
+
+	/* process response cprb param block */
+	prepcblk->rpl_parmb = ((u8 *) prepcblk) + sizeof(struct CPRBX);
+	prepparm = (struct uskrepparm *) prepcblk->rpl_parmb;
+
+	/* check the returned keyblock */
+	if (prepparm->lv3.keyblock.version != 0x01) {
+		DEBUG_ERR(
+			"%s reply param keyblock version mismatch 0x%02x != 0x01\n",
+			__func__, (int) prepparm->lv3.keyblock.version);
+		rc = -EIO;
+		goto out;
+	}
+
+	/* copy the translated protected key */
+	switch (prepparm->lv3.keyblock.keylen) {
+	case 16+32:
+		protkey->type = PKEY_KEYTYPE_AES_128;
+		break;
+	case 24+32:
+		protkey->type = PKEY_KEYTYPE_AES_192;
+		break;
+	case 32+32:
+		protkey->type = PKEY_KEYTYPE_AES_256;
+		break;
+	default:
+		DEBUG_ERR("%s unknown/unsupported keytype %d\n",
+			  __func__, prepparm->lv3.keyblock.keylen);
+		rc = -EIO;
+		goto out;
+	}
+	protkey->len = prepparm->lv3.keyblock.keylen;
+	memcpy(protkey->protkey, prepparm->lv3.keyblock.key, protkey->len);
+
+out:
+	free_cprbmem(mem, PARMBSIZE, 0);
+	return rc;
+}
+EXPORT_SYMBOL(pkey_sec2protkey);
+
+/*
+ * Create a protected key from a clear key value.
+ */
+int pkey_clr2protkey(u32 keytype,
+		     const struct pkey_clrkey *clrkey,
+		     struct pkey_protkey *protkey)
+{
+	long fc;
+	int keysize;
+	u8 paramblock[64];
+
+	switch (keytype) {
+	case PKEY_KEYTYPE_AES_128:
+		keysize = 16;
+		fc = CPACF_PCKMO_ENC_AES_128_KEY;
+		break;
+	case PKEY_KEYTYPE_AES_192:
+		keysize = 24;
+		fc = CPACF_PCKMO_ENC_AES_192_KEY;
+		break;
+	case PKEY_KEYTYPE_AES_256:
+		keysize = 32;
+		fc = CPACF_PCKMO_ENC_AES_256_KEY;
+		break;
+	default:
+		DEBUG_ERR("%s unknown/unsupported keytype %d\n",
+			  __func__, keytype);
+		return -EINVAL;
+	}
+
+	/* prepare param block */
+	memset(paramblock, 0, sizeof(paramblock));
+	memcpy(paramblock, clrkey->clrkey, keysize);
+
+	/* call the pckmo instruction */
+	cpacf_pckmo(fc, paramblock);
+
+	/* copy created protected key */
+	protkey->type = keytype;
+	protkey->len = keysize + 32;
+	memcpy(protkey->protkey, paramblock, keysize + 32);
+
+	return 0;
+}
+EXPORT_SYMBOL(pkey_clr2protkey);
+
+/*
+ * Query cryptographic facility from adapter.
+ */
+static int query_crypto_facility(u16 cardnr, u16 domain,
+				 const char *keyword,
+				 u8 *rarray, size_t *rarraylen,
+				 u8 *varray, size_t *varraylen)
+{
+	int rc;
+	u16 len;
+	u8 *mem, *ptr;
+	struct CPRBX *preqcblk, *prepcblk;
+	struct ica_xcRB xcrb;
+	struct fqreqparm {
+		u8  subfunc_code[2];
+		u16 rule_array_len;
+		char  rule_array[8];
+		struct lv1 {
+			u16 len;
+			u8  data[VARDATASIZE];
+		} lv1;
+		u16 dummylen;
+	} *preqparm;
+	size_t parmbsize = sizeof(struct fqreqparm);
+	struct fqrepparm {
+		u8  subfunc_code[2];
+		u8  lvdata[0];
+	} *prepparm;
+
+	/* get already prepared memory for 2 cprbs with param block each */
+	rc = alloc_and_prep_cprbmem(parmbsize, &mem, &preqcblk, &prepcblk);
+	if (rc)
+		return rc;
+
+	/* fill request cprb struct */
+	preqcblk->domain = domain;
+
+	/* fill request cprb param block with FQ request */
+	preqparm = (struct fqreqparm *) preqcblk->req_parmb;
+	memcpy(preqparm->subfunc_code, "FQ", 2);
+	memcpy(preqparm->rule_array, keyword, sizeof(preqparm->rule_array));
+	preqparm->rule_array_len =
+		sizeof(preqparm->rule_array_len) + sizeof(preqparm->rule_array);
+	preqparm->lv1.len = sizeof(preqparm->lv1);
+	preqparm->dummylen = sizeof(preqparm->dummylen);
+	preqcblk->req_parml = parmbsize;
+
+	/* fill xcrb struct */
+	prep_xcrb(&xcrb, cardnr, preqcblk, prepcblk);
+
+	/* forward xcrb with request CPRB and reply CPRB to zcrypt dd */
+	rc = _zcrypt_send_cprb(&xcrb);
+	if (rc) {
+		DEBUG_ERR(
+			"%s zcrypt_send_cprb (cardnr=%d domain=%d) failed with errno %d\n",
+			__func__, (int) cardnr, (int) domain, rc);
+		goto out;
+	}
+
+	/* check response returncode and reasoncode */
+	if (prepcblk->ccp_rtcode != 0) {
+		DEBUG_ERR(
+			"%s unwrap secure key failure, card response %d/%d\n",
+			__func__,
+			(int) prepcblk->ccp_rtcode,
+			(int) prepcblk->ccp_rscode);
+		rc = -EIO;
+		goto out;
+	}
+
+	/* process response cprb param block */
+	prepcblk->rpl_parmb = ((u8 *) prepcblk) + sizeof(struct CPRBX);
+	prepparm = (struct fqrepparm *) prepcblk->rpl_parmb;
+	ptr = prepparm->lvdata;
+
+	/* check and possibly copy reply rule array */
+	len = *((u16 *) ptr);
+	if (len > sizeof(u16)) {
+		ptr += sizeof(u16);
+		len -= sizeof(u16);
+		if (rarray && rarraylen && *rarraylen > 0) {
+			*rarraylen = (len > *rarraylen ? *rarraylen : len);
+			memcpy(rarray, ptr, *rarraylen);
+		}
+		ptr += len;
+	}
+	/* check and possibly copy reply var array */
+	len = *((u16 *) ptr);
+	if (len > sizeof(u16)) {
+		ptr += sizeof(u16);
+		len -= sizeof(u16);
+		if (varray && varraylen && *varraylen > 0) {
+			*varraylen = (len > *varraylen ? *varraylen : len);
+			memcpy(varray, ptr, *varraylen);
+		}
+		ptr += len;
+	}
+
+out:
+	free_cprbmem(mem, parmbsize, 0);
+	return rc;
+}
+
+/*
+ * Fetch the current and old mkvp values via
+ * query_crypto_facility from adapter.
+ */
+static int fetch_mkvp(u16 cardnr, u16 domain, u64 mkvp[2])
+{
+	int rc, found = 0;
+	size_t rlen, vlen;
+	u8 *rarray, *varray, *pg;
+
+	pg = (u8 *) __get_free_page(GFP_KERNEL);
+	if (!pg)
+		return -ENOMEM;
+	rarray = pg;
+	varray = pg + PAGE_SIZE/2;
+	rlen = vlen = PAGE_SIZE/2;
+
+	rc = query_crypto_facility(cardnr, domain, "STATICSA",
+				   rarray, &rlen, varray, &vlen);
+	if (rc == 0 && rlen > 8*8 && vlen > 184+8) {
+		if (rarray[8*8] == '2') {
+			/* current master key state is valid */
+			mkvp[0] = *((u64 *)(varray + 184));
+			mkvp[1] = *((u64 *)(varray + 172));
+			found = 1;
+		}
+	}
+
+	free_page((unsigned long) pg);
+
+	return found ? 0 : -ENOENT;
+}
+
+/* struct to hold cached mkvp info for each card/domain */
+struct mkvp_info {
+	struct list_head list;
+	u16 cardnr;
+	u16 domain;
+	u64 mkvp[2];
+};
+
+/* a list with mkvp_info entries */
+static LIST_HEAD(mkvp_list);
+static DEFINE_SPINLOCK(mkvp_list_lock);
+
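+/*
+ * The helper functions below implement a simple cache of the master
+ * key verification patterns per card/domain pair. All of them take
+ * mkvp_list_lock internally, so callers need no extra locking.
+ */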
+static int mkvp_cache_fetch(u16 cardnr, u16 domain, u64 mkvp[2])
+{
+	int rc = -ENOENT;
+	struct mkvp_info *ptr;
+
+	spin_lock_bh(&mkvp_list_lock);
+	list_for_each_entry(ptr, &mkvp_list, list) {
+		if (ptr->cardnr == cardnr &&
+		    ptr->domain == domain) {
+			memcpy(mkvp, ptr->mkvp, 2 * sizeof(u64));
+			rc = 0;
+			break;
+		}
+	}
+	spin_unlock_bh(&mkvp_list_lock);
+
+	return rc;
+}
+
+static void mkvp_cache_update(u16 cardnr, u16 domain, u64 mkvp[2])
+{
+	int found = 0;
+	struct mkvp_info *ptr;
+
+	spin_lock_bh(&mkvp_list_lock);
+	list_for_each_entry(ptr, &mkvp_list, list) {
+		if (ptr->cardnr == cardnr &&
+		    ptr->domain == domain) {
+			memcpy(ptr->mkvp, mkvp, 2 * sizeof(u64));
+			found = 1;
+			break;
+		}
+	}
+	if (!found) {
+		ptr = kmalloc(sizeof(*ptr), GFP_ATOMIC);
+		if (!ptr) {
+			spin_unlock_bh(&mkvp_list_lock);
+			return;
+		}
+		ptr->cardnr = cardnr;
+		ptr->domain = domain;
+		memcpy(ptr->mkvp, mkvp, 2 * sizeof(u64));
+		list_add(&ptr->list, &mkvp_list);
+	}
+	spin_unlock_bh(&mkvp_list_lock);
+}
+
+static void mkvp_cache_scrub(u16 cardnr, u16 domain)
+{
+	struct mkvp_info *ptr;
+
+	spin_lock_bh(&mkvp_list_lock);
+	list_for_each_entry(ptr, &mkvp_list, list) {
+		if (ptr->cardnr == cardnr &&
+		    ptr->domain == domain) {
+			list_del(&ptr->list);
+			kfree(ptr);
+			break;
+		}
+	}
+	spin_unlock_bh(&mkvp_list_lock);
+}
+
+static void __exit mkvp_cache_free(void)
+{
+	struct mkvp_info *ptr, *pnext;
+
+	spin_lock_bh(&mkvp_list_lock);
+	list_for_each_entry_safe(ptr, pnext, &mkvp_list, list) {
+		list_del(&ptr->list);
+		kfree(ptr);
+	}
+	spin_unlock_bh(&mkvp_list_lock);
+}
+
+/*
+ * Search for a matching crypto card based on the Master Key
+ * Verification Pattern provided inside a secure key.
+ */
+int pkey_findcard(const struct pkey_seckey *seckey,
+		  u16 *pcardnr, u16 *pdomain, int verify)
+{
+	struct secaeskeytoken *t = (struct secaeskeytoken *) seckey;
+	struct zcrypt_device_status_ext *device_status;
+	u16 card, dom;
+	u64 mkvp[2];
+	int i, rc, oi = -1;
+
+	/* mkvp must not be zero */
+	if (t->mkvp == 0)
+		return -EINVAL;
+
+	/* fetch status of all crypto cards */
+	device_status = kmalloc_array(MAX_ZDEV_ENTRIES_EXT,
+				      sizeof(struct zcrypt_device_status_ext),
+				      GFP_KERNEL);
+	if (!device_status)
+		return -ENOMEM;
+	zcrypt_device_status_mask_ext(device_status);
+
+	/* walk through all crypto cards */
+	for (i = 0; i < MAX_ZDEV_ENTRIES_EXT; i++) {
+		card = AP_QID_CARD(device_status[i].qid);
+		dom = AP_QID_QUEUE(device_status[i].qid);
+		if (device_status[i].online &&
+		    device_status[i].functions & 0x04) {
+			/* an enabled CCA Coprocessor card */
+			/* try cached mkvp */
+			if (mkvp_cache_fetch(card, dom, mkvp) == 0 &&
+			    t->mkvp == mkvp[0]) {
+				if (!verify)
+					break;
+				/* verify: fetch mkvp from adapter */
+				if (fetch_mkvp(card, dom, mkvp) == 0) {
+					mkvp_cache_update(card, dom, mkvp);
+					if (t->mkvp == mkvp[0])
+						break;
+				}
+			}
+		} else {
+			/* Card is offline and/or not a CCA card. */
+			/* del mkvp entry from cache if it exists */
+			mkvp_cache_scrub(card, dom);
+		}
+	}
+	if (i >= MAX_ZDEV_ENTRIES_EXT) {
+		/* nothing found, so this time without cache */
+		for (i = 0; i < MAX_ZDEV_ENTRIES_EXT; i++) {
+			if (!(device_status[i].online &&
+			      device_status[i].functions & 0x04))
+				continue;
+			card = AP_QID_CARD(device_status[i].qid);
+			dom = AP_QID_QUEUE(device_status[i].qid);
+			/* fresh fetch mkvp from adapter */
+			if (fetch_mkvp(card, dom, mkvp) == 0) {
+				mkvp_cache_update(card, dom, mkvp);
+				if (t->mkvp == mkvp[0])
+					break;
+				if (t->mkvp == mkvp[1] && oi < 0)
+					oi = i;
+			}
+		}
+		if (i >= MAX_ZDEV_ENTRIES_EXT && oi >= 0) {
+			/* old mkvp matched, use this card then */
+			card = AP_QID_CARD(device_status[oi].qid);
+			dom = AP_QID_QUEUE(device_status[oi].qid);
+		}
+	}
+	if (i < MAX_ZDEV_ENTRIES_EXT || oi >= 0) {
+		if (pcardnr)
+			*pcardnr = card;
+		if (pdomain)
+			*pdomain = dom;
+		rc = 0;
+	} else
+		rc = -ENODEV;
+
+	kfree(device_status);
+	return rc;
+}
+EXPORT_SYMBOL(pkey_findcard);
+
+/*
+ * Find card and transform secure key into protected key.
+ */
+int pkey_skey2pkey(const struct pkey_seckey *seckey,
+		   struct pkey_protkey *protkey)
+{
+	u16 cardnr, domain;
+	int rc, verify;
+
+	/*
+	 * The pkey_sec2protkey call may fail when a card has been
+	 * addressed where the master key was changed after the last fetch
+	 * of the mkvp into the cache. So first try without verify then
+	 * with verify enabled (thus refreshing the mkvp for each card).
+	 */
+	for (verify = 0; verify < 2; verify++) {
+		rc = pkey_findcard(seckey, &cardnr, &domain, verify);
+		if (rc)
+			continue;
+		rc = pkey_sec2protkey(cardnr, domain, seckey, protkey);
+		if (rc == 0)
+			break;
+	}
+
+	if (rc)
+		DEBUG_DBG("%s failed rc=%d\n", __func__, rc);
+
+	return rc;
+}
+EXPORT_SYMBOL(pkey_skey2pkey);
+
+/*
+ * Verify key and give back some info about the key.
+ */
+int pkey_verifykey(const struct pkey_seckey *seckey,
+		   u16 *pcardnr, u16 *pdomain,
+		   u16 *pkeysize, u32 *pattributes)
+{
+	struct secaeskeytoken *t = (struct secaeskeytoken *) seckey;
+	u16 cardnr, domain;
+	u64 mkvp[2];
+	int rc;
+
+	/* check the secure key for valid AES secure key */
+	rc = check_secaeskeytoken((u8 *) seckey, 0);
+	if (rc)
+		goto out;
+	if (pattributes)
+		*pattributes = PKEY_VERIFY_ATTR_AES;
+	if (pkeysize)
+		*pkeysize = t->bitsize;
+
+	/* try to find a card which can handle this key */
+	rc = pkey_findcard(seckey, &cardnr, &domain, 1);
+	if (rc)
+		goto out;
+
+	/* check mkvp for old mkvp match */
+	rc = mkvp_cache_fetch(cardnr, domain, mkvp);
+	if (rc)
+		goto out;
+	if (t->mkvp == mkvp[1]) {
+		DEBUG_DBG("%s secure key has old mkvp\n", __func__);
+		if (pattributes)
+			*pattributes |= PKEY_VERIFY_ATTR_OLD_MKVP;
+	}
+
+	if (pcardnr)
+		*pcardnr = cardnr;
+	if (pdomain)
+		*pdomain = domain;
+
+out:
+	DEBUG_DBG("%s rc=%d\n", __func__, rc);
+	return rc;
+}
+EXPORT_SYMBOL(pkey_verifykey);
+
+/*
+ * File io functions
+ */
+
+static long pkey_unlocked_ioctl(struct file *filp, unsigned int cmd,
+				unsigned long arg)
+{
+	int rc;
+
+	switch (cmd) {
+	case PKEY_GENSECK: {
+		struct pkey_genseck __user *ugs = (void __user *) arg;
+		struct pkey_genseck kgs;
+
+		if (copy_from_user(&kgs, ugs, sizeof(kgs)))
+			return -EFAULT;
+		rc = pkey_genseckey(kgs.cardnr, kgs.domain,
+				    kgs.keytype, &kgs.seckey);
+		DEBUG_DBG("%s pkey_genseckey()=%d\n", __func__, rc);
+		if (rc)
+			break;
+		if (copy_to_user(ugs, &kgs, sizeof(kgs)))
+			return -EFAULT;
+		break;
+	}
+	case PKEY_CLR2SECK: {
+		struct pkey_clr2seck __user *ucs = (void __user *) arg;
+		struct pkey_clr2seck kcs;
+
+		if (copy_from_user(&kcs, ucs, sizeof(kcs)))
+			return -EFAULT;
+		rc = pkey_clr2seckey(kcs.cardnr, kcs.domain, kcs.keytype,
+				     &kcs.clrkey, &kcs.seckey);
+		DEBUG_DBG("%s pkey_clr2seckey()=%d\n", __func__, rc);
+		if (rc)
+			break;
+		if (copy_to_user(ucs, &kcs, sizeof(kcs)))
+			return -EFAULT;
+		memzero_explicit(&kcs, sizeof(kcs));
+		break;
+	}
+	case PKEY_SEC2PROTK: {
+		struct pkey_sec2protk __user *usp = (void __user *) arg;
+		struct pkey_sec2protk ksp;
+
+		if (copy_from_user(&ksp, usp, sizeof(ksp)))
+			return -EFAULT;
+		rc = pkey_sec2protkey(ksp.cardnr, ksp.domain,
+				      &ksp.seckey, &ksp.protkey);
+		DEBUG_DBG("%s pkey_sec2protkey()=%d\n", __func__, rc);
+		if (rc)
+			break;
+		if (copy_to_user(usp, &ksp, sizeof(ksp)))
+			return -EFAULT;
+		break;
+	}
+	case PKEY_CLR2PROTK: {
+		struct pkey_clr2protk __user *ucp = (void __user *) arg;
+		struct pkey_clr2protk kcp;
+
+		if (copy_from_user(&kcp, ucp, sizeof(kcp)))
+			return -EFAULT;
+		rc = pkey_clr2protkey(kcp.keytype,
+				      &kcp.clrkey, &kcp.protkey);
+		DEBUG_DBG("%s pkey_clr2protkey()=%d\n", __func__, rc);
+		if (rc)
+			break;
+		if (copy_to_user(ucp, &kcp, sizeof(kcp)))
+			return -EFAULT;
+		memzero_explicit(&kcp, sizeof(kcp));
+		break;
+	}
+	case PKEY_FINDCARD: {
+		struct pkey_findcard __user *ufc = (void __user *) arg;
+		struct pkey_findcard kfc;
+
+		if (copy_from_user(&kfc, ufc, sizeof(kfc)))
+			return -EFAULT;
+		rc = pkey_findcard(&kfc.seckey,
+				   &kfc.cardnr, &kfc.domain, 1);
+		DEBUG_DBG("%s pkey_findcard()=%d\n", __func__, rc);
+		if (rc)
+			break;
+		if (copy_to_user(ufc, &kfc, sizeof(kfc)))
+			return -EFAULT;
+		break;
+	}
+	case PKEY_SKEY2PKEY: {
+		struct pkey_skey2pkey __user *usp = (void __user *) arg;
+		struct pkey_skey2pkey ksp;
+
+		if (copy_from_user(&ksp, usp, sizeof(ksp)))
+			return -EFAULT;
+		rc = pkey_skey2pkey(&ksp.seckey, &ksp.protkey);
+		DEBUG_DBG("%s pkey_skey2pkey()=%d\n", __func__, rc);
+		if (rc)
+			break;
+		if (copy_to_user(usp, &ksp, sizeof(ksp)))
+			return -EFAULT;
+		break;
+	}
+	case PKEY_VERIFYKEY: {
+		struct pkey_verifykey __user *uvk = (void __user *) arg;
+		struct pkey_verifykey kvk;
+
+		if (copy_from_user(&kvk, uvk, sizeof(kvk)))
+			return -EFAULT;
+		rc = pkey_verifykey(&kvk.seckey, &kvk.cardnr, &kvk.domain,
+				    &kvk.keysize, &kvk.attributes);
+		DEBUG_DBG("%s pkey_verifykey()=%d\n", __func__, rc);
+		if (rc)
+			break;
+		if (copy_to_user(uvk, &kvk, sizeof(kvk)))
+			return -EFAULT;
+		break;
+	}
+	default:
+		/* unknown/unsupported ioctl cmd */
+		return -ENOTTY;
+	}
+
+	return rc;
+}
+
+/*
+ * Sysfs and file io operations
+ */
+static const struct file_operations pkey_fops = {
+	.owner		= THIS_MODULE,
+	.open		= nonseekable_open,
+	.llseek		= no_llseek,
+	.unlocked_ioctl = pkey_unlocked_ioctl,
+};
+
+static struct miscdevice pkey_dev = {
+	.name	= "pkey",
+	.minor	= MISC_DYNAMIC_MINOR,
+	.mode	= 0666,
+	.fops	= &pkey_fops,
+};
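+
+/*
+ * A minimal usage sketch (not part of the driver): a user space
+ * program would typically drive this ioctl API roughly like this,
+ * assuming the uapi definitions from asm/pkey.h:
+ *
+ *	int fd = open("/dev/pkey", O_RDWR);
+ *	struct pkey_genseck kgs = {
+ *		.cardnr = 0xFFFF, .domain = 0xFFFF,
+ *		.keytype = PKEY_KEYTYPE_AES_256,
+ *	};
+ *	if (fd >= 0 && ioctl(fd, PKEY_GENSECK, &kgs) == 0)
+ *		use(kgs.seckey);
+ *
+ * For the card number, 0xFFFF maps to AUTOSELECT (see prep_xcrb());
+ * treating 0xFFFF the same way for the domain is an assumption of
+ * this sketch, as is the hypothetical use() consumer.
+ */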
+
+/*
+ * Module init
+ */
+static int __init pkey_init(void)
+{
+	cpacf_mask_t pckmo_functions;
+
+	/* check for pckmo instructions available */
+	if (!cpacf_query(CPACF_PCKMO, &pckmo_functions))
+		return -EOPNOTSUPP;
+	if (!cpacf_test_func(&pckmo_functions, CPACF_PCKMO_ENC_AES_128_KEY) ||
+	    !cpacf_test_func(&pckmo_functions, CPACF_PCKMO_ENC_AES_192_KEY) ||
+	    !cpacf_test_func(&pckmo_functions, CPACF_PCKMO_ENC_AES_256_KEY))
+		return -EOPNOTSUPP;
+
+	pkey_debug_init();
+
+	return misc_register(&pkey_dev);
+}
+
+/*
+ * Module exit
+ */
+static void __exit pkey_exit(void)
+{
+	misc_deregister(&pkey_dev);
+	mkvp_cache_free();
+	pkey_debug_exit();
+}
+
+module_init(pkey_init);
+module_exit(pkey_exit);
diff --git a/drivers/s390/crypto/zcrypt_api.c b/drivers/s390/crypto/zcrypt_api.c
new file mode 100644
index 0000000..e685412
--- /dev/null
+++ b/drivers/s390/crypto/zcrypt_api.c
@@ -0,0 +1,1299 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ *  zcrypt 2.1.0
+ *
+ *  Copyright IBM Corp. 2001, 2012
+ *  Author(s): Robert Burroughs
+ *	       Eric Rossman (edrossma@us.ibm.com)
+ *	       Cornelia Huck <cornelia.huck@de.ibm.com>
+ *
+ *  Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com)
+ *  Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com>
+ *				  Ralph Wuerthner <rwuerthn@de.ibm.com>
+ *  MSGTYPE restruct:		  Holger Dengler <hd@linux.vnet.ibm.com>
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/miscdevice.h>
+#include <linux/fs.h>
+#include <linux/compat.h>
+#include <linux/slab.h>
+#include <linux/atomic.h>
+#include <linux/uaccess.h>
+#include <linux/hw_random.h>
+#include <linux/debugfs.h>
+#include <asm/debug.h>
+
+#define CREATE_TRACE_POINTS
+#include <asm/trace/zcrypt.h>
+
+#include "zcrypt_api.h"
+#include "zcrypt_debug.h"
+
+#include "zcrypt_msgtype6.h"
+#include "zcrypt_msgtype50.h"
+
+/*
+ * Module description.
+ */
+MODULE_AUTHOR("IBM Corporation");
+MODULE_DESCRIPTION("Cryptographic Coprocessor interface, " \
+		   "Copyright IBM Corp. 2001, 2012");
+MODULE_LICENSE("GPL");
+
+/*
+ * zcrypt tracepoint functions
+ */
+EXPORT_TRACEPOINT_SYMBOL(s390_zcrypt_req);
+EXPORT_TRACEPOINT_SYMBOL(s390_zcrypt_rep);
+
+static int zcrypt_hwrng_seed = 1;
+module_param_named(hwrng_seed, zcrypt_hwrng_seed, int, 0440);
+MODULE_PARM_DESC(hwrng_seed, "Turn on/off hwrng auto seed, default is 1 (on).");
+
+DEFINE_SPINLOCK(zcrypt_list_lock);
+LIST_HEAD(zcrypt_card_list);
+int zcrypt_device_count;
+
+static atomic_t zcrypt_open_count = ATOMIC_INIT(0);
+static atomic_t zcrypt_rescan_count = ATOMIC_INIT(0);
+
+atomic_t zcrypt_rescan_req = ATOMIC_INIT(0);
+EXPORT_SYMBOL(zcrypt_rescan_req);
+
+static LIST_HEAD(zcrypt_ops_list);
+
+/* Zcrypt related debug feature stuff. */
+debug_info_t *zcrypt_dbf_info;
+
+/**
+ * Process a rescan of the transport layer.
+ *
+ * Returns 1 if the rescan has been processed, otherwise 0.
+ */
+static inline int zcrypt_process_rescan(void)
+{
+	if (atomic_read(&zcrypt_rescan_req)) {
+		atomic_set(&zcrypt_rescan_req, 0);
+		ap_bus_force_rescan();
+		ZCRYPT_DBF(DBF_INFO, "rescan count=%07d\n",
+			   atomic_inc_return(&zcrypt_rescan_count));
+		return 1;
+	}
+	return 0;
+}
+
+void zcrypt_msgtype_register(struct zcrypt_ops *zops)
+{
+	list_add_tail(&zops->list, &zcrypt_ops_list);
+}
+
+void zcrypt_msgtype_unregister(struct zcrypt_ops *zops)
+{
+	list_del_init(&zops->list);
+}
+
+struct zcrypt_ops *zcrypt_msgtype(unsigned char *name, int variant)
+{
+	struct zcrypt_ops *zops;
+
+	list_for_each_entry(zops, &zcrypt_ops_list, list)
+		if ((zops->variant == variant) &&
+		    (!strncmp(zops->name, name, sizeof(zops->name))))
+			return zops;
+	return NULL;
+}
+EXPORT_SYMBOL(zcrypt_msgtype);
+
+/**
+ * zcrypt_read(): Not supported beyond zcrypt 1.3.1.
+ *
+ * This function is not supported beyond zcrypt 1.3.1.
+ */
+static ssize_t zcrypt_read(struct file *filp, char __user *buf,
+			   size_t count, loff_t *f_pos)
+{
+	return -EPERM;
+}
+
+/**
+ * zcrypt_write(): Not allowed.
+ *
+ * Write is not allowed.
+ */
+static ssize_t zcrypt_write(struct file *filp, const char __user *buf,
+			    size_t count, loff_t *f_pos)
+{
+	return -EPERM;
+}
+
+/**
+ * zcrypt_open(): Count number of users.
+ *
+ * Device open function to count number of users.
+ */
+static int zcrypt_open(struct inode *inode, struct file *filp)
+{
+	atomic_inc(&zcrypt_open_count);
+	return nonseekable_open(inode, filp);
+}
+
+/**
+ * zcrypt_release(): Count number of users.
+ *
+ * Device close function to count number of users.
+ */
+static int zcrypt_release(struct inode *inode, struct file *filp)
+{
+	atomic_dec(&zcrypt_open_count);
+	return 0;
+}
+
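+/*
+ * Pin the selected queue: take a reference on the owning driver
+ * module, the queue and the device, and add the request weight to
+ * the card and queue load counters. Returns the queue on success or
+ * NULL if the queue or its driver is going away.
+ */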
+static inline struct zcrypt_queue *zcrypt_pick_queue(struct zcrypt_card *zc,
+						     struct zcrypt_queue *zq,
+						     unsigned int weight)
+{
+	if (!zq || !try_module_get(zq->queue->ap_dev.drv->driver.owner))
+		return NULL;
+	zcrypt_queue_get(zq);
+	get_device(&zq->queue->ap_dev.device);
+	atomic_add(weight, &zc->load);
+	atomic_add(weight, &zq->load);
+	zq->request_count++;
+	return zq;
+}
+
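+/* Counterpart of zcrypt_pick_queue(): drop load accounting and references. */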
+static inline void zcrypt_drop_queue(struct zcrypt_card *zc,
+				     struct zcrypt_queue *zq,
+				     unsigned int weight)
+{
+	struct module *mod = zq->queue->ap_dev.drv->driver.owner;
+
+	zq->request_count--;
+	atomic_sub(weight, &zc->load);
+	atomic_sub(weight, &zq->load);
+	put_device(&zq->queue->ap_dev.device);
+	zcrypt_queue_put(zq);
+	module_put(mod);
+}
+
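+/*
+ * Returns true if the candidate card @zc, with the request weight
+ * added, would be more loaded than the currently preferred card
+ * (ties are broken by the total request count); the caller then
+ * skips the candidate.
+ */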
+static inline bool zcrypt_card_compare(struct zcrypt_card *zc,
+				       struct zcrypt_card *pref_zc,
+				       unsigned int weight,
+				       unsigned int pref_weight)
+{
+	if (!pref_zc)
+		return false;
+	weight += atomic_read(&zc->load);
+	pref_weight += atomic_read(&pref_zc->load);
+	if (weight == pref_weight)
+		return atomic_read(&zc->card->total_request_count) >
+			atomic_read(&pref_zc->card->total_request_count);
+	return weight > pref_weight;
+}
+
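+/* As zcrypt_card_compare(), but comparing individual queues. */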
+static inline bool zcrypt_queue_compare(struct zcrypt_queue *zq,
+					struct zcrypt_queue *pref_zq,
+					unsigned int weight,
+					unsigned int pref_weight)
+{
+	if (!pref_zq)
+		return false;
+	weight += atomic_read(&zq->load);
+	pref_weight += atomic_read(&pref_zq->load);
+	if (weight == pref_weight)
+		return zq->queue->total_request_count >
+			pref_zq->queue->total_request_count;
+	return weight > pref_weight;
+}
+
+/*
+ * zcrypt ioctls.
+ */
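+/*
+ * All request functions below share the same pattern: scan the card
+ * and queue lists for the least loaded online queue that implements
+ * the requested operation, pin it via zcrypt_pick_queue(), run the
+ * request on it and release it again via zcrypt_drop_queue(). If no
+ * suitable device is found, -ENODEV is returned.
+ */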
+static long zcrypt_rsa_modexpo(struct ica_rsa_modexpo *mex)
+{
+	struct zcrypt_card *zc, *pref_zc;
+	struct zcrypt_queue *zq, *pref_zq;
+	unsigned int weight, pref_weight;
+	unsigned int func_code;
+	int qid = 0, rc = -ENODEV;
+
+	trace_s390_zcrypt_req(mex, TP_ICARSAMODEXPO);
+
+	if (mex->outputdatalength < mex->inputdatalength) {
+		rc = -EINVAL;
+		goto out;
+	}
+
+	/*
+	 * As long as outputdatalength is big enough, we can set the
+	 * outputdatalength equal to the inputdatalength, since that is the
+	 * number of bytes we will copy in any case
+	 */
+	mex->outputdatalength = mex->inputdatalength;
+
+	rc = get_rsa_modex_fc(mex, &func_code);
+	if (rc)
+		goto out;
+
+	pref_zc = NULL;
+	pref_zq = NULL;
+	spin_lock(&zcrypt_list_lock);
+	for_each_zcrypt_card(zc) {
+		/* Check for online accelerator and CCA cards */
+		if (!zc->online || !(zc->card->functions & 0x18000000))
+			continue;
+		/* Check for size limits */
+		if (zc->min_mod_size > mex->inputdatalength ||
+		    zc->max_mod_size < mex->inputdatalength)
+			continue;
+		/* get weight index of the card device	*/
+		weight = zc->speed_rating[func_code];
+		if (zcrypt_card_compare(zc, pref_zc, weight, pref_weight))
+			continue;
+		for_each_zcrypt_queue(zq, zc) {
+			/* check if device is online and eligible */
+			if (!zq->online || !zq->ops->rsa_modexpo)
+				continue;
+			if (zcrypt_queue_compare(zq, pref_zq,
+						 weight, pref_weight))
+				continue;
+			pref_zc = zc;
+			pref_zq = zq;
+			pref_weight = weight;
+		}
+	}
+	pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, weight);
+	spin_unlock(&zcrypt_list_lock);
+
+	if (!pref_zq) {
+		rc = -ENODEV;
+		goto out;
+	}
+
+	qid = pref_zq->queue->qid;
+	rc = pref_zq->ops->rsa_modexpo(pref_zq, mex);
+
+	spin_lock(&zcrypt_list_lock);
+	zcrypt_drop_queue(pref_zc, pref_zq, weight);
+	spin_unlock(&zcrypt_list_lock);
+
+out:
+	trace_s390_zcrypt_rep(mex, func_code, rc,
+			      AP_QID_CARD(qid), AP_QID_QUEUE(qid));
+	return rc;
+}
+
+static long zcrypt_rsa_crt(struct ica_rsa_modexpo_crt *crt)
+{
+	struct zcrypt_card *zc, *pref_zc;
+	struct zcrypt_queue *zq, *pref_zq;
+	unsigned int weight, pref_weight;
+	unsigned int func_code;
+	int qid = 0, rc = -ENODEV;
+
+	trace_s390_zcrypt_req(crt, TP_ICARSACRT);
+
+	if (crt->outputdatalength < crt->inputdatalength) {
+		rc = -EINVAL;
+		goto out;
+	}
+
+	/*
+	 * As long as outputdatalength is big enough, we can set the
+	 * outputdatalength equal to the inputdatalength, since that is the
+	 * number of bytes we will copy in any case
+	 */
+	crt->outputdatalength = crt->inputdatalength;
+
+	rc = get_rsa_crt_fc(crt, &func_code);
+	if (rc)
+		goto out;
+
+	pref_zc = NULL;
+	pref_zq = NULL;
+	spin_lock(&zcrypt_list_lock);
+	for_each_zcrypt_card(zc) {
+		/* Check for online accelerator and CCA cards */
+		if (!zc->online || !(zc->card->functions & 0x18000000))
+			continue;
+		/* Check for size limits */
+		if (zc->min_mod_size > crt->inputdatalength ||
+		    zc->max_mod_size < crt->inputdatalength)
+			continue;
+		/* get weight index of the card device	*/
+		weight = zc->speed_rating[func_code];
+		if (zcrypt_card_compare(zc, pref_zc, weight, pref_weight))
+			continue;
+		for_each_zcrypt_queue(zq, zc) {
+			/* check if device is online and eligible */
+			if (!zq->online || !zq->ops->rsa_modexpo_crt)
+				continue;
+			if (zcrypt_queue_compare(zq, pref_zq,
+						 weight, pref_weight))
+				continue;
+			pref_zc = zc;
+			pref_zq = zq;
+			pref_weight = weight;
+		}
+	}
+	pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, weight);
+	spin_unlock(&zcrypt_list_lock);
+
+	if (!pref_zq) {
+		rc = -ENODEV;
+		goto out;
+	}
+
+	qid = pref_zq->queue->qid;
+	rc = pref_zq->ops->rsa_modexpo_crt(pref_zq, crt);
+
+	spin_lock(&zcrypt_list_lock);
+	zcrypt_drop_queue(pref_zc, pref_zq, weight);
+	spin_unlock(&zcrypt_list_lock);
+
+out:
+	trace_s390_zcrypt_rep(crt, func_code, rc,
+			      AP_QID_CARD(qid), AP_QID_QUEUE(qid));
+	return rc;
+}
+
+long zcrypt_send_cprb(struct ica_xcRB *xcRB)
+{
+	struct zcrypt_card *zc, *pref_zc;
+	struct zcrypt_queue *zq, *pref_zq;
+	struct ap_message ap_msg;
+	unsigned int weight, pref_weight;
+	unsigned int func_code;
+	unsigned short *domain;
+	int qid = 0, rc = -ENODEV;
+
+	trace_s390_zcrypt_req(xcRB, TB_ZSECSENDCPRB);
+
+	ap_init_message(&ap_msg);
+	rc = get_cprb_fc(xcRB, &ap_msg, &func_code, &domain);
+	if (rc)
+		goto out;
+
+	pref_zc = NULL;
+	pref_zq = NULL;
+	spin_lock(&zcrypt_list_lock);
+	for_each_zcrypt_card(zc) {
+		/* Check for online CCA cards */
+		if (!zc->online || !(zc->card->functions & 0x10000000))
+			continue;
+		/* Check for user selected CCA card */
+		if (xcRB->user_defined != AUTOSELECT &&
+		    xcRB->user_defined != zc->card->id)
+			continue;
+		/* get weight index of the card device	*/
+		weight = speed_idx_cca(func_code) * zc->speed_rating[SECKEY];
+		if (zcrypt_card_compare(zc, pref_zc, weight, pref_weight))
+			continue;
+		for_each_zcrypt_queue(zq, zc) {
+			/* check if device is online and eligible */
+			if (!zq->online ||
+			    !zq->ops->send_cprb ||
+			    ((*domain != (unsigned short) AUTOSELECT) &&
+			     (*domain != AP_QID_QUEUE(zq->queue->qid))))
+				continue;
+			if (zcrypt_queue_compare(zq, pref_zq,
+						 weight, pref_weight))
+				continue;
+			pref_zc = zc;
+			pref_zq = zq;
+			pref_weight = weight;
+		}
+	}
+	pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, weight);
+	spin_unlock(&zcrypt_list_lock);
+
+	if (!pref_zq) {
+		rc = -ENODEV;
+		goto out;
+	}
+
+	/* in case of auto select, provide the correct domain */
+	qid = pref_zq->queue->qid;
+	if (*domain == (unsigned short) AUTOSELECT)
+		*domain = AP_QID_QUEUE(qid);
+
+	rc = pref_zq->ops->send_cprb(pref_zq, xcRB, &ap_msg);
+
+	spin_lock(&zcrypt_list_lock);
+	zcrypt_drop_queue(pref_zc, pref_zq, weight);
+	spin_unlock(&zcrypt_list_lock);
+
+out:
+	ap_release_message(&ap_msg);
+	trace_s390_zcrypt_rep(xcRB, func_code, rc,
+			      AP_QID_CARD(qid), AP_QID_QUEUE(qid));
+	return rc;
+}
+EXPORT_SYMBOL(zcrypt_send_cprb);
+
+static bool is_desired_ep11_card(unsigned int dev_id,
+				 unsigned short target_num,
+				 struct ep11_target_dev *targets)
+{
+	while (target_num-- > 0) {
+		if (dev_id == targets->ap_id)
+			return true;
+		targets++;
+	}
+	return false;
+}
+
+static bool is_desired_ep11_queue(unsigned int dev_qid,
+				  unsigned short target_num,
+				  struct ep11_target_dev *targets)
+{
+	while (target_num-- > 0) {
+		if (AP_MKQID(targets->ap_id, targets->dom_id) == dev_qid)
+			return true;
+		targets++;
+	}
+	return false;
+}
+
+static long zcrypt_send_ep11_cprb(struct ep11_urb *xcrb)
+{
+	struct zcrypt_card *zc, *pref_zc;
+	struct zcrypt_queue *zq, *pref_zq;
+	struct ep11_target_dev *targets;
+	unsigned short target_num;
+	unsigned int weight, pref_weight;
+	unsigned int func_code;
+	struct ap_message ap_msg;
+	int qid = 0, rc = -ENODEV;
+
+	trace_s390_zcrypt_req(xcrb, TP_ZSENDEP11CPRB);
+
+	ap_init_message(&ap_msg);
+
+	target_num = (unsigned short) xcrb->targets_num;
+
+	/* empty list indicates autoselect (all available targets) */
+	targets = NULL;
+	if (target_num != 0) {
+		struct ep11_target_dev __user *uptr;
+
+		targets = kcalloc(target_num, sizeof(*targets), GFP_KERNEL);
+		if (!targets) {
+			rc = -ENOMEM;
+			goto out;
+		}
+
+		uptr = (struct ep11_target_dev __force __user *) xcrb->targets;
+		if (copy_from_user(targets, uptr,
+				   target_num * sizeof(*targets))) {
+			rc = -EFAULT;
+			goto out_free;
+		}
+	}
+
+	rc = get_ep11cprb_fc(xcrb, &ap_msg, &func_code);
+	if (rc)
+		goto out_free;
+
+	pref_zc = NULL;
+	pref_zq = NULL;
+	spin_lock(&zcrypt_list_lock);
+	for_each_zcrypt_card(zc) {
+		/* Check for online EP11 cards */
+		if (!zc->online || !(zc->card->functions & 0x04000000))
+			continue;
+		/* Check for user selected EP11 card */
+		if (targets &&
+		    !is_desired_ep11_card(zc->card->id, target_num, targets))
+			continue;
+		/* get weight index of the card device	*/
+		weight = speed_idx_ep11(func_code) * zc->speed_rating[SECKEY];
+		if (zcrypt_card_compare(zc, pref_zc, weight, pref_weight))
+			continue;
+		for_each_zcrypt_queue(zq, zc) {
+			/* check if device is online and eligible */
+			if (!zq->online ||
+			    !zq->ops->send_ep11_cprb ||
+			    (targets &&
+			     !is_desired_ep11_queue(zq->queue->qid,
+						    target_num, targets)))
+				continue;
+			if (zcrypt_queue_compare(zq, pref_zq,
+						 weight, pref_weight))
+				continue;
+			pref_zc = zc;
+			pref_zq = zq;
+			pref_weight = weight;
+		}
+	}
+	pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, weight);
+	spin_unlock(&zcrypt_list_lock);
+
+	if (!pref_zq) {
+		rc = -ENODEV;
+		goto out_free;
+	}
+
+	qid = pref_zq->queue->qid;
+	rc = pref_zq->ops->send_ep11_cprb(pref_zq, xcrb, &ap_msg);
+
+	spin_lock(&zcrypt_list_lock);
+	zcrypt_drop_queue(pref_zc, pref_zq, weight);
+	spin_unlock(&zcrypt_list_lock);
+
+out_free:
+	kfree(targets);
+out:
+	ap_release_message(&ap_msg);
+	trace_s390_zcrypt_rep(xcrb, func_code, rc,
+			      AP_QID_CARD(qid), AP_QID_QUEUE(qid));
+	return rc;
+}
+
+static long zcrypt_rng(char *buffer)
+{
+	struct zcrypt_card *zc, *pref_zc;
+	struct zcrypt_queue *zq, *pref_zq;
+	unsigned int weight, pref_weight;
+	unsigned int func_code;
+	struct ap_message ap_msg;
+	unsigned int domain;
+	int qid = 0, rc = -ENODEV;
+
+	trace_s390_zcrypt_req(buffer, TP_HWRNGCPRB);
+
+	ap_init_message(&ap_msg);
+	rc = get_rng_fc(&ap_msg, &func_code, &domain);
+	if (rc)
+		goto out;
+
+	pref_zc = NULL;
+	pref_zq = NULL;
+	spin_lock(&zcrypt_list_lock);
+	for_each_zcrypt_card(zc) {
+		/* Check for online CCA cards */
+		if (!zc->online || !(zc->card->functions & 0x10000000))
+			continue;
+		/* get weight index of the card device	*/
+		weight = zc->speed_rating[func_code];
+		if (zcrypt_card_compare(zc, pref_zc, weight, pref_weight))
+			continue;
+		for_each_zcrypt_queue(zq, zc) {
+			/* check if device is online and eligible */
+			if (!zq->online || !zq->ops->rng)
+				continue;
+			if (zcrypt_queue_compare(zq, pref_zq,
+						 weight, pref_weight))
+				continue;
+			pref_zc = zc;
+			pref_zq = zq;
+			pref_weight = weight;
+		}
+	}
+	pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, weight);
+	spin_unlock(&zcrypt_list_lock);
+
+	if (!pref_zq) {
+		rc = -ENODEV;
+		goto out;
+	}
+
+	qid = pref_zq->queue->qid;
+	rc = pref_zq->ops->rng(pref_zq, buffer, &ap_msg);
+
+	spin_lock(&zcrypt_list_lock);
+	zcrypt_drop_queue(pref_zc, pref_zq, weight);
+	spin_unlock(&zcrypt_list_lock);
+
+out:
+	ap_release_message(&ap_msg);
+	trace_s390_zcrypt_rep(buffer, func_code, rc,
+			      AP_QID_CARD(qid), AP_QID_QUEUE(qid));
+	return rc;
+}
+
+static void zcrypt_device_status_mask(struct zcrypt_device_status *devstatus)
+{
+	struct zcrypt_card *zc;
+	struct zcrypt_queue *zq;
+	struct zcrypt_device_status *stat;
+	int card, queue;
+
+	memset(devstatus, 0, MAX_ZDEV_ENTRIES
+	       * sizeof(struct zcrypt_device_status));
+
+	spin_lock(&zcrypt_list_lock);
+	for_each_zcrypt_card(zc) {
+		for_each_zcrypt_queue(zq, zc) {
+			card = AP_QID_CARD(zq->queue->qid);
+			if (card >= MAX_ZDEV_CARDIDS)
+				continue;
+			queue = AP_QID_QUEUE(zq->queue->qid);
+			stat = &devstatus[card * AP_DOMAINS + queue];
+			stat->hwtype = zc->card->ap_dev.device_type;
+			stat->functions = zc->card->functions >> 26;
+			stat->qid = zq->queue->qid;
+			stat->online = zq->online ? 0x01 : 0x00;
+		}
+	}
+	spin_unlock(&zcrypt_list_lock);
+}
+
+void zcrypt_device_status_mask_ext(struct zcrypt_device_status_ext *devstatus)
+{
+	struct zcrypt_card *zc;
+	struct zcrypt_queue *zq;
+	struct zcrypt_device_status_ext *stat;
+	int card, queue;
+
+	memset(devstatus, 0, MAX_ZDEV_ENTRIES_EXT
+	       * sizeof(struct zcrypt_device_status_ext));
+
+	spin_lock(&zcrypt_list_lock);
+	for_each_zcrypt_card(zc) {
+		for_each_zcrypt_queue(zq, zc) {
+			card = AP_QID_CARD(zq->queue->qid);
+			queue = AP_QID_QUEUE(zq->queue->qid);
+			stat = &devstatus[card * AP_DOMAINS + queue];
+			stat->hwtype = zc->card->ap_dev.device_type;
+			stat->functions = zc->card->functions >> 26;
+			stat->qid = zq->queue->qid;
+			stat->online = zq->online ? 0x01 : 0x00;
+		}
+	}
+	spin_unlock(&zcrypt_list_lock);
+}
+EXPORT_SYMBOL(zcrypt_device_status_mask_ext);
+
+static void zcrypt_status_mask(char status[], size_t max_adapters)
+{
+	struct zcrypt_card *zc;
+	struct zcrypt_queue *zq;
+	int card;
+
+	memset(status, 0, max_adapters);
+	spin_lock(&zcrypt_list_lock);
+	for_each_zcrypt_card(zc) {
+		for_each_zcrypt_queue(zq, zc) {
+			card = AP_QID_CARD(zq->queue->qid);
+			if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index
+			    || card >= max_adapters)
+				continue;
+			status[card] = zc->online ? zc->user_space_type : 0x0d;
+		}
+	}
+	spin_unlock(&zcrypt_list_lock);
+}
+
+static void zcrypt_qdepth_mask(char qdepth[], size_t max_adapters)
+{
+	struct zcrypt_card *zc;
+	struct zcrypt_queue *zq;
+	int card;
+
+	memset(qdepth, 0, max_adapters);
+	spin_lock(&zcrypt_list_lock);
+	local_bh_disable();
+	for_each_zcrypt_card(zc) {
+		for_each_zcrypt_queue(zq, zc) {
+			card = AP_QID_CARD(zq->queue->qid);
+			if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index
+			    || card >= max_adapters)
+				continue;
+			spin_lock(&zq->queue->lock);
+			qdepth[card] =
+				zq->queue->pendingq_count +
+				zq->queue->requestq_count;
+			spin_unlock(&zq->queue->lock);
+		}
+	}
+	local_bh_enable();
+	spin_unlock(&zcrypt_list_lock);
+}
+
+static void zcrypt_perdev_reqcnt(int reqcnt[], size_t max_adapters)
+{
+	struct zcrypt_card *zc;
+	struct zcrypt_queue *zq;
+	int card;
+
+	memset(reqcnt, 0, sizeof(int) * max_adapters);
+	spin_lock(&zcrypt_list_lock);
+	local_bh_disable();
+	for_each_zcrypt_card(zc) {
+		for_each_zcrypt_queue(zq, zc) {
+			card = AP_QID_CARD(zq->queue->qid);
+			if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index
+			    || card >= max_adapters)
+				continue;
+			spin_lock(&zq->queue->lock);
+			reqcnt[card] = zq->queue->total_request_count;
+			spin_unlock(&zq->queue->lock);
+		}
+	}
+	local_bh_enable();
+	spin_unlock(&zcrypt_list_lock);
+}
+
+static int zcrypt_pendingq_count(void)
+{
+	struct zcrypt_card *zc;
+	struct zcrypt_queue *zq;
+	int pendingq_count;
+
+	pendingq_count = 0;
+	spin_lock(&zcrypt_list_lock);
+	local_bh_disable();
+	for_each_zcrypt_card(zc) {
+		for_each_zcrypt_queue(zq, zc) {
+			if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index)
+				continue;
+			spin_lock(&zq->queue->lock);
+			pendingq_count += zq->queue->pendingq_count;
+			spin_unlock(&zq->queue->lock);
+		}
+	}
+	local_bh_enable();
+	spin_unlock(&zcrypt_list_lock);
+	return pendingq_count;
+}
+
+static int zcrypt_requestq_count(void)
+{
+	struct zcrypt_card *zc;
+	struct zcrypt_queue *zq;
+	int requestq_count;
+
+	requestq_count = 0;
+	spin_lock(&zcrypt_list_lock);
+	local_bh_disable();
+	for_each_zcrypt_card(zc) {
+		for_each_zcrypt_queue(zq, zc) {
+			if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index)
+				continue;
+			spin_lock(&zq->queue->lock);
+			requestq_count += zq->queue->requestq_count;
+			spin_unlock(&zq->queue->lock);
+		}
+	}
+	local_bh_enable();
+	spin_unlock(&zcrypt_list_lock);
+	return requestq_count;
+}
+
+static long zcrypt_unlocked_ioctl(struct file *filp, unsigned int cmd,
+				  unsigned long arg)
+{
+	int rc = 0;
+
+	switch (cmd) {
+	case ICARSAMODEXPO: {
+		struct ica_rsa_modexpo __user *umex = (void __user *) arg;
+		struct ica_rsa_modexpo mex;
+
+		if (copy_from_user(&mex, umex, sizeof(mex)))
+			return -EFAULT;
+		do {
+			rc = zcrypt_rsa_modexpo(&mex);
+		} while (rc == -EAGAIN);
+		/* on failure: retry once again after a requested rescan */
+		if ((rc == -ENODEV) && (zcrypt_process_rescan()))
+			do {
+				rc = zcrypt_rsa_modexpo(&mex);
+			} while (rc == -EAGAIN);
+		if (rc) {
+			ZCRYPT_DBF(DBF_DEBUG, "ioctl ICARSAMODEXPO rc=%d\n", rc);
+			return rc;
+		}
+		return put_user(mex.outputdatalength, &umex->outputdatalength);
+	}
+	case ICARSACRT: {
+		struct ica_rsa_modexpo_crt __user *ucrt = (void __user *) arg;
+		struct ica_rsa_modexpo_crt crt;
+
+		if (copy_from_user(&crt, ucrt, sizeof(crt)))
+			return -EFAULT;
+		do {
+			rc = zcrypt_rsa_crt(&crt);
+		} while (rc == -EAGAIN);
+		/* on failure: retry once again after a requested rescan */
+		if ((rc == -ENODEV) && (zcrypt_process_rescan()))
+			do {
+				rc = zcrypt_rsa_crt(&crt);
+			} while (rc == -EAGAIN);
+		if (rc) {
+			ZCRYPT_DBF(DBF_DEBUG, "ioctl ICARSACRT rc=%d\n", rc);
+			return rc;
+		}
+		return put_user(crt.outputdatalength, &ucrt->outputdatalength);
+	}
+	case ZSECSENDCPRB: {
+		struct ica_xcRB __user *uxcRB = (void __user *) arg;
+		struct ica_xcRB xcRB;
+
+		if (copy_from_user(&xcRB, uxcRB, sizeof(xcRB)))
+			return -EFAULT;
+		do {
+			rc = zcrypt_send_cprb(&xcRB);
+		} while (rc == -EAGAIN);
+		/* on failure: retry once again after a requested rescan */
+		if ((rc == -ENODEV) && (zcrypt_process_rescan()))
+			do {
+				rc = zcrypt_send_cprb(&xcRB);
+			} while (rc == -EAGAIN);
+		if (rc)
+			ZCRYPT_DBF(DBF_DEBUG, "ioctl ZSENDCPRB rc=%d\n", rc);
+		if (copy_to_user(uxcRB, &xcRB, sizeof(xcRB)))
+			return -EFAULT;
+		return rc;
+	}
+	case ZSENDEP11CPRB: {
+		struct ep11_urb __user *uxcrb = (void __user *)arg;
+		struct ep11_urb xcrb;
+
+		if (copy_from_user(&xcrb, uxcrb, sizeof(xcrb)))
+			return -EFAULT;
+		do {
+			rc = zcrypt_send_ep11_cprb(&xcrb);
+		} while (rc == -EAGAIN);
+		/* on failure: retry once again after a requested rescan */
+		if ((rc == -ENODEV) && (zcrypt_process_rescan()))
+			do {
+				rc = zcrypt_send_ep11_cprb(&xcrb);
+			} while (rc == -EAGAIN);
+		if (rc)
+			ZCRYPT_DBF(DBF_DEBUG, "ioctl ZSENDEP11CPRB rc=%d\n", rc);
+		if (copy_to_user(uxcrb, &xcrb, sizeof(xcrb)))
+			return -EFAULT;
+		return rc;
+	}
+	case ZCRYPT_DEVICE_STATUS: {
+		struct zcrypt_device_status_ext *device_status;
+		size_t total_size = MAX_ZDEV_ENTRIES_EXT
+			* sizeof(struct zcrypt_device_status_ext);
+
+		device_status = kzalloc(total_size, GFP_KERNEL);
+		if (!device_status)
+			return -ENOMEM;
+		zcrypt_device_status_mask_ext(device_status);
+		if (copy_to_user((char __user *) arg, device_status,
+				 total_size))
+			rc = -EFAULT;
+		kfree(device_status);
+		return rc;
+	}
+	case ZCRYPT_STATUS_MASK: {
+		char status[AP_DEVICES];
+
+		zcrypt_status_mask(status, AP_DEVICES);
+		if (copy_to_user((char __user *) arg, status, sizeof(status)))
+			return -EFAULT;
+		return 0;
+	}
+	case ZCRYPT_QDEPTH_MASK: {
+		char qdepth[AP_DEVICES];
+
+		zcrypt_qdepth_mask(qdepth, AP_DEVICES);
+		if (copy_to_user((char __user *) arg, qdepth, sizeof(qdepth)))
+			return -EFAULT;
+		return 0;
+	}
+	case ZCRYPT_PERDEV_REQCNT: {
+		int *reqcnt;
+
+		reqcnt = kcalloc(AP_DEVICES, sizeof(int), GFP_KERNEL);
+		if (!reqcnt)
+			return -ENOMEM;
+		zcrypt_perdev_reqcnt(reqcnt, AP_DEVICES);
+		if (copy_to_user((int __user *) arg, reqcnt,
+				 sizeof(int) * AP_DEVICES))
+			rc = -EFAULT;
+		kfree(reqcnt);
+		return rc;
+	}
+	case Z90STAT_REQUESTQ_COUNT:
+		return put_user(zcrypt_requestq_count(), (int __user *) arg);
+	case Z90STAT_PENDINGQ_COUNT:
+		return put_user(zcrypt_pendingq_count(), (int __user *) arg);
+	case Z90STAT_TOTALOPEN_COUNT:
+		return put_user(atomic_read(&zcrypt_open_count),
+				(int __user *) arg);
+	case Z90STAT_DOMAIN_INDEX:
+		return put_user(ap_domain_index, (int __user *) arg);
+	/*
+	 * Deprecated ioctls
+	 */
+	case ZDEVICESTATUS: {
+		/* the old ioctl supports only 64 adapters */
+		struct zcrypt_device_status *device_status;
+		size_t total_size = MAX_ZDEV_ENTRIES
+			* sizeof(struct zcrypt_device_status);
+
+		device_status = kzalloc(total_size, GFP_KERNEL);
+		if (!device_status)
+			return -ENOMEM;
+		zcrypt_device_status_mask(device_status);
+		if (copy_to_user((char __user *) arg, device_status,
+				 total_size))
+			rc = -EFAULT;
+		kfree(device_status);
+		return rc;
+	}
+	case Z90STAT_STATUS_MASK: {
+		/* the old ioctl supports only 64 adapters */
+		char status[MAX_ZDEV_CARDIDS];
+
+		zcrypt_status_mask(status, MAX_ZDEV_CARDIDS);
+		if (copy_to_user((char __user *) arg, status, sizeof(status)))
+			return -EFAULT;
+		return 0;
+	}
+	case Z90STAT_QDEPTH_MASK: {
+		/* the old ioctl supports only 64 adapters */
+		char qdepth[MAX_ZDEV_CARDIDS];
+
+		zcrypt_qdepth_mask(qdepth, MAX_ZDEV_CARDIDS);
+		if (copy_to_user((char __user *) arg, qdepth, sizeof(qdepth)))
+			return -EFAULT;
+		return 0;
+	}
+	case Z90STAT_PERDEV_REQCNT: {
+		/* the old ioctl supports only 64 adapters */
+		int reqcnt[MAX_ZDEV_CARDIDS];
+
+		zcrypt_perdev_reqcnt(reqcnt, MAX_ZDEV_CARDIDS);
+		if (copy_to_user((int __user *) arg, reqcnt, sizeof(reqcnt)))
+			return -EFAULT;
+		return 0;
+	}
+	/* unknown ioctl number */
+	default:
+		ZCRYPT_DBF(DBF_DEBUG, "unknown ioctl 0x%08x\n", cmd);
+		return -ENOIOCTLCMD;
+	}
+}
+
+#ifdef CONFIG_COMPAT
+/*
+ * ioctl32 conversion routines
+ */
+struct compat_ica_rsa_modexpo {
+	compat_uptr_t	inputdata;
+	unsigned int	inputdatalength;
+	compat_uptr_t	outputdata;
+	unsigned int	outputdatalength;
+	compat_uptr_t	b_key;
+	compat_uptr_t	n_modulus;
+};
+
+static long trans_modexpo32(struct file *filp, unsigned int cmd,
+			    unsigned long arg)
+{
+	struct compat_ica_rsa_modexpo __user *umex32 = compat_ptr(arg);
+	struct compat_ica_rsa_modexpo mex32;
+	struct ica_rsa_modexpo mex64;
+	long rc;
+
+	if (copy_from_user(&mex32, umex32, sizeof(mex32)))
+		return -EFAULT;
+	mex64.inputdata = compat_ptr(mex32.inputdata);
+	mex64.inputdatalength = mex32.inputdatalength;
+	mex64.outputdata = compat_ptr(mex32.outputdata);
+	mex64.outputdatalength = mex32.outputdatalength;
+	mex64.b_key = compat_ptr(mex32.b_key);
+	mex64.n_modulus = compat_ptr(mex32.n_modulus);
+	do {
+		rc = zcrypt_rsa_modexpo(&mex64);
+	} while (rc == -EAGAIN);
+	/* on failure: retry once again after a requested rescan */
+	if ((rc == -ENODEV) && (zcrypt_process_rescan()))
+		do {
+			rc = zcrypt_rsa_modexpo(&mex64);
+		} while (rc == -EAGAIN);
+	if (rc)
+		return rc;
+	return put_user(mex64.outputdatalength,
+			&umex32->outputdatalength);
+}
+
+struct compat_ica_rsa_modexpo_crt {
+	compat_uptr_t	inputdata;
+	unsigned int	inputdatalength;
+	compat_uptr_t	outputdata;
+	unsigned int	outputdatalength;
+	compat_uptr_t	bp_key;
+	compat_uptr_t	bq_key;
+	compat_uptr_t	np_prime;
+	compat_uptr_t	nq_prime;
+	compat_uptr_t	u_mult_inv;
+};
+
+static long trans_modexpo_crt32(struct file *filp, unsigned int cmd,
+				unsigned long arg)
+{
+	struct compat_ica_rsa_modexpo_crt __user *ucrt32 = compat_ptr(arg);
+	struct compat_ica_rsa_modexpo_crt crt32;
+	struct ica_rsa_modexpo_crt crt64;
+	long rc;
+
+	if (copy_from_user(&crt32, ucrt32, sizeof(crt32)))
+		return -EFAULT;
+	crt64.inputdata = compat_ptr(crt32.inputdata);
+	crt64.inputdatalength = crt32.inputdatalength;
+	crt64.outputdata = compat_ptr(crt32.outputdata);
+	crt64.outputdatalength = crt32.outputdatalength;
+	crt64.bp_key = compat_ptr(crt32.bp_key);
+	crt64.bq_key = compat_ptr(crt32.bq_key);
+	crt64.np_prime = compat_ptr(crt32.np_prime);
+	crt64.nq_prime = compat_ptr(crt32.nq_prime);
+	crt64.u_mult_inv = compat_ptr(crt32.u_mult_inv);
+	do {
+		rc = zcrypt_rsa_crt(&crt64);
+	} while (rc == -EAGAIN);
+	/* on failure: retry once again after a requested rescan */
+	if ((rc == -ENODEV) && (zcrypt_process_rescan()))
+		do {
+			rc = zcrypt_rsa_crt(&crt64);
+		} while (rc == -EAGAIN);
+	if (rc)
+		return rc;
+	return put_user(crt64.outputdatalength,
+			&ucrt32->outputdatalength);
+}
+
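+/*
+ * 32 bit view of struct ica_xcRB. The padding arrays keep each pointer
+ * member inside a 16 byte slot, matching the offsets of the 64 bit
+ * layout, so only the pointer members themselves need conversion.
+ */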
+struct compat_ica_xcRB {
+	unsigned short	agent_ID;
+	unsigned int	user_defined;
+	unsigned short	request_ID;
+	unsigned int	request_control_blk_length;
+	unsigned char	padding1[16 - sizeof(compat_uptr_t)];
+	compat_uptr_t	request_control_blk_addr;
+	unsigned int	request_data_length;
+	char		padding2[16 - sizeof(compat_uptr_t)];
+	compat_uptr_t	request_data_address;
+	unsigned int	reply_control_blk_length;
+	char		padding3[16 - sizeof(compat_uptr_t)];
+	compat_uptr_t	reply_control_blk_addr;
+	unsigned int	reply_data_length;
+	char		padding4[16 - sizeof(compat_uptr_t)];
+	compat_uptr_t	reply_data_addr;
+	unsigned short	priority_window;
+	unsigned int	status;
+} __packed;
+
+static long trans_xcRB32(struct file *filp, unsigned int cmd,
+			 unsigned long arg)
+{
+	struct compat_ica_xcRB __user *uxcRB32 = compat_ptr(arg);
+	struct compat_ica_xcRB xcRB32;
+	struct ica_xcRB xcRB64;
+	long rc;
+
+	if (copy_from_user(&xcRB32, uxcRB32, sizeof(xcRB32)))
+		return -EFAULT;
+	xcRB64.agent_ID = xcRB32.agent_ID;
+	xcRB64.user_defined = xcRB32.user_defined;
+	xcRB64.request_ID = xcRB32.request_ID;
+	xcRB64.request_control_blk_length =
+		xcRB32.request_control_blk_length;
+	xcRB64.request_control_blk_addr =
+		compat_ptr(xcRB32.request_control_blk_addr);
+	xcRB64.request_data_length =
+		xcRB32.request_data_length;
+	xcRB64.request_data_address =
+		compat_ptr(xcRB32.request_data_address);
+	xcRB64.reply_control_blk_length =
+		xcRB32.reply_control_blk_length;
+	xcRB64.reply_control_blk_addr =
+		compat_ptr(xcRB32.reply_control_blk_addr);
+	xcRB64.reply_data_length = xcRB32.reply_data_length;
+	xcRB64.reply_data_addr =
+		compat_ptr(xcRB32.reply_data_addr);
+	xcRB64.priority_window = xcRB32.priority_window;
+	xcRB64.status = xcRB32.status;
+	do {
+		rc = zcrypt_send_cprb(&xcRB64);
+	} while (rc == -EAGAIN);
+	/* on failure: retry once again after a requested rescan */
+	if ((rc == -ENODEV) && (zcrypt_process_rescan()))
+		do {
+			rc = zcrypt_send_cprb(&xcRB64);
+		} while (rc == -EAGAIN);
+	xcRB32.reply_control_blk_length = xcRB64.reply_control_blk_length;
+	xcRB32.reply_data_length = xcRB64.reply_data_length;
+	xcRB32.status = xcRB64.status;
+	if (copy_to_user(uxcRB32, &xcRB32, sizeof(xcRB32)))
+		return -EFAULT;
+	return rc;
+}
+
+static long zcrypt_compat_ioctl(struct file *filp, unsigned int cmd,
+			 unsigned long arg)
+{
+	if (cmd == ICARSAMODEXPO)
+		return trans_modexpo32(filp, cmd, arg);
+	if (cmd == ICARSACRT)
+		return trans_modexpo_crt32(filp, cmd, arg);
+	if (cmd == ZSECSENDCPRB)
+		return trans_xcRB32(filp, cmd, arg);
+	return zcrypt_unlocked_ioctl(filp, cmd, arg);
+}
+#endif
+
+/*
+ * Misc device file operations.
+ */
+static const struct file_operations zcrypt_fops = {
+	.owner		= THIS_MODULE,
+	.read		= zcrypt_read,
+	.write		= zcrypt_write,
+	.unlocked_ioctl	= zcrypt_unlocked_ioctl,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl	= zcrypt_compat_ioctl,
+#endif
+	.open		= zcrypt_open,
+	.release	= zcrypt_release,
+	.llseek		= no_llseek,
+};
+
+/*
+ * Misc device.
+ */
+static struct miscdevice zcrypt_misc_device = {
+	.minor	    = MISC_DYNAMIC_MINOR,
+	.name	    = "z90crypt",
+	.fops	    = &zcrypt_fops,
+};
+
+static int zcrypt_rng_device_count;
+static u32 *zcrypt_rng_buffer;
+static int zcrypt_rng_buffer_index;
+static DEFINE_MUTEX(zcrypt_rng_mutex);
+
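+/*
+ * Backend for the kernel hwrng device: random data is fetched from
+ * the crypto adapters one buffer at a time and handed out one 32 bit
+ * word per data_read call until the buffer runs empty.
+ */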
+static int zcrypt_rng_data_read(struct hwrng *rng, u32 *data)
+{
+	int rc;
+
+	/*
+	 * We don't need locking here because the RNG API guarantees serialized
+	 * read method calls.
+	 */
+	if (zcrypt_rng_buffer_index == 0) {
+		rc = zcrypt_rng((char *) zcrypt_rng_buffer);
+		/* on failure: retry once again after a requested rescan */
+		if ((rc == -ENODEV) && (zcrypt_process_rescan()))
+			rc = zcrypt_rng((char *) zcrypt_rng_buffer);
+		if (rc < 0)
+			return -EIO;
+		zcrypt_rng_buffer_index = rc / sizeof(*data);
+	}
+	*data = zcrypt_rng_buffer[--zcrypt_rng_buffer_index];
+	return sizeof(*data);
+}
+
+static struct hwrng zcrypt_rng_dev = {
+	.name		= "zcrypt",
+	.data_read	= zcrypt_rng_data_read,
+	.quality	= 990,
+};
+
+int zcrypt_rng_device_add(void)
+{
+	int rc = 0;
+
+	mutex_lock(&zcrypt_rng_mutex);
+	if (zcrypt_rng_device_count == 0) {
+		zcrypt_rng_buffer = (u32 *) get_zeroed_page(GFP_KERNEL);
+		if (!zcrypt_rng_buffer) {
+			rc = -ENOMEM;
+			goto out;
+		}
+		zcrypt_rng_buffer_index = 0;
+		if (!zcrypt_hwrng_seed)
+			zcrypt_rng_dev.quality = 0;
+		rc = hwrng_register(&zcrypt_rng_dev);
+		if (rc)
+			goto out_free;
+		zcrypt_rng_device_count = 1;
+	} else {
+		zcrypt_rng_device_count++;
+	}
+	mutex_unlock(&zcrypt_rng_mutex);
+	return 0;
+
+out_free:
+	free_page((unsigned long) zcrypt_rng_buffer);
+out:
+	mutex_unlock(&zcrypt_rng_mutex);
+	return rc;
+}
+
+void zcrypt_rng_device_remove(void)
+{
+	mutex_lock(&zcrypt_rng_mutex);
+	zcrypt_rng_device_count--;
+	if (zcrypt_rng_device_count == 0) {
+		hwrng_unregister(&zcrypt_rng_dev);
+		free_page((unsigned long) zcrypt_rng_buffer);
+	}
+	mutex_unlock(&zcrypt_rng_mutex);
+}
+
+int __init zcrypt_debug_init(void)
+{
+	zcrypt_dbf_info = debug_register("zcrypt", 1, 1,
+					 DBF_MAX_SPRINTF_ARGS * sizeof(long));
+	debug_register_view(zcrypt_dbf_info, &debug_sprintf_view);
+	debug_set_level(zcrypt_dbf_info, DBF_ERR);
+
+	return 0;
+}
+
+void zcrypt_debug_exit(void)
+{
+	debug_unregister(zcrypt_dbf_info);
+}
+
+/**
+ * zcrypt_api_init(): Module initialization.
+ *
+ * The module initialization code.
+ */
+int __init zcrypt_api_init(void)
+{
+	int rc;
+
+	rc = zcrypt_debug_init();
+	if (rc)
+		goto out;
+
+	/* Register the request sprayer. */
+	rc = misc_register(&zcrypt_misc_device);
+	if (rc < 0)
+		goto out;
+
+	zcrypt_msgtype6_init();
+	zcrypt_msgtype50_init();
+	return 0;
+
+out:
+	return rc;
+}
+
+/**
+ * zcrypt_api_exit(): Module termination.
+ *
+ * The module termination code.
+ */
+void __exit zcrypt_api_exit(void)
+{
+	misc_deregister(&zcrypt_misc_device);
+	zcrypt_msgtype6_exit();
+	zcrypt_msgtype50_exit();
+	zcrypt_debug_exit();
+}
+
+module_init(zcrypt_api_init);
+module_exit(zcrypt_api_exit);
diff --git a/drivers/s390/crypto/zcrypt_api.h b/drivers/s390/crypto/zcrypt_api.h
new file mode 100644
index 0000000..a848625
--- /dev/null
+++ b/drivers/s390/crypto/zcrypt_api.h
@@ -0,0 +1,160 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ *  zcrypt 2.1.0
+ *
+ *  Copyright IBM Corp. 2001, 2012
+ *  Author(s): Robert Burroughs
+ *	       Eric Rossman (edrossma@us.ibm.com)
+ *	       Cornelia Huck <cornelia.huck@de.ibm.com>
+ *
+ *  Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com)
+ *  Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com>
+ *				  Ralph Wuerthner <rwuerthn@de.ibm.com>
+ *  MSGTYPE restruct:		  Holger Dengler <hd@linux.vnet.ibm.com>
+ */
+
+#ifndef _ZCRYPT_API_H_
+#define _ZCRYPT_API_H_
+
+#include <linux/atomic.h>
+#include <asm/debug.h>
+#include <asm/zcrypt.h>
+#include "ap_bus.h"
+
+/**
+ * device type for an actual device is one of the ZCRYPT_* values
+ * below, from PCICA up to the current CEX6 adapters
+ *
+ * NOTE: PCIXCC_MCL3 refers to a PCIXCC with May 2004 version of Licensed
+ *	 Internal Code (LIC) (EC J12220 level 29).
+ *	 PCIXCC_MCL2 refers to any LIC before this level.
+ */
+#define ZCRYPT_PCICA		1
+#define ZCRYPT_PCICC		2
+#define ZCRYPT_PCIXCC_MCL2	3
+#define ZCRYPT_PCIXCC_MCL3	4
+#define ZCRYPT_CEX2C		5
+#define ZCRYPT_CEX2A		6
+#define ZCRYPT_CEX3C		7
+#define ZCRYPT_CEX3A		8
+#define ZCRYPT_CEX4	       10
+#define ZCRYPT_CEX5	       11
+#define ZCRYPT_CEX6	       12
+
+/**
+ * Large random numbers are pulled in 4096 byte chunks from the crypto cards
+ * and stored in a page. Be careful when increasing this buffer due to size
+ * limitations for AP requests.
+ */
+#define ZCRYPT_RNG_BUFFER_SIZE	4096
+
+/*
+ * Identifier for Crypto Request Performance Index
+ */
+enum crypto_ops {
+	MEX_1K,
+	MEX_2K,
+	MEX_4K,
+	CRT_1K,
+	CRT_2K,
+	CRT_4K,
+	HWRNG,
+	SECKEY,
+	NUM_OPS
+};
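+
+/*
+ * The enum values above index the per-card speed_rating[] array; a
+ * lower rating means the adapter is faster for that operation.
+ */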
+
+struct zcrypt_queue;
+
+struct zcrypt_ops {
+	long (*rsa_modexpo)(struct zcrypt_queue *, struct ica_rsa_modexpo *);
+	long (*rsa_modexpo_crt)(struct zcrypt_queue *,
+				struct ica_rsa_modexpo_crt *);
+	long (*send_cprb)(struct zcrypt_queue *, struct ica_xcRB *,
+			  struct ap_message *);
+	long (*send_ep11_cprb)(struct zcrypt_queue *, struct ep11_urb *,
+			       struct ap_message *);
+	long (*rng)(struct zcrypt_queue *, char *, struct ap_message *);
+	struct list_head list;		/* zcrypt ops list. */
+	struct module *owner;
+	int variant;
+	char name[128];
+};
+
+struct zcrypt_card {
+	struct list_head list;		/* Device list. */
+	struct list_head zqueues;	/* List of zcrypt queues */
+	struct kref refcount;		/* device refcounting */
+	struct ap_card *card;		/* The "real" ap card device. */
+	int online;			/* User online/offline */
+
+	int user_space_type;		/* User space device id. */
+	char *type_string;		/* User space device name. */
+	int min_mod_size;		/* Min number of bits. */
+	int max_mod_size;		/* Max number of bits. */
+	int max_exp_bit_length;
+	int speed_rating[NUM_OPS];	/* Speed idx of crypto ops. */
+	atomic_t load;			/* Utilization of the crypto device */
+
+	int request_count;		/* # current requests. */
+};
+
+struct zcrypt_queue {
+	struct list_head list;		/* Device list. */
+	struct kref refcount;		/* device refcounting */
+	struct zcrypt_card *zcard;
+	struct zcrypt_ops *ops;		/* Crypto operations. */
+	struct ap_queue *queue;		/* The "real" ap queue device. */
+	int online;			/* User online/offline */
+
+	atomic_t load;			/* Utilization of the crypto device */
+
+	int request_count;		/* # current requests. */
+
+	struct ap_message reply;	/* Per-device reply structure. */
+};
+
+/* transport layer rescanning */
+extern atomic_t zcrypt_rescan_req;
+
+extern spinlock_t zcrypt_list_lock;
+extern int zcrypt_device_count;
+extern struct list_head zcrypt_card_list;
+
+#define for_each_zcrypt_card(_zc) \
+	list_for_each_entry(_zc, &zcrypt_card_list, list)
+
+#define for_each_zcrypt_queue(_zq, _zc) \
+	list_for_each_entry(_zq, &(_zc)->zqueues, list)
+
+struct zcrypt_card *zcrypt_card_alloc(void);
+void zcrypt_card_free(struct zcrypt_card *);
+void zcrypt_card_get(struct zcrypt_card *);
+int zcrypt_card_put(struct zcrypt_card *);
+int zcrypt_card_register(struct zcrypt_card *);
+void zcrypt_card_unregister(struct zcrypt_card *);
+struct zcrypt_card *zcrypt_card_get_best(unsigned int *,
+					 unsigned int, unsigned int);
+void zcrypt_card_put_best(struct zcrypt_card *, unsigned int);
+
+struct zcrypt_queue *zcrypt_queue_alloc(size_t);
+void zcrypt_queue_free(struct zcrypt_queue *);
+void zcrypt_queue_get(struct zcrypt_queue *);
+int zcrypt_queue_put(struct zcrypt_queue *);
+int zcrypt_queue_register(struct zcrypt_queue *);
+void zcrypt_queue_unregister(struct zcrypt_queue *);
+void zcrypt_queue_force_online(struct zcrypt_queue *, int);
+struct zcrypt_queue *zcrypt_queue_get_best(unsigned int, unsigned int);
+void zcrypt_queue_put_best(struct zcrypt_queue *, unsigned int);
+
+int zcrypt_rng_device_add(void);
+void zcrypt_rng_device_remove(void);
+
+void zcrypt_msgtype_register(struct zcrypt_ops *);
+void zcrypt_msgtype_unregister(struct zcrypt_ops *);
+struct zcrypt_ops *zcrypt_msgtype(unsigned char *, int);
+int zcrypt_api_init(void);
+void zcrypt_api_exit(void);
+long zcrypt_send_cprb(struct ica_xcRB *xcRB);
+void zcrypt_device_status_mask_ext(struct zcrypt_device_status_ext *devstatus);
+
+#endif /* _ZCRYPT_API_H_ */
diff --git a/drivers/s390/crypto/zcrypt_card.c b/drivers/s390/crypto/zcrypt_card.c
new file mode 100644
index 0000000..40cd4c1
--- /dev/null
+++ b/drivers/s390/crypto/zcrypt_card.c
@@ -0,0 +1,189 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ *  zcrypt 2.1.0
+ *
+ *  Copyright IBM Corp. 2001, 2012
+ *  Author(s): Robert Burroughs
+ *	       Eric Rossman (edrossma@us.ibm.com)
+ *	       Cornelia Huck <cornelia.huck@de.ibm.com>
+ *
+ *  Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com)
+ *  Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com>
+ *				  Ralph Wuerthner <rwuerthn@de.ibm.com>
+ *  MSGTYPE restruct:		  Holger Dengler <hd@linux.vnet.ibm.com>
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/miscdevice.h>
+#include <linux/fs.h>
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+#include <linux/compat.h>
+#include <linux/slab.h>
+#include <linux/atomic.h>
+#include <linux/uaccess.h>
+#include <linux/hw_random.h>
+#include <linux/debugfs.h>
+#include <asm/debug.h>
+
+#include "zcrypt_debug.h"
+#include "zcrypt_api.h"
+
+#include "zcrypt_msgtype6.h"
+#include "zcrypt_msgtype50.h"
+
+/*
+ * Device attributes common for all crypto card devices.
+ */
+
+static ssize_t type_show(struct device *dev,
+			 struct device_attribute *attr, char *buf)
+{
+	struct zcrypt_card *zc = to_ap_card(dev)->private;
+
+	return snprintf(buf, PAGE_SIZE, "%s\n", zc->type_string);
+}
+
+static DEVICE_ATTR_RO(type);
+
+static ssize_t online_show(struct device *dev,
+			   struct device_attribute *attr,
+			   char *buf)
+{
+	struct zcrypt_card *zc = to_ap_card(dev)->private;
+
+	return snprintf(buf, PAGE_SIZE, "%d\n", zc->online);
+}
+
+static ssize_t online_store(struct device *dev,
+			    struct device_attribute *attr,
+			    const char *buf, size_t count)
+{
+	struct zcrypt_card *zc = to_ap_card(dev)->private;
+	struct zcrypt_queue *zq;
+	int online, id;
+
+	if (sscanf(buf, "%d\n", &online) != 1 || online < 0 || online > 1)
+		return -EINVAL;
+
+	zc->online = online;
+	id = zc->card->id;
+
+	ZCRYPT_DBF(DBF_INFO, "card=%02x online=%d\n", id, online);
+
+	spin_lock(&zcrypt_list_lock);
+	list_for_each_entry(zq, &zc->zqueues, list)
+		zcrypt_queue_force_online(zq, online);
+	spin_unlock(&zcrypt_list_lock);
+	return count;
+}
+
+static DEVICE_ATTR_RW(online);
+
+static ssize_t load_show(struct device *dev,
+			 struct device_attribute *attr,
+			 char *buf)
+{
+	struct zcrypt_card *zc = to_ap_card(dev)->private;
+
+	return snprintf(buf, PAGE_SIZE, "%d\n", atomic_read(&zc->load));
+}
+
+static DEVICE_ATTR_RO(load);
+
+static struct attribute *zcrypt_card_attrs[] = {
+	&dev_attr_type.attr,
+	&dev_attr_online.attr,
+	&dev_attr_load.attr,
+	NULL,
+};
+
+static const struct attribute_group zcrypt_card_attr_group = {
+	.attrs = zcrypt_card_attrs,
+};
+
+struct zcrypt_card *zcrypt_card_alloc(void)
+{
+	struct zcrypt_card *zc;
+
+	zc = kzalloc(sizeof(struct zcrypt_card), GFP_KERNEL);
+	if (!zc)
+		return NULL;
+	INIT_LIST_HEAD(&zc->list);
+	INIT_LIST_HEAD(&zc->zqueues);
+	kref_init(&zc->refcount);
+	return zc;
+}
+EXPORT_SYMBOL(zcrypt_card_alloc);
+
+void zcrypt_card_free(struct zcrypt_card *zc)
+{
+	kfree(zc);
+}
+EXPORT_SYMBOL(zcrypt_card_free);
+
+static void zcrypt_card_release(struct kref *kref)
+{
+	struct zcrypt_card *zdev =
+		container_of(kref, struct zcrypt_card, refcount);
+	zcrypt_card_free(zdev);
+}
+
+void zcrypt_card_get(struct zcrypt_card *zc)
+{
+	kref_get(&zc->refcount);
+}
+EXPORT_SYMBOL(zcrypt_card_get);
+
+int zcrypt_card_put(struct zcrypt_card *zc)
+{
+	return kref_put(&zc->refcount, zcrypt_card_release);
+}
+EXPORT_SYMBOL(zcrypt_card_put);
+
+/**
+ * zcrypt_card_register() - Register a crypto card device.
+ * @zc: Pointer to a crypto card device
+ *
+ * Register a crypto card device. Returns 0 if successful.
+ */
+int zcrypt_card_register(struct zcrypt_card *zc)
+{
+	int rc;
+
+	rc = sysfs_create_group(&zc->card->ap_dev.device.kobj,
+				&zcrypt_card_attr_group);
+	if (rc)
+		return rc;
+
+	spin_lock(&zcrypt_list_lock);
+	list_add_tail(&zc->list, &zcrypt_card_list);
+	spin_unlock(&zcrypt_list_lock);
+
+	zc->online = 1;
+
+	ZCRYPT_DBF(DBF_INFO, "card=%02x register online=1\n", zc->card->id);
+
+	return rc;
+}
+EXPORT_SYMBOL(zcrypt_card_register);
+
+/**
+ * zcrypt_card_unregister(): Unregister a crypto card device.
+ * @zc: Pointer to crypto card device
+ *
+ * Unregister a crypto card device.
+ */
+void zcrypt_card_unregister(struct zcrypt_card *zc)
+{
+	ZCRYPT_DBF(DBF_INFO, "card=%02x unregister\n", zc->card->id);
+
+	spin_lock(&zcrypt_list_lock);
+	list_del_init(&zc->list);
+	spin_unlock(&zcrypt_list_lock);
+	sysfs_remove_group(&zc->card->ap_dev.device.kobj,
+			   &zcrypt_card_attr_group);
+}
+EXPORT_SYMBOL(zcrypt_card_unregister);
diff --git a/drivers/s390/crypto/zcrypt_cca_key.h b/drivers/s390/crypto/zcrypt_cca_key.h
new file mode 100644
index 0000000..e5b5c02
--- /dev/null
+++ b/drivers/s390/crypto/zcrypt_cca_key.h
@@ -0,0 +1,250 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ *  zcrypt 2.1.0
+ *
+ *  Copyright IBM Corp. 2001, 2006
+ *  Author(s): Robert Burroughs
+ *	       Eric Rossman (edrossma@us.ibm.com)
+ *
+ *  Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com)
+ *  Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com>
+ */
+
+#ifndef _ZCRYPT_CCA_KEY_H_
+#define _ZCRYPT_CCA_KEY_H_
+
+struct T6_keyBlock_hdr {
+	unsigned short blen;
+	unsigned short ulen;
+	unsigned short flags;
+};
+
+/**
+ * mapping for the cca private ME key token.
+ * Three parts of interest here: the header, the private section and
+ * the public section.
+ *
+ * mapping for the cca key token header
+ */
+struct cca_token_hdr {
+	unsigned char  token_identifier;
+	unsigned char  version;
+	unsigned short token_length;
+	unsigned char  reserved[4];
+} __packed;
+
+#define CCA_TKN_HDR_ID_EXT 0x1E
+
+#define CCA_PVT_USAGE_ALL 0x80
+
+/**
+ * mapping for the cca public section
+ * In a private key, the modulus doesn't appear in the public
+ * section, so an arbitrary public exponent of 0x010001 is used
+ * and the section length is always 0x0F.
+ */
+struct cca_public_sec {
+	unsigned char  section_identifier;
+	unsigned char  version;
+	unsigned short section_length;
+	unsigned char  reserved[2];
+	unsigned short exponent_len;
+	unsigned short modulus_bit_len;
+	unsigned short modulus_byte_len;    /* In a private key, this is 0 */
+} __packed;
+
+/**
+ * mapping for the cca private CRT key 'token'
+ * The first three parts (the only parts considered in this release)
+ * are: the header, the private section and the public section.
+ * The header and public section are the same as for the
+ * struct cca_private_ext_ME
+ *
+ * Following the structure are the quantities p, q, dp, dq, u, pad,
+ * and modulus, in that order, where pad_len is the modulo 8
+ * complement of the residue modulo 8 of the sum of
+ * (p_len + q_len + dp_len + dq_len + u_len).
+ */
+struct cca_pvt_ext_CRT_sec {
+	unsigned char  section_identifier;
+	unsigned char  version;
+	unsigned short section_length;
+	unsigned char  private_key_hash[20];
+	unsigned char  reserved1[4];
+	unsigned char  key_format;
+	unsigned char  reserved2;
+	unsigned char  key_name_hash[20];
+	unsigned char  key_use_flags[4];
+	unsigned short p_len;
+	unsigned short q_len;
+	unsigned short dp_len;
+	unsigned short dq_len;
+	unsigned short u_len;
+	unsigned short mod_len;
+	unsigned char  reserved3[4];
+	unsigned short pad_len;
+	unsigned char  reserved4[52];
+	unsigned char  confounder[8];
+} __packed;
+
+#define CCA_PVT_EXT_CRT_SEC_ID_PVT 0x08
+#define CCA_PVT_EXT_CRT_SEC_FMT_CL 0x40
+
+/**
+ * Set up private key fields of a type6 MEX message. Leading zeroes
+ * are stripped from the b_key.
+ * Note that all numerics in the key token are big-endian,
+ * while the entries in the key block header are little-endian.
+ *
+ * @mex: pointer to user input data
+ * @p: pointer to memory area for the key
+ *
+ * Returns the size of the key area or a negative errno value.
+ */
+static inline int zcrypt_type6_mex_key_en(struct ica_rsa_modexpo *mex, void *p)
+{
+	static struct cca_token_hdr static_pub_hdr = {
+		.token_identifier	=  0x1E,
+	};
+	static struct cca_public_sec static_pub_sec = {
+		.section_identifier	=  0x04,
+	};
+	struct {
+		struct T6_keyBlock_hdr t6_hdr;
+		struct cca_token_hdr pubHdr;
+		struct cca_public_sec pubSec;
+		char exponent[0];
+	} __packed *key = p;
+	unsigned char *temp;
+	int i;
+
+	/*
+	 * The inputdatalength was a selection criteria in the dispatching
+	 * function zcrypt_rsa_modexpo(). However, do a plausibility check
+	 * here to make sure the following copy_from_user() can't be utilized
+	 * to compromise the system.
+	 */
+	if (WARN_ON_ONCE(mex->inputdatalength > 512))
+		return -EINVAL;
+
+	memset(key, 0, sizeof(*key));
+
+	key->pubHdr = static_pub_hdr;
+	key->pubSec = static_pub_sec;
+
+	/* key parameter block */
+	temp = key->exponent;
+	if (copy_from_user(temp, mex->b_key, mex->inputdatalength))
+		return -EFAULT;
+	/* Strip leading zeroes from b_key. */
+	for (i = 0; i < mex->inputdatalength; i++)
+		if (temp[i])
+			break;
+	if (i >= mex->inputdatalength)
+		return -EINVAL;
+	memmove(temp, temp + i, mex->inputdatalength - i);
+	temp += mex->inputdatalength - i;
+	/* modulus */
+	if (copy_from_user(temp, mex->n_modulus, mex->inputdatalength))
+		return -EFAULT;
+
+	key->pubSec.modulus_bit_len = 8 * mex->inputdatalength;
+	key->pubSec.modulus_byte_len = mex->inputdatalength;
+	key->pubSec.exponent_len = mex->inputdatalength - i;
+	key->pubSec.section_length = sizeof(key->pubSec) +
+					2*mex->inputdatalength - i;
+	key->pubHdr.token_length =
+		key->pubSec.section_length + sizeof(key->pubHdr);
+	key->t6_hdr.ulen = key->pubHdr.token_length + 4;
+	key->t6_hdr.blen = key->pubHdr.token_length + 6;
+	return sizeof(*key) + 2*mex->inputdatalength - i;
+}
+
+/**
+ * Set up private key fields of a type6 CRT message.
+ * Note that all numerics in the key token are big-endian,
+ * while the entries in the key block header are little-endian.
+ *
+ * @crt: pointer to user input data
+ * @p: pointer to memory area for the key
+ *
+ * Returns the size of the key area or a negative errno value.
+ */
+static inline int zcrypt_type6_crt_key(struct ica_rsa_modexpo_crt *crt, void *p)
+{
+	static struct cca_public_sec static_cca_pub_sec = {
+		.section_identifier = 4,
+		.section_length = 0x000f,
+		.exponent_len = 0x0003,
+	};
+	static char pk_exponent[3] = { 0x01, 0x00, 0x01 };
+	struct {
+		struct T6_keyBlock_hdr t6_hdr;
+		struct cca_token_hdr token;
+		struct cca_pvt_ext_CRT_sec pvt;
+		char key_parts[0];
+	} __packed *key = p;
+	struct cca_public_sec *pub;
+	int short_len, long_len, pad_len, key_len, size;
+
+	/*
+	 * The inputdatalength was a selection criteria in the dispatching
+	 * function zcrypt_rsa_crt(). However, do a plausibility check
+	 * here to make sure the following copy_from_user() can't be utilized
+	 * to compromise the system.
+	 */
+	if (WARN_ON_ONCE(crt->inputdatalength > 512))
+		return -EINVAL;
+
+	memset(key, 0, sizeof(*key));
+
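+	/*
+	 * The key part holds p, q, dp, dq and u, padded to a multiple of
+	 * 8 bytes, followed by the modulus. For inputdatalength = 254,
+	 * for example, short_len = 127, long_len = 135 and
+	 * pad_len = -(3*135 + 2*127) & 7 = -659 & 7 = 5.
+	 */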
+	short_len = (crt->inputdatalength + 1) / 2;
+	long_len = short_len + 8;
+	pad_len = -(3*long_len + 2*short_len) & 7;
+	key_len = 3*long_len + 2*short_len + pad_len + crt->inputdatalength;
+	size = sizeof(*key) + key_len + sizeof(*pub) + 3;
+
+	/* parameter block.key block */
+	key->t6_hdr.blen = size;
+	key->t6_hdr.ulen = size - 2;
+
+	/* key token header */
+	key->token.token_identifier = CCA_TKN_HDR_ID_EXT;
+	key->token.token_length = size - 6;
+
+	/* private section */
+	key->pvt.section_identifier = CCA_PVT_EXT_CRT_SEC_ID_PVT;
+	key->pvt.section_length = sizeof(key->pvt) + key_len;
+	key->pvt.key_format = CCA_PVT_EXT_CRT_SEC_FMT_CL;
+	key->pvt.key_use_flags[0] = CCA_PVT_USAGE_ALL;
+	key->pvt.p_len = key->pvt.dp_len = key->pvt.u_len = long_len;
+	key->pvt.q_len = key->pvt.dq_len = short_len;
+	key->pvt.mod_len = crt->inputdatalength;
+	key->pvt.pad_len = pad_len;
+
+	/* key parts */
+	if (copy_from_user(key->key_parts, crt->np_prime, long_len) ||
+	    copy_from_user(key->key_parts + long_len,
+					crt->nq_prime, short_len) ||
+	    copy_from_user(key->key_parts + long_len + short_len,
+					crt->bp_key, long_len) ||
+	    copy_from_user(key->key_parts + 2*long_len + short_len,
+					crt->bq_key, short_len) ||
+	    copy_from_user(key->key_parts + 2*long_len + 2*short_len,
+					crt->u_mult_inv, long_len))
+		return -EFAULT;
+	memset(key->key_parts + 3*long_len + 2*short_len + pad_len,
+	       0xff, crt->inputdatalength);
+	pub = (struct cca_public_sec *)(key->key_parts + key_len);
+	*pub = static_cca_pub_sec;
+	pub->modulus_bit_len = 8 * crt->inputdatalength;
+	/*
+	 * In a private key, the modulus doesn't appear in the public
+	 * section. So, an arbitrary public exponent of 0x010001 will be
+	 * used.
+	 */
+	memcpy((char *) (pub + 1), pk_exponent, 3);
+	return size;
+}
+
+#endif /* _ZCRYPT_CCA_KEY_H_ */
diff --git a/drivers/s390/crypto/zcrypt_cex2a.c b/drivers/s390/crypto/zcrypt_cex2a.c
new file mode 100644
index 0000000..f4ae5fa
--- /dev/null
+++ b/drivers/s390/crypto/zcrypt_cex2a.c
@@ -0,0 +1,239 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ *  zcrypt 2.1.0
+ *
+ *  Copyright IBM Corp. 2001, 2012
+ *  Author(s): Robert Burroughs
+ *	       Eric Rossman (edrossma@us.ibm.com)
+ *
+ *  Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com)
+ *  Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com>
+ *				  Ralph Wuerthner <rwuerthn@de.ibm.com>
+ *  MSGTYPE restruct:		  Holger Dengler <hd@linux.vnet.ibm.com>
+ */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/atomic.h>
+#include <linux/uaccess.h>
+#include <linux/mod_devicetable.h>
+
+#include "ap_bus.h"
+#include "zcrypt_api.h"
+#include "zcrypt_error.h"
+#include "zcrypt_cex2a.h"
+#include "zcrypt_msgtype50.h"
+
+#define CEX2A_MIN_MOD_SIZE	  1	/*    8 bits	*/
+#define CEX2A_MAX_MOD_SIZE	256	/* 2048 bits	*/
+#define CEX3A_MIN_MOD_SIZE	CEX2A_MIN_MOD_SIZE
+#define CEX3A_MAX_MOD_SIZE	512	/* 4096 bits	*/
+
+#define CEX2A_MAX_MESSAGE_SIZE	0x390	/* sizeof(struct type50_crb2_msg)    */
+#define CEX2A_MAX_RESPONSE_SIZE 0x110	/* max outputdatalength + type80_hdr */
+
+#define CEX3A_MAX_RESPONSE_SIZE	0x210	/* 512 bit modulus
+					 * (max outputdatalength) +
+					 * type80_hdr */
+#define CEX3A_MAX_MESSAGE_SIZE	sizeof(struct type50_crb3_msg)
+
+#define CEX2A_CLEANUP_TIME	(15*HZ)
+#define CEX3A_CLEANUP_TIME	CEX2A_CLEANUP_TIME
+
+MODULE_AUTHOR("IBM Corporation");
+MODULE_DESCRIPTION("CEX2A Cryptographic Coprocessor device driver, " \
+		   "Copyright IBM Corp. 2001, 2012");
+MODULE_LICENSE("GPL");
+
+static struct ap_device_id zcrypt_cex2a_card_ids[] = {
+	{ .dev_type = AP_DEVICE_TYPE_CEX2A,
+	  .match_flags = AP_DEVICE_ID_MATCH_CARD_TYPE },
+	{ .dev_type = AP_DEVICE_TYPE_CEX3A,
+	  .match_flags = AP_DEVICE_ID_MATCH_CARD_TYPE },
+	{ /* end of list */ },
+};
+
+MODULE_DEVICE_TABLE(ap, zcrypt_cex2a_card_ids);
+
+static struct ap_device_id zcrypt_cex2a_queue_ids[] = {
+	{ .dev_type = AP_DEVICE_TYPE_CEX2A,
+	  .match_flags = AP_DEVICE_ID_MATCH_QUEUE_TYPE },
+	{ .dev_type = AP_DEVICE_TYPE_CEX3A,
+	  .match_flags = AP_DEVICE_ID_MATCH_QUEUE_TYPE },
+	{ /* end of list */ },
+};
+
+MODULE_DEVICE_TABLE(ap, zcrypt_cex2a_queue_ids);
+
+/**
+ * Probe function for CEX2A card devices. It always accepts the AP device
+ * since the bus_match already checked the card type.
+ * @ap_dev: pointer to the AP device.
+ */
+static int zcrypt_cex2a_card_probe(struct ap_device *ap_dev)
+{
+	/*
+	 * Normalized speed ratings per crypto adapter
+	 * MEX_1k, MEX_2k, MEX_4k, CRT_1k, CRT_2k, CRT_4k, RNG, SECKEY
+	 */
+	static const int CEX2A_SPEED_IDX[] = {
+		800, 1000, 2000,  900, 1200, 2400, 0, 0};
+	static const int CEX3A_SPEED_IDX[] = {
+		400,  500, 1000,  450,	550, 1200, 0, 0};
+
+	struct ap_card *ac = to_ap_card(&ap_dev->device);
+	struct zcrypt_card *zc;
+	int rc = 0;
+
+	zc = zcrypt_card_alloc();
+	if (!zc)
+		return -ENOMEM;
+	zc->card = ac;
+	ac->private = zc;
+
+	if (ac->ap_dev.device_type == AP_DEVICE_TYPE_CEX2A) {
+		zc->min_mod_size = CEX2A_MIN_MOD_SIZE;
+		zc->max_mod_size = CEX2A_MAX_MOD_SIZE;
+		memcpy(zc->speed_rating, CEX2A_SPEED_IDX,
+		       sizeof(CEX2A_SPEED_IDX));
+		zc->max_exp_bit_length = CEX2A_MAX_MOD_SIZE;
+		zc->type_string = "CEX2A";
+		zc->user_space_type = ZCRYPT_CEX2A;
+	} else if (ac->ap_dev.device_type == AP_DEVICE_TYPE_CEX3A) {
+		zc->min_mod_size = CEX2A_MIN_MOD_SIZE;
+		zc->max_mod_size = CEX2A_MAX_MOD_SIZE;
+		zc->max_exp_bit_length = CEX2A_MAX_MOD_SIZE;
+		if (ap_test_bit(&ac->functions, AP_FUNC_MEX4K) &&
+		    ap_test_bit(&ac->functions, AP_FUNC_CRT4K)) {
+			zc->max_mod_size = CEX3A_MAX_MOD_SIZE;
+			zc->max_exp_bit_length = CEX3A_MAX_MOD_SIZE;
+		}
+		memcpy(zc->speed_rating, CEX3A_SPEED_IDX,
+		       sizeof(CEX3A_SPEED_IDX));
+		zc->type_string = "CEX3A";
+		zc->user_space_type = ZCRYPT_CEX3A;
+	} else {
+		zcrypt_card_free(zc);
+		return -ENODEV;
+	}
+	zc->online = 1;
+
+	rc = zcrypt_card_register(zc);
+	if (rc) {
+		ac->private = NULL;
+		zcrypt_card_free(zc);
+	}
+
+	return rc;
+}
+
+/**
+ * This is called to remove the CEX2A card driver information
+ * if an AP card device is removed.
+ */
+static void zcrypt_cex2a_card_remove(struct ap_device *ap_dev)
+{
+	struct zcrypt_card *zc = to_ap_card(&ap_dev->device)->private;
+
+	if (zc)
+		zcrypt_card_unregister(zc);
+}
+
+static struct ap_driver zcrypt_cex2a_card_driver = {
+	.probe = zcrypt_cex2a_card_probe,
+	.remove = zcrypt_cex2a_card_remove,
+	.ids = zcrypt_cex2a_card_ids,
+	.flags = AP_DRIVER_FLAG_DEFAULT,
+};
+
+/**
+ * Probe function for CEX2A queue devices. It always accepts the AP device
+ * since the bus_match already checked the queue type.
+ * @ap_dev: pointer to the AP device.
+ */
+static int zcrypt_cex2a_queue_probe(struct ap_device *ap_dev)
+{
+	struct ap_queue *aq = to_ap_queue(&ap_dev->device);
+	struct zcrypt_queue *zq = NULL;
+	int rc;
+
+	switch (ap_dev->device_type) {
+	case AP_DEVICE_TYPE_CEX2A:
+		zq = zcrypt_queue_alloc(CEX2A_MAX_RESPONSE_SIZE);
+		if (!zq)
+			return -ENOMEM;
+		break;
+	case AP_DEVICE_TYPE_CEX3A:
+		zq = zcrypt_queue_alloc(CEX3A_MAX_RESPONSE_SIZE);
+		if (!zq)
+			return -ENOMEM;
+		break;
+	}
+	if (!zq)
+		return -ENODEV;
+	zq->ops = zcrypt_msgtype(MSGTYPE50_NAME, MSGTYPE50_VARIANT_DEFAULT);
+	zq->queue = aq;
+	zq->online = 1;
+	atomic_set(&zq->load, 0);
+	ap_queue_init_reply(aq, &zq->reply);
+	aq->request_timeout = CEX2A_CLEANUP_TIME;
+	aq->private = zq;
+	rc = zcrypt_queue_register(zq);
+	if (rc) {
+		aq->private = NULL;
+		zcrypt_queue_free(zq);
+	}
+
+	return rc;
+}
+
+/**
+ * This is called to remove the CEX2A queue driver information
+ * if an AP queue device is removed.
+ */
+static void zcrypt_cex2a_queue_remove(struct ap_device *ap_dev)
+{
+	struct ap_queue *aq = to_ap_queue(&ap_dev->device);
+	struct zcrypt_queue *zq = aq->private;
+
+	ap_queue_remove(aq);
+	if (zq)
+		zcrypt_queue_unregister(zq);
+}
+
+static struct ap_driver zcrypt_cex2a_queue_driver = {
+	.probe = zcrypt_cex2a_queue_probe,
+	.remove = zcrypt_cex2a_queue_remove,
+	.suspend = ap_queue_suspend,
+	.resume = ap_queue_resume,
+	.ids = zcrypt_cex2a_queue_ids,
+	.flags = AP_DRIVER_FLAG_DEFAULT,
+};
+
+int __init zcrypt_cex2a_init(void)
+{
+	int rc;
+
+	rc = ap_driver_register(&zcrypt_cex2a_card_driver,
+				THIS_MODULE, "cex2acard");
+	if (rc)
+		return rc;
+
+	rc = ap_driver_register(&zcrypt_cex2a_queue_driver,
+				THIS_MODULE, "cex2aqueue");
+	if (rc)
+		ap_driver_unregister(&zcrypt_cex2a_card_driver);
+
+	return rc;
+}
+
+void __exit zcrypt_cex2a_exit(void)
+{
+	ap_driver_unregister(&zcrypt_cex2a_queue_driver);
+	ap_driver_unregister(&zcrypt_cex2a_card_driver);
+}
+
+module_init(zcrypt_cex2a_init);
+module_exit(zcrypt_cex2a_exit);
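
The init routine above follows the usual two-stage registration with
rollback: if the queue driver fails to register, the already registered
card driver is unregistered before the error is returned. A minimal
standalone sketch of the same pattern (register_card, register_queue and
unregister_card are hypothetical stand-ins):

	int register_card(void);	/* hypothetical stage 1 */
	int register_queue(void);	/* hypothetical stage 2 */
	void unregister_card(void);

	int setup(void)
	{
		int rc;

		rc = register_card();
		if (rc)
			return rc;
		rc = register_queue();
		if (rc)
			unregister_card();	/* roll back stage 1 */
		return rc;
	}
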
diff --git a/drivers/s390/crypto/zcrypt_cex2a.h b/drivers/s390/crypto/zcrypt_cex2a.h
new file mode 100644
index 0000000..66d58bc
--- /dev/null
+++ b/drivers/s390/crypto/zcrypt_cex2a.h
@@ -0,0 +1,136 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ *  zcrypt 2.1.0
+ *
+ *  Copyright IBM Corp. 2001, 2006
+ *  Author(s): Robert Burroughs
+ *	       Eric Rossman (edrossma@us.ibm.com)
+ *
+ *  Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com)
+ *  Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com>
+ */
+
+#ifndef _ZCRYPT_CEX2A_H_
+#define _ZCRYPT_CEX2A_H_
+
+/**
+ * The type 50 message family is associated with a CEX2A card.
+ *
+ * The four members of the family are described below.
+ *
+ * Note that all unsigned char arrays are right-justified and left-padded
+ * with zeroes.
+ *
+ * Note that all reserved fields must be zeroes.
+ */
+struct type50_hdr {
+	unsigned char	reserved1;
+	unsigned char	msg_type_code;	/* 0x50 */
+	unsigned short	msg_len;
+	unsigned char	reserved2;
+	unsigned char	ignored;
+	unsigned short	reserved3;
+} __packed;
+
+#define TYPE50_TYPE_CODE	0x50
+
+#define TYPE50_MEB1_FMT		0x0001
+#define TYPE50_MEB2_FMT		0x0002
+#define TYPE50_MEB3_FMT		0x0003
+#define TYPE50_CRB1_FMT		0x0011
+#define TYPE50_CRB2_FMT		0x0012
+#define TYPE50_CRB3_FMT		0x0013
+
+/* Mod-Exp, with a small modulus */
+struct type50_meb1_msg {
+	struct type50_hdr header;
+	unsigned short	keyblock_type;	/* 0x0001 */
+	unsigned char	reserved[6];
+	unsigned char	exponent[128];
+	unsigned char	modulus[128];
+	unsigned char	message[128];
+} __packed;
+
+/* Mod-Exp, with a large modulus */
+struct type50_meb2_msg {
+	struct type50_hdr header;
+	unsigned short	keyblock_type;	/* 0x0002 */
+	unsigned char	reserved[6];
+	unsigned char	exponent[256];
+	unsigned char	modulus[256];
+	unsigned char	message[256];
+} __packed;
+
+/* Mod-Exp, with a larger modulus */
+struct type50_meb3_msg {
+	struct type50_hdr header;
+	unsigned short	keyblock_type;	/* 0x0003 */
+	unsigned char	reserved[6];
+	unsigned char	exponent[512];
+	unsigned char	modulus[512];
+	unsigned char	message[512];
+} __packed;
+
+/* CRT, with a small modulus */
+struct type50_crb1_msg {
+	struct type50_hdr header;
+	unsigned short	keyblock_type;	/* 0x0011 */
+	unsigned char	reserved[6];
+	unsigned char	p[64];
+	unsigned char	q[64];
+	unsigned char	dp[64];
+	unsigned char	dq[64];
+	unsigned char	u[64];
+	unsigned char	message[128];
+} __packed;
+
+/* CRT, with a large modulus */
+struct type50_crb2_msg {
+	struct type50_hdr header;
+	unsigned short	keyblock_type;	/* 0x0012 */
+	unsigned char	reserved[6];
+	unsigned char	p[128];
+	unsigned char	q[128];
+	unsigned char	dp[128];
+	unsigned char	dq[128];
+	unsigned char	u[128];
+	unsigned char	message[256];
+} __packed;
+
+/* CRT, with a larger modulus */
+struct type50_crb3_msg {
+	struct type50_hdr header;
+	unsigned short	keyblock_type;	/* 0x0013 */
+	unsigned char	reserved[6];
+	unsigned char	p[256];
+	unsigned char	q[256];
+	unsigned char	dp[256];
+	unsigned char	dq[256];
+	unsigned char	u[256];
+	unsigned char	message[512];
+} __packed;
+
+/**
+ * The type 80 response family is associated with a CEX2A card.
+ *
+ * Note that all unsigned char arrays are right-justified and left-padded
+ * with zeroes.
+ *
+ * Note that all reserved fields must be zeroes.
+ */
+
+#define TYPE80_RSP_CODE 0x80
+
+struct type80_hdr {
+	unsigned char	reserved1;
+	unsigned char	type;		/* 0x80 */
+	unsigned short	len;
+	unsigned char	code;		/* 0x00 */
+	unsigned char	reserved2[3];
+	unsigned char	reserved3[8];
+} __packed;
+
+int zcrypt_cex2a_init(void);
+void zcrypt_cex2a_exit(void);
+
+#endif /* _ZCRYPT_CEX2A_H_ */
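
All operand fields in the type 50 structures above are right-justified
and left-padded with zeroes. A minimal sketch of placing an operand into
such a field (helper name hypothetical, assumes len <= field_len); the
message builders in zcrypt_msgtype50.c do the same thing with pointer
arithmetic such as mod = meb1->modulus + sizeof(meb1->modulus) - mod_len:

	#include <string.h>

	static void put_right_justified(unsigned char *field, size_t field_len,
					const unsigned char *val, size_t len)
	{
		memset(field, 0, field_len - len);		/* zero pad on the left */
		memcpy(field + field_len - len, val, len);	/* value at the tail */
	}
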
diff --git a/drivers/s390/crypto/zcrypt_cex4.c b/drivers/s390/crypto/zcrypt_cex4.c
new file mode 100644
index 0000000..35d58db
--- /dev/null
+++ b/drivers/s390/crypto/zcrypt_cex4.c
@@ -0,0 +1,314 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ *  Copyright IBM Corp. 2012
+ *  Author(s): Holger Dengler <hd@linux.vnet.ibm.com>
+ */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/atomic.h>
+#include <linux/uaccess.h>
+#include <linux/mod_devicetable.h>
+
+#include "ap_bus.h"
+#include "zcrypt_api.h"
+#include "zcrypt_msgtype6.h"
+#include "zcrypt_msgtype50.h"
+#include "zcrypt_error.h"
+#include "zcrypt_cex4.h"
+
+#define CEX4A_MIN_MOD_SIZE	  1	/*    8 bits	*/
+#define CEX4A_MAX_MOD_SIZE_2K	256	/* 2048 bits	*/
+#define CEX4A_MAX_MOD_SIZE_4K	512	/* 4096 bits	*/
+
+#define CEX4C_MIN_MOD_SIZE	 16	/*  256 bits	*/
+#define CEX4C_MAX_MOD_SIZE	512	/* 4096 bits	*/
+
+#define CEX4A_MAX_MESSAGE_SIZE	MSGTYPE50_CRB3_MAX_MSG_SIZE
+#define CEX4C_MAX_MESSAGE_SIZE	MSGTYPE06_MAX_MSG_SIZE
+
+/* Waiting time for requests to be processed.
+ * Currently some request types have no deterministic processing time;
+ * the maximum time limit managed by the stomper code is set to
+ * 60 seconds, so we have to wait at least that long.
+ */
+#define CEX4_CLEANUP_TIME	(900*HZ)
+
+MODULE_AUTHOR("IBM Corporation");
+MODULE_DESCRIPTION("CEX4 Cryptographic Card device driver, " \
+		   "Copyright IBM Corp. 2012");
+MODULE_LICENSE("GPL");
+
+static struct ap_device_id zcrypt_cex4_card_ids[] = {
+	{ .dev_type = AP_DEVICE_TYPE_CEX4,
+	  .match_flags = AP_DEVICE_ID_MATCH_CARD_TYPE },
+	{ .dev_type = AP_DEVICE_TYPE_CEX5,
+	  .match_flags = AP_DEVICE_ID_MATCH_CARD_TYPE },
+	{ .dev_type = AP_DEVICE_TYPE_CEX6,
+	  .match_flags = AP_DEVICE_ID_MATCH_CARD_TYPE },
+	{ /* end of list */ },
+};
+
+MODULE_DEVICE_TABLE(ap, zcrypt_cex4_card_ids);
+
+static struct ap_device_id zcrypt_cex4_queue_ids[] = {
+	{ .dev_type = AP_DEVICE_TYPE_CEX4,
+	  .match_flags = AP_DEVICE_ID_MATCH_QUEUE_TYPE },
+	{ .dev_type = AP_DEVICE_TYPE_CEX5,
+	  .match_flags = AP_DEVICE_ID_MATCH_QUEUE_TYPE },
+	{ .dev_type = AP_DEVICE_TYPE_CEX6,
+	  .match_flags = AP_DEVICE_ID_MATCH_QUEUE_TYPE },
+	{ /* end of list */ },
+};
+
+MODULE_DEVICE_TABLE(ap, zcrypt_cex4_queue_ids);
+
+/**
+ * Probe function for CEX4 card device. It always accepts the AP device
+ * since the bus_match already checked the hardware type.
+ * @ap_dev: pointer to the AP device.
+ */
+static int zcrypt_cex4_card_probe(struct ap_device *ap_dev)
+{
+	/*
+	 * Normalized speed ratings per crypto adapter
+	 * MEX_1k, MEX_2k, MEX_4k, CRT_1k, CRT_2k, CRT_4k, RNG, SECKEY
+	 */
+	static const int CEX4A_SPEED_IDX[] = {
+		 14, 19, 249, 42, 228, 1458, 0, 0};
+	static const int CEX5A_SPEED_IDX[] = {
+		  8,  9,  20, 18,  66,	458, 0, 0};
+	static const int CEX6A_SPEED_IDX[] = {
+		  6,  9,  20, 17,  65,	438, 0, 0};
+
+	static const int CEX4C_SPEED_IDX[] = {
+		 59,  69, 308, 83, 278, 2204, 209, 40};
+	static const int CEX5C_SPEED_IDX[] = {
+		 24,  31,  50, 37,  90,  479,  27, 10};
+	static const int CEX6C_SPEED_IDX[] = {
+		 16,  20,  32, 27,  77,  455,  23,  9};
+
+	static const int CEX4P_SPEED_IDX[] = {
+		224, 313, 3560, 359, 605, 2827, 0, 50};
+	static const int CEX5P_SPEED_IDX[] = {
+		 63,  84,  156,  83, 142,  533, 0, 10};
+	static const int CEX6P_SPEED_IDX[] = {
+		 55,  70,  121,  73, 129,  522, 0,  9};
+
+	struct ap_card *ac = to_ap_card(&ap_dev->device);
+	struct zcrypt_card *zc;
+	int rc = 0;
+
+	zc = zcrypt_card_alloc();
+	if (!zc)
+		return -ENOMEM;
+	zc->card = ac;
+	ac->private = zc;
+	if (ap_test_bit(&ac->functions, AP_FUNC_ACCEL)) {
+		if (ac->ap_dev.device_type == AP_DEVICE_TYPE_CEX4) {
+			zc->type_string = "CEX4A";
+			zc->user_space_type = ZCRYPT_CEX4;
+			memcpy(zc->speed_rating, CEX4A_SPEED_IDX,
+			       sizeof(CEX4A_SPEED_IDX));
+		} else if (ac->ap_dev.device_type == AP_DEVICE_TYPE_CEX5) {
+			zc->type_string = "CEX5A";
+			zc->user_space_type = ZCRYPT_CEX5;
+			memcpy(zc->speed_rating, CEX5A_SPEED_IDX,
+			       sizeof(CEX5A_SPEED_IDX));
+		} else {
+			zc->type_string = "CEX6A";
+			zc->user_space_type = ZCRYPT_CEX6;
+			memcpy(zc->speed_rating, CEX6A_SPEED_IDX,
+			       sizeof(CEX6A_SPEED_IDX));
+		}
+		zc->min_mod_size = CEX4A_MIN_MOD_SIZE;
+		if (ap_test_bit(&ac->functions, AP_FUNC_MEX4K) &&
+		    ap_test_bit(&ac->functions, AP_FUNC_CRT4K)) {
+			zc->max_mod_size = CEX4A_MAX_MOD_SIZE_4K;
+			zc->max_exp_bit_length =
+				CEX4A_MAX_MOD_SIZE_4K;
+		} else {
+			zc->max_mod_size = CEX4A_MAX_MOD_SIZE_2K;
+			zc->max_exp_bit_length =
+				CEX4A_MAX_MOD_SIZE_2K;
+		}
+	} else if (ap_test_bit(&ac->functions, AP_FUNC_COPRO)) {
+		if (ac->ap_dev.device_type == AP_DEVICE_TYPE_CEX4) {
+			zc->type_string = "CEX4C";
+			/* The user space type should really be CEX4,
+			 * but it is kept at CEX3C for CCA compatibility.
+			 */
+			zc->user_space_type = ZCRYPT_CEX3C;
+			memcpy(zc->speed_rating, CEX4C_SPEED_IDX,
+			       sizeof(CEX4C_SPEED_IDX));
+		} else if (ac->ap_dev.device_type == AP_DEVICE_TYPE_CEX5) {
+			zc->type_string = "CEX5C";
+			/* The user space type should really be CEX5,
+			 * but it is kept at CEX3C for CCA compatibility.
+			 */
+			zc->user_space_type = ZCRYPT_CEX3C;
+			memcpy(zc->speed_rating, CEX5C_SPEED_IDX,
+			       sizeof(CEX5C_SPEED_IDX));
+		} else {
+			zc->type_string = "CEX6C";
+			/* The user space type should really be CEX6,
+			 * but it is kept at CEX3C for CCA compatibility.
+			 */
+			zc->user_space_type = ZCRYPT_CEX3C;
+			memcpy(zc->speed_rating, CEX6C_SPEED_IDX,
+			       sizeof(CEX6C_SPEED_IDX));
+		}
+		zc->min_mod_size = CEX4C_MIN_MOD_SIZE;
+		zc->max_mod_size = CEX4C_MAX_MOD_SIZE;
+		zc->max_exp_bit_length = CEX4C_MAX_MOD_SIZE;
+	} else if (ap_test_bit(&ac->functions, AP_FUNC_EP11)) {
+		if (ac->ap_dev.device_type == AP_DEVICE_TYPE_CEX4) {
+			zc->type_string = "CEX4P";
+			zc->user_space_type = ZCRYPT_CEX4;
+			memcpy(zc->speed_rating, CEX4P_SPEED_IDX,
+			       sizeof(CEX4P_SPEED_IDX));
+		} else if (ac->ap_dev.device_type == AP_DEVICE_TYPE_CEX5) {
+			zc->type_string = "CEX5P";
+			zc->user_space_type = ZCRYPT_CEX5;
+			memcpy(zc->speed_rating, CEX5P_SPEED_IDX,
+			       sizeof(CEX5P_SPEED_IDX));
+		} else {
+			zc->type_string = "CEX6P";
+			zc->user_space_type = ZCRYPT_CEX6;
+			memcpy(zc->speed_rating, CEX6P_SPEED_IDX,
+			       sizeof(CEX6P_SPEED_IDX));
+		}
+		zc->min_mod_size = CEX4C_MIN_MOD_SIZE;
+		zc->max_mod_size = CEX4C_MAX_MOD_SIZE;
+		zc->max_exp_bit_length = CEX4C_MAX_MOD_SIZE;
+	} else {
+		zcrypt_card_free(zc);
+		return -ENODEV;
+	}
+	zc->online = 1;
+
+	rc = zcrypt_card_register(zc);
+	if (rc) {
+		ac->private = NULL;
+		zcrypt_card_free(zc);
+	}
+
+	return rc;
+}
+
+/**
+ * This is called to remove the CEX4 card driver information
+ * if an AP card device is removed.
+ */
+static void zcrypt_cex4_card_remove(struct ap_device *ap_dev)
+{
+	struct zcrypt_card *zc = to_ap_card(&ap_dev->device)->private;
+
+	if (zc)
+		zcrypt_card_unregister(zc);
+}
+
+static struct ap_driver zcrypt_cex4_card_driver = {
+	.probe = zcrypt_cex4_card_probe,
+	.remove = zcrypt_cex4_card_remove,
+	.ids = zcrypt_cex4_card_ids,
+	.flags = AP_DRIVER_FLAG_DEFAULT,
+};
+
+/**
+ * Probe function for CEX4 queue device. It always accepts the AP device
+ * since the bus_match already checked the hardware type.
+ * @ap_dev: pointer to the AP device.
+ */
+static int zcrypt_cex4_queue_probe(struct ap_device *ap_dev)
+{
+	struct ap_queue *aq = to_ap_queue(&ap_dev->device);
+	struct zcrypt_queue *zq;
+	int rc;
+
+	if (ap_test_bit(&aq->card->functions, AP_FUNC_ACCEL)) {
+		zq = zcrypt_queue_alloc(CEX4A_MAX_MESSAGE_SIZE);
+		if (!zq)
+			return -ENOMEM;
+		zq->ops = zcrypt_msgtype(MSGTYPE50_NAME,
+					 MSGTYPE50_VARIANT_DEFAULT);
+	} else if (ap_test_bit(&aq->card->functions, AP_FUNC_COPRO)) {
+		zq = zcrypt_queue_alloc(CEX4C_MAX_MESSAGE_SIZE);
+		if (!zq)
+			return -ENOMEM;
+		zq->ops = zcrypt_msgtype(MSGTYPE06_NAME,
+					 MSGTYPE06_VARIANT_DEFAULT);
+	} else if (ap_test_bit(&aq->card->functions, AP_FUNC_EP11)) {
+		zq = zcrypt_queue_alloc(CEX4C_MAX_MESSAGE_SIZE);
+		if (!zq)
+			return -ENOMEM;
+		zq->ops = zcrypt_msgtype(MSGTYPE06_NAME,
+					 MSGTYPE06_VARIANT_EP11);
+	} else {
+		return -ENODEV;
+	}
+	zq->queue = aq;
+	zq->online = 1;
+	atomic_set(&zq->load, 0);
+	ap_queue_init_reply(aq, &zq->reply);
+	aq->request_timeout = CEX4_CLEANUP_TIME;
+	aq->private = zq;
+	rc = zcrypt_queue_register(zq);
+	if (rc) {
+		aq->private = NULL;
+		zcrypt_queue_free(zq);
+	}
+
+	return rc;
+}
+
+/**
+ * This is called to remove the CEX4 queue driver information
+ * if an AP queue device is removed.
+ */
+static void zcrypt_cex4_queue_remove(struct ap_device *ap_dev)
+{
+	struct ap_queue *aq = to_ap_queue(&ap_dev->device);
+	struct zcrypt_queue *zq = aq->private;
+
+	ap_queue_remove(aq);
+	if (zq)
+		zcrypt_queue_unregister(zq);
+}
+
+static struct ap_driver zcrypt_cex4_queue_driver = {
+	.probe = zcrypt_cex4_queue_probe,
+	.remove = zcrypt_cex4_queue_remove,
+	.suspend = ap_queue_suspend,
+	.resume = ap_queue_resume,
+	.ids = zcrypt_cex4_queue_ids,
+	.flags = AP_DRIVER_FLAG_DEFAULT,
+};
+
+int __init zcrypt_cex4_init(void)
+{
+	int rc;
+
+	rc = ap_driver_register(&zcrypt_cex4_card_driver,
+				THIS_MODULE, "cex4card");
+	if (rc)
+		return rc;
+
+	rc = ap_driver_register(&zcrypt_cex4_queue_driver,
+				THIS_MODULE, "cex4queue");
+	if (rc)
+		ap_driver_unregister(&zcrypt_cex4_card_driver);
+
+	return rc;
+}
+
+void __exit zcrypt_cex4_exit(void)
+{
+	ap_driver_unregister(&zcrypt_cex4_queue_driver);
+	ap_driver_unregister(&zcrypt_cex4_card_driver);
+}
+
+module_init(zcrypt_cex4_init);
+module_exit(zcrypt_cex4_exit);
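
The queue probe above picks the message format from the card's function
mask: accelerator (ACCEL) queues speak message type 50, while coprocessor
(COPRO) and EP11 queues speak message type 6. A standalone sketch of that
mapping:

	enum cex4_mode { CEX4_ACCEL, CEX4_COPRO, CEX4_EP11 };
	enum cex4_msgtype { CEX4_MSGTYPE50, CEX4_MSGTYPE06 };

	/* Which request format a CEX4/CEX5/CEX6 queue will use. */
	static enum cex4_msgtype cex4_msgtype_for(enum cex4_mode mode)
	{
		return mode == CEX4_ACCEL ? CEX4_MSGTYPE50 : CEX4_MSGTYPE06;
	}
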
diff --git a/drivers/s390/crypto/zcrypt_cex4.h b/drivers/s390/crypto/zcrypt_cex4.h
new file mode 100644
index 0000000..748390a
--- /dev/null
+++ b/drivers/s390/crypto/zcrypt_cex4.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ *  Copyright IBM Corp. 2012
+ *  Author(s): Holger Dengler <hd@linux.vnet.ibm.com>
+ */
+
+#ifndef _ZCRYPT_CEX4_H_
+#define _ZCRYPT_CEX4_H_
+
+int zcrypt_cex4_init(void);
+void zcrypt_cex4_exit(void);
+
+#endif /* _ZCRYPT_CEX4_H_ */
diff --git a/drivers/s390/crypto/zcrypt_debug.h b/drivers/s390/crypto/zcrypt_debug.h
new file mode 100644
index 0000000..241dbb5
--- /dev/null
+++ b/drivers/s390/crypto/zcrypt_debug.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ *  Copyright IBM Corp. 2016
+ *  Author(s): Holger Dengler (hd@linux.vnet.ibm.com)
+ *	       Harald Freudenberger <freude@de.ibm.com>
+ */
+#ifndef ZCRYPT_DEBUG_H
+#define ZCRYPT_DEBUG_H
+
+#include <asm/debug.h>
+
+#define DBF_ERR		3	/* error conditions   */
+#define DBF_WARN	4	/* warning conditions */
+#define DBF_INFO	5	/* informational      */
+#define DBF_DEBUG	6	/* for debugging only */
+
+#define RC2ERR(rc) ((rc) ? DBF_ERR : DBF_INFO)
+#define RC2WARN(rc) ((rc) ? DBF_WARN : DBF_INFO)
+
+#define DBF_MAX_SPRINTF_ARGS 5
+
+#define ZCRYPT_DBF(...)					\
+	debug_sprintf_event(zcrypt_dbf_info, ##__VA_ARGS__)
+
+extern debug_info_t *zcrypt_dbf_info;
+
+int zcrypt_debug_init(void);
+void zcrypt_debug_exit(void);
+
+#endif /* ZCRYPT_DEBUG_H */
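
A typical use of the helpers above picks the debug level from a return
code (do_request() is a hypothetical placeholder, and the sketch assumes
this header is included):

	int do_request(void);	/* hypothetical request helper */

	void log_result(void)
	{
		int rc = do_request();

		/* logs at DBF_ERR when rc != 0, at DBF_INFO otherwise */
		ZCRYPT_DBF(RC2ERR(rc), "request finished, rc=%d\n", rc);
	}
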
diff --git a/drivers/s390/crypto/zcrypt_error.h b/drivers/s390/crypto/zcrypt_error.h
new file mode 100644
index 0000000..6f7ebc1
--- /dev/null
+++ b/drivers/s390/crypto/zcrypt_error.h
@@ -0,0 +1,139 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ *  zcrypt 2.1.0
+ *
+ *  Copyright IBM Corp. 2001, 2006
+ *  Author(s): Robert Burroughs
+ *	       Eric Rossman (edrossma@us.ibm.com)
+ *
+ *  Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com)
+ *  Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com>
+ */
+
+#ifndef _ZCRYPT_ERROR_H_
+#define _ZCRYPT_ERROR_H_
+
+#include <linux/atomic.h>
+#include "zcrypt_debug.h"
+#include "zcrypt_api.h"
+
+/**
+ * Reply Messages
+ *
+ * Error reply messages are of two types:
+ *    82:  Error (see below)
+ *    88:  Error (see below)
+ * Both type 82 and type 88 have the same structure in the header.
+ *
+ * Request reply messages are of three known types:
+ *    80:  Reply from a Type 50 Request (see CEX2A-RELATED STRUCTS)
+ *    84:  Reply from a Type 4 Request (see PCICA-RELATED STRUCTS)
+ *    86:  Reply from a Type 6 Request (see PCICC/PCIXCC/CEX2C-RELATED STRUCTS)
+ *
+ */
+struct error_hdr {
+	unsigned char reserved1;	/* 0x00			*/
+	unsigned char type;		/* 0x82 or 0x88		*/
+	unsigned char reserved2[2];	/* 0x0000		*/
+	unsigned char reply_code;	/* reply code		*/
+	unsigned char reserved3[3];	/* 0x000000		*/
+};
+
+#define TYPE82_RSP_CODE 0x82
+#define TYPE88_RSP_CODE 0x88
+
+#define REP82_ERROR_MACHINE_FAILURE	    0x10
+#define REP82_ERROR_PREEMPT_FAILURE	    0x12
+#define REP82_ERROR_CHECKPT_FAILURE	    0x14
+#define REP82_ERROR_MESSAGE_TYPE	    0x20
+#define REP82_ERROR_INVALID_COMM_CD	    0x21 /* Type 84	*/
+#define REP82_ERROR_INVALID_MSG_LEN	    0x23
+#define REP82_ERROR_RESERVD_FIELD	    0x24 /* was 0x50	*/
+#define REP82_ERROR_FORMAT_FIELD	    0x29
+#define REP82_ERROR_INVALID_COMMAND	    0x30
+#define REP82_ERROR_MALFORMED_MSG	    0x40
+#define REP82_ERROR_INVALID_DOMAIN_PRECHECK 0x42
+#define REP82_ERROR_RESERVED_FIELDO	    0x50 /* old value	*/
+#define REP82_ERROR_WORD_ALIGNMENT	    0x60
+#define REP82_ERROR_MESSAGE_LENGTH	    0x80
+#define REP82_ERROR_OPERAND_INVALID	    0x82
+#define REP82_ERROR_OPERAND_SIZE	    0x84
+#define REP82_ERROR_EVEN_MOD_IN_OPND	    0x85
+#define REP82_ERROR_RESERVED_FIELD	    0x88
+#define REP82_ERROR_INVALID_DOMAIN_PENDING  0x8A
+#define REP82_ERROR_TRANSPORT_FAIL	    0x90
+#define REP82_ERROR_PACKET_TRUNCATED	    0xA0
+#define REP82_ERROR_ZERO_BUFFER_LEN	    0xB0
+
+#define REP88_ERROR_MODULE_FAILURE	    0x10
+
+#define REP88_ERROR_MESSAGE_TYPE	    0x20
+#define REP88_ERROR_MESSAGE_MALFORMD	    0x22
+#define REP88_ERROR_MESSAGE_LENGTH	    0x23
+#define REP88_ERROR_RESERVED_FIELD	    0x24
+#define REP88_ERROR_KEY_TYPE		    0x34
+#define REP88_ERROR_INVALID_KEY	    0x82 /* CEX2A	*/
+#define REP88_ERROR_OPERAND		    0x84 /* CEX2A	*/
+#define REP88_ERROR_OPERAND_EVEN_MOD	    0x85 /* CEX2A	*/
+
+static inline int convert_error(struct zcrypt_queue *zq,
+				struct ap_message *reply)
+{
+	struct error_hdr *ehdr = reply->message;
+	int card = AP_QID_CARD(zq->queue->qid);
+	int queue = AP_QID_QUEUE(zq->queue->qid);
+
+	switch (ehdr->reply_code) {
+	case REP82_ERROR_OPERAND_INVALID:
+	case REP82_ERROR_OPERAND_SIZE:
+	case REP82_ERROR_EVEN_MOD_IN_OPND:
+	case REP88_ERROR_MESSAGE_MALFORMD:
+	case REP82_ERROR_INVALID_DOMAIN_PRECHECK:
+	case REP82_ERROR_INVALID_DOMAIN_PENDING:
+	//   REP88_ERROR_INVALID_KEY		// '82' CEX2A
+	//   REP88_ERROR_OPERAND		// '84' CEX2A
+	//   REP88_ERROR_OPERAND_EVEN_MOD	// '85' CEX2A
+		/* Invalid input data. */
+		ZCRYPT_DBF(DBF_WARN,
+			   "device=%02x.%04x reply=0x%02x => rc=EINVAL\n",
+			   card, queue, ehdr->reply_code);
+		return -EINVAL;
+	case REP82_ERROR_MESSAGE_TYPE:
+	//   REP88_ERROR_MESSAGE_TYPE		// '20' CEX2A
+		/*
+		 * Sending a message of the wrong type is a bug in the
+		 * device driver. Log an error message, disable the
+		 * device and then repeat the request.
+		 */
+		atomic_set(&zcrypt_rescan_req, 1);
+		zq->online = 0;
+		pr_err("Cryptographic device %02x.%04x failed and was set offline\n",
+		       card, queue);
+		ZCRYPT_DBF(DBF_ERR,
+			   "device=%02x.%04x reply=0x%02x => online=0 rc=EAGAIN\n",
+			   card, queue, ehdr->reply_code);
+		return -EAGAIN;
+	case REP82_ERROR_TRANSPORT_FAIL:
+	case REP82_ERROR_MACHINE_FAILURE:
+	//   REP88_ERROR_MODULE_FAILURE		// '10' CEX2A
+		/* If a card fails disable it and repeat the request. */
+		atomic_set(&zcrypt_rescan_req, 1);
+		zq->online = 0;
+		pr_err("Cryptographic device %02x.%04x failed and was set offline\n",
+		       card, queue);
+		ZCRYPT_DBF(DBF_ERR,
+			   "device=%02x.%04x reply=0x%02x => online=0 rc=EAGAIN\n",
+			   card, queue, ehdr->reply_code);
+		return -EAGAIN;
+	default:
+		zq->online = 0;
+		pr_err("Cryptographic device %02x.%04x failed and was set offline\n",
+		       card, queue);
+		ZCRYPT_DBF(DBF_ERR,
+			   "device=%02x.%04x reply=0x%02x => online=0 rc=EAGAIN\n",
+			   card, queue, ehdr->reply_code);
+		return -EAGAIN;	/* repeat the request on a different device. */
+	}
+}
+
+#endif /* _ZCRYPT_ERROR_H_ */
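
The first dispatch on a raw reply is on the type byte of this common
header; types 0x82 and 0x88 are then funneled into convert_error(). A
minimal sketch using the definitions above (assumes buf points at a
complete reply header):

	static int reply_is_error(const unsigned char *buf)
	{
		const struct error_hdr *ehdr = (const struct error_hdr *)buf;

		/* both error reply types share the error_hdr layout */
		return ehdr->type == TYPE82_RSP_CODE ||
		       ehdr->type == TYPE88_RSP_CODE;
	}
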
diff --git a/drivers/s390/crypto/zcrypt_msgtype50.c b/drivers/s390/crypto/zcrypt_msgtype50.c
new file mode 100644
index 0000000..f159662
--- /dev/null
+++ b/drivers/s390/crypto/zcrypt_msgtype50.c
@@ -0,0 +1,560 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ *  zcrypt 2.1.0
+ *
+ *  Copyright IBM Corp. 2001, 2012
+ *  Author(s): Robert Burroughs
+ *	       Eric Rossman (edrossma@us.ibm.com)
+ *
+ *  Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com)
+ *  Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com>
+ *				  Ralph Wuerthner <rwuerthn@de.ibm.com>
+ *  MSGTYPE restruct:		  Holger Dengler <hd@linux.vnet.ibm.com>
+ */
+
+#define KMSG_COMPONENT "zcrypt"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/atomic.h>
+#include <linux/uaccess.h>
+
+#include "ap_bus.h"
+#include "zcrypt_api.h"
+#include "zcrypt_error.h"
+#include "zcrypt_msgtype50.h"
+
+/* 4096 bits */
+#define CEX3A_MAX_MOD_SIZE 512
+
+/* max outputdatalength + type80_hdr */
+#define CEX2A_MAX_RESPONSE_SIZE 0x110
+
+/* 512 bit modulus, (max outputdatalength) + type80_hdr */
+#define CEX3A_MAX_RESPONSE_SIZE 0x210
+
+MODULE_AUTHOR("IBM Corporation");
+MODULE_DESCRIPTION("Cryptographic Accelerator (message type 50), " \
+		   "Copyright IBM Corp. 2001, 2012");
+MODULE_LICENSE("GPL");
+
+/**
+ * The type 50 message family is associated with a CEX2A card.
+ *
+ * The four members of the family are described below.
+ *
+ * Note that all unsigned char arrays are right-justified and left-padded
+ * with zeroes.
+ *
+ * Note that all reserved fields must be zeroes.
+ */
+struct type50_hdr {
+	unsigned char	reserved1;
+	unsigned char	msg_type_code;	/* 0x50 */
+	unsigned short	msg_len;
+	unsigned char	reserved2;
+	unsigned char	ignored;
+	unsigned short	reserved3;
+} __packed;
+
+#define TYPE50_TYPE_CODE	0x50
+
+#define TYPE50_MEB1_FMT		0x0001
+#define TYPE50_MEB2_FMT		0x0002
+#define TYPE50_MEB3_FMT		0x0003
+#define TYPE50_CRB1_FMT		0x0011
+#define TYPE50_CRB2_FMT		0x0012
+#define TYPE50_CRB3_FMT		0x0013
+
+/* Mod-Exp, with a small modulus */
+struct type50_meb1_msg {
+	struct type50_hdr header;
+	unsigned short	keyblock_type;	/* 0x0001 */
+	unsigned char	reserved[6];
+	unsigned char	exponent[128];
+	unsigned char	modulus[128];
+	unsigned char	message[128];
+} __packed;
+
+/* Mod-Exp, with a large modulus */
+struct type50_meb2_msg {
+	struct type50_hdr header;
+	unsigned short	keyblock_type;	/* 0x0002 */
+	unsigned char	reserved[6];
+	unsigned char	exponent[256];
+	unsigned char	modulus[256];
+	unsigned char	message[256];
+} __packed;
+
+/* Mod-Exp, with a larger modulus */
+struct type50_meb3_msg {
+	struct type50_hdr header;
+	unsigned short	keyblock_type;	/* 0x0003 */
+	unsigned char	reserved[6];
+	unsigned char	exponent[512];
+	unsigned char	modulus[512];
+	unsigned char	message[512];
+} __packed;
+
+/* CRT, with a small modulus */
+struct type50_crb1_msg {
+	struct type50_hdr header;
+	unsigned short	keyblock_type;	/* 0x0011 */
+	unsigned char	reserved[6];
+	unsigned char	p[64];
+	unsigned char	q[64];
+	unsigned char	dp[64];
+	unsigned char	dq[64];
+	unsigned char	u[64];
+	unsigned char	message[128];
+} __packed;
+
+/* CRT, with a large modulus */
+struct type50_crb2_msg {
+	struct type50_hdr header;
+	unsigned short	keyblock_type;	/* 0x0012 */
+	unsigned char	reserved[6];
+	unsigned char	p[128];
+	unsigned char	q[128];
+	unsigned char	dp[128];
+	unsigned char	dq[128];
+	unsigned char	u[128];
+	unsigned char	message[256];
+} __packed;
+
+/* CRT, with a larger modulus */
+struct type50_crb3_msg {
+	struct type50_hdr header;
+	unsigned short	keyblock_type;	/* 0x0013 */
+	unsigned char	reserved[6];
+	unsigned char	p[256];
+	unsigned char	q[256];
+	unsigned char	dp[256];
+	unsigned char	dq[256];
+	unsigned char	u[256];
+	unsigned char	message[512];
+} __packed;
+
+/**
+ * The type 80 response family is associated with a CEX2A card.
+ *
+ * Note that all unsigned char arrays are right-justified and left-padded
+ * with zeroes.
+ *
+ * Note that all reserved fields must be zeroes.
+ */
+
+#define TYPE80_RSP_CODE 0x80
+
+struct type80_hdr {
+	unsigned char	reserved1;
+	unsigned char	type;		/* 0x80 */
+	unsigned short	len;
+	unsigned char	code;		/* 0x00 */
+	unsigned char	reserved2[3];
+	unsigned char	reserved3[8];
+} __packed;
+
+unsigned int get_rsa_modex_fc(struct ica_rsa_modexpo *mex, int *fcode)
+{
+
+	if (!mex->inputdatalength)
+		return -EINVAL;
+
+	if (mex->inputdatalength <= 128)	/* 1024 bit */
+		*fcode = MEX_1K;
+	else if (mex->inputdatalength <= 256)	/* 2048 bit */
+		*fcode = MEX_2K;
+	else					/* 4096 bit */
+		*fcode = MEX_4K;
+
+	return 0;
+}
+
+unsigned int get_rsa_crt_fc(struct ica_rsa_modexpo_crt *crt, int *fcode)
+{
+
+	if (!crt->inputdatalength)
+		return -EINVAL;
+
+	if (crt->inputdatalength <= 128)	/* 1024 bit */
+		*fcode = CRT_1K;
+	else if (crt->inputdatalength <= 256)	/* 2048 bit */
+		*fcode = CRT_2K;
+	else					/* 4096 bit */
+		*fcode = CRT_4K;
+
+	return 0;
+}
+
+/**
+ * Convert an ICAMEX message to a type50 MEX message.
+ *
+ * @zq: crypto queue pointer
+ * @ap_msg: crypto request pointer
+ * @mex: pointer to user input data
+ *
+ * Returns 0 on success, -EFAULT or -EINVAL on failure.
+ */
+static int ICAMEX_msg_to_type50MEX_msg(struct zcrypt_queue *zq,
+				       struct ap_message *ap_msg,
+				       struct ica_rsa_modexpo *mex)
+{
+	unsigned char *mod, *exp, *inp;
+	int mod_len;
+
+	mod_len = mex->inputdatalength;
+
+	if (mod_len <= 128) {
+		struct type50_meb1_msg *meb1 = ap_msg->message;
+
+		memset(meb1, 0, sizeof(*meb1));
+		ap_msg->length = sizeof(*meb1);
+		meb1->header.msg_type_code = TYPE50_TYPE_CODE;
+		meb1->header.msg_len = sizeof(*meb1);
+		meb1->keyblock_type = TYPE50_MEB1_FMT;
+		mod = meb1->modulus + sizeof(meb1->modulus) - mod_len;
+		exp = meb1->exponent + sizeof(meb1->exponent) - mod_len;
+		inp = meb1->message + sizeof(meb1->message) - mod_len;
+	} else if (mod_len <= 256) {
+		struct type50_meb2_msg *meb2 = ap_msg->message;
+
+		memset(meb2, 0, sizeof(*meb2));
+		ap_msg->length = sizeof(*meb2);
+		meb2->header.msg_type_code = TYPE50_TYPE_CODE;
+		meb2->header.msg_len = sizeof(*meb2);
+		meb2->keyblock_type = TYPE50_MEB2_FMT;
+		mod = meb2->modulus + sizeof(meb2->modulus) - mod_len;
+		exp = meb2->exponent + sizeof(meb2->exponent) - mod_len;
+		inp = meb2->message + sizeof(meb2->message) - mod_len;
+	} else if (mod_len <= 512) {
+		struct type50_meb3_msg *meb3 = ap_msg->message;
+
+		memset(meb3, 0, sizeof(*meb3));
+		ap_msg->length = sizeof(*meb3);
+		meb3->header.msg_type_code = TYPE50_TYPE_CODE;
+		meb3->header.msg_len = sizeof(*meb3);
+		meb3->keyblock_type = TYPE50_MEB3_FMT;
+		mod = meb3->modulus + sizeof(meb3->modulus) - mod_len;
+		exp = meb3->exponent + sizeof(meb3->exponent) - mod_len;
+		inp = meb3->message + sizeof(meb3->message) - mod_len;
+	} else
+		return -EINVAL;
+
+	if (copy_from_user(mod, mex->n_modulus, mod_len) ||
+	    copy_from_user(exp, mex->b_key, mod_len) ||
+	    copy_from_user(inp, mex->inputdata, mod_len))
+		return -EFAULT;
+	return 0;
+}
+
+/**
+ * Convert an ICACRT message to a type50 CRT message.
+ *
+ * @zq: crypto queue pointer
+ * @ap_msg: crypto request pointer
+ * @crt: pointer to user input data
+ *
+ * Returns 0 on success, -EFAULT or -EINVAL on failure.
+ */
+static int ICACRT_msg_to_type50CRT_msg(struct zcrypt_queue *zq,
+				       struct ap_message *ap_msg,
+				       struct ica_rsa_modexpo_crt *crt)
+{
+	int mod_len, short_len;
+	unsigned char *p, *q, *dp, *dq, *u, *inp;
+
+	mod_len = crt->inputdatalength;
+	short_len = (mod_len + 1) / 2;
+
+	/*
+	 * CEX2A and CEX3A w/o FW update can handle requests up to
+	 * 256 byte modulus (2k keys).
+	 * CEX3A with FW update and CEX4A cards are able to handle
+	 * 512 byte modulus (4k keys).
+	 */
+	if (mod_len <= 128) {		/* up to 1024 bit key size */
+		struct type50_crb1_msg *crb1 = ap_msg->message;
+
+		memset(crb1, 0, sizeof(*crb1));
+		ap_msg->length = sizeof(*crb1);
+		crb1->header.msg_type_code = TYPE50_TYPE_CODE;
+		crb1->header.msg_len = sizeof(*crb1);
+		crb1->keyblock_type = TYPE50_CRB1_FMT;
+		p = crb1->p + sizeof(crb1->p) - short_len;
+		q = crb1->q + sizeof(crb1->q) - short_len;
+		dp = crb1->dp + sizeof(crb1->dp) - short_len;
+		dq = crb1->dq + sizeof(crb1->dq) - short_len;
+		u = crb1->u + sizeof(crb1->u) - short_len;
+		inp = crb1->message + sizeof(crb1->message) - mod_len;
+	} else if (mod_len <= 256) {	/* up to 2048 bit key size */
+		struct type50_crb2_msg *crb2 = ap_msg->message;
+
+		memset(crb2, 0, sizeof(*crb2));
+		ap_msg->length = sizeof(*crb2);
+		crb2->header.msg_type_code = TYPE50_TYPE_CODE;
+		crb2->header.msg_len = sizeof(*crb2);
+		crb2->keyblock_type = TYPE50_CRB2_FMT;
+		p = crb2->p + sizeof(crb2->p) - short_len;
+		q = crb2->q + sizeof(crb2->q) - short_len;
+		dp = crb2->dp + sizeof(crb2->dp) - short_len;
+		dq = crb2->dq + sizeof(crb2->dq) - short_len;
+		u = crb2->u + sizeof(crb2->u) - short_len;
+		inp = crb2->message + sizeof(crb2->message) - mod_len;
+	} else if ((mod_len <= 512) &&	/* up to 4096 bit key size */
+		   (zq->zcard->max_mod_size == CEX3A_MAX_MOD_SIZE)) {
+		struct type50_crb3_msg *crb3 = ap_msg->message;
+
+		memset(crb3, 0, sizeof(*crb3));
+		ap_msg->length = sizeof(*crb3);
+		crb3->header.msg_type_code = TYPE50_TYPE_CODE;
+		crb3->header.msg_len = sizeof(*crb3);
+		crb3->keyblock_type = TYPE50_CRB3_FMT;
+		p = crb3->p + sizeof(crb3->p) - short_len;
+		q = crb3->q + sizeof(crb3->q) - short_len;
+		dp = crb3->dp + sizeof(crb3->dp) - short_len;
+		dq = crb3->dq + sizeof(crb3->dq) - short_len;
+		u = crb3->u + sizeof(crb3->u) - short_len;
+		inp = crb3->message + sizeof(crb3->message) - mod_len;
+	} else
+		return -EINVAL;
+
+	/*
+	 * Correct the offsets of p, bp and mult_inv according to zcrypt.h:
+	 * the fields are block-size right-aligned, so skip the leading
+	 * MSGTYPE_ADJUSTMENT pad bytes.
+	 */
+	if (copy_from_user(p, crt->np_prime + MSGTYPE_ADJUSTMENT, short_len) ||
+	    copy_from_user(q, crt->nq_prime, short_len) ||
+	    copy_from_user(dp, crt->bp_key + MSGTYPE_ADJUSTMENT, short_len) ||
+	    copy_from_user(dq, crt->bq_key, short_len) ||
+	    copy_from_user(u, crt->u_mult_inv + MSGTYPE_ADJUSTMENT, short_len) ||
+	    copy_from_user(inp, crt->inputdata, mod_len))
+		return -EFAULT;
+
+	return 0;
+}
+
+/**
+ * Copy results from a type 80 reply message back to user space.
+ *
+ * @zq: crypto device pointer
+ * @reply: reply AP message.
+ * @outputdata: pointer to user output data
+ * @outputdatalength: size of user output data
+ *
+ * Returns 0 on success, -EFAULT or -EAGAIN on failure.
+ */
+static int convert_type80(struct zcrypt_queue *zq,
+			  struct ap_message *reply,
+			  char __user *outputdata,
+			  unsigned int outputdatalength)
+{
+	struct type80_hdr *t80h = reply->message;
+	unsigned char *data;
+
+	if (t80h->len < sizeof(*t80h) + outputdatalength) {
+		/* The result is too short; the CEX2A card must not do that. */
+		zq->online = 0;
+		pr_err("Cryptographic device %02x.%04x failed and was set offline\n",
+		       AP_QID_CARD(zq->queue->qid),
+		       AP_QID_QUEUE(zq->queue->qid));
+		ZCRYPT_DBF(DBF_ERR,
+			   "device=%02x.%04x code=0x%02x => online=0 rc=EAGAIN\n",
+			   AP_QID_CARD(zq->queue->qid),
+			   AP_QID_QUEUE(zq->queue->qid),
+			   t80h->code);
+		return -EAGAIN;	/* repeat the request on a different device. */
+	}
+	if (zq->zcard->user_space_type == ZCRYPT_CEX2A)
+		BUG_ON(t80h->len > CEX2A_MAX_RESPONSE_SIZE);
+	else
+		BUG_ON(t80h->len > CEX3A_MAX_RESPONSE_SIZE);
+	data = reply->message + t80h->len - outputdatalength;
+	if (copy_to_user(outputdata, data, outputdatalength))
+		return -EFAULT;
+	return 0;
+}
+
+static int convert_response(struct zcrypt_queue *zq,
+			    struct ap_message *reply,
+			    char __user *outputdata,
+			    unsigned int outputdatalength)
+{
+	/* Response type byte is the second byte in the response. */
+	unsigned char rtype = ((unsigned char *) reply->message)[1];
+
+	switch (rtype) {
+	case TYPE82_RSP_CODE:
+	case TYPE88_RSP_CODE:
+		return convert_error(zq, reply);
+	case TYPE80_RSP_CODE:
+		return convert_type80(zq, reply,
+				      outputdata, outputdatalength);
+	default: /* Unknown response type, this should NEVER EVER happen */
+		zq->online = 0;
+		pr_err("Cryptographic device %02x.%04x failed and was set offline\n",
+		       AP_QID_CARD(zq->queue->qid),
+		       AP_QID_QUEUE(zq->queue->qid));
+		ZCRYPT_DBF(DBF_ERR,
+			   "device=%02x.%04x rtype=0x%02x => online=0 rc=EAGAIN\n",
+			   AP_QID_CARD(zq->queue->qid),
+			   AP_QID_QUEUE(zq->queue->qid),
+			   (unsigned int) rtype);
+		return -EAGAIN;	/* repeat the request on a different device. */
+	}
+}
+
+/**
+ * This function is called from the AP bus code after a crypto request
+ * "msg" has finished with the reply message "reply".
+ * It is called from tasklet context.
+ * @aq: pointer to the AP device
+ * @msg: pointer to the AP message
+ * @reply: pointer to the AP reply message
+ */
+static void zcrypt_cex2a_receive(struct ap_queue *aq,
+				 struct ap_message *msg,
+				 struct ap_message *reply)
+{
+	static struct error_hdr error_reply = {
+		.type = TYPE82_RSP_CODE,
+		.reply_code = REP82_ERROR_MACHINE_FAILURE,
+	};
+	struct type80_hdr *t80h;
+	int length;
+
+	/* Copy the reply message to the request message buffer. */
+	if (!reply)
+		goto out;	/* msg->rc indicates the error */
+	t80h = reply->message;
+	if (t80h->type == TYPE80_RSP_CODE) {
+		if (aq->ap_dev.device_type == AP_DEVICE_TYPE_CEX2A)
+			length = min_t(int,
+				       CEX2A_MAX_RESPONSE_SIZE, t80h->len);
+		else
+			length = min_t(int,
+				       CEX3A_MAX_RESPONSE_SIZE, t80h->len);
+		memcpy(msg->message, reply->message, length);
+	} else
+		memcpy(msg->message, reply->message, sizeof(error_reply));
+out:
+	complete((struct completion *) msg->private);
+}
+
+static atomic_t zcrypt_step = ATOMIC_INIT(0);
+
+/**
+ * The request distributor calls this function if it picked the CEX2A
+ * device to handle a modexpo request.
+ * @zq: pointer to zcrypt_queue structure that identifies the
+ *	  CEX2A device to the request distributor
+ * @mex: pointer to the modexpo request buffer
+ */
+static long zcrypt_cex2a_modexpo(struct zcrypt_queue *zq,
+				 struct ica_rsa_modexpo *mex)
+{
+	struct ap_message ap_msg;
+	struct completion work;
+	int rc;
+
+	ap_init_message(&ap_msg);
+	if (zq->zcard->user_space_type == ZCRYPT_CEX2A)
+		ap_msg.message = kmalloc(MSGTYPE50_CRB2_MAX_MSG_SIZE,
+					 GFP_KERNEL);
+	else
+		ap_msg.message = kmalloc(MSGTYPE50_CRB3_MAX_MSG_SIZE,
+					 GFP_KERNEL);
+	if (!ap_msg.message)
+		return -ENOMEM;
+	ap_msg.receive = zcrypt_cex2a_receive;
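+	/*
+	 * Tag the request: the caller's pid in the high word plus a global
+	 * sequence counter, so the reply can be matched to this request.
+	 */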
+	ap_msg.psmid = (((unsigned long long) current->pid) << 32) +
+				atomic_inc_return(&zcrypt_step);
+	ap_msg.private = &work;
+	rc = ICAMEX_msg_to_type50MEX_msg(zq, &ap_msg, mex);
+	if (rc)
+		goto out_free;
+	init_completion(&work);
+	ap_queue_message(zq->queue, &ap_msg);
+	rc = wait_for_completion_interruptible(&work);
+	if (rc == 0) {
+		rc = ap_msg.rc;
+		if (rc == 0)
+			rc = convert_response(zq, &ap_msg, mex->outputdata,
+					      mex->outputdatalength);
+	} else
+		/* Signal pending. */
+		ap_cancel_message(zq->queue, &ap_msg);
+out_free:
+	kfree(ap_msg.message);
+	return rc;
+}
+
+/**
+ * The request distributor calls this function if it picked the CEX2A
+ * device to handle a modexpo_crt request.
+ * @zq: pointer to zcrypt_queue structure that identifies the
+ *	  CEX2A device to the request distributor
+ * @crt: pointer to the modexpo_crt request buffer
+ */
+static long zcrypt_cex2a_modexpo_crt(struct zcrypt_queue *zq,
+				     struct ica_rsa_modexpo_crt *crt)
+{
+	struct ap_message ap_msg;
+	struct completion work;
+	int rc;
+
+	ap_init_message(&ap_msg);
+	if (zq->zcard->user_space_type == ZCRYPT_CEX2A)
+		ap_msg.message = kmalloc(MSGTYPE50_CRB2_MAX_MSG_SIZE,
+					 GFP_KERNEL);
+	else
+		ap_msg.message = kmalloc(MSGTYPE50_CRB3_MAX_MSG_SIZE,
+					 GFP_KERNEL);
+	if (!ap_msg.message)
+		return -ENOMEM;
+	ap_msg.receive = zcrypt_cex2a_receive;
+	ap_msg.psmid = (((unsigned long long) current->pid) << 32) +
+				atomic_inc_return(&zcrypt_step);
+	ap_msg.private = &work;
+	rc = ICACRT_msg_to_type50CRT_msg(zq, &ap_msg, crt);
+	if (rc)
+		goto out_free;
+	init_completion(&work);
+	ap_queue_message(zq->queue, &ap_msg);
+	rc = wait_for_completion_interruptible(&work);
+	if (rc == 0) {
+		rc = ap_msg.rc;
+		if (rc == 0)
+			rc = convert_response(zq, &ap_msg, crt->outputdata,
+					      crt->outputdatalength);
+	} else
+		/* Signal pending. */
+		ap_cancel_message(zq->queue, &ap_msg);
+out_free:
+	kfree(ap_msg.message);
+	return rc;
+}
+
+/**
+ * The crypto operations for message type 50.
+ */
+static struct zcrypt_ops zcrypt_msgtype50_ops = {
+	.rsa_modexpo = zcrypt_cex2a_modexpo,
+	.rsa_modexpo_crt = zcrypt_cex2a_modexpo_crt,
+	.owner = THIS_MODULE,
+	.name = MSGTYPE50_NAME,
+	.variant = MSGTYPE50_VARIANT_DEFAULT,
+};
+
+void __init zcrypt_msgtype50_init(void)
+{
+	zcrypt_msgtype_register(&zcrypt_msgtype50_ops);
+}
+
+void __exit zcrypt_msgtype50_exit(void)
+{
+	zcrypt_msgtype_unregister(&zcrypt_msgtype50_ops);
+}
diff --git a/drivers/s390/crypto/zcrypt_msgtype50.h b/drivers/s390/crypto/zcrypt_msgtype50.h
new file mode 100644
index 0000000..8530f65
--- /dev/null
+++ b/drivers/s390/crypto/zcrypt_msgtype50.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ *  zcrypt 2.1.0
+ *
+ *  Copyright IBM Corp. 2001, 2012
+ *  Author(s): Robert Burroughs
+ *	       Eric Rossman (edrossma@us.ibm.com)
+ *
+ *  Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com)
+ *  Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com>
+ *  MSGTYPE restruct:		  Holger Dengler <hd@linux.vnet.ibm.com>
+ */
+
+#ifndef _ZCRYPT_MSGTYPE50_H_
+#define _ZCRYPT_MSGTYPE50_H_
+
+#define MSGTYPE50_NAME			"zcrypt_msgtype50"
+#define MSGTYPE50_VARIANT_DEFAULT	0
+
+#define MSGTYPE50_CRB2_MAX_MSG_SIZE 0x390 /* sizeof(struct type50_crb2_msg) */
+#define MSGTYPE50_CRB3_MAX_MSG_SIZE 0x710 /* sizeof(struct type50_crb3_msg) */
+
+#define MSGTYPE_ADJUSTMENT 0x08  /* type04 extension (not needed in type50) */
+
+unsigned int get_rsa_modex_fc(struct ica_rsa_modexpo *, int *);
+unsigned int get_rsa_crt_fc(struct ica_rsa_modexpo_crt *, int *);
+
+void zcrypt_msgtype50_init(void);
+void zcrypt_msgtype50_exit(void);
+
+#endif /* _ZCRYPT_MSGTYPE50_H_ */
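
The two maximum message sizes above can be cross-checked against the
type 50 struct layouts (8-byte header, 2-byte keyblock type, 6 reserved
bytes, five key parts, then the message). A quick C11 sanity sketch:

	#include <assert.h>

	static_assert(8 + 2 + 6 + 5 * 128 + 256 == 0x390, "type50_crb2_msg");
	static_assert(8 + 2 + 6 + 5 * 256 + 512 == 0x710, "type50_crb3_msg");
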
diff --git a/drivers/s390/crypto/zcrypt_msgtype6.c b/drivers/s390/crypto/zcrypt_msgtype6.c
new file mode 100644
index 0000000..2101776
--- /dev/null
+++ b/drivers/s390/crypto/zcrypt_msgtype6.c
@@ -0,0 +1,1377 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ *  zcrypt 2.1.0
+ *
+ *  Copyright IBM Corp. 2001, 2012
+ *  Author(s): Robert Burroughs
+ *	       Eric Rossman (edrossma@us.ibm.com)
+ *
+ *  Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com)
+ *  Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com>
+ *				  Ralph Wuerthner <rwuerthn@de.ibm.com>
+ *  MSGTYPE restruct:		  Holger Dengler <hd@linux.vnet.ibm.com>
+ */
+
+#define KMSG_COMPONENT "zcrypt"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/atomic.h>
+#include <linux/uaccess.h>
+
+#include "ap_bus.h"
+#include "zcrypt_api.h"
+#include "zcrypt_error.h"
+#include "zcrypt_msgtype6.h"
+#include "zcrypt_cca_key.h"
+
+#define PCIXCC_MIN_MOD_SIZE_OLD	 64	/*  512 bits	*/
+#define PCIXCC_MAX_ICA_RESPONSE_SIZE 0x77c /* max size type86 v2 reply	    */
+
+#define CEIL4(x) ((((x)+3)/4)*4)
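+/*
+ * CEIL4 rounds a length up to the next multiple of 4, e.g. CEIL4(5) == 8.
+ * Adding 3 can wrap around for lengths near the type's maximum, which is
+ * why callers check CEIL4(x) < x ("overflow after alignment") before
+ * trusting the result.
+ */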
+
+struct response_type {
+	struct completion work;
+	int type;
+};
+#define PCIXCC_RESPONSE_TYPE_ICA  0
+#define PCIXCC_RESPONSE_TYPE_XCRB 1
+#define PCIXCC_RESPONSE_TYPE_EP11 2
+
+MODULE_AUTHOR("IBM Corporation");
+MODULE_DESCRIPTION("Cryptographic Coprocessor (message type 6), " \
+		   "Copyright IBM Corp. 2001, 2012");
+MODULE_LICENSE("GPL");
+
+/**
+ * CPRB
+ *	  Note that all shorts, ints and longs are little-endian.
+ *	  All pointer fields are 32 bits wide and carry no meaning:
+ *	  the buffers they would address follow the CPRB directly.
+ *
+ *	  A request CPRB is followed by a request_parameter_block.
+ *
+ *	  The request (or reply) parameter block is organized thus:
+ *	    function code
+ *	    VUD block
+ *	    key block
+ */
+struct CPRB {
+	unsigned short cprb_len;	/* CPRB length			 */
+	unsigned char cprb_ver_id;	/* CPRB version id.		 */
+	unsigned char pad_000;		/* Alignment pad byte.		 */
+	unsigned char srpi_rtcode[4];	/* SRPI return code LELONG	 */
+	unsigned char srpi_verb;	/* SRPI verb type		 */
+	unsigned char flags;		/* flags			 */
+	unsigned char func_id[2];	/* function id			 */
+	unsigned char checkpoint_flag;	/*				 */
+	unsigned char resv2;		/* reserved			 */
+	unsigned short req_parml;	/* request parameter buffer	 */
+					/* length 16-bit little endian	 */
+	unsigned char req_parmp[4];	/* request parameter buffer	 *
+					 * pointer (means nothing: the	 *
+					 * parameter buffer follows	 *
+					 * the CPRB).			 */
+	unsigned char req_datal[4];	/* request data buffer		 */
+					/* length	  ULELONG	 */
+	unsigned char req_datap[4];	/* request data buffer		 */
+					/* pointer			 */
+	unsigned short rpl_parml;	/* reply  parameter buffer	 */
+					/* length 16-bit little endian	 */
+	unsigned char pad_001[2];	/* Alignment pad bytes. ULESHORT */
+	unsigned char rpl_parmp[4];	/* reply parameter buffer	 *
+					 * pointer (means nothing: the	 *
+					 * parameter buffer follows	 *
+					 * the CPRB).			 */
+	unsigned char rpl_datal[4];	/* reply data buffer len ULELONG */
+	unsigned char rpl_datap[4];	/* reply data buffer		 */
+					/* pointer			 */
+	unsigned short ccp_rscode;	/* server reason code	ULESHORT */
+	unsigned short ccp_rtcode;	/* server return code	ULESHORT */
+	unsigned char repd_parml[2];	/* replied parameter len ULESHORT*/
+	unsigned char mac_data_len[2];	/* Mac Data Length	ULESHORT */
+	unsigned char repd_datal[4];	/* replied data length	ULELONG	 */
+	unsigned char req_pc[2];	/* PC identifier		 */
+	unsigned char res_origin[8];	/* resource origin		 */
+	unsigned char mac_value[8];	/* Mac Value			 */
+	unsigned char logon_id[8];	/* Logon Identifier		 */
+	unsigned char usage_domain[2];	/* cdx				 */
+	unsigned char resv3[18];	/* reserved for requestor	 */
+	unsigned short svr_namel;	/* server name length  ULESHORT	 */
+	unsigned char svr_name[8];	/* server name			 */
+} __packed;
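+/*
+ * The LELONG/ULESHORT byte arrays above decode least significant byte
+ * first, e.g. a 32-bit LELONG value is
+ *   b[0] | (b[1] << 8) | (b[2] << 16) | (b[3] << 24)
+ */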
+
+struct function_and_rules_block {
+	unsigned char function_code[2];
+	unsigned short ulen;
+	unsigned char only_rule[8];
+} __packed;
+
+/**
+ * The following is used to initialize the CPRBX passed to the PCIXCC/CEX2C
+ * card in a type6 message. The 3 fields that must be filled in at execution
+ * time are req_parml, rpl_parml and usage_domain.
+ * Everything about this interface is ascii/big-endian, since the
+ * device does *not* have 'Intel inside'.
+ *
+ * The CPRBX is followed immediately by the parm block.
+ * The parm block contains:
+ * - function code ('PD' 0x5044 or 'PK' 0x504B)
+ * - rule block (one of:)
+ *   + 0x000A 'PKCS-1.2' (MCL2 'PD')
+ *   + 0x000A 'ZERO-PAD' (MCL2 'PK')
+ *   + 0x000A 'ZERO-PAD' (MCL3 'PD' or CEX2C 'PD')
+ *   + 0x000A 'MRP     ' (MCL3 'PK' or CEX2C 'PK')
+ * - VUD block
+ */
+static const struct CPRBX static_cprbx = {
+	.cprb_len	=  0x00DC,
+	.cprb_ver_id	=  0x02,
+	.func_id	= {0x54, 0x32},
+};
+
+int speed_idx_cca(int req_type)
+{
+	switch (req_type) {
+	case 0x4142:
+	case 0x4149:
+	case 0x414D:
+	case 0x4341:
+	case 0x4344:
+	case 0x4354:
+	case 0x4358:
+	case 0x444B:
+	case 0x4558:
+	case 0x4643:
+	case 0x4651:
+	case 0x4C47:
+	case 0x4C4B:
+	case 0x4C51:
+	case 0x4F48:
+	case 0x504F:
+	case 0x5053:
+	case 0x5058:
+	case 0x5343:
+	case 0x5344:
+	case 0x5345:
+	case 0x5350:
+		return LOW;
+	case 0x414B:
+	case 0x4345:
+	case 0x4349:
+	case 0x434D:
+	case 0x4847:
+	case 0x4849:
+	case 0x484D:
+	case 0x4850:
+	case 0x4851:
+	case 0x4954:
+	case 0x4958:
+	case 0x4B43:
+	case 0x4B44:
+	case 0x4B45:
+	case 0x4B47:
+	case 0x4B48:
+	case 0x4B49:
+	case 0x4B4E:
+	case 0x4B50:
+	case 0x4B52:
+	case 0x4B54:
+	case 0x4B58:
+	case 0x4D50:
+	case 0x4D53:
+	case 0x4D56:
+	case 0x4D58:
+	case 0x5044:
+	case 0x5045:
+	case 0x5046:
+	case 0x5047:
+	case 0x5049:
+	case 0x504B:
+	case 0x504D:
+	case 0x5254:
+	case 0x5347:
+	case 0x5349:
+	case 0x534B:
+	case 0x534D:
+	case 0x5356:
+	case 0x5358:
+	case 0x5443:
+	case 0x544B:
+	case 0x5647:
+		return HIGH;
+	default:
+		return MEDIUM;
+	}
+}
+
+int speed_idx_ep11(int req_type)
+{
+	switch (req_type) {
+	case  1:
+	case  2:
+	case 36:
+	case 37:
+	case 38:
+	case 39:
+	case 40:
+		return LOW;
+	case 17:
+	case 18:
+	case 19:
+	case 20:
+	case 21:
+	case 22:
+	case 26:
+	case 30:
+	case 31:
+	case 32:
+	case 33:
+	case 34:
+	case 35:
+		return HIGH;
+	default:
+		return MEDIUM;
+	}
+}
+
+
+/**
+ * Convert an ICAMEX message to a type6 MEX message.
+ *
+ * @zq: crypto device pointer
+ * @ap_msg: pointer to AP message
+ * @mex: pointer to user input data
+ *
+ * Returns 0 on success or negative errno value.
+ */
+static int ICAMEX_msg_to_type6MEX_msgX(struct zcrypt_queue *zq,
+				       struct ap_message *ap_msg,
+				       struct ica_rsa_modexpo *mex)
+{
+	static struct type6_hdr static_type6_hdrX = {
+		.type		=  0x06,
+		.offset1	=  0x00000058,
+		.agent_id	= {'C', 'A',},
+		.function_code	= {'P', 'K'},
+	};
+	static struct function_and_rules_block static_pke_fnr = {
+		.function_code	= {'P', 'K'},
+		.ulen		= 10,
+		.only_rule	= {'M', 'R', 'P', ' ', ' ', ' ', ' ', ' '}
+	};
+	struct {
+		struct type6_hdr hdr;
+		struct CPRBX cprbx;
+		struct function_and_rules_block fr;
+		unsigned short length;
+		char text[0];
+	} __packed * msg = ap_msg->message;
+	int size;
+
+	/*
+	 * The inputdatalength was a selection criterion in the dispatching
+	 * function zcrypt_rsa_modexpo(). However, make sure the following
+	 * copy_from_user() never exceeds the allocated buffer space.
+	 */
+	if (WARN_ON_ONCE(mex->inputdatalength > PAGE_SIZE))
+		return -EINVAL;
+
+	/* VUD.ciphertext */
+	msg->length = mex->inputdatalength + 2;
+	if (copy_from_user(msg->text, mex->inputdata, mex->inputdatalength))
+		return -EFAULT;
+
+	/* Set up key which is located after the variable length text. */
+	size = zcrypt_type6_mex_key_en(mex, msg->text+mex->inputdatalength);
+	if (size < 0)
+		return size;
+	size += sizeof(*msg) + mex->inputdatalength;
+
+	/* message header, cprbx and f&r */
+	msg->hdr = static_type6_hdrX;
+	msg->hdr.ToCardLen1 = size - sizeof(msg->hdr);
+	msg->hdr.FromCardLen1 = PCIXCC_MAX_ICA_RESPONSE_SIZE - sizeof(msg->hdr);
+
+	msg->cprbx = static_cprbx;
+	msg->cprbx.domain = AP_QID_QUEUE(zq->queue->qid);
+	msg->cprbx.rpl_msgbl = msg->hdr.FromCardLen1;
+
+	msg->fr = static_pke_fnr;
+
+	msg->cprbx.req_parml = size - sizeof(msg->hdr) - sizeof(msg->cprbx);
+
+	ap_msg->length = size;
+	return 0;
+}
+
+/**
+ * Convert an ICACRT message to a type6 CRT message.
+ *
+ * @zq: crypto device pointer
+ * @ap_msg: pointer to AP message
+ * @crt: pointer to user input data
+ *
+ * Returns 0 on success or negative errno value.
+ */
+static int ICACRT_msg_to_type6CRT_msgX(struct zcrypt_queue *zq,
+				       struct ap_message *ap_msg,
+				       struct ica_rsa_modexpo_crt *crt)
+{
+	static struct type6_hdr static_type6_hdrX = {
+		.type		=  0x06,
+		.offset1	=  0x00000058,
+		.agent_id	= {'C', 'A',},
+		.function_code	= {'P', 'D'},
+	};
+	static struct function_and_rules_block static_pkd_fnr = {
+		.function_code	= {'P', 'D'},
+		.ulen		= 10,
+		.only_rule	= {'Z', 'E', 'R', 'O', '-', 'P', 'A', 'D'}
+	};
+
+	struct {
+		struct type6_hdr hdr;
+		struct CPRBX cprbx;
+		struct function_and_rules_block fr;
+		unsigned short length;
+		char text[0];
+	} __packed * msg = ap_msg->message;
+	int size;
+
+	/*
+	 * The inputdatalength was a selection criterion in the dispatching
+	 * function zcrypt_rsa_crt(). However, make sure the following
+	 * copy_from_user() never exceeds the allocated buffer space.
+	 */
+	if (WARN_ON_ONCE(crt->inputdatalength > PAGE_SIZE))
+		return -EINVAL;
+
+	/* VUD.ciphertext */
+	msg->length = crt->inputdatalength + 2;
+	if (copy_from_user(msg->text, crt->inputdata, crt->inputdatalength))
+		return -EFAULT;
+
+	/* Set up key which is located after the variable length text. */
+	size = zcrypt_type6_crt_key(crt, msg->text + crt->inputdatalength);
+	if (size < 0)
+		return size;
+	size += sizeof(*msg) + crt->inputdatalength;	/* total size of msg */
+
+	/* message header, cprbx and f&r */
+	msg->hdr = static_type6_hdrX;
+	msg->hdr.ToCardLen1 = size -  sizeof(msg->hdr);
+	msg->hdr.FromCardLen1 = PCIXCC_MAX_ICA_RESPONSE_SIZE - sizeof(msg->hdr);
+
+	msg->cprbx = static_cprbx;
+	msg->cprbx.domain = AP_QID_QUEUE(zq->queue->qid);
+	msg->cprbx.req_parml = msg->cprbx.rpl_msgbl =
+		size - sizeof(msg->hdr) - sizeof(msg->cprbx);
+
+	msg->fr = static_pkd_fnr;
+
+	ap_msg->length = size;
+	return 0;
+}
+
+/**
+ * Convert an XCRB message to a type6 CPRB message.
+ *
+ * @zq: crypto device pointer
+ * @ap_msg: pointer to AP message
+ * @xcRB: pointer to user input data
+ *
+ * Returns 0 on success or -EFAULT, -EINVAL.
+ */
+struct type86_fmt2_msg {
+	struct type86_hdr hdr;
+	struct type86_fmt2_ext fmt2;
+} __packed;
+
+static int XCRB_msg_to_type6CPRB_msgX(struct ap_message *ap_msg,
+				      struct ica_xcRB *xcRB,
+				      unsigned int *fcode,
+				      unsigned short **dom)
+{
+	static struct type6_hdr static_type6_hdrX = {
+		.type		=  0x06,
+		.offset1	=  0x00000058,
+	};
+	struct {
+		struct type6_hdr hdr;
+		struct CPRBX cprbx;
+	} __packed * msg = ap_msg->message;
+
+	int rcblen = CEIL4(xcRB->request_control_blk_length);
+	int replylen, req_sumlen, resp_sumlen;
+	char *req_data = ap_msg->message + sizeof(struct type6_hdr) + rcblen;
+	char *function_code;
+
+	if (CEIL4(xcRB->request_control_blk_length) <
+			xcRB->request_control_blk_length)
+		return -EINVAL; /* overflow after alignment */
+
+	/* length checks */
+	ap_msg->length = sizeof(struct type6_hdr) +
+		CEIL4(xcRB->request_control_blk_length) +
+		xcRB->request_data_length;
+	if (ap_msg->length > MSGTYPE06_MAX_MSG_SIZE)
+		return -EINVAL;
+
+	/*
+	 * Overflow check: the unsigned sum must be greater than or equal
+	 * to the largest operand, otherwise the addition wrapped around.
+	 */
+	req_sumlen = CEIL4(xcRB->request_control_blk_length) +
+			xcRB->request_data_length;
+	if ((CEIL4(xcRB->request_control_blk_length) <=
+						xcRB->request_data_length) ?
+		(req_sumlen < xcRB->request_data_length) :
+		(req_sumlen < CEIL4(xcRB->request_control_blk_length))) {
+		return -EINVAL;
+	}
+
+	if (CEIL4(xcRB->reply_control_blk_length) <
+			xcRB->reply_control_blk_length)
+			return -EINVAL; /* overflow after alignment */
+
+	replylen = sizeof(struct type86_fmt2_msg) +
+		CEIL4(xcRB->reply_control_blk_length) +
+		xcRB->reply_data_length;
+	if (replylen > MSGTYPE06_MAX_MSG_SIZE)
+		return -EINVAL;
+
+	/*
+	 * Overflow check: the unsigned sum must be greater than or equal
+	 * to the largest operand, otherwise the addition wrapped around.
+	 */
+	resp_sumlen = CEIL4(xcRB->reply_control_blk_length) +
+			xcRB->reply_data_length;
+	if ((CEIL4(xcRB->reply_control_blk_length) <= xcRB->reply_data_length) ?
+		(resp_sumlen < xcRB->reply_data_length) :
+		(resp_sumlen < CEIL4(xcRB->reply_control_blk_length))) {
+		return -EINVAL;
+	}
+
+	/* prepare type6 header */
+	msg->hdr = static_type6_hdrX;
+	memcpy(msg->hdr.agent_id, &(xcRB->agent_ID), sizeof(xcRB->agent_ID));
+	msg->hdr.ToCardLen1 = xcRB->request_control_blk_length;
+	if (xcRB->request_data_length) {
+		msg->hdr.offset2 = msg->hdr.offset1 + rcblen;
+		msg->hdr.ToCardLen2 = xcRB->request_data_length;
+	}
+	msg->hdr.FromCardLen1 = xcRB->reply_control_blk_length;
+	msg->hdr.FromCardLen2 = xcRB->reply_data_length;
+
+	/* prepare CPRB */
+	if (copy_from_user(&(msg->cprbx), xcRB->request_control_blk_addr,
+		    xcRB->request_control_blk_length))
+		return -EFAULT;
+	if (msg->cprbx.cprb_len + sizeof(msg->hdr.function_code) >
+	    xcRB->request_control_blk_length)
+		return -EINVAL;
+	function_code = ((unsigned char *)&msg->cprbx) + msg->cprbx.cprb_len;
+	memcpy(msg->hdr.function_code, function_code,
+	       sizeof(msg->hdr.function_code));
+
+	*fcode = (msg->hdr.function_code[0] << 8) | msg->hdr.function_code[1];
+	*dom = (unsigned short *)&msg->cprbx.domain;
+
+	if (memcmp(function_code, "US", 2) == 0
+	    || memcmp(function_code, "AU", 2) == 0)
+		ap_msg->special = 1;
+	else
+		ap_msg->special = 0;
+
+	/* copy data block */
+	if (xcRB->request_data_length &&
+	    copy_from_user(req_data, xcRB->request_data_address,
+		xcRB->request_data_length))
+		return -EFAULT;
+
+	return 0;
+}
+
+static int xcrb_msg_to_type6_ep11cprb_msgx(struct ap_message *ap_msg,
+					   struct ep11_urb *xcRB,
+					   unsigned int *fcode)
+{
+	unsigned int lfmt;
+	static struct type6_hdr static_type6_ep11_hdr = {
+		.type		=  0x06,
+		.rqid		= {0x00, 0x01},
+		.function_code	= {0x00, 0x00},
+		.agent_id[0]	=  0x58,	/* {'X'} */
+		.agent_id[1]	=  0x43,	/* {'C'} */
+		.offset1	=  0x00000058,
+	};
+
+	struct {
+		struct type6_hdr hdr;
+		struct ep11_cprb cprbx;
+		unsigned char	pld_tag;	/* fixed value 0x30 */
+		unsigned char	pld_lenfmt;	/* payload length format */
+	} __packed * msg = ap_msg->message;
+
+	struct pld_hdr {
+		unsigned char	func_tag;	/* fixed value 0x4 */
+		unsigned char	func_len;	/* fixed value 0x4 */
+		unsigned int	func_val;	/* function ID	   */
+		unsigned char	dom_tag;	/* fixed value 0x4 */
+		unsigned char	dom_len;	/* fixed value 0x4 */
+		unsigned int	dom_val;	/* domain id	   */
+	} __packed * payload_hdr = NULL;
+
+	if (CEIL4(xcRB->req_len) < xcRB->req_len)
+		return -EINVAL; /* overflow after alignment */
+
+	/* length checks */
+	ap_msg->length = sizeof(struct type6_hdr) + xcRB->req_len;
+	if (CEIL4(xcRB->req_len) > MSGTYPE06_MAX_MSG_SIZE -
+				   (sizeof(struct type6_hdr)))
+		return -EINVAL;
+
+	if (CEIL4(xcRB->resp_len) < xcRB->resp_len)
+		return -EINVAL; /* overflow after alignment */
+
+	if (CEIL4(xcRB->resp_len) > MSGTYPE06_MAX_MSG_SIZE -
+				    (sizeof(struct type86_fmt2_msg)))
+		return -EINVAL;
+
+	/* prepare type6 header */
+	msg->hdr = static_type6_ep11_hdr;
+	msg->hdr.ToCardLen1   = xcRB->req_len;
+	msg->hdr.FromCardLen1 = xcRB->resp_len;
+
+	/* Import CPRB data from the ioctl input parameter */
+	if (copy_from_user(&(msg->cprbx.cprb_len),
+			   (char __force __user *)xcRB->req, xcRB->req_len)) {
+		return -EFAULT;
+	}
+
+	if ((msg->pld_lenfmt & 0x80) == 0x80) { /* ext. len. fmt 2 or 3 */
+		switch (msg->pld_lenfmt & 0x03) {
+		case 1:
+			lfmt = 2;
+			break;
+		case 2:
+			lfmt = 3;
+			break;
+		default:
+			return -EINVAL;
+		}
+	} else {
+		lfmt = 1; /* length format #1 */
+	}
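+	/*
+	 * Editor's note (assumption): the payload length field appears to
+	 * follow the ASN.1 BER length convention -- short form in a single
+	 * byte, long form with one or two extra length octets -- so lfmt is
+	 * the number of bytes to skip past pld_lenfmt to reach the
+	 * function/domain header.
+	 */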
+	payload_hdr = (struct pld_hdr *)((&(msg->pld_lenfmt)) + lfmt);
+	*fcode = payload_hdr->func_val & 0xFFFF;
+
+	return 0;
+}
+
+/**
+ * Copy results from a type 86 ICA reply message back to user space.
+ *
+ * @zq: crypto device pointer
+ * @reply: reply AP message.
+ * @data: pointer to user output data
+ * @length: size of user output data
+ *
+ * Returns 0 on success or -EINVAL, -EFAULT, -EAGAIN in case of an error.
+ */
+struct type86x_reply {
+	struct type86_hdr hdr;
+	struct type86_fmt2_ext fmt2;
+	struct CPRBX cprbx;
+	unsigned char pad[4];	/* 4 byte function code/rules block ? */
+	unsigned short length;
+	char text[0];
+} __packed;
+
+struct type86_ep11_reply {
+	struct type86_hdr hdr;
+	struct type86_fmt2_ext fmt2;
+	struct ep11_cprb cprbx;
+} __packed;
+
+static int convert_type86_ica(struct zcrypt_queue *zq,
+			  struct ap_message *reply,
+			  char __user *outputdata,
+			  unsigned int outputdatalength)
+{
+	static unsigned char static_pad[] = {
+		0x00, 0x02,
+		0x1B, 0x7B, 0x5D, 0xB5, 0x75, 0x01, 0x3D, 0xFD,
+		0x8D, 0xD1, 0xC7, 0x03, 0x2D, 0x09, 0x23, 0x57,
+		0x89, 0x49, 0xB9, 0x3F, 0xBB, 0x99, 0x41, 0x5B,
+		0x75, 0x21, 0x7B, 0x9D, 0x3B, 0x6B, 0x51, 0x39,
+		0xBB, 0x0D, 0x35, 0xB9, 0x89, 0x0F, 0x93, 0xA5,
+		0x0B, 0x47, 0xF1, 0xD3, 0xBB, 0xCB, 0xF1, 0x9D,
+		0x23, 0x73, 0x71, 0xFF, 0xF3, 0xF5, 0x45, 0xFB,
+		0x61, 0x29, 0x23, 0xFD, 0xF1, 0x29, 0x3F, 0x7F,
+		0x17, 0xB7, 0x1B, 0xA9, 0x19, 0xBD, 0x57, 0xA9,
+		0xD7, 0x95, 0xA3, 0xCB, 0xED, 0x1D, 0xDB, 0x45,
+		0x7D, 0x11, 0xD1, 0x51, 0x1B, 0xED, 0x71, 0xE9,
+		0xB1, 0xD1, 0xAB, 0xAB, 0x21, 0x2B, 0x1B, 0x9F,
+		0x3B, 0x9F, 0xF7, 0xF7, 0xBD, 0x63, 0xEB, 0xAD,
+		0xDF, 0xB3, 0x6F, 0x5B, 0xDB, 0x8D, 0xA9, 0x5D,
+		0xE3, 0x7D, 0x77, 0x49, 0x47, 0xF5, 0xA7, 0xFD,
+		0xAB, 0x2F, 0x27, 0x35, 0x77, 0xD3, 0x49, 0xC9,
+		0x09, 0xEB, 0xB1, 0xF9, 0xBF, 0x4B, 0xCB, 0x2B,
+		0xEB, 0xEB, 0x05, 0xFF, 0x7D, 0xC7, 0x91, 0x8B,
+		0x09, 0x83, 0xB9, 0xB9, 0x69, 0x33, 0x39, 0x6B,
+		0x79, 0x75, 0x19, 0xBF, 0xBB, 0x07, 0x1D, 0xBD,
+		0x29, 0xBF, 0x39, 0x95, 0x93, 0x1D, 0x35, 0xC7,
+		0xC9, 0x4D, 0xE5, 0x97, 0x0B, 0x43, 0x9B, 0xF1,
+		0x16, 0x93, 0x03, 0x1F, 0xA5, 0xFB, 0xDB, 0xF3,
+		0x27, 0x4F, 0x27, 0x61, 0x05, 0x1F, 0xB9, 0x23,
+		0x2F, 0xC3, 0x81, 0xA9, 0x23, 0x71, 0x55, 0x55,
+		0xEB, 0xED, 0x41, 0xE5, 0xF3, 0x11, 0xF1, 0x43,
+		0x69, 0x03, 0xBD, 0x0B, 0x37, 0x0F, 0x51, 0x8F,
+		0x0B, 0xB5, 0x89, 0x5B, 0x67, 0xA9, 0xD9, 0x4F,
+		0x01, 0xF9, 0x21, 0x77, 0x37, 0x73, 0x79, 0xC5,
+		0x7F, 0x51, 0xC1, 0xCF, 0x97, 0xA1, 0x75, 0xAD,
+		0x35, 0x9D, 0xD3, 0xD3, 0xA7, 0x9D, 0x5D, 0x41,
+		0x6F, 0x65, 0x1B, 0xCF, 0xA9, 0x87, 0x91, 0x09
+	};
+	struct type86x_reply *msg = reply->message;
+	unsigned short service_rc, service_rs;
+	unsigned int reply_len, pad_len;
+	char *data;
+
+	service_rc = msg->cprbx.ccp_rtcode;
+	if (unlikely(service_rc != 0)) {
+		service_rs = msg->cprbx.ccp_rscode;
+		if ((service_rc == 8 && service_rs == 66) ||
+		    (service_rc == 8 && service_rs == 65) ||
+		    (service_rc == 8 && service_rs == 72) ||
+		    (service_rc == 8 && service_rs == 770) ||
+		    (service_rc == 12 && service_rs == 769)) {
+			ZCRYPT_DBF(DBF_DEBUG,
+				   "device=%02x.%04x rc/rs=%d/%d => rc=EINVAL\n",
+				   AP_QID_CARD(zq->queue->qid),
+				   AP_QID_QUEUE(zq->queue->qid),
+				   (int) service_rc, (int) service_rs);
+			return -EINVAL;
+		}
+		if (service_rc == 8 && service_rs == 783) {
+			zq->zcard->min_mod_size =
+				PCIXCC_MIN_MOD_SIZE_OLD;
+			ZCRYPT_DBF(DBF_DEBUG,
+				   "device=%02x.%04x rc/rs=%d/%d => rc=EAGAIN\n",
+				   AP_QID_CARD(zq->queue->qid),
+				   AP_QID_QUEUE(zq->queue->qid),
+				   (int) service_rc, (int) service_rs);
+			return -EAGAIN;
+		}
+		zq->online = 0;
+		pr_err("Cryptographic device %02x.%04x failed and was set offline\n",
+		       AP_QID_CARD(zq->queue->qid),
+		       AP_QID_QUEUE(zq->queue->qid));
+		ZCRYPT_DBF(DBF_ERR,
+			   "device=%02x.%04x rc/rs=%d/%d => online=0 rc=EAGAIN\n",
+			   AP_QID_CARD(zq->queue->qid),
+			   AP_QID_QUEUE(zq->queue->qid),
+			   (int) service_rc, (int) service_rs);
+		return -EAGAIN;	/* repeat the request on a different device. */
+	}
+	data = msg->text;
+	reply_len = msg->length - 2;
+	if (reply_len > outputdatalength)
+		return -EINVAL;
+	/*
+	 * For all encipher requests, the length of the ciphertext (reply_len)
+	 * will always equal the modulus length. For MEX decipher requests
+	 * the output needs to get padded. Minimum pad size is 10.
+	 *
+	 * Currently, the cases where padding will be added are:
+	 * - PCIXCC_MCL2 using a CRT form token (since PKD didn't support
+	 *   ZERO-PAD and CRT is only supported for PKD requests)
+	 * - PCICC, always
+	 */
+	pad_len = outputdatalength - reply_len;
+	if (pad_len > 0) {
+		if (pad_len < 10)
+			return -EINVAL;
+		/* 'restore' padding left in the PCICC/PCIXCC card. */
+		if (copy_to_user(outputdata, static_pad, pad_len - 1))
+			return -EFAULT;
+		if (put_user(0, outputdata + pad_len - 1))
+			return -EFAULT;
+	}
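+	/*
+	 * Editor's note (assumption): the reconstructed buffer then looks
+	 * like a PKCS #1 v1.5 block type 2 plaintext,
+	 *	0x00 0x02 <nonzero pseudo-random pad> 0x00 <data>
+	 * which is why static_pad starts with 0x00 0x02 and contains no
+	 * further zero bytes.
+	 */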
+	/* Copy the crypto response to user space. */
+	if (copy_to_user(outputdata + pad_len, data, reply_len))
+		return -EFAULT;
+	return 0;
+}
+
+/**
+ * Copy results from a type 86 XCRB reply message back to user space.
+ *
+ * @zq: crypto device pointer
+ * @reply: reply AP message.
+ * @xcRB: pointer to XCRB
+ *
+ * Returns 0 on success or -EINVAL, -EFAULT, -EAGAIN in case of an error.
+ */
+static int convert_type86_xcrb(struct zcrypt_queue *zq,
+			       struct ap_message *reply,
+			       struct ica_xcRB *xcRB)
+{
+	struct type86_fmt2_msg *msg = reply->message;
+	char *data = reply->message;
+
+	/* Copy CPRB to user */
+	if (copy_to_user(xcRB->reply_control_blk_addr,
+		data + msg->fmt2.offset1, msg->fmt2.count1))
+		return -EFAULT;
+	xcRB->reply_control_blk_length = msg->fmt2.count1;
+
+	/* Copy data buffer to user */
+	if (msg->fmt2.count2)
+		if (copy_to_user(xcRB->reply_data_addr,
+			data + msg->fmt2.offset2, msg->fmt2.count2))
+			return -EFAULT;
+	xcRB->reply_data_length = msg->fmt2.count2;
+	return 0;
+}
+
+/**
+ * Copy results from a type 86 EP11 XCRB reply message back to user space.
+ *
+ * @zq: crypto device pointer
+ * @reply: reply AP message.
+ * @xcRB: pointer to EP11 user request block
+ *
+ * Returns 0 on success or -EINVAL, -EFAULT, -EAGAIN in case of an error.
+ */
+static int convert_type86_ep11_xcrb(struct zcrypt_queue *zq,
+				    struct ap_message *reply,
+				    struct ep11_urb *xcRB)
+{
+	struct type86_fmt2_msg *msg = reply->message;
+	char *data = reply->message;
+
+	if (xcRB->resp_len < msg->fmt2.count1)
+		return -EINVAL;
+
+	/* Copy response CPRB to user */
+	if (copy_to_user((char __force __user *)xcRB->resp,
+			 data + msg->fmt2.offset1, msg->fmt2.count1))
+		return -EFAULT;
+	xcRB->resp_len = msg->fmt2.count1;
+	return 0;
+}
+
+static int convert_type86_rng(struct zcrypt_queue *zq,
+			  struct ap_message *reply,
+			  char *buffer)
+{
+	struct {
+		struct type86_hdr hdr;
+		struct type86_fmt2_ext fmt2;
+		struct CPRBX cprbx;
+	} __packed * msg = reply->message;
+	char *data = reply->message;
+
+	if (msg->cprbx.ccp_rtcode != 0 || msg->cprbx.ccp_rscode != 0)
+		return -EINVAL;
+	memcpy(buffer, data + msg->fmt2.offset2, msg->fmt2.count2);
+	return msg->fmt2.count2;
+}
+
+static int convert_response_ica(struct zcrypt_queue *zq,
+			    struct ap_message *reply,
+			    char __user *outputdata,
+			    unsigned int outputdatalength)
+{
+	struct type86x_reply *msg = reply->message;
+
+	switch (msg->hdr.type) {
+	case TYPE82_RSP_CODE:
+	case TYPE88_RSP_CODE:
+		return convert_error(zq, reply);
+	case TYPE86_RSP_CODE:
+		if (msg->cprbx.ccp_rtcode &&
+		   (msg->cprbx.ccp_rscode == 0x14f) &&
+		   (outputdatalength > 256)) {
+			if (zq->zcard->max_exp_bit_length <= 17) {
+				zq->zcard->max_exp_bit_length = 17;
+				return -EAGAIN;
+			} else
+				return -EINVAL;
+		}
+		if (msg->hdr.reply_code)
+			return convert_error(zq, reply);
+		if (msg->cprbx.cprb_ver_id == 0x02)
+			return convert_type86_ica(zq, reply,
+						  outputdata, outputdatalength);
+		/*
+		 * Fall through, no break, incorrect cprb version is an unknown
+		 * response
+		 */
+	default: /* Unknown response type, this should NEVER EVER happen */
+		zq->online = 0;
+		pr_err("Cryptographic device %02x.%04x failed and was set offline\n",
+		       AP_QID_CARD(zq->queue->qid),
+		       AP_QID_QUEUE(zq->queue->qid));
+		ZCRYPT_DBF(DBF_ERR,
+			   "device=%02x.%04x rtype=0x%02x => online=0 rc=EAGAIN\n",
+			   AP_QID_CARD(zq->queue->qid),
+			   AP_QID_QUEUE(zq->queue->qid),
+			   (int) msg->hdr.type);
+		return -EAGAIN;	/* repeat the request on a different device. */
+	}
+}
+
+static int convert_response_xcrb(struct zcrypt_queue *zq,
+			    struct ap_message *reply,
+			    struct ica_xcRB *xcRB)
+{
+	struct type86x_reply *msg = reply->message;
+
+	switch (msg->hdr.type) {
+	case TYPE82_RSP_CODE:
+	case TYPE88_RSP_CODE:
+		xcRB->status = 0x0008044DL; /* HDD_InvalidParm */
+		return convert_error(zq, reply);
+	case TYPE86_RSP_CODE:
+		if (msg->hdr.reply_code) {
+			memcpy(&(xcRB->status), msg->fmt2.apfs, sizeof(u32));
+			return convert_error(zq, reply);
+		}
+		if (msg->cprbx.cprb_ver_id == 0x02)
+			return convert_type86_xcrb(zq, reply, xcRB);
+		/*
+		 * Fall through, no break, incorrect cprb version is an unknown
+		 * response
+		 */
+	default: /* Unknown response type, this should NEVER EVER happen */
+		xcRB->status = 0x0008044DL; /* HDD_InvalidParm */
+		zq->online = 0;
+		pr_err("Cryptographic device %02x.%04x failed and was set offline\n",
+		       AP_QID_CARD(zq->queue->qid),
+		       AP_QID_QUEUE(zq->queue->qid));
+		ZCRYPT_DBF(DBF_ERR,
+			   "device=%02x.%04x rtype=0x%02x => online=0 rc=EAGAIN\n",
+			   AP_QID_CARD(zq->queue->qid),
+			   AP_QID_QUEUE(zq->queue->qid),
+			   (int) msg->hdr.type);
+		return -EAGAIN;	/* repeat the request on a different device. */
+	}
+}
+
+static int convert_response_ep11_xcrb(struct zcrypt_queue *zq,
+	struct ap_message *reply, struct ep11_urb *xcRB)
+{
+	struct type86_ep11_reply *msg = reply->message;
+
+	switch (msg->hdr.type) {
+	case TYPE82_RSP_CODE:
+	case TYPE87_RSP_CODE:
+		return convert_error(zq, reply);
+	case TYPE86_RSP_CODE:
+		if (msg->hdr.reply_code)
+			return convert_error(zq, reply);
+		if (msg->cprbx.cprb_ver_id == 0x04)
+			return convert_type86_ep11_xcrb(zq, reply, xcRB);
+	/* Fall through, no break, incorrect cprb version is an unknown resp. */
+	default: /* Unknown response type, this should NEVER EVER happen */
+		zq->online = 0;
+		pr_err("Cryptographic device %02x.%04x failed and was set offline\n",
+		       AP_QID_CARD(zq->queue->qid),
+		       AP_QID_QUEUE(zq->queue->qid));
+		ZCRYPT_DBF(DBF_ERR,
+			   "device=%02x.%04x rtype=0x%02x => online=0 rc=EAGAIN\n",
+			   AP_QID_CARD(zq->queue->qid),
+			   AP_QID_QUEUE(zq->queue->qid),
+			   (int) msg->hdr.type);
+		return -EAGAIN; /* repeat the request on a different device. */
+	}
+}
+
+static int convert_response_rng(struct zcrypt_queue *zq,
+				 struct ap_message *reply,
+				 char *data)
+{
+	struct type86x_reply *msg = reply->message;
+
+	switch (msg->hdr.type) {
+	case TYPE82_RSP_CODE:
+	case TYPE88_RSP_CODE:
+		return -EINVAL;
+	case TYPE86_RSP_CODE:
+		if (msg->hdr.reply_code)
+			return -EINVAL;
+		if (msg->cprbx.cprb_ver_id == 0x02)
+			return convert_type86_rng(zq, reply, data);
+		/*
+		 * Fall through, no break, incorrect cprb version is an unknown
+		 * response
+		 */
+	default: /* Unknown response type, this should NEVER EVER happen */
+		zq->online = 0;
+		pr_err("Cryptographic device %02x.%04x failed and was set offline\n",
+		       AP_QID_CARD(zq->queue->qid),
+		       AP_QID_QUEUE(zq->queue->qid));
+		ZCRYPT_DBF(DBF_ERR,
+			   "device=%02x.%04x rtype=0x%02x => online=0 rc=EAGAIN\n",
+			   AP_QID_CARD(zq->queue->qid),
+			   AP_QID_QUEUE(zq->queue->qid),
+			   (int) msg->hdr.type);
+		return -EAGAIN;	/* repeat the request on a different device. */
+	}
+}
+
+/**
+ * This function is called from the AP bus code after a crypto request
+ * "msg" has finished with the reply message "reply".
+ * It is called from tasklet context.
+ * @aq: pointer to the AP queue
+ * @msg: pointer to the AP message
+ * @reply: pointer to the AP reply message
+ */
+static void zcrypt_msgtype6_receive(struct ap_queue *aq,
+				  struct ap_message *msg,
+				  struct ap_message *reply)
+{
+	static struct error_hdr error_reply = {
+		.type = TYPE82_RSP_CODE,
+		.reply_code = REP82_ERROR_MACHINE_FAILURE,
+	};
+	struct response_type *resp_type =
+		(struct response_type *) msg->private;
+	struct type86x_reply *t86r;
+	int length;
+
+	/* Copy the reply message to the request message buffer. */
+	if (!reply)
+		goto out;	/* ap_msg->rc indicates the error */
+	t86r = reply->message;
+	if (t86r->hdr.type == TYPE86_RSP_CODE &&
+		 t86r->cprbx.cprb_ver_id == 0x02) {
+		switch (resp_type->type) {
+		case PCIXCC_RESPONSE_TYPE_ICA:
+			length = sizeof(struct type86x_reply)
+				+ t86r->length - 2;
+			length = min(PCIXCC_MAX_ICA_RESPONSE_SIZE, length);
+			memcpy(msg->message, reply->message, length);
+			break;
+		case PCIXCC_RESPONSE_TYPE_XCRB:
+			length = t86r->fmt2.offset2 + t86r->fmt2.count2;
+			length = min(MSGTYPE06_MAX_MSG_SIZE, length);
+			memcpy(msg->message, reply->message, length);
+			break;
+		default:
+			memcpy(msg->message, &error_reply,
+			       sizeof(error_reply));
+		}
+	} else
+		memcpy(msg->message, reply->message, sizeof(error_reply));
+out:
+	complete(&(resp_type->work));
+}
+
+/**
+ * This function is called from the AP bus code after a crypto request
+ * "msg" has finished with the reply message "reply".
+ * It is called from tasklet context.
+ * @aq: pointer to the AP queue
+ * @msg: pointer to the AP message
+ * @reply: pointer to the AP reply message
+ */
+static void zcrypt_msgtype6_receive_ep11(struct ap_queue *aq,
+					 struct ap_message *msg,
+					 struct ap_message *reply)
+{
+	static struct error_hdr error_reply = {
+		.type = TYPE82_RSP_CODE,
+		.reply_code = REP82_ERROR_MACHINE_FAILURE,
+	};
+	struct response_type *resp_type =
+		(struct response_type *)msg->private;
+	struct type86_ep11_reply *t86r;
+	int length;
+
+	/* Copy the reply message to the request message buffer. */
+	if (!reply)
+		goto out;	/* ap_msg->rc indicates the error */
+	t86r = reply->message;
+	if (t86r->hdr.type == TYPE86_RSP_CODE &&
+	    t86r->cprbx.cprb_ver_id == 0x04) {
+		switch (resp_type->type) {
+		case PCIXCC_RESPONSE_TYPE_EP11:
+			length = t86r->fmt2.offset1 + t86r->fmt2.count1;
+			length = min(MSGTYPE06_MAX_MSG_SIZE, length);
+			memcpy(msg->message, reply->message, length);
+			break;
+		default:
+			memcpy(msg->message, &error_reply, sizeof(error_reply));
+		}
+	} else {
+		memcpy(msg->message, reply->message, sizeof(error_reply));
+	}
+out:
+	complete(&(resp_type->work));
+}
+
+static atomic_t zcrypt_step = ATOMIC_INIT(0);
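+
+/*
+ * Editor's illustration (not part of the original source): the program
+ * supplied message id (psmid) built below packs the caller's pid into the
+ * upper 32 bits and a global sequence count into the lower 32 bits, e.g.
+ * pid 1234 with step 7 gives psmid 0x000004d200000007, so a reply can be
+ * matched to its outstanding request.
+ */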
+
+/**
+ * The request distributor calls this function if it picked the PCIXCC/CEX2C
+ * device to handle a modexpo request.
+ * @zq: pointer to zcrypt_queue structure that identifies the
+ *	  PCIXCC/CEX2C device to the request distributor
+ * @mex: pointer to the modexpo request buffer
+ */
+static long zcrypt_msgtype6_modexpo(struct zcrypt_queue *zq,
+				  struct ica_rsa_modexpo *mex)
+{
+	struct ap_message ap_msg;
+	struct response_type resp_type = {
+		.type = PCIXCC_RESPONSE_TYPE_ICA,
+	};
+	int rc;
+
+	ap_init_message(&ap_msg);
+	ap_msg.message = (void *) get_zeroed_page(GFP_KERNEL);
+	if (!ap_msg.message)
+		return -ENOMEM;
+	ap_msg.receive = zcrypt_msgtype6_receive;
+	ap_msg.psmid = (((unsigned long long) current->pid) << 32) +
+				atomic_inc_return(&zcrypt_step);
+	ap_msg.private = &resp_type;
+	rc = ICAMEX_msg_to_type6MEX_msgX(zq, &ap_msg, mex);
+	if (rc)
+		goto out_free;
+	init_completion(&resp_type.work);
+	ap_queue_message(zq->queue, &ap_msg);
+	rc = wait_for_completion_interruptible(&resp_type.work);
+	if (rc == 0) {
+		rc = ap_msg.rc;
+		if (rc == 0)
+			rc = convert_response_ica(zq, &ap_msg,
+						  mex->outputdata,
+						  mex->outputdatalength);
+	} else
+		/* Signal pending. */
+		ap_cancel_message(zq->queue, &ap_msg);
+out_free:
+	free_page((unsigned long) ap_msg.message);
+	return rc;
+}
+
+/**
+ * The request distributor calls this function if it picked the PCIXCC/CEX2C
+ * device to handle a modexpo_crt request.
+ * @zq: pointer to zcrypt_queue structure that identifies the
+ *	  PCIXCC/CEX2C device to the request distributor
+ * @crt: pointer to the modexpo_crt request buffer
+ */
+static long zcrypt_msgtype6_modexpo_crt(struct zcrypt_queue *zq,
+				      struct ica_rsa_modexpo_crt *crt)
+{
+	struct ap_message ap_msg;
+	struct response_type resp_type = {
+		.type = PCIXCC_RESPONSE_TYPE_ICA,
+	};
+	int rc;
+
+	ap_init_message(&ap_msg);
+	ap_msg.message = (void *) get_zeroed_page(GFP_KERNEL);
+	if (!ap_msg.message)
+		return -ENOMEM;
+	ap_msg.receive = zcrypt_msgtype6_receive;
+	ap_msg.psmid = (((unsigned long long) current->pid) << 32) +
+				atomic_inc_return(&zcrypt_step);
+	ap_msg.private = &resp_type;
+	rc = ICACRT_msg_to_type6CRT_msgX(zq, &ap_msg, crt);
+	if (rc)
+		goto out_free;
+	init_completion(&resp_type.work);
+	ap_queue_message(zq->queue, &ap_msg);
+	rc = wait_for_completion_interruptible(&resp_type.work);
+	if (rc == 0) {
+		rc = ap_msg.rc;
+		if (rc == 0)
+			rc = convert_response_ica(zq, &ap_msg,
+						  crt->outputdata,
+						  crt->outputdatalength);
+	} else {
+		/* Signal pending. */
+		ap_cancel_message(zq->queue, &ap_msg);
+	}
+out_free:
+	free_page((unsigned long) ap_msg.message);
+	return rc;
+}
+
+/**
+ * Fetch function code from cprb.
+ * Extracting the fc requires copying the cprb from userspace, so this
+ * function allocates memory and needs an ap_msg prepared by the caller
+ * with ap_init_message(). The caller must also make sure that
+ * ap_release_message() is always called, even on failure.
+ */
+int get_cprb_fc(struct ica_xcRB *xcRB, struct ap_message *ap_msg,
+		unsigned int *func_code, unsigned short **dom)
+{
+	struct response_type resp_type = {
+		.type = PCIXCC_RESPONSE_TYPE_XCRB,
+	};
+
+	ap_msg->message = kmalloc(MSGTYPE06_MAX_MSG_SIZE, GFP_KERNEL);
+	if (!ap_msg->message)
+		return -ENOMEM;
+	ap_msg->receive = zcrypt_msgtype6_receive;
+	ap_msg->psmid = (((unsigned long long) current->pid) << 32) +
+				atomic_inc_return(&zcrypt_step);
+	ap_msg->private = kmalloc(sizeof(resp_type), GFP_KERNEL);
+	if (!ap_msg->private)
+		return -ENOMEM;
+	memcpy(ap_msg->private, &resp_type, sizeof(resp_type));
+	return XCRB_msg_to_type6CPRB_msgX(ap_msg, xcRB, func_code, dom);
+}
+
+/**
+ * The request distributor calls this function if it picked the PCIXCC/CEX2C
+ * device to handle a send_cprb request.
+ * @zq: pointer to zcrypt_queue structure that identifies the
+ *	  PCIXCC/CEX2C device to the request distributor
+ * @xcRB: pointer to the send_cprb request buffer
+ */
+static long zcrypt_msgtype6_send_cprb(struct zcrypt_queue *zq,
+				    struct ica_xcRB *xcRB,
+				    struct ap_message *ap_msg)
+{
+	int rc;
+	struct response_type *rtype = (struct response_type *)(ap_msg->private);
+
+	init_completion(&rtype->work);
+	ap_queue_message(zq->queue, ap_msg);
+	rc = wait_for_completion_interruptible(&rtype->work);
+	if (rc == 0) {
+		rc = ap_msg->rc;
+		if (rc == 0)
+			rc = convert_response_xcrb(zq, ap_msg, xcRB);
+	} else
+		/* Signal pending. */
+		ap_cancel_message(zq->queue, ap_msg);
+
+	return rc;
+}
+
+/**
+ * Fetch function code from ep11 cprb.
+ * Extracting the fc requires copying the ep11 cprb from userspace, so
+ * this function allocates memory and needs an ap_msg prepared by the
+ * caller with ap_init_message(). The caller must also make sure that
+ * ap_release_message() is always called, even on failure.
+ */
+int get_ep11cprb_fc(struct ep11_urb *xcrb, struct ap_message *ap_msg,
+		    unsigned int *func_code)
+{
+	struct response_type resp_type = {
+		.type = PCIXCC_RESPONSE_TYPE_EP11,
+	};
+
+	ap_msg->message = kmalloc(MSGTYPE06_MAX_MSG_SIZE, GFP_KERNEL);
+	if (!ap_msg->message)
+		return -ENOMEM;
+	ap_msg->receive = zcrypt_msgtype6_receive_ep11;
+	ap_msg->psmid = (((unsigned long long) current->pid) << 32) +
+				atomic_inc_return(&zcrypt_step);
+	ap_msg->private = kmalloc(sizeof(resp_type), GFP_KERNEL);
+	if (!ap_msg->private)
+		return -ENOMEM;
+	memcpy(ap_msg->private, &resp_type, sizeof(resp_type));
+	return xcrb_msg_to_type6_ep11cprb_msgx(ap_msg, xcrb, func_code);
+}
+
+/**
+ * The request distributor calls this function if it picked the CEX4P
+ * device to handle a send_ep11_cprb request.
+ * @zq: pointer to zcrypt_queue structure that identifies the
+ *	  CEX4P device to the request distributor
+ * @xcRB: pointer to the ep11 user request block
+ */
+static long zcrypt_msgtype6_send_ep11_cprb(struct zcrypt_queue *zq,
+					   struct ep11_urb *xcrb,
+					   struct ap_message *ap_msg)
+{
+	int rc;
+	unsigned int lfmt;
+	struct response_type *rtype = (struct response_type *)(ap_msg->private);
+	struct {
+		struct type6_hdr hdr;
+		struct ep11_cprb cprbx;
+		unsigned char	pld_tag;	/* fixed value 0x30 */
+		unsigned char	pld_lenfmt;	/* payload length format */
+	} __packed * msg = ap_msg->message;
+	struct pld_hdr {
+		unsigned char	func_tag;	/* fixed value 0x4 */
+		unsigned char	func_len;	/* fixed value 0x4 */
+		unsigned int	func_val;	/* function ID	   */
+		unsigned char	dom_tag;	/* fixed value 0x4 */
+		unsigned char	dom_len;	/* fixed value 0x4 */
+		unsigned int	dom_val;	/* domain id	   */
+	} __packed * payload_hdr = NULL;
+
+	/*
+	 * The target domain field within the cprb body/payload block will be
+	 * replaced by the usage domain for non-management commands only.
+	 * Therefore we check the first bit of the 'flags' parameter for
+	 * management command indication.
+	 *   0 - non management command
+	 *   1 - management command
+	 */
+	if (!(msg->cprbx.flags & 0x80)) {
+		msg->cprbx.target_id = (unsigned int)
+					AP_QID_QUEUE(zq->queue->qid);
+
+		if ((msg->pld_lenfmt & 0x80) == 0x80) { /* ext. len. fmt 2 or 3 */
+			switch (msg->pld_lenfmt & 0x03) {
+			case 1:
+				lfmt = 2;
+				break;
+			case 2:
+				lfmt = 3;
+				break;
+			default:
+				return -EINVAL;
+			}
+		} else {
+			lfmt = 1; /* length format #1 */
+		}
+		payload_hdr = (struct pld_hdr *)((&(msg->pld_lenfmt)) + lfmt);
+		payload_hdr->dom_val = (unsigned int)
+					AP_QID_QUEUE(zq->queue->qid);
+	}
+
+	init_completion(&rtype->work);
+	ap_queue_message(zq->queue, ap_msg);
+	rc = wait_for_completion_interruptible(&rtype->work);
+	if (rc == 0) {
+		rc = ap_msg->rc;
+		if (rc == 0)
+			rc = convert_response_ep11_xcrb(zq, ap_msg, xcrb);
+	} else
+		/* Signal pending. */
+		ap_cancel_message(zq->queue, ap_msg);
+
+	return rc;
+}
+
+int get_rng_fc(struct ap_message *ap_msg, int *func_code,
+	       unsigned int *domain)
+{
+	struct response_type resp_type = {
+		.type = PCIXCC_RESPONSE_TYPE_XCRB,
+	};
+
+	ap_msg->message = kmalloc(MSGTYPE06_MAX_MSG_SIZE, GFP_KERNEL);
+	if (!ap_msg->message)
+		return -ENOMEM;
+	ap_msg->receive = zcrypt_msgtype6_receive;
+	ap_msg->psmid = (((unsigned long long) current->pid) << 32) +
+				atomic_inc_return(&zcrypt_step);
+	ap_msg->private = kmalloc(sizeof(resp_type), GFP_KERNEL);
+	if (!ap_msg->private)
+		return -ENOMEM;
+	memcpy(ap_msg->private, &resp_type, sizeof(resp_type));
+
+	rng_type6CPRB_msgX(ap_msg, ZCRYPT_RNG_BUFFER_SIZE, domain);
+
+	*func_code = HWRNG;
+	return 0;
+}
+
+/**
+ * The request distributor calls this function if it picked the PCIXCC/CEX2C
+ * device to generate random data.
+ * @zq: pointer to zcrypt_queue structure that identifies the
+ *	  PCIXCC/CEX2C device to the request distributor
+ * @buffer: pointer to a memory page to return random data
+ */
+static long zcrypt_msgtype6_rng(struct zcrypt_queue *zq,
+				char *buffer, struct ap_message *ap_msg)
+{
+	struct {
+		struct type6_hdr hdr;
+		struct CPRBX cprbx;
+		char function_code[2];
+		short int rule_length;
+		char rule[8];
+		short int verb_length;
+		short int key_length;
+	} __packed * msg = ap_msg->message;
+	struct response_type *rtype = (struct response_type *)(ap_msg->private);
+	int rc;
+
+	msg->cprbx.domain = AP_QID_QUEUE(zq->queue->qid);
+
+	init_completion(&rtype->work);
+	ap_queue_message(zq->queue, ap_msg);
+	rc = wait_for_completion_interruptible(&rtype->work);
+	if (rc == 0) {
+		rc = ap_msg->rc;
+		if (rc == 0)
+			rc = convert_response_rng(zq, ap_msg, buffer);
+	} else
+		/* Signal pending. */
+		ap_cancel_message(zq->queue, ap_msg);
+
+	return rc;
+}
+
+/**
+ * The crypto operations for a PCIXCC/CEX2C card.
+ */
+static struct zcrypt_ops zcrypt_msgtype6_norng_ops = {
+	.owner = THIS_MODULE,
+	.name = MSGTYPE06_NAME,
+	.variant = MSGTYPE06_VARIANT_NORNG,
+	.rsa_modexpo = zcrypt_msgtype6_modexpo,
+	.rsa_modexpo_crt = zcrypt_msgtype6_modexpo_crt,
+	.send_cprb = zcrypt_msgtype6_send_cprb,
+};
+
+static struct zcrypt_ops zcrypt_msgtype6_ops = {
+	.owner = THIS_MODULE,
+	.name = MSGTYPE06_NAME,
+	.variant = MSGTYPE06_VARIANT_DEFAULT,
+	.rsa_modexpo = zcrypt_msgtype6_modexpo,
+	.rsa_modexpo_crt = zcrypt_msgtype6_modexpo_crt,
+	.send_cprb = zcrypt_msgtype6_send_cprb,
+	.rng = zcrypt_msgtype6_rng,
+};
+
+static struct zcrypt_ops zcrypt_msgtype6_ep11_ops = {
+	.owner = THIS_MODULE,
+	.name = MSGTYPE06_NAME,
+	.variant = MSGTYPE06_VARIANT_EP11,
+	.rsa_modexpo = NULL,
+	.rsa_modexpo_crt = NULL,
+	.send_ep11_cprb = zcrypt_msgtype6_send_ep11_cprb,
+};
+
+void __init zcrypt_msgtype6_init(void)
+{
+	zcrypt_msgtype_register(&zcrypt_msgtype6_norng_ops);
+	zcrypt_msgtype_register(&zcrypt_msgtype6_ops);
+	zcrypt_msgtype_register(&zcrypt_msgtype6_ep11_ops);
+}
+
+void __exit zcrypt_msgtype6_exit(void)
+{
+	zcrypt_msgtype_unregister(&zcrypt_msgtype6_norng_ops);
+	zcrypt_msgtype_unregister(&zcrypt_msgtype6_ops);
+	zcrypt_msgtype_unregister(&zcrypt_msgtype6_ep11_ops);
+}
diff --git a/drivers/s390/crypto/zcrypt_msgtype6.h b/drivers/s390/crypto/zcrypt_msgtype6.h
new file mode 100644
index 0000000..e4c2f37
--- /dev/null
+++ b/drivers/s390/crypto/zcrypt_msgtype6.h
@@ -0,0 +1,171 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ *  zcrypt 2.1.0
+ *
+ *  Copyright IBM Corp. 2001, 2012
+ *  Author(s): Robert Burroughs
+ *	       Eric Rossman (edrossma@us.ibm.com)
+ *
+ *  Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com)
+ *  Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com>
+ *  MSGTYPE restruct:		  Holger Dengler <hd@linux.vnet.ibm.com>
+ */
+
+#ifndef _ZCRYPT_MSGTYPE6_H_
+#define _ZCRYPT_MSGTYPE6_H_
+
+#include <asm/zcrypt.h>
+
+#define MSGTYPE06_NAME			"zcrypt_msgtype6"
+#define MSGTYPE06_VARIANT_DEFAULT	0
+#define MSGTYPE06_VARIANT_NORNG		1
+#define MSGTYPE06_VARIANT_EP11		2
+
+#define MSGTYPE06_MAX_MSG_SIZE		(12*1024)
+
+/**
+ * The type 6 message family is associated with PCICC or PCIXCC cards.
+ *
+ * It contains a message header followed by a CPRB, both of which
+ * are described below.
+ *
+ * Note that all reserved fields must be zeroes.
+ */
+struct type6_hdr {
+	unsigned char reserved1;	/* 0x00				*/
+	unsigned char type;		/* 0x06				*/
+	unsigned char reserved2[2];	/* 0x0000			*/
+	unsigned char right[4];		/* 0x00000000			*/
+	unsigned char reserved3[2];	/* 0x0000			*/
+	unsigned char reserved4[2];	/* 0x0000			*/
+	unsigned char apfs[4];		/* 0x00000000			*/
+	unsigned int  offset1;		/* 0x00000058 (offset to CPRB)	*/
+	unsigned int  offset2;		/* 0x00000000			*/
+	unsigned int  offset3;		/* 0x00000000			*/
+	unsigned int  offset4;		/* 0x00000000			*/
+	unsigned char agent_id[16];	/* PCICC:			*/
+					/*    0x0100			*/
+					/*    0x4343412d4150504c202020	*/
+					/*    0x010101			*/
+					/* PCIXCC:			*/
+					/*    0x4341000000000000	*/
+					/*    0x0000000000000000	*/
+	unsigned char rqid[2];		/* rqid.  internal to 603	*/
+	unsigned char reserved5[2];	/* 0x0000			*/
+	unsigned char function_code[2];	/* for PKD, 0x5044 (ascii 'PD')	*/
+	unsigned char reserved6[2];	/* 0x0000			*/
+	unsigned int  ToCardLen1;	/* (request CPRB len + 3) & -4	*/
+	unsigned int  ToCardLen2;	/* db len 0x00000000 for PKD	*/
+	unsigned int  ToCardLen3;	/* 0x00000000			*/
+	unsigned int  ToCardLen4;	/* 0x00000000			*/
+	unsigned int  FromCardLen1;	/* response buffer length	*/
+	unsigned int  FromCardLen2;	/* db len 0x00000000 for PKD	*/
+	unsigned int  FromCardLen3;	/* 0x00000000			*/
+	unsigned int  FromCardLen4;	/* 0x00000000			*/
+} __packed;
+
+/**
+ * The type 86 message family is associated with PCICC and PCIXCC cards.
+ *
+ * It contains a message header followed by a CPRB.  The CPRB is
+ * the same as the request CPRB, which is described above.
+ *
+ * If format is 1, an error condition exists and no data beyond
+ * the 8-byte message header is of interest.
+ *
+ * The non-error message is shown below.
+ *
+ * Note that all reserved fields must be zeroes.
+ */
+struct type86_hdr {
+	unsigned char reserved1;	/* 0x00				*/
+	unsigned char type;		/* 0x86				*/
+	unsigned char format;		/* 0x01 (error) or 0x02 (ok)	*/
+	unsigned char reserved2;	/* 0x00				*/
+	unsigned char reply_code;	/* reply code (see above)	*/
+	unsigned char reserved3[3];	/* 0x000000			*/
+} __packed;
+
+#define TYPE86_RSP_CODE 0x86
+#define TYPE87_RSP_CODE 0x87
+#define TYPE86_FMT2	0x02
+
+struct type86_fmt2_ext {
+	unsigned char	  reserved[4];	/* 0x00000000			*/
+	unsigned char	  apfs[4];	/* final status			*/
+	unsigned int	  count1;	/* length of CPRB + parameters	*/
+	unsigned int	  offset1;	/* offset to CPRB		*/
+	unsigned int	  count2;	/* 0x00000000			*/
+	unsigned int	  offset2;	/* db offset 0x00000000 for PKD	*/
+	unsigned int	  count3;	/* 0x00000000			*/
+	unsigned int	  offset3;	/* 0x00000000			*/
+	unsigned int	  count4;	/* 0x00000000			*/
+	unsigned int	  offset4;	/* 0x00000000			*/
+} __packed;
+
+int get_cprb_fc(struct ica_xcRB *, struct ap_message *,
+		unsigned int *, unsigned short **);
+int get_ep11cprb_fc(struct ep11_urb *, struct ap_message *,
+		    unsigned int *);
+int get_rng_fc(struct ap_message *, int *, unsigned int *);
+
+#define LOW	10
+#define MEDIUM	100
+#define HIGH	500
+
+int speed_idx_cca(int);
+int speed_idx_ep11(int);
+
+/**
+ * Prepare a type6 CPRB message for random number generation
+ *
+ * @ap_dev: AP device pointer
+ * @ap_msg: pointer to AP message
+ */
+static inline void rng_type6CPRB_msgX(struct ap_message *ap_msg,
+				      unsigned int random_number_length,
+				      unsigned int *domain)
+{
+	struct {
+		struct type6_hdr hdr;
+		struct CPRBX cprbx;
+		char function_code[2];
+		short int rule_length;
+		char rule[8];
+		short int verb_length;
+		short int key_length;
+	} __packed * msg = ap_msg->message;
+	static struct type6_hdr static_type6_hdrX = {
+		.type		= 0x06,
+		.offset1	= 0x00000058,
+		.agent_id	= {'C', 'A'},
+		.function_code	= {'R', 'L'},
+		.ToCardLen1	= sizeof(*msg) - sizeof(msg->hdr),
+		.FromCardLen1	= sizeof(*msg) - sizeof(msg->hdr),
+	};
+	static struct CPRBX local_cprbx = {
+		.cprb_len	= 0x00dc,
+		.cprb_ver_id	= 0x02,
+		.func_id	= {0x54, 0x32},
+		.req_parml	= sizeof(*msg) - sizeof(msg->hdr) -
+				  sizeof(msg->cprbx),
+		.rpl_msgbl	= sizeof(*msg) - sizeof(msg->hdr),
+	};
+
+	msg->hdr = static_type6_hdrX;
+	msg->hdr.FromCardLen2 = random_number_length;
+	msg->cprbx = local_cprbx;
+	msg->cprbx.rpl_datal = random_number_length;
+	memcpy(msg->function_code, msg->hdr.function_code, 0x02);
+	msg->rule_length = 0x0a;
+	memcpy(msg->rule, "RANDOM  ", 8);
+	msg->verb_length = 0x02;
+	msg->key_length = 0x02;
+	ap_msg->length = sizeof(*msg);
+	*domain = (unsigned short)msg->cprbx.domain;
+}
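+
+/*
+ * Editor's usage sketch (assumption): callers hand in an ap_message whose
+ * buffer is at least sizeof(*msg) bytes and receive the target domain
+ * back, e.g. to request four random bytes:
+ *
+ *	unsigned int domain;
+ *
+ *	rng_type6CPRB_msgX(&ap_msg, 4, &domain);
+ *
+ * as the PCIXCC probe code does to test for RNG support.
+ */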
+
+void zcrypt_msgtype6_init(void);
+void zcrypt_msgtype6_exit(void);
+
+#endif /* _ZCRYPT_MSGTYPE6_H_ */
diff --git a/drivers/s390/crypto/zcrypt_pcixcc.c b/drivers/s390/crypto/zcrypt_pcixcc.c
new file mode 100644
index 0000000..94d9f72
--- /dev/null
+++ b/drivers/s390/crypto/zcrypt_pcixcc.c
@@ -0,0 +1,317 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ *  zcrypt 2.1.0
+ *
+ *  Copyright IBM Corp. 2001, 2012
+ *  Author(s): Robert Burroughs
+ *	       Eric Rossman (edrossma@us.ibm.com)
+ *
+ *  Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com)
+ *  Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com>
+ *				  Ralph Wuerthner <rwuerthn@de.ibm.com>
+ *  MSGTYPE restruct:		  Holger Dengler <hd@linux.vnet.ibm.com>
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/atomic.h>
+#include <linux/uaccess.h>
+#include <linux/mod_devicetable.h>
+
+#include "ap_bus.h"
+#include "zcrypt_api.h"
+#include "zcrypt_error.h"
+#include "zcrypt_msgtype6.h"
+#include "zcrypt_pcixcc.h"
+#include "zcrypt_cca_key.h"
+
+#define PCIXCC_MIN_MOD_SIZE	 16	/*  128 bits	*/
+#define PCIXCC_MIN_MOD_SIZE_OLD	 64	/*  512 bits	*/
+#define PCIXCC_MAX_MOD_SIZE	256	/* 2048 bits	*/
+#define CEX3C_MIN_MOD_SIZE	PCIXCC_MIN_MOD_SIZE
+#define CEX3C_MAX_MOD_SIZE	512	/* 4096 bits	*/
+
+#define PCIXCC_MAX_ICA_MESSAGE_SIZE 0x77c  /* max size type6 v2 crt message */
+#define PCIXCC_MAX_ICA_RESPONSE_SIZE 0x77c /* max size type86 v2 reply	    */
+
+#define PCIXCC_MAX_XCRB_MESSAGE_SIZE (12*1024)
+
+#define PCIXCC_CLEANUP_TIME	(15*HZ)
+
+#define CEIL4(x) ((((x)+3)/4)*4)
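+
+/*
+ * Editor's illustration (not part of the original source): CEIL4 rounds
+ * up to the next multiple of four, so CEIL4(5) == 8 and CEIL4(8) == 8.
+ * The arithmetic is done in the argument's type, so for a 32-bit value
+ * CEIL4(0xfffffffe) wraps around to 0 -- which is why callers test
+ * CEIL4(x) < x to detect overflow after alignment.
+ */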
+
+struct response_type {
+	struct completion work;
+	int type;
+};
+#define PCIXCC_RESPONSE_TYPE_ICA  0
+#define PCIXCC_RESPONSE_TYPE_XCRB 1
+
+MODULE_AUTHOR("IBM Corporation");
+MODULE_DESCRIPTION("PCIXCC Cryptographic Coprocessor device driver, " \
+		   "Copyright IBM Corp. 2001, 2012");
+MODULE_LICENSE("GPL");
+
+static struct ap_device_id zcrypt_pcixcc_card_ids[] = {
+	{ .dev_type = AP_DEVICE_TYPE_PCIXCC,
+	  .match_flags = AP_DEVICE_ID_MATCH_CARD_TYPE },
+	{ .dev_type = AP_DEVICE_TYPE_CEX2C,
+	  .match_flags = AP_DEVICE_ID_MATCH_CARD_TYPE },
+	{ .dev_type = AP_DEVICE_TYPE_CEX3C,
+	  .match_flags = AP_DEVICE_ID_MATCH_CARD_TYPE },
+	{ /* end of list */ },
+};
+
+MODULE_DEVICE_TABLE(ap, zcrypt_pcixcc_card_ids);
+
+static struct ap_device_id zcrypt_pcixcc_queue_ids[] = {
+	{ .dev_type = AP_DEVICE_TYPE_PCIXCC,
+	  .match_flags = AP_DEVICE_ID_MATCH_QUEUE_TYPE },
+	{ .dev_type = AP_DEVICE_TYPE_CEX2C,
+	  .match_flags = AP_DEVICE_ID_MATCH_QUEUE_TYPE },
+	{ .dev_type = AP_DEVICE_TYPE_CEX3C,
+	  .match_flags = AP_DEVICE_ID_MATCH_QUEUE_TYPE },
+	{ /* end of list */ },
+};
+
+MODULE_DEVICE_TABLE(ap, zcrypt_pcixcc_queue_ids);
+
+/**
+ * Large random number detection function. It sends a message to a PCIXCC
+ * card to find out if large random numbers are supported.
+ * @aq: pointer to the AP queue.
+ *
+ * Returns 1 if large random numbers are supported, 0 if not and < 0 on error.
+ */
+static int zcrypt_pcixcc_rng_supported(struct ap_queue *aq)
+{
+	struct ap_message ap_msg;
+	unsigned long long psmid;
+	unsigned int domain;
+	struct {
+		struct type86_hdr hdr;
+		struct type86_fmt2_ext fmt2;
+		struct CPRBX cprbx;
+	} __packed *reply;
+	struct {
+		struct type6_hdr hdr;
+		struct CPRBX cprbx;
+		char function_code[2];
+		short int rule_length;
+		char rule[8];
+		short int verb_length;
+		short int key_length;
+	} __packed *msg;
+	int rc, i;
+
+	ap_init_message(&ap_msg);
+	ap_msg.message = (void *) get_zeroed_page(GFP_KERNEL);
+	if (!ap_msg.message)
+		return -ENOMEM;
+
+	rng_type6CPRB_msgX(&ap_msg, 4, &domain);
+
+	msg = ap_msg.message;
+	msg->cprbx.domain = AP_QID_QUEUE(aq->qid);
+
+	rc = ap_send(aq->qid, 0x0102030405060708ULL, ap_msg.message,
+		     ap_msg.length);
+	if (rc)
+		goto out_free;
+
+	/* Wait for the test message to complete. */
+	for (i = 0; i < 2 * HZ; i++) {
+		msleep(1000 / HZ);
+		rc = ap_recv(aq->qid, &psmid, ap_msg.message, 4096);
+		if (rc == 0 && psmid == 0x0102030405060708ULL)
+			break;
+	}
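+	/*
+	 * Editor's note: the loop above polls up to 2 * HZ times with a
+	 * 1000/HZ ms sleep per iteration, i.e. for roughly two seconds
+	 * in total.
+	 */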
+
+	if (i >= 2 * HZ) {
+		/* Got no answer. */
+		rc = -ENODEV;
+		goto out_free;
+	}
+
+	reply = ap_msg.message;
+	if (reply->cprbx.ccp_rtcode == 0 && reply->cprbx.ccp_rscode == 0)
+		rc = 1;
+	else
+		rc = 0;
+out_free:
+	free_page((unsigned long) ap_msg.message);
+	return rc;
+}
+
+/**
+ * Probe function for PCIXCC/CEX2C card devices. It always accepts the
+ * AP device since the bus_match already checked the hardware type. The
+ * PCIXCC cards come in two flavours: microcode level 2 and microcode
+ * level 3. This is checked by sending a test message to the device.
+ * @ap_dev: pointer to the AP card device.
+ */
+static int zcrypt_pcixcc_card_probe(struct ap_device *ap_dev)
+{
+	/*
+	 * Normalized speed ratings per crypto adapter
+	 * MEX_1k, MEX_2k, MEX_4k, CRT_1k, CRT_2k, CRT_4k, RNG, SECKEY
+	 */
+	static const int CEX2C_SPEED_IDX[] = {
+		1000, 1400, 2400, 1100, 1500, 2600, 100, 12};
+	static const int CEX3C_SPEED_IDX[] = {
+		500,  700, 1400,  550,	800, 1500,  80, 10};
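+	/*
+	 * Editor's note (assumption): lower values appear to denote faster
+	 * operations, which would explain why the newer CEX3C entries are
+	 * smaller than the CEX2C ones.
+	 */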
+
+	struct ap_card *ac = to_ap_card(&ap_dev->device);
+	struct zcrypt_card *zc;
+	int rc = 0;
+
+	zc = zcrypt_card_alloc();
+	if (!zc)
+		return -ENOMEM;
+	zc->card = ac;
+	ac->private = zc;
+	switch (ac->ap_dev.device_type) {
+	case AP_DEVICE_TYPE_CEX2C:
+		zc->user_space_type = ZCRYPT_CEX2C;
+		zc->type_string = "CEX2C";
+		memcpy(zc->speed_rating, CEX2C_SPEED_IDX,
+		       sizeof(CEX2C_SPEED_IDX));
+		zc->min_mod_size = PCIXCC_MIN_MOD_SIZE;
+		zc->max_mod_size = PCIXCC_MAX_MOD_SIZE;
+		zc->max_exp_bit_length = PCIXCC_MAX_MOD_SIZE;
+		break;
+	case AP_DEVICE_TYPE_CEX3C:
+		zc->user_space_type = ZCRYPT_CEX3C;
+		zc->type_string = "CEX3C";
+		memcpy(zc->speed_rating, CEX3C_SPEED_IDX,
+		       sizeof(CEX3C_SPEED_IDX));
+		zc->min_mod_size = CEX3C_MIN_MOD_SIZE;
+		zc->max_mod_size = CEX3C_MAX_MOD_SIZE;
+		zc->max_exp_bit_length = CEX3C_MAX_MOD_SIZE;
+		break;
+	default:
+		zcrypt_card_free(zc);
+		return -ENODEV;
+	}
+	zc->online = 1;
+
+	rc = zcrypt_card_register(zc);
+	if (rc) {
+		ac->private = NULL;
+		zcrypt_card_free(zc);
+	}
+
+	return rc;
+}
+
+/**
+ * This is called to remove the PCIXCC/CEX2C card driver information
+ * if an AP card device is removed.
+ */
+static void zcrypt_pcixcc_card_remove(struct ap_device *ap_dev)
+{
+	struct zcrypt_card *zc = to_ap_card(&ap_dev->device)->private;
+
+	if (zc)
+		zcrypt_card_unregister(zc);
+}
+
+static struct ap_driver zcrypt_pcixcc_card_driver = {
+	.probe = zcrypt_pcixcc_card_probe,
+	.remove = zcrypt_pcixcc_card_remove,
+	.ids = zcrypt_pcixcc_card_ids,
+	.flags = AP_DRIVER_FLAG_DEFAULT,
+};
+
+/**
+ * Probe function for PCIXCC/CEX2C queue devices. It always accepts the
+ * AP device since the bus_match already checked the hardware type. The
+ * PCIXCC cards come in two flavours: microcode level 2 and microcode
+ * level 3. This is checked by sending a test message to the device.
+ * @ap_dev: pointer to the AP queue device.
+ */
+static int zcrypt_pcixcc_queue_probe(struct ap_device *ap_dev)
+{
+	struct ap_queue *aq = to_ap_queue(&ap_dev->device);
+	struct zcrypt_queue *zq;
+	int rc;
+
+	zq = zcrypt_queue_alloc(PCIXCC_MAX_XCRB_MESSAGE_SIZE);
+	if (!zq)
+		return -ENOMEM;
+	zq->queue = aq;
+	zq->online = 1;
+	atomic_set(&zq->load, 0);
+	rc = zcrypt_pcixcc_rng_supported(aq);
+	if (rc < 0) {
+		zcrypt_queue_free(zq);
+		return rc;
+	}
+	if (rc)
+		zq->ops = zcrypt_msgtype(MSGTYPE06_NAME,
+					 MSGTYPE06_VARIANT_DEFAULT);
+	else
+		zq->ops = zcrypt_msgtype(MSGTYPE06_NAME,
+					 MSGTYPE06_VARIANT_NORNG);
+	ap_queue_init_reply(aq, &zq->reply);
+	aq->request_timeout = PCIXCC_CLEANUP_TIME;
+	aq->private = zq;
+	rc = zcrypt_queue_register(zq);
+	if (rc) {
+		aq->private = NULL;
+		zcrypt_queue_free(zq);
+	}
+	return rc;
+}
+
+/**
+ * This is called to remove the PCIXCC/CEX2C queue driver information
+ * if an AP queue device is removed.
+ */
+static void zcrypt_pcixcc_queue_remove(struct ap_device *ap_dev)
+{
+	struct ap_queue *aq = to_ap_queue(&ap_dev->device);
+	struct zcrypt_queue *zq = aq->private;
+
+	ap_queue_remove(aq);
+	if (zq)
+		zcrypt_queue_unregister(zq);
+}
+
+static struct ap_driver zcrypt_pcixcc_queue_driver = {
+	.probe = zcrypt_pcixcc_queue_probe,
+	.remove = zcrypt_pcixcc_queue_remove,
+	.suspend = ap_queue_suspend,
+	.resume = ap_queue_resume,
+	.ids = zcrypt_pcixcc_queue_ids,
+	.flags = AP_DRIVER_FLAG_DEFAULT,
+};
+
+int __init zcrypt_pcixcc_init(void)
+{
+	int rc;
+
+	rc = ap_driver_register(&zcrypt_pcixcc_card_driver,
+				THIS_MODULE, "pcixcccard");
+	if (rc)
+		return rc;
+
+	rc = ap_driver_register(&zcrypt_pcixcc_queue_driver,
+				THIS_MODULE, "pcixccqueue");
+	if (rc)
+		ap_driver_unregister(&zcrypt_pcixcc_card_driver);
+
+	return rc;
+}
+
+void zcrypt_pcixcc_exit(void)
+{
+	ap_driver_unregister(&zcrypt_pcixcc_queue_driver);
+	ap_driver_unregister(&zcrypt_pcixcc_card_driver);
+}
+
+module_init(zcrypt_pcixcc_init);
+module_exit(zcrypt_pcixcc_exit);
diff --git a/drivers/s390/crypto/zcrypt_pcixcc.h b/drivers/s390/crypto/zcrypt_pcixcc.h
new file mode 100644
index 0000000..cf73a0f
--- /dev/null
+++ b/drivers/s390/crypto/zcrypt_pcixcc.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ *  zcrypt 2.1.0
+ *
+ *  Copyright IBM Corp. 2001, 2012
+ *  Author(s): Robert Burroughs
+ *	       Eric Rossman (edrossma@us.ibm.com)
+ *
+ *  Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com)
+ *  Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com>
+ *  MSGTYPE restruct:		  Holger Dengler <hd@linux.vnet.ibm.com>
+ */
+
+#ifndef _ZCRYPT_PCIXCC_H_
+#define _ZCRYPT_PCIXCC_H_
+
+int zcrypt_pcixcc_init(void);
+void zcrypt_pcixcc_exit(void);
+
+#endif /* _ZCRYPT_PCIXCC_H_ */
diff --git a/drivers/s390/crypto/zcrypt_queue.c b/drivers/s390/crypto/zcrypt_queue.c
new file mode 100644
index 0000000..8df82c6
--- /dev/null
+++ b/drivers/s390/crypto/zcrypt_queue.c
@@ -0,0 +1,228 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ *  zcrypt 2.1.0
+ *
+ *  Copyright IBM Corp. 2001, 2012
+ *  Author(s): Robert Burroughs
+ *	       Eric Rossman (edrossma@us.ibm.com)
+ *	       Cornelia Huck <cornelia.huck@de.ibm.com>
+ *
+ *  Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com)
+ *  Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com>
+ *				  Ralph Wuerthner <rwuerthn@de.ibm.com>
+ *  MSGTYPE restruct:		  Holger Dengler <hd@linux.vnet.ibm.com>
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/miscdevice.h>
+#include <linux/fs.h>
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+#include <linux/compat.h>
+#include <linux/slab.h>
+#include <linux/atomic.h>
+#include <linux/uaccess.h>
+#include <linux/hw_random.h>
+#include <linux/debugfs.h>
+#include <asm/debug.h>
+
+#include "zcrypt_debug.h"
+#include "zcrypt_api.h"
+
+#include "zcrypt_msgtype6.h"
+#include "zcrypt_msgtype50.h"
+
+/*
+ * Device attributes common for all crypto queue devices.
+ */
+
+static ssize_t online_show(struct device *dev,
+			   struct device_attribute *attr,
+			   char *buf)
+{
+	struct zcrypt_queue *zq = to_ap_queue(dev)->private;
+
+	return snprintf(buf, PAGE_SIZE, "%d\n", zq->online);
+}
+
+static ssize_t online_store(struct device *dev,
+			    struct device_attribute *attr,
+			    const char *buf, size_t count)
+{
+	struct zcrypt_queue *zq = to_ap_queue(dev)->private;
+	struct zcrypt_card *zc = zq->zcard;
+	int online;
+
+	if (sscanf(buf, "%d\n", &online) != 1 || online < 0 || online > 1)
+		return -EINVAL;
+
+	if (online && !zc->online)
+		return -EINVAL;
+	zq->online = online;
+
+	ZCRYPT_DBF(DBF_INFO, "queue=%02x.%04x online=%d\n",
+		   AP_QID_CARD(zq->queue->qid),
+		   AP_QID_QUEUE(zq->queue->qid),
+		   online);
+
+	if (!online)
+		ap_flush_queue(zq->queue);
+	return count;
+}
+
+static DEVICE_ATTR_RW(online);
+
+static ssize_t load_show(struct device *dev,
+			 struct device_attribute *attr,
+			 char *buf)
+{
+	struct zcrypt_queue *zq = to_ap_queue(dev)->private;
+
+	return snprintf(buf, PAGE_SIZE, "%d\n", atomic_read(&zq->load));
+}
+
+static DEVICE_ATTR_RO(load);
+
+static struct attribute *zcrypt_queue_attrs[] = {
+	&dev_attr_online.attr,
+	&dev_attr_load.attr,
+	NULL,
+};
+
+static const struct attribute_group zcrypt_queue_attr_group = {
+	.attrs = zcrypt_queue_attrs,
+};
+
+void zcrypt_queue_force_online(struct zcrypt_queue *zq, int online)
+{
+	zq->online = online;
+	if (!online)
+		ap_flush_queue(zq->queue);
+}
+
+struct zcrypt_queue *zcrypt_queue_alloc(size_t max_response_size)
+{
+	struct zcrypt_queue *zq;
+
+	zq = kzalloc(sizeof(struct zcrypt_queue), GFP_KERNEL);
+	if (!zq)
+		return NULL;
+	zq->reply.message = kmalloc(max_response_size, GFP_KERNEL);
+	if (!zq->reply.message)
+		goto out_free;
+	zq->reply.length = max_response_size;
+	INIT_LIST_HEAD(&zq->list);
+	kref_init(&zq->refcount);
+	return zq;
+
+out_free:
+	kfree(zq);
+	return NULL;
+}
+EXPORT_SYMBOL(zcrypt_queue_alloc);
+
+void zcrypt_queue_free(struct zcrypt_queue *zq)
+{
+	kfree(zq->reply.message);
+	kfree(zq);
+}
+EXPORT_SYMBOL(zcrypt_queue_free);
+
+static void zcrypt_queue_release(struct kref *kref)
+{
+	struct zcrypt_queue *zq =
+		container_of(kref, struct zcrypt_queue, refcount);
+	zcrypt_queue_free(zq);
+}
+
+void zcrypt_queue_get(struct zcrypt_queue *zq)
+{
+	kref_get(&zq->refcount);
+}
+EXPORT_SYMBOL(zcrypt_queue_get);
+
+int zcrypt_queue_put(struct zcrypt_queue *zq)
+{
+	return kref_put(&zq->refcount, zcrypt_queue_release);
+}
+EXPORT_SYMBOL(zcrypt_queue_put);
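+
+/*
+ * Editor's note: the queue object follows the usual kref pattern:
+ * zcrypt_queue_get()/zcrypt_queue_put() bracket every external user, and
+ * the final put releases the object through zcrypt_queue_release(), which
+ * in turn calls zcrypt_queue_free().
+ */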
+
+/**
+ * zcrypt_queue_register() - Register a crypto queue device.
+ * @zq: Pointer to a crypto queue device
+ *
+ * Register a crypto queue device. Returns 0 if successful.
+ */
+int zcrypt_queue_register(struct zcrypt_queue *zq)
+{
+	struct zcrypt_card *zc;
+	int rc;
+
+	spin_lock(&zcrypt_list_lock);
+	zc = zq->queue->card->private;
+	zcrypt_card_get(zc);
+	zq->zcard = zc;
+	zq->online = 1;	/* New devices are online by default. */
+
+	ZCRYPT_DBF(DBF_INFO, "queue=%02x.%04x register online=1\n",
+		   AP_QID_CARD(zq->queue->qid), AP_QID_QUEUE(zq->queue->qid));
+
+	list_add_tail(&zq->list, &zc->zqueues);
+	zcrypt_device_count++;
+	spin_unlock(&zcrypt_list_lock);
+
+	rc = sysfs_create_group(&zq->queue->ap_dev.device.kobj,
+				&zcrypt_queue_attr_group);
+	if (rc)
+		goto out;
+	get_device(&zq->queue->ap_dev.device);
+
+	if (zq->ops->rng) {
+		rc = zcrypt_rng_device_add();
+		if (rc)
+			goto out_unregister;
+	}
+	return 0;
+
+out_unregister:
+	sysfs_remove_group(&zq->queue->ap_dev.device.kobj,
+			   &zcrypt_queue_attr_group);
+	put_device(&zq->queue->ap_dev.device);
+out:
+	spin_lock(&zcrypt_list_lock);
+	list_del_init(&zq->list);
+	spin_unlock(&zcrypt_list_lock);
+	zcrypt_card_put(zc);
+	return rc;
+}
+EXPORT_SYMBOL(zcrypt_queue_register);
+
+/**
+ * zcrypt_queue_unregister(): Unregister a crypto queue device.
+ * @zq: Pointer to crypto queue device
+ *
+ * Unregister a crypto queue device.
+ */
+void zcrypt_queue_unregister(struct zcrypt_queue *zq)
+{
+	struct zcrypt_card *zc;
+
+	ZCRYPT_DBF(DBF_INFO, "queue=%02x.%04x unregister\n",
+		   AP_QID_CARD(zq->queue->qid), AP_QID_QUEUE(zq->queue->qid));
+
+	zc = zq->zcard;
+	spin_lock(&zcrypt_list_lock);
+	list_del_init(&zq->list);
+	zcrypt_device_count--;
+	spin_unlock(&zcrypt_list_lock);
+	zcrypt_card_put(zc);
+	if (zq->ops->rng)
+		zcrypt_rng_device_remove();
+	sysfs_remove_group(&zq->queue->ap_dev.device.kobj,
+			   &zcrypt_queue_attr_group);
+	put_device(&zq->queue->ap_dev.device);
+	zcrypt_queue_put(zq);
+}
+EXPORT_SYMBOL(zcrypt_queue_unregister);
diff --git a/drivers/s390/net/Kconfig b/drivers/s390/net/Kconfig
new file mode 100644
index 0000000..7c5a25d
--- /dev/null
+++ b/drivers/s390/net/Kconfig
@@ -0,0 +1,108 @@
+# SPDX-License-Identifier: GPL-2.0
+menu "S/390 network device drivers"
+	depends on NETDEVICES && S390
+
+config LCS
+	def_tristate m
+	prompt "Lan Channel Station Interface"
+	depends on CCW && NETDEVICES && (ETHERNET || FDDI)
+	help
+	   Select this option if you want to use LCS networking on IBM System z.
+	   This device driver supports FDDI and Ethernet.
+	   To compile as a module, choose M. The module name is lcs.
+	   If you do not know what it is, it's safe to choose Y.
+
+config CTCM
+	def_tristate m
+	prompt "CTC and MPC SNA device support"
+	depends on CCW && NETDEVICES
+	help
+	  Select this option if you want to use channel-to-channel
+	  point-to-point networking on IBM System z.
+	  This device driver supports real CTC coupling using ESCON.
+	  It also supports virtual CTCs when running under VM.
+	  This driver also supports channel-to-channel MPC SNA devices.
+	  MPC is an SNA protocol device used by Communication Server for Linux.
+	  To compile as a module, choose M. The module name is ctcm.
+	  To compile into the kernel, choose Y.
+	  If you do not need any channel-to-channel connection, choose N.
+
+config NETIUCV
+	def_tristate m
+	prompt "IUCV network device support (VM only)"
+	depends on IUCV && NETDEVICES
+	help
+	  Select this option if you want to use inter-user communication
+	  vehicle networking under VM or VIF. It enables a fast communication
+	  link between VM guests. Using ifconfig, a point-to-point connection
+	  can be established to a Linux instance on IBM System z running in
+	  another VM guest. To compile as a module, choose M.
+	  The module name is netiucv. If unsure, choose Y.
+
+config SMSGIUCV
+	def_tristate m
+	prompt "IUCV special message support (VM only)"
+	depends on IUCV
+	help
+	  Select this option if you want to be able to receive SMSG messages
+	  from other VM guest systems.
+
+config SMSGIUCV_EVENT
+	def_tristate m
+	prompt "Deliver IUCV special messages as uevents (VM only)"
+	depends on SMSGIUCV
+	help
+	  Select this option to deliver CP special messages (SMSGs) as
+	  uevents.  The driver handles only those special messages that
+	  start with "APP".
+
+	  To compile as a module, choose M. The module name is "smsgiucv_app".
+
+config QETH
+	def_tristate y
+	prompt "Gigabit Ethernet device support"
+	depends on CCW && NETDEVICES && IP_MULTICAST && QDIO && ETHERNET
+	help
+	  This driver supports the IBM System z OSA Express adapters
+	  in QDIO mode (all media types), HiperSockets interfaces and z/VM
+	  virtual NICs for Guest LAN and VSWITCH.
+
+	  For details please refer to the documentation provided by IBM at
+	  <http://www.ibm.com/developerworks/linux/linux390>
+
+	  To compile this driver as a module, choose M.
+	  The module name is qeth.
+
+config QETH_L2
+	def_tristate y
+	prompt "qeth layer 2 device support"
+	depends on QETH
+	help
+	  Select this option to be able to run qeth devices in layer 2 mode.
+	  To compile as a module, choose M. The module name is qeth_l2.
+	  If unsure, choose Y.
+
+config QETH_L3
+	def_tristate y
+	prompt "qeth layer 3 device support"
+	depends on QETH
+	help
+	  Select this option to be able to run qeth devices in layer 3 mode.
+	  To compile as a module choose M. The module name is qeth_l3.
+	  If unsure, choose Y.
+
+config CCWGROUP
+	tristate
+	default (LCS || CTCM || QETH)
+
+config ISM
+	tristate "Support for ISM vPCI Adapter"
+	depends on PCI && SMC
+	default n
+	help
+	  Select this option if you want to use the Internal Shared Memory
+	  vPCI Adapter.
+
+	  To compile as a module choose M. The module name is ism.
+	  If unsure, choose N.
+endmenu
diff --git a/drivers/s390/net/Makefile b/drivers/s390/net/Makefile
new file mode 100644
index 0000000..f2d6bbe
--- /dev/null
+++ b/drivers/s390/net/Makefile
@@ -0,0 +1,20 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# S/390 network devices
+#
+
+ctcm-y += ctcm_main.o ctcm_fsms.o ctcm_mpc.o ctcm_sysfs.o ctcm_dbug.o
+obj-$(CONFIG_CTCM) += ctcm.o fsm.o
+obj-$(CONFIG_NETIUCV) += netiucv.o fsm.o
+obj-$(CONFIG_SMSGIUCV) += smsgiucv.o
+obj-$(CONFIG_SMSGIUCV_EVENT) += smsgiucv_app.o
+obj-$(CONFIG_LCS) += lcs.o
+qeth-y += qeth_core_sys.o qeth_core_main.o qeth_core_mpc.o
+obj-$(CONFIG_QETH) += qeth.o
+qeth_l2-y += qeth_l2_main.o qeth_l2_sys.o
+obj-$(CONFIG_QETH_L2) += qeth_l2.o
+qeth_l3-y += qeth_l3_main.o qeth_l3_sys.o
+obj-$(CONFIG_QETH_L3) += qeth_l3.o
+
+ism-y := ism_drv.o
+obj-$(CONFIG_ISM) += ism.o
diff --git a/drivers/s390/net/ctcm_dbug.c b/drivers/s390/net/ctcm_dbug.c
new file mode 100644
index 0000000..f7ec51d
--- /dev/null
+++ b/drivers/s390/net/ctcm_dbug.c
@@ -0,0 +1,78 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ *	Copyright IBM Corp. 2001, 2007
+ *	Authors:	Peter Tiedemann (ptiedem@de.ibm.com)
+ *
+ */
+
+#include <linux/stddef.h>
+#include <linux/string.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/ctype.h>
+#include <linux/sysctl.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/fs.h>
+#include <linux/debugfs.h>
+#include "ctcm_dbug.h"
+
+/*
+ * Debug Facility Stuff
+ */
+
+struct ctcm_dbf_info ctcm_dbf[CTCM_DBF_INFOS] = {
+	[CTCM_DBF_SETUP]     = {"ctc_setup", 8, 1, 64, CTC_DBF_INFO, NULL},
+	[CTCM_DBF_ERROR]     = {"ctc_error", 8, 1, 64, CTC_DBF_ERROR, NULL},
+	[CTCM_DBF_TRACE]     = {"ctc_trace", 8, 1, 64, CTC_DBF_ERROR, NULL},
+	[CTCM_DBF_MPC_SETUP] = {"mpc_setup", 8, 1, 80, CTC_DBF_INFO, NULL},
+	[CTCM_DBF_MPC_ERROR] = {"mpc_error", 8, 1, 80, CTC_DBF_ERROR, NULL},
+	[CTCM_DBF_MPC_TRACE] = {"mpc_trace", 8, 1, 80, CTC_DBF_ERROR, NULL},
+};
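+
+/*
+ * Editor's note: each initializer above is {name, pages, areas, entry
+ * length, initial level, id}, matching struct ctcm_dbf_info, so e.g.
+ * "ctc_setup" gets an 8-page, single-area debug buffer with 64-byte
+ * entries at level CTC_DBF_INFO.
+ */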
+
+void ctcm_unregister_dbf_views(void)
+{
+	int x;
+	for (x = 0; x < CTCM_DBF_INFOS; x++) {
+		debug_unregister(ctcm_dbf[x].id);
+		ctcm_dbf[x].id = NULL;
+	}
+}
+
+int ctcm_register_dbf_views(void)
+{
+	int x;
+	for (x = 0; x < CTCM_DBF_INFOS; x++) {
+		/* register the areas */
+		ctcm_dbf[x].id = debug_register(ctcm_dbf[x].name,
+						ctcm_dbf[x].pages,
+						ctcm_dbf[x].areas,
+						ctcm_dbf[x].len);
+		if (ctcm_dbf[x].id == NULL) {
+			ctcm_unregister_dbf_views();
+			return -ENOMEM;
+		}
+
+		/* register a view */
+		debug_register_view(ctcm_dbf[x].id, &debug_hex_ascii_view);
+		/* set a passing level */
+		debug_set_level(ctcm_dbf[x].id, ctcm_dbf[x].level);
+	}
+
+	return 0;
+}
+
+void ctcm_dbf_longtext(enum ctcm_dbf_names dbf_nix, int level, char *fmt, ...)
+{
+	char dbf_txt_buf[64];
+	va_list args;
+
+	if (!debug_level_enabled(ctcm_dbf[dbf_nix].id, level))
+		return;
+	va_start(args, fmt);
+	vsnprintf(dbf_txt_buf, sizeof(dbf_txt_buf), fmt, args);
+	va_end(args);
+
+	debug_text_event(ctcm_dbf[dbf_nix].id, level, dbf_txt_buf);
+}
diff --git a/drivers/s390/net/ctcm_dbug.h b/drivers/s390/net/ctcm_dbug.h
new file mode 100644
index 0000000..675575e
--- /dev/null
+++ b/drivers/s390/net/ctcm_dbug.h
@@ -0,0 +1,142 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ *	Copyright IBM Corp. 2001, 2007
+ *	Authors:	Peter Tiedemann (ptiedem@de.ibm.com)
+ *
+ */
+
+#ifndef _CTCM_DBUG_H_
+#define _CTCM_DBUG_H_
+
+/*
+ * Debug Facility stuff
+ */
+
+#include <asm/debug.h>
+
+#ifdef DEBUG
+ #define do_debug 1
+#else
+ #define do_debug 0
+#endif
+#ifdef DEBUGCCW
+ #define do_debug_ccw 1
+ #define DEBUGDATA 1
+#else
+ #define do_debug_ccw 0
+#endif
+#ifdef DEBUGDATA
+ #define do_debug_data 1
+#else
+ #define do_debug_data 0
+#endif
+
+/* define dbf debug levels similar to kernel msg levels */
+#define	CTC_DBF_ALWAYS	0	/* always print this 			*/
+#define	CTC_DBF_EMERG	0	/* system is unusable			*/
+#define	CTC_DBF_ALERT	1	/* action must be taken immediately	*/
+#define	CTC_DBF_CRIT	2	/* critical conditions			*/
+#define	CTC_DBF_ERROR	3	/* error conditions			*/
+#define	CTC_DBF_WARN	4	/* warning conditions			*/
+#define	CTC_DBF_NOTICE	5	/* normal but significant condition	*/
+#define	CTC_DBF_INFO	5	/* informational			*/
+#define	CTC_DBF_DEBUG	6	/* debug-level messages			*/
+
+enum ctcm_dbf_names {
+	CTCM_DBF_SETUP,
+	CTCM_DBF_ERROR,
+	CTCM_DBF_TRACE,
+	CTCM_DBF_MPC_SETUP,
+	CTCM_DBF_MPC_ERROR,
+	CTCM_DBF_MPC_TRACE,
+	CTCM_DBF_INFOS	/* must be last element */
+};
+
+struct ctcm_dbf_info {
+	char name[DEBUG_MAX_NAME_LEN];
+	int pages;
+	int areas;
+	int len;
+	int level;
+	debug_info_t *id;
+};
+
+extern struct ctcm_dbf_info ctcm_dbf[CTCM_DBF_INFOS];
+
+int ctcm_register_dbf_views(void);
+void ctcm_unregister_dbf_views(void);
+void ctcm_dbf_longtext(enum ctcm_dbf_names dbf_nix, int level, char *text, ...);
+
+static inline const char *strtail(const char *s, int n)
+{
+	int l = strlen(s);
+	return (l > n) ? s + (l - n) : s;
+}
+
+#define CTCM_FUNTAIL strtail((char *)__func__, 16)
+
+#define CTCM_DBF_TEXT(name, level, text) \
+	do { \
+		debug_text_event(ctcm_dbf[CTCM_DBF_##name].id, level, text); \
+	} while (0)
+
+#define CTCM_DBF_HEX(name, level, addr, len) \
+	do { \
+		debug_event(ctcm_dbf[CTCM_DBF_##name].id, \
+					level, (void *)(addr), len); \
+	} while (0)
+
+#define CTCM_DBF_TEXT_(name, level, text...) \
+	ctcm_dbf_longtext(CTCM_DBF_##name, level, text)
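+
+/*
+ * Example: CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR, "%s: rc=%d",
+ * CTCM_FUNTAIL, rc) expands to
+ * ctcm_dbf_longtext(CTCM_DBF_ERROR, CTC_DBF_ERROR, "%s: rc=%d", ...),
+ * which formats into a 64-byte buffer before logging the text event.
+ */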
+
+/*
+ * cat : one of {SETUP, MPC_SETUP, TRACE, MPC_TRACE, ERROR, MPC_ERROR}.
+ * dev : netdevice with valid name field.
+ * text: any text string.
+ */
+#define CTCM_DBF_DEV_NAME(cat, dev, text) \
+	do { \
+		CTCM_DBF_TEXT_(cat, CTC_DBF_INFO, "%s(%s) :- %s", \
+			CTCM_FUNTAIL, dev->name, text); \
+	} while (0)
+
+#define MPC_DBF_DEV_NAME(cat, dev, text) \
+	do { \
+		CTCM_DBF_TEXT_(MPC_##cat, CTC_DBF_INFO, "%s(%s) := %s", \
+			CTCM_FUNTAIL, dev->name, text); \
+	} while (0)
+
+#define CTCMY_DBF_DEV_NAME(cat, dev, text) \
+	do { \
+		if (IS_MPCDEV(dev)) \
+			MPC_DBF_DEV_NAME(cat, dev, text); \
+		else \
+			CTCM_DBF_DEV_NAME(cat, dev, text); \
+	} while (0)
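+
+/*
+ * Example: CTCMY_DBF_DEV_NAME(SETUP, dev, "started") logs to the
+ * mpc_setup area for an MPC device (MPC_DBF_DEV_NAME pastes the
+ * category into MPC_SETUP) and to ctc_setup otherwise.
+ */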
+
+/*
+ * cat : one of {SETUP, MPC_SETUP, TRACE, MPC_TRACE, ERROR, MPC_ERROR}.
+ * dev : netdevice.
+ * text: any text string.
+ */
+#define CTCM_DBF_DEV(cat, dev, text) \
+	do { \
+		CTCM_DBF_TEXT_(cat, CTC_DBF_INFO, "%s(%p) :-: %s", \
+			CTCM_FUNTAIL, dev, text); \
+	} while (0)
+
+#define MPC_DBF_DEV(cat, dev, text) \
+	do { \
+		CTCM_DBF_TEXT_(MPC_##cat, CTC_DBF_INFO, "%s(%p) :=: %s", \
+			CTCM_FUNTAIL, dev, text); \
+	} while (0)
+
+#define CTCMY_DBF_DEV(cat, dev, text) \
+	do { \
+		if (IS_MPCDEV(dev)) \
+			MPC_DBF_DEV(cat, dev, text); \
+		else \
+			CTCM_DBF_DEV(cat, dev, text); \
+	} while (0)
+
+#endif
diff --git a/drivers/s390/net/ctcm_fsms.c b/drivers/s390/net/ctcm_fsms.c
new file mode 100644
index 0000000..1b4ee57
--- /dev/null
+++ b/drivers/s390/net/ctcm_fsms.c
@@ -0,0 +1,2300 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright IBM Corp. 2001, 2007
+ * Authors:	Fritz Elfert (felfert@millenux.com)
+ * 		Peter Tiedemann (ptiedem@de.ibm.com)
+ *	MPC additions :
+ *		Belinda Thompson (belindat@us.ibm.com)
+ *		Andy Richter (richtera@us.ibm.com)
+ */
+
+#undef DEBUG
+#undef DEBUGDATA
+#undef DEBUGCCW
+
+#define KMSG_COMPONENT "ctcm"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <linux/interrupt.h>
+#include <linux/timer.h>
+#include <linux/bitops.h>
+
+#include <linux/signal.h>
+#include <linux/string.h>
+
+#include <linux/ip.h>
+#include <linux/if_arp.h>
+#include <linux/tcp.h>
+#include <linux/skbuff.h>
+#include <linux/ctype.h>
+#include <net/dst.h>
+
+#include <linux/io.h>
+#include <asm/ccwdev.h>
+#include <asm/ccwgroup.h>
+#include <linux/uaccess.h>
+
+#include <asm/idals.h>
+
+#include "fsm.h"
+
+#include "ctcm_dbug.h"
+#include "ctcm_main.h"
+#include "ctcm_fsms.h"
+
+const char *dev_state_names[] = {
+	[DEV_STATE_STOPPED]		= "Stopped",
+	[DEV_STATE_STARTWAIT_RXTX]	= "StartWait RXTX",
+	[DEV_STATE_STARTWAIT_RX]	= "StartWait RX",
+	[DEV_STATE_STARTWAIT_TX]	= "StartWait TX",
+	[DEV_STATE_STOPWAIT_RXTX]	= "StopWait RXTX",
+	[DEV_STATE_STOPWAIT_RX]		= "StopWait RX",
+	[DEV_STATE_STOPWAIT_TX]		= "StopWait TX",
+	[DEV_STATE_RUNNING]		= "Running",
+};
+
+const char *dev_event_names[] = {
+	[DEV_EVENT_START]	= "Start",
+	[DEV_EVENT_STOP]	= "Stop",
+	[DEV_EVENT_RXUP]	= "RX up",
+	[DEV_EVENT_TXUP]	= "TX up",
+	[DEV_EVENT_RXDOWN]	= "RX down",
+	[DEV_EVENT_TXDOWN]	= "TX down",
+	[DEV_EVENT_RESTART]	= "Restart",
+};
+
+const char *ctc_ch_event_names[] = {
+	[CTC_EVENT_IO_SUCCESS]	= "ccw_device success",
+	[CTC_EVENT_IO_EBUSY]	= "ccw_device busy",
+	[CTC_EVENT_IO_ENODEV]	= "ccw_device enodev",
+	[CTC_EVENT_IO_UNKNOWN]	= "ccw_device unknown",
+	[CTC_EVENT_ATTNBUSY]	= "Status ATTN & BUSY",
+	[CTC_EVENT_ATTN]	= "Status ATTN",
+	[CTC_EVENT_BUSY]	= "Status BUSY",
+	[CTC_EVENT_UC_RCRESET]	= "Unit check remote reset",
+	[CTC_EVENT_UC_RSRESET]	= "Unit check remote system reset",
+	[CTC_EVENT_UC_TXTIMEOUT] = "Unit check TX timeout",
+	[CTC_EVENT_UC_TXPARITY]	= "Unit check TX parity",
+	[CTC_EVENT_UC_HWFAIL]	= "Unit check Hardware failure",
+	[CTC_EVENT_UC_RXPARITY]	= "Unit check RX parity",
+	[CTC_EVENT_UC_ZERO]	= "Unit check ZERO",
+	[CTC_EVENT_UC_UNKNOWN]	= "Unit check Unknown",
+	[CTC_EVENT_SC_UNKNOWN]	= "SubChannel check Unknown",
+	[CTC_EVENT_MC_FAIL]	= "Machine check failure",
+	[CTC_EVENT_MC_GOOD]	= "Machine check operational",
+	[CTC_EVENT_IRQ]		= "IRQ normal",
+	[CTC_EVENT_FINSTAT]	= "IRQ final",
+	[CTC_EVENT_TIMER]	= "Timer",
+	[CTC_EVENT_START]	= "Start",
+	[CTC_EVENT_STOP]	= "Stop",
+	/*
+	 * additional MPC events
+	 */
+	[CTC_EVENT_SEND_XID]	= "XID Exchange",
+	[CTC_EVENT_RSWEEP_TIMER] = "MPC Group Sweep Timer",
+};
+
+const char *ctc_ch_state_names[] = {
+	[CTC_STATE_IDLE]	= "Idle",
+	[CTC_STATE_STOPPED]	= "Stopped",
+	[CTC_STATE_STARTWAIT]	= "StartWait",
+	[CTC_STATE_STARTRETRY]	= "StartRetry",
+	[CTC_STATE_SETUPWAIT]	= "SetupWait",
+	[CTC_STATE_RXINIT]	= "RX init",
+	[CTC_STATE_TXINIT]	= "TX init",
+	[CTC_STATE_RX]		= "RX",
+	[CTC_STATE_TX]		= "TX",
+	[CTC_STATE_RXIDLE]	= "RX idle",
+	[CTC_STATE_TXIDLE]	= "TX idle",
+	[CTC_STATE_RXERR]	= "RX error",
+	[CTC_STATE_TXERR]	= "TX error",
+	[CTC_STATE_TERM]	= "Terminating",
+	[CTC_STATE_DTERM]	= "Restarting",
+	[CTC_STATE_NOTOP]	= "Not operational",
+	/*
+	 * additional MPC states
+	 */
+	[CH_XID0_PENDING]	= "Pending XID0 Start",
+	[CH_XID0_INPROGRESS]	= "In XID0 Negotiations ",
+	[CH_XID7_PENDING]	= "Pending XID7 P1 Start",
+	[CH_XID7_PENDING1]	= "Active XID7 P1 Exchange ",
+	[CH_XID7_PENDING2]	= "Pending XID7 P2 Start ",
+	[CH_XID7_PENDING3]	= "Active XID7 P2 Exchange ",
+	[CH_XID7_PENDING4]	= "XID7 Complete - Pending READY ",
+};
+
+static void ctcm_action_nop(fsm_instance *fi, int event, void *arg);
+
+/*
+ * ----- static ctcm actions for channel statemachine -----
+ *
+ */
+static void chx_txdone(fsm_instance *fi, int event, void *arg);
+static void chx_rx(fsm_instance *fi, int event, void *arg);
+static void chx_rxidle(fsm_instance *fi, int event, void *arg);
+static void chx_firstio(fsm_instance *fi, int event, void *arg);
+static void ctcm_chx_setmode(fsm_instance *fi, int event, void *arg);
+static void ctcm_chx_start(fsm_instance *fi, int event, void *arg);
+static void ctcm_chx_haltio(fsm_instance *fi, int event, void *arg);
+static void ctcm_chx_stopped(fsm_instance *fi, int event, void *arg);
+static void ctcm_chx_stop(fsm_instance *fi, int event, void *arg);
+static void ctcm_chx_fail(fsm_instance *fi, int event, void *arg);
+static void ctcm_chx_setuperr(fsm_instance *fi, int event, void *arg);
+static void ctcm_chx_restart(fsm_instance *fi, int event, void *arg);
+static void ctcm_chx_rxiniterr(fsm_instance *fi, int event, void *arg);
+static void ctcm_chx_rxinitfail(fsm_instance *fi, int event, void *arg);
+static void ctcm_chx_rxdisc(fsm_instance *fi, int event, void *arg);
+static void ctcm_chx_txiniterr(fsm_instance *fi, int event, void *arg);
+static void ctcm_chx_txretry(fsm_instance *fi, int event, void *arg);
+static void ctcm_chx_iofatal(fsm_instance *fi, int event, void *arg);
+
+/*
+ * ----- static ctcmpc actions for ctcmpc channel statemachine -----
+ *
+ */
+static void ctcmpc_chx_txdone(fsm_instance *fi, int event, void *arg);
+static void ctcmpc_chx_rx(fsm_instance *fi, int event, void *arg);
+static void ctcmpc_chx_firstio(fsm_instance *fi, int event, void *arg);
+/* shared :
+static void ctcm_chx_setmode(fsm_instance *fi, int event, void *arg);
+static void ctcm_chx_start(fsm_instance *fi, int event, void *arg);
+static void ctcm_chx_haltio(fsm_instance *fi, int event, void *arg);
+static void ctcm_chx_stopped(fsm_instance *fi, int event, void *arg);
+static void ctcm_chx_stop(fsm_instance *fi, int event, void *arg);
+static void ctcm_chx_fail(fsm_instance *fi, int event, void *arg);
+static void ctcm_chx_setuperr(fsm_instance *fi, int event, void *arg);
+static void ctcm_chx_restart(fsm_instance *fi, int event, void *arg);
+static void ctcm_chx_rxiniterr(fsm_instance *fi, int event, void *arg);
+static void ctcm_chx_rxinitfail(fsm_instance *fi, int event, void *arg);
+static void ctcm_chx_rxdisc(fsm_instance *fi, int event, void *arg);
+static void ctcm_chx_txiniterr(fsm_instance *fi, int event, void *arg);
+static void ctcm_chx_txretry(fsm_instance *fi, int event, void *arg);
+static void ctcm_chx_iofatal(fsm_instance *fi, int event, void *arg);
+*/
+static void ctcmpc_chx_attn(fsm_instance *fsm, int event, void *arg);
+static void ctcmpc_chx_attnbusy(fsm_instance *, int, void *);
+static void ctcmpc_chx_resend(fsm_instance *, int, void *);
+static void ctcmpc_chx_send_sweep(fsm_instance *fsm, int event, void *arg);
+
+/**
+ * Check the return code of a preceding ccw_device call, halt_IO etc...
+ *
+ * ch	:	The channel the error belongs to.
+ * rc	:	The non-zero return code to report.
+ * msg	:	Text describing the failed operation.
+ */
+void ctcm_ccw_check_rc(struct channel *ch, int rc, char *msg)
+{
+	CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR,
+		"%s(%s): %s: %04x\n",
+		CTCM_FUNTAIL, ch->id, msg, rc);
+	switch (rc) {
+	case -EBUSY:
+		pr_info("%s: The communication peer is busy\n",
+			ch->id);
+		fsm_event(ch->fsm, CTC_EVENT_IO_EBUSY, ch);
+		break;
+	case -ENODEV:
+		pr_err("%s: The specified target device is not valid\n",
+		       ch->id);
+		fsm_event(ch->fsm, CTC_EVENT_IO_ENODEV, ch);
+		break;
+	default:
+		pr_err("An I/O operation resulted in error %04x\n",
+		       rc);
+		fsm_event(ch->fsm, CTC_EVENT_IO_UNKNOWN, ch);
+	}
+}
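+
+/*
+ * Typical call pattern (as used throughout the state machines below):
+ *
+ *	rc = ccw_device_start(ch->cdev, &ch->ccw[0],
+ *					(unsigned long)ch, 0xff, 0);
+ *	if (rc != 0)
+ *		ctcm_ccw_check_rc(ch, rc, "normal RX");
+ *
+ * i.e. the helper both logs the failure and feeds the matching
+ * CTC_EVENT_IO_* event into the channel's fsm.
+ */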
+
+void ctcm_purge_skb_queue(struct sk_buff_head *q)
+{
+	struct sk_buff *skb;
+
+	CTCM_DBF_TEXT(TRACE, CTC_DBF_DEBUG, __func__);
+
+	while ((skb = skb_dequeue(q))) {
+		refcount_dec(&skb->users);
+		dev_kfree_skb_any(skb);
+	}
+}
+
+/**
+ * NOP action for statemachines
+ */
+static void ctcm_action_nop(fsm_instance *fi, int event, void *arg)
+{
+}
+
+/*
+ * Actions for channel - statemachines.
+ */
+
+/**
+ * Normal data has been sent. Free the corresponding
+ * skb (it's in io_queue), reset dev->tbusy and
+ * revert to idle state.
+ *
+ * fi		An instance of a channel statemachine.
+ * event	The event that just happened.
+ * arg		Generic pointer, cast from channel * upon call.
+ */
+static void chx_txdone(fsm_instance *fi, int event, void *arg)
+{
+	struct channel *ch = arg;
+	struct net_device *dev = ch->netdev;
+	struct ctcm_priv *priv = dev->ml_priv;
+	struct sk_buff *skb;
+	int first = 1;
+	int i;
+	unsigned long duration;
+	unsigned long done_stamp = jiffies;
+
+	CTCM_PR_DEBUG("%s(%s): %s\n", __func__, ch->id, dev->name);
+
+	duration = done_stamp - ch->prof.send_stamp;
+	if (duration > ch->prof.tx_time)
+		ch->prof.tx_time = duration;
+
+	if (ch->irb->scsw.cmd.count != 0)
+		CTCM_DBF_TEXT_(TRACE, CTC_DBF_DEBUG,
+			"%s(%s): TX not complete, remaining %d bytes",
+			     CTCM_FUNTAIL, dev->name, ch->irb->scsw.cmd.count);
+	fsm_deltimer(&ch->timer);
+	while ((skb = skb_dequeue(&ch->io_queue))) {
+		priv->stats.tx_packets++;
+		priv->stats.tx_bytes += skb->len - LL_HEADER_LENGTH;
+		if (first) {
+			priv->stats.tx_bytes += 2;
+			first = 0;
+		}
+		refcount_dec(&skb->users);
+		dev_kfree_skb_irq(skb);
+	}
+	spin_lock(&ch->collect_lock);
+	clear_normalized_cda(&ch->ccw[4]);
+	if (ch->collect_len > 0) {
+		int rc;
+
+		if (ctcm_checkalloc_buffer(ch)) {
+			spin_unlock(&ch->collect_lock);
+			return;
+		}
+		ch->trans_skb->data = ch->trans_skb_data;
+		skb_reset_tail_pointer(ch->trans_skb);
+		ch->trans_skb->len = 0;
+		if (ch->prof.maxmulti < (ch->collect_len + 2))
+			ch->prof.maxmulti = ch->collect_len + 2;
+		if (ch->prof.maxcqueue < skb_queue_len(&ch->collect_queue))
+			ch->prof.maxcqueue = skb_queue_len(&ch->collect_queue);
+		*((__u16 *)skb_put(ch->trans_skb, 2)) = ch->collect_len + 2;
+		i = 0;
+		while ((skb = skb_dequeue(&ch->collect_queue))) {
+			skb_copy_from_linear_data(skb,
+				skb_put(ch->trans_skb, skb->len), skb->len);
+			priv->stats.tx_packets++;
+			priv->stats.tx_bytes += skb->len - LL_HEADER_LENGTH;
+			refcount_dec(&skb->users);
+			dev_kfree_skb_irq(skb);
+			i++;
+		}
+		ch->collect_len = 0;
+		spin_unlock(&ch->collect_lock);
+		ch->ccw[1].count = ch->trans_skb->len;
+		fsm_addtimer(&ch->timer, CTCM_TIME_5_SEC, CTC_EVENT_TIMER, ch);
+		ch->prof.send_stamp = jiffies;
+		rc = ccw_device_start(ch->cdev, &ch->ccw[0],
+						(unsigned long)ch, 0xff, 0);
+		ch->prof.doios_multi++;
+		if (rc != 0) {
+			priv->stats.tx_dropped += i;
+			priv->stats.tx_errors += i;
+			fsm_deltimer(&ch->timer);
+			ctcm_ccw_check_rc(ch, rc, "chained TX");
+		}
+	} else {
+		spin_unlock(&ch->collect_lock);
+		fsm_newstate(fi, CTC_STATE_TXIDLE);
+	}
+	ctcm_clear_busy_do(dev);
+}
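+
+/*
+ * Note on the wire format built above: trans_skb carries one block that
+ * starts with a __u16 total block length (collect_len + 2, i.e.
+ * including the length word itself), followed by the queued packets.
+ * chx_rx() below parses exactly this layout on the receive side.
+ */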
+
+/**
+ * Initial data is sent.
+ * Notify device statemachine that we are up and
+ * running.
+ *
+ * fi		An instance of a channel statemachine.
+ * event	The event that just happened.
+ * arg		Generic pointer, cast from channel * upon call.
+ */
+void ctcm_chx_txidle(fsm_instance *fi, int event, void *arg)
+{
+	struct channel *ch = arg;
+	struct net_device *dev = ch->netdev;
+	struct ctcm_priv *priv = dev->ml_priv;
+
+	CTCM_PR_DEBUG("%s(%s): %s\n", __func__, ch->id, dev->name);
+
+	fsm_deltimer(&ch->timer);
+	fsm_newstate(fi, CTC_STATE_TXIDLE);
+	fsm_event(priv->fsm, DEV_EVENT_TXUP, ch->netdev);
+}
+
+/**
+ * Got normal data; check it for sanity, queue it up, allocate a new
+ * buffer, trigger the bottom half, and initiate the next read.
+ *
+ * fi		An instance of a channel statemachine.
+ * event	The event that just happened.
+ * arg		Generic pointer, cast from channel * upon call.
+ */
+static void chx_rx(fsm_instance *fi, int event, void *arg)
+{
+	struct channel *ch = arg;
+	struct net_device *dev = ch->netdev;
+	struct ctcm_priv *priv = dev->ml_priv;
+	int len = ch->max_bufsize - ch->irb->scsw.cmd.count;
+	struct sk_buff *skb = ch->trans_skb;
+	__u16 block_len = *((__u16 *)skb->data);
+	int check_len;
+	int rc;
+
+	fsm_deltimer(&ch->timer);
+	if (len < 8) {
+		CTCM_DBF_TEXT_(TRACE, CTC_DBF_NOTICE,
+			"%s(%s): got packet with length %d < 8\n",
+					CTCM_FUNTAIL, dev->name, len);
+		priv->stats.rx_dropped++;
+		priv->stats.rx_length_errors++;
+		goto again;
+	}
+	if (len > ch->max_bufsize) {
+		CTCM_DBF_TEXT_(TRACE, CTC_DBF_NOTICE,
+			"%s(%s): got packet with length %d > %d\n",
+				CTCM_FUNTAIL, dev->name, len, ch->max_bufsize);
+		priv->stats.rx_dropped++;
+		priv->stats.rx_length_errors++;
+		goto again;
+	}
+
+	/*
+	 * VM TCP seems to have a bug sending 2 trailing bytes of garbage.
+	 */
+	switch (ch->protocol) {
+	case CTCM_PROTO_S390:
+	case CTCM_PROTO_OS390:
+		check_len = block_len + 2;
+		break;
+	default:
+		check_len = block_len;
+		break;
+	}
+	if ((len < block_len) || (len > check_len)) {
+		CTCM_DBF_TEXT_(TRACE, CTC_DBF_NOTICE,
+			"%s(%s): got block length %d != rx length %d\n",
+				CTCM_FUNTAIL, dev->name, block_len, len);
+		if (do_debug)
+			ctcmpc_dump_skb(skb, 0);
+
+		*((__u16 *)skb->data) = len;
+		priv->stats.rx_dropped++;
+		priv->stats.rx_length_errors++;
+		goto again;
+	}
+	if (block_len > 2) {
+		*((__u16 *)skb->data) = block_len - 2;
+		ctcm_unpack_skb(ch, skb);
+	}
+ again:
+	skb->data = ch->trans_skb_data;
+	skb_reset_tail_pointer(skb);
+	skb->len = 0;
+	if (ctcm_checkalloc_buffer(ch))
+		return;
+	ch->ccw[1].count = ch->max_bufsize;
+	rc = ccw_device_start(ch->cdev, &ch->ccw[0],
+					(unsigned long)ch, 0xff, 0);
+	if (rc != 0)
+		ctcm_ccw_check_rc(ch, rc, "normal RX");
+}
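+
+/*
+ * Worked example for the sanity check above: with block_len = 100 on
+ * CTCM_PROTO_S390, check_len = 102, so a received length of 100..102
+ * is accepted (the up-to-2 extra bytes being the trailing garbage
+ * mentioned above); anything else bumps rx_length_errors.
+ */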
+
+/**
+ * Initialize the connection by sending the initial block-length word
+ * (a __u16 set to CTCM_INITIAL_BLOCKLEN).
+ *
+ * fi		An instance of a channel statemachine.
+ * event	The event that just happened.
+ * arg		Generic pointer, cast from channel * upon call.
+ */
+static void chx_firstio(fsm_instance *fi, int event, void *arg)
+{
+	int rc;
+	struct channel *ch = arg;
+	int fsmstate = fsm_getstate(fi);
+
+	CTCM_DBF_TEXT_(TRACE, CTC_DBF_NOTICE,
+		"%s(%s) : %02x",
+		CTCM_FUNTAIL, ch->id, fsmstate);
+
+	ch->sense_rc = 0;	/* reset unit check report control */
+	if (fsmstate == CTC_STATE_TXIDLE)
+		CTCM_DBF_TEXT_(TRACE, CTC_DBF_DEBUG,
+			"%s(%s): remote side issued READ?, init.\n",
+				CTCM_FUNTAIL, ch->id);
+	fsm_deltimer(&ch->timer);
+	if (ctcm_checkalloc_buffer(ch))
+		return;
+	if ((fsmstate == CTC_STATE_SETUPWAIT) &&
+	    (ch->protocol == CTCM_PROTO_OS390)) {
+		/* OS/390 or z/OS */
+		if (CHANNEL_DIRECTION(ch->flags) == CTCM_READ) {
+			*((__u16 *)ch->trans_skb->data) = CTCM_INITIAL_BLOCKLEN;
+			fsm_addtimer(&ch->timer, CTCM_TIME_5_SEC,
+				     CTC_EVENT_TIMER, ch);
+			chx_rxidle(fi, event, arg);
+		} else {
+			struct net_device *dev = ch->netdev;
+			struct ctcm_priv *priv = dev->ml_priv;
+			fsm_newstate(fi, CTC_STATE_TXIDLE);
+			fsm_event(priv->fsm, DEV_EVENT_TXUP, dev);
+		}
+		return;
+	}
+	/*
+	 * Don't set up a timer for receiving the initial RX frame
+	 * if in compatibility mode, since VM TCP delays the initial
+	 * frame until it has some data to send.
+	 */
+	if ((CHANNEL_DIRECTION(ch->flags) == CTCM_WRITE) ||
+	    (ch->protocol != CTCM_PROTO_S390))
+		fsm_addtimer(&ch->timer, CTCM_TIME_5_SEC, CTC_EVENT_TIMER, ch);
+
+	*((__u16 *)ch->trans_skb->data) = CTCM_INITIAL_BLOCKLEN;
+	ch->ccw[1].count = 2;	/* Transfer only length */
+
+	fsm_newstate(fi, (CHANNEL_DIRECTION(ch->flags) == CTCM_READ)
+		     ? CTC_STATE_RXINIT : CTC_STATE_TXINIT);
+	rc = ccw_device_start(ch->cdev, &ch->ccw[0],
+					(unsigned long)ch, 0xff, 0);
+	if (rc != 0) {
+		fsm_deltimer(&ch->timer);
+		fsm_newstate(fi, CTC_STATE_SETUPWAIT);
+		ctcm_ccw_check_rc(ch, rc, "init IO");
+	}
+	/*
+	 * If in compatibility mode, since we don't set up a timer, we
+	 * also signal the RX channel up immediately. This enables us
+	 * to send packets early, which in turn usually triggers a
+	 * reply from VM TCP that brings the RX channel up to its
+	 * final state.
+	 */
+	if ((CHANNEL_DIRECTION(ch->flags) == CTCM_READ) &&
+	    (ch->protocol == CTCM_PROTO_S390)) {
+		struct net_device *dev = ch->netdev;
+		struct ctcm_priv *priv = dev->ml_priv;
+		fsm_event(priv->fsm, DEV_EVENT_RXUP, dev);
+	}
+}
+
+/**
+ * Got initial data, check it. If OK,
+ * notify device statemachine that we are up and
+ * running.
+ *
+ * fi		An instance of a channel statemachine.
+ * event	The event that just happened.
+ * arg		Generic pointer, cast from channel * upon call.
+ */
+static void chx_rxidle(fsm_instance *fi, int event, void *arg)
+{
+	struct channel *ch = arg;
+	struct net_device *dev = ch->netdev;
+	struct ctcm_priv *priv = dev->ml_priv;
+	__u16 buflen;
+	int rc;
+
+	fsm_deltimer(&ch->timer);
+	buflen = *((__u16 *)ch->trans_skb->data);
+	CTCM_PR_DEBUG("%s: %s: Initial RX count = %d\n",
+			__func__, dev->name, buflen);
+
+	if (buflen >= CTCM_INITIAL_BLOCKLEN) {
+		if (ctcm_checkalloc_buffer(ch))
+			return;
+		ch->ccw[1].count = ch->max_bufsize;
+		fsm_newstate(fi, CTC_STATE_RXIDLE);
+		rc = ccw_device_start(ch->cdev, &ch->ccw[0],
+						(unsigned long)ch, 0xff, 0);
+		if (rc != 0) {
+			fsm_newstate(fi, CTC_STATE_RXINIT);
+			ctcm_ccw_check_rc(ch, rc, "initial RX");
+		} else
+			fsm_event(priv->fsm, DEV_EVENT_RXUP, dev);
+	} else {
+		CTCM_PR_DEBUG("%s: %s: Initial RX count %d not %d\n",
+				__func__, dev->name,
+					buflen, CTCM_INITIAL_BLOCKLEN);
+		chx_firstio(fi, event, arg);
+	}
+}
+
+/**
+ * Set channel into extended mode.
+ *
+ * fi		An instance of a channel statemachine.
+ * event	The event that just happened.
+ * arg		Generic pointer, cast from channel * upon call.
+ */
+static void ctcm_chx_setmode(fsm_instance *fi, int event, void *arg)
+{
+	struct channel *ch = arg;
+	int rc;
+	unsigned long saveflags = 0;
+	int timeout = CTCM_TIME_5_SEC;
+
+	fsm_deltimer(&ch->timer);
+	if (IS_MPC(ch)) {
+		timeout = 1500;
+		CTCM_PR_DEBUG("enter %s: cp=%i ch=0x%p id=%s\n",
+				__func__, smp_processor_id(), ch, ch->id);
+	}
+	fsm_addtimer(&ch->timer, timeout, CTC_EVENT_TIMER, ch);
+	fsm_newstate(fi, CTC_STATE_SETUPWAIT);
+	CTCM_CCW_DUMP((char *)&ch->ccw[6], sizeof(struct ccw1) * 2);
+
+	if (event == CTC_EVENT_TIMER)	/* only for timer not yet locked */
+		spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags);
+			/* Such conditional locking is nondeterministic in a
+			 * static view; ignore the sparse warnings here. */
+
+	rc = ccw_device_start(ch->cdev, &ch->ccw[6],
+					(unsigned long)ch, 0xff, 0);
+	if (event == CTC_EVENT_TIMER)	/* see above comments */
+		spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), saveflags);
+	if (rc != 0) {
+		fsm_deltimer(&ch->timer);
+		fsm_newstate(fi, CTC_STATE_STARTWAIT);
+		ctcm_ccw_check_rc(ch, rc, "set Mode");
+	} else
+		ch->retry = 0;
+}
+
+/**
+ * Set up the channel.
+ *
+ * fi		An instance of a channel statemachine.
+ * event	The event that just happened.
+ * arg		Generic pointer, cast from channel * upon call.
+ */
+static void ctcm_chx_start(fsm_instance *fi, int event, void *arg)
+{
+	struct channel *ch	= arg;
+	unsigned long saveflags;
+	int rc;
+
+	CTCM_DBF_TEXT_(SETUP, CTC_DBF_INFO, "%s(%s): %s",
+		CTCM_FUNTAIL, ch->id,
+		(CHANNEL_DIRECTION(ch->flags) == CTCM_READ) ? "RX" : "TX");
+
+	if (ch->trans_skb != NULL) {
+		clear_normalized_cda(&ch->ccw[1]);
+		dev_kfree_skb(ch->trans_skb);
+		ch->trans_skb = NULL;
+	}
+	if (CHANNEL_DIRECTION(ch->flags) == CTCM_READ) {
+		ch->ccw[1].cmd_code = CCW_CMD_READ;
+		ch->ccw[1].flags = CCW_FLAG_SLI;
+		ch->ccw[1].count = 0;
+	} else {
+		ch->ccw[1].cmd_code = CCW_CMD_WRITE;
+		ch->ccw[1].flags = CCW_FLAG_SLI | CCW_FLAG_CC;
+		ch->ccw[1].count = 0;
+	}
+	if (ctcm_checkalloc_buffer(ch)) {
+		CTCM_DBF_TEXT_(TRACE, CTC_DBF_DEBUG,
+			"%s(%s): %s trans_skb alloc delayed "
+			"until first transfer",
+			CTCM_FUNTAIL, ch->id,
+			(CHANNEL_DIRECTION(ch->flags) == CTCM_READ) ?
+				"RX" : "TX");
+	}
+	ch->ccw[0].cmd_code = CCW_CMD_PREPARE;
+	ch->ccw[0].flags = CCW_FLAG_SLI | CCW_FLAG_CC;
+	ch->ccw[0].count = 0;
+	ch->ccw[0].cda = 0;
+	ch->ccw[2].cmd_code = CCW_CMD_NOOP;	/* jointed CE + DE */
+	ch->ccw[2].flags = CCW_FLAG_SLI;
+	ch->ccw[2].count = 0;
+	ch->ccw[2].cda = 0;
+	memcpy(&ch->ccw[3], &ch->ccw[0], sizeof(struct ccw1) * 3);
+	ch->ccw[4].cda = 0;
+	ch->ccw[4].flags &= ~CCW_FLAG_IDA;
+
+	fsm_newstate(fi, CTC_STATE_STARTWAIT);
+	fsm_addtimer(&ch->timer, 1000, CTC_EVENT_TIMER, ch);
+	spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags);
+	rc = ccw_device_halt(ch->cdev, (unsigned long)ch);
+	spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), saveflags);
+	if (rc != 0) {
+		if (rc != -EBUSY)
+			fsm_deltimer(&ch->timer);
+		ctcm_ccw_check_rc(ch, rc, "initial HaltIO");
+	}
+}
+
+/**
+ * Shut down a channel.
+ *
+ * fi		An instance of a channel statemachine.
+ * event	The event that just happened.
+ * arg		Generic pointer, cast from channel * upon call.
+ */
+static void ctcm_chx_haltio(fsm_instance *fi, int event, void *arg)
+{
+	struct channel *ch = arg;
+	unsigned long saveflags = 0;
+	int rc;
+	int oldstate;
+
+	fsm_deltimer(&ch->timer);
+	if (IS_MPC(ch))
+		fsm_deltimer(&ch->sweep_timer);
+
+	fsm_addtimer(&ch->timer, CTCM_TIME_5_SEC, CTC_EVENT_TIMER, ch);
+
+	if (event == CTC_EVENT_STOP)	/* only for STOP not yet locked */
+		spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags);
+			/* Such conditional locking is nondeterministic in a
+			 * static view; ignore the sparse warnings here. */
+	oldstate = fsm_getstate(fi);
+	fsm_newstate(fi, CTC_STATE_TERM);
+	rc = ccw_device_halt(ch->cdev, (unsigned long)ch);
+
+	if (event == CTC_EVENT_STOP)
+		spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), saveflags);
+			/* see remark above about conditional locking */
+
+	if (rc != 0 && rc != -EBUSY) {
+		fsm_deltimer(&ch->timer);
+		if (event != CTC_EVENT_STOP) {
+			fsm_newstate(fi, oldstate);
+			ctcm_ccw_check_rc(ch, rc, (char *)__func__);
+		}
+	}
+}
+
+/**
+ * Cleanup helper for chx_fail and chx_stopped:
+ * clean up the channel's queues and notify the interface statemachine.
+ *
+ * fi		An instance of a channel statemachine.
+ * state	The next state (depending on caller).
+ * ch		The channel to operate on.
+ */
+static void ctcm_chx_cleanup(fsm_instance *fi, int state,
+		struct channel *ch)
+{
+	struct net_device *dev = ch->netdev;
+	struct ctcm_priv *priv = dev->ml_priv;
+
+	CTCM_DBF_TEXT_(SETUP, CTC_DBF_NOTICE,
+			"%s(%s): %s[%d]\n",
+			CTCM_FUNTAIL, dev->name, ch->id, state);
+
+	fsm_deltimer(&ch->timer);
+	if (IS_MPC(ch))
+		fsm_deltimer(&ch->sweep_timer);
+
+	fsm_newstate(fi, state);
+	if (state == CTC_STATE_STOPPED && ch->trans_skb != NULL) {
+		clear_normalized_cda(&ch->ccw[1]);
+		dev_kfree_skb_any(ch->trans_skb);
+		ch->trans_skb = NULL;
+	}
+
+	ch->th_seg = 0x00;
+	ch->th_seq_num = 0x00;
+	if (CHANNEL_DIRECTION(ch->flags) == CTCM_READ) {
+		skb_queue_purge(&ch->io_queue);
+		fsm_event(priv->fsm, DEV_EVENT_RXDOWN, dev);
+	} else {
+		ctcm_purge_skb_queue(&ch->io_queue);
+		if (IS_MPC(ch))
+			ctcm_purge_skb_queue(&ch->sweep_queue);
+		spin_lock(&ch->collect_lock);
+		ctcm_purge_skb_queue(&ch->collect_queue);
+		ch->collect_len = 0;
+		spin_unlock(&ch->collect_lock);
+		fsm_event(priv->fsm, DEV_EVENT_TXDOWN, dev);
+	}
+}
+
+/**
+ * A channel has successfully been halted.
+ * Clean up its queue and notify the interface statemachine.
+ *
+ * fi		An instance of a channel statemachine.
+ * event	The event that just happened.
+ * arg		Generic pointer, cast from channel * upon call.
+ */
+static void ctcm_chx_stopped(fsm_instance *fi, int event, void *arg)
+{
+	ctcm_chx_cleanup(fi, CTC_STATE_STOPPED, arg);
+}
+
+/**
+ * A stop command from the device statemachine arrived while we are in
+ * the not-operational mode. Set the state to stopped.
+ *
+ * fi		An instance of a channel statemachine.
+ * event	The event that just happened.
+ * arg		Generic pointer, cast from channel * upon call.
+ */
+static void ctcm_chx_stop(fsm_instance *fi, int event, void *arg)
+{
+	fsm_newstate(fi, CTC_STATE_STOPPED);
+}
+
+/**
+ * A machine check for "no path", "not operational" status or a gone
+ * device has happened.
+ * Clean up the queue and notify the interface statemachine.
+ *
+ * fi		An instance of a channel statemachine.
+ * event	The event that just happened.
+ * arg		Generic pointer, cast from channel * upon call.
+ */
+static void ctcm_chx_fail(fsm_instance *fi, int event, void *arg)
+{
+	ctcm_chx_cleanup(fi, CTC_STATE_NOTOP, arg);
+}
+
+/**
+ * Handle an error during channel setup.
+ *
+ * fi		An instance of a channel statemachine.
+ * event	The event that just happened.
+ * arg		Generic pointer, cast from channel * upon call.
+ */
+static void ctcm_chx_setuperr(fsm_instance *fi, int event, void *arg)
+{
+	struct channel *ch = arg;
+	struct net_device *dev = ch->netdev;
+	struct ctcm_priv *priv = dev->ml_priv;
+
+	/*
+	 * Special case: Got UC_RCRESET on setmode.
+	 * This means that the remote side isn't set up yet. In this
+	 * case simply retry after a few seconds...
+	 */
+	if ((fsm_getstate(fi) == CTC_STATE_SETUPWAIT) &&
+	    ((event == CTC_EVENT_UC_RCRESET) ||
+	     (event == CTC_EVENT_UC_RSRESET))) {
+		fsm_newstate(fi, CTC_STATE_STARTRETRY);
+		fsm_deltimer(&ch->timer);
+		fsm_addtimer(&ch->timer, CTCM_TIME_5_SEC, CTC_EVENT_TIMER, ch);
+		if (!IS_MPC(ch) &&
+		    (CHANNEL_DIRECTION(ch->flags) == CTCM_READ)) {
+			int rc = ccw_device_halt(ch->cdev, (unsigned long)ch);
+			if (rc != 0)
+				ctcm_ccw_check_rc(ch, rc,
+					"HaltIO in chx_setuperr");
+		}
+		return;
+	}
+
+	CTCM_DBF_TEXT_(ERROR, CTC_DBF_CRIT,
+		"%s(%s) : %s error during %s channel setup state=%s\n",
+		CTCM_FUNTAIL, dev->name, ctc_ch_event_names[event],
+		(CHANNEL_DIRECTION(ch->flags) == CTCM_READ) ? "RX" : "TX",
+		fsm_getstate_str(fi));
+
+	if (CHANNEL_DIRECTION(ch->flags) == CTCM_READ) {
+		fsm_newstate(fi, CTC_STATE_RXERR);
+		fsm_event(priv->fsm, DEV_EVENT_RXDOWN, dev);
+	} else {
+		fsm_newstate(fi, CTC_STATE_TXERR);
+		fsm_event(priv->fsm, DEV_EVENT_TXDOWN, dev);
+	}
+}
+
+/**
+ * Restart a channel after an error.
+ *
+ * fi		An instance of a channel statemachine.
+ * event	The event that just happened.
+ * arg		Generic pointer, cast from channel * upon call.
+ */
+static void ctcm_chx_restart(fsm_instance *fi, int event, void *arg)
+{
+	struct channel *ch = arg;
+	struct net_device *dev = ch->netdev;
+	unsigned long saveflags = 0;
+	int oldstate;
+	int rc;
+
+	CTCM_DBF_TEXT_(TRACE, CTC_DBF_NOTICE,
+		"%s: %s[%d] of %s\n",
+			CTCM_FUNTAIL, ch->id, event, dev->name);
+
+	fsm_deltimer(&ch->timer);
+
+	fsm_addtimer(&ch->timer, CTCM_TIME_5_SEC, CTC_EVENT_TIMER, ch);
+	oldstate = fsm_getstate(fi);
+	fsm_newstate(fi, CTC_STATE_STARTWAIT);
+	if (event == CTC_EVENT_TIMER)	/* only for timer not yet locked */
+		spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags);
+			/* Such conditional locking is a known problem for
+			 * sparse because it's nondeterministic in a static
+			 * view. Warnings should be ignored here. */
+	rc = ccw_device_halt(ch->cdev, (unsigned long)ch);
+	if (event == CTC_EVENT_TIMER)
+		spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), saveflags);
+	if (rc != 0) {
+		if (rc != -EBUSY) {
+			fsm_deltimer(&ch->timer);
+			fsm_newstate(fi, oldstate);
+		}
+		ctcm_ccw_check_rc(ch, rc, "HaltIO in ctcm_chx_restart");
+	}
+}
+
+/**
+ * Handle error during RX initial handshake (exchange of
+ * 0-length block header)
+ *
+ * fi		An instance of a channel statemachine.
+ * event	The event that just happened.
+ * arg		Generic pointer, cast from channel * upon call.
+ */
+static void ctcm_chx_rxiniterr(fsm_instance *fi, int event, void *arg)
+{
+	struct channel *ch = arg;
+	struct net_device *dev = ch->netdev;
+	struct ctcm_priv *priv = dev->ml_priv;
+
+	if (event == CTC_EVENT_TIMER) {
+		if (!IS_MPCDEV(dev))
+			/* TODO : check if MPC deletes timer somewhere */
+			fsm_deltimer(&ch->timer);
+		if (ch->retry++ < 3)
+			ctcm_chx_restart(fi, event, arg);
+		else {
+			fsm_newstate(fi, CTC_STATE_RXERR);
+			fsm_event(priv->fsm, DEV_EVENT_RXDOWN, dev);
+		}
+	} else {
+		CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR,
+			"%s(%s): %s in %s", CTCM_FUNTAIL, ch->id,
+			ctc_ch_event_names[event], fsm_getstate_str(fi));
+
+		dev_warn(&dev->dev,
+			"Initialization failed with RX/TX init handshake "
+			"error %s\n", ctc_ch_event_names[event]);
+	}
+}
+
+/**
+ * Notify device statemachine if we gave up initialization
+ * of RX channel.
+ *
+ * fi		An instance of a channel statemachine.
+ * event	The event that just happened.
+ * arg		Generic pointer, cast from channel * upon call.
+ */
+static void ctcm_chx_rxinitfail(fsm_instance *fi, int event, void *arg)
+{
+	struct channel *ch = arg;
+	struct net_device *dev = ch->netdev;
+	struct ctcm_priv *priv = dev->ml_priv;
+
+	CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR,
+			"%s(%s): RX %s busy, init. fail",
+				CTCM_FUNTAIL, dev->name, ch->id);
+	fsm_newstate(fi, CTC_STATE_RXERR);
+	fsm_event(priv->fsm, DEV_EVENT_RXDOWN, dev);
+}
+
+/**
+ * Handle RX Unit check remote reset (remote disconnected)
+ *
+ * fi		An instance of a channel statemachine.
+ * event	The event that just happened.
+ * arg		Generic pointer, cast from channel * upon call.
+ */
+static void ctcm_chx_rxdisc(fsm_instance *fi, int event, void *arg)
+{
+	struct channel *ch = arg;
+	struct channel *ch2;
+	struct net_device *dev = ch->netdev;
+	struct ctcm_priv *priv = dev->ml_priv;
+
+	CTCM_DBF_TEXT_(TRACE, CTC_DBF_NOTICE,
+			"%s: %s: remote disconnect - re-init ...",
+				CTCM_FUNTAIL, dev->name);
+	fsm_deltimer(&ch->timer);
+	/*
+	 * Notify device statemachine
+	 */
+	fsm_event(priv->fsm, DEV_EVENT_RXDOWN, dev);
+	fsm_event(priv->fsm, DEV_EVENT_TXDOWN, dev);
+
+	fsm_newstate(fi, CTC_STATE_DTERM);
+	ch2 = priv->channel[CTCM_WRITE];
+	fsm_newstate(ch2->fsm, CTC_STATE_DTERM);
+
+	ccw_device_halt(ch->cdev, (unsigned long)ch);
+	ccw_device_halt(ch2->cdev, (unsigned long)ch2);
+}
+
+/**
+ * Handle error during TX channel initialization.
+ *
+ * fi		An instance of a channel statemachine.
+ * event	The event that just happened.
+ * arg		Generic pointer, cast from channel * upon call.
+ */
+static void ctcm_chx_txiniterr(fsm_instance *fi, int event, void *arg)
+{
+	struct channel *ch = arg;
+	struct net_device *dev = ch->netdev;
+	struct ctcm_priv *priv = dev->ml_priv;
+
+	if (event == CTC_EVENT_TIMER) {
+		fsm_deltimer(&ch->timer);
+		if (ch->retry++ < 3)
+			ctcm_chx_restart(fi, event, arg);
+		else {
+			fsm_newstate(fi, CTC_STATE_TXERR);
+			fsm_event(priv->fsm, DEV_EVENT_TXDOWN, dev);
+		}
+	} else {
+		CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR,
+			"%s(%s): %s in %s", CTCM_FUNTAIL, ch->id,
+			ctc_ch_event_names[event], fsm_getstate_str(fi));
+
+		dev_warn(&dev->dev,
+			"Initialization failed with RX/TX init handshake "
+			"error %s\n", ctc_ch_event_names[event]);
+	}
+}
+
+/**
+ * Handle TX timeout by retrying operation.
+ *
+ * fi		An instance of a channel statemachine.
+ * event	The event that just happened.
+ * arg		Generic pointer, cast from channel * upon call.
+ */
+static void ctcm_chx_txretry(fsm_instance *fi, int event, void *arg)
+{
+	struct channel *ch = arg;
+	struct net_device *dev = ch->netdev;
+	struct ctcm_priv *priv = dev->ml_priv;
+	struct sk_buff *skb;
+
+	CTCM_PR_DEBUG("Enter: %s: cp=%i ch=0x%p id=%s\n",
+			__func__, smp_processor_id(), ch, ch->id);
+
+	fsm_deltimer(&ch->timer);
+	if (ch->retry++ > 3) {
+		struct mpc_group *gptr = priv->mpcg;
+		CTCM_DBF_TEXT_(TRACE, CTC_DBF_INFO,
+				"%s: %s: retries exceeded",
+					CTCM_FUNTAIL, ch->id);
+		fsm_event(priv->fsm, DEV_EVENT_TXDOWN, dev);
+		/* Call restart if not MPC, or if MPC and the mpcg fsm
+		 * is ready; use gptr as the MPC indicator. */
+		if (!(gptr && (fsm_getstate(gptr->fsm) != MPCG_STATE_READY)))
+			ctcm_chx_restart(fi, event, arg);
+		goto done;
+	}
+
+	CTCM_DBF_TEXT_(TRACE, CTC_DBF_DEBUG,
+			"%s : %s: retry %d",
+				CTCM_FUNTAIL, ch->id, ch->retry);
+	skb = skb_peek(&ch->io_queue);
+	if (skb) {
+		int rc = 0;
+		unsigned long saveflags = 0;
+		clear_normalized_cda(&ch->ccw[4]);
+		ch->ccw[4].count = skb->len;
+		if (set_normalized_cda(&ch->ccw[4], skb->data)) {
+			CTCM_DBF_TEXT_(TRACE, CTC_DBF_INFO,
+				"%s: %s: IDAL alloc failed",
+						CTCM_FUNTAIL, ch->id);
+			fsm_event(priv->fsm, DEV_EVENT_TXDOWN, dev);
+			ctcm_chx_restart(fi, event, arg);
+			goto done;
+		}
+		fsm_addtimer(&ch->timer, 1000, CTC_EVENT_TIMER, ch);
+		if (event == CTC_EVENT_TIMER) /* for TIMER not yet locked */
+			spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags);
+			/* Such conditional locking is a known problem for
+			 * sparse because it's nondeterministic in a static
+			 * view. Warnings should be ignored here. */
+		if (do_debug_ccw)
+			ctcmpc_dumpit((char *)&ch->ccw[3],
+					sizeof(struct ccw1) * 3);
+
+		rc = ccw_device_start(ch->cdev, &ch->ccw[3],
+						(unsigned long)ch, 0xff, 0);
+		if (event == CTC_EVENT_TIMER)
+			spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev),
+					saveflags);
+		if (rc != 0) {
+			fsm_deltimer(&ch->timer);
+			ctcm_ccw_check_rc(ch, rc, "TX in chx_txretry");
+			ctcm_purge_skb_queue(&ch->io_queue);
+		}
+	}
+done:
+	return;
+}
+
+/**
+ * Handle fatal errors during an I/O command.
+ *
+ * fi		An instance of a channel statemachine.
+ * event	The event that just happened.
+ * arg		Generic pointer, cast from channel * upon call.
+ */
+static void ctcm_chx_iofatal(fsm_instance *fi, int event, void *arg)
+{
+	struct channel *ch = arg;
+	struct net_device *dev = ch->netdev;
+	struct ctcm_priv *priv = dev->ml_priv;
+	int rd = CHANNEL_DIRECTION(ch->flags);
+
+	fsm_deltimer(&ch->timer);
+	CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR,
+		"%s: %s: %s unrecoverable channel error",
+			CTCM_FUNTAIL, ch->id, rd == CTCM_READ ? "RX" : "TX");
+
+	if (IS_MPC(ch)) {
+		priv->stats.tx_dropped++;
+		priv->stats.tx_errors++;
+	}
+	if (rd == CTCM_READ) {
+		fsm_newstate(fi, CTC_STATE_RXERR);
+		fsm_event(priv->fsm, DEV_EVENT_RXDOWN, dev);
+	} else {
+		fsm_newstate(fi, CTC_STATE_TXERR);
+		fsm_event(priv->fsm, DEV_EVENT_TXDOWN, dev);
+	}
+}
+
+/*
+ * The ctcm statemachine for a channel.
+ */
+const fsm_node ch_fsm[] = {
+	{ CTC_STATE_STOPPED,	CTC_EVENT_STOP,		ctcm_action_nop  },
+	{ CTC_STATE_STOPPED,	CTC_EVENT_START,	ctcm_chx_start  },
+	{ CTC_STATE_STOPPED,	CTC_EVENT_FINSTAT,	ctcm_action_nop  },
+	{ CTC_STATE_STOPPED,	CTC_EVENT_MC_FAIL,	ctcm_action_nop  },
+
+	{ CTC_STATE_NOTOP,	CTC_EVENT_STOP,		ctcm_chx_stop  },
+	{ CTC_STATE_NOTOP,	CTC_EVENT_START,	ctcm_action_nop  },
+	{ CTC_STATE_NOTOP,	CTC_EVENT_FINSTAT,	ctcm_action_nop  },
+	{ CTC_STATE_NOTOP,	CTC_EVENT_MC_FAIL,	ctcm_action_nop  },
+	{ CTC_STATE_NOTOP,	CTC_EVENT_MC_GOOD,	ctcm_chx_start  },
+
+	{ CTC_STATE_STARTWAIT,	CTC_EVENT_STOP,		ctcm_chx_haltio  },
+	{ CTC_STATE_STARTWAIT,	CTC_EVENT_START,	ctcm_action_nop  },
+	{ CTC_STATE_STARTWAIT,	CTC_EVENT_FINSTAT,	ctcm_chx_setmode  },
+	{ CTC_STATE_STARTWAIT,	CTC_EVENT_TIMER,	ctcm_chx_setuperr  },
+	{ CTC_STATE_STARTWAIT,	CTC_EVENT_IO_ENODEV,	ctcm_chx_iofatal  },
+	{ CTC_STATE_STARTWAIT,	CTC_EVENT_MC_FAIL,	ctcm_chx_fail  },
+
+	{ CTC_STATE_STARTRETRY,	CTC_EVENT_STOP,		ctcm_chx_haltio  },
+	{ CTC_STATE_STARTRETRY,	CTC_EVENT_TIMER,	ctcm_chx_setmode  },
+	{ CTC_STATE_STARTRETRY,	CTC_EVENT_FINSTAT,	ctcm_action_nop  },
+	{ CTC_STATE_STARTRETRY,	CTC_EVENT_MC_FAIL,	ctcm_chx_fail  },
+
+	{ CTC_STATE_SETUPWAIT,	CTC_EVENT_STOP,		ctcm_chx_haltio  },
+	{ CTC_STATE_SETUPWAIT,	CTC_EVENT_START,	ctcm_action_nop  },
+	{ CTC_STATE_SETUPWAIT,	CTC_EVENT_FINSTAT,	chx_firstio  },
+	{ CTC_STATE_SETUPWAIT,	CTC_EVENT_UC_RCRESET,	ctcm_chx_setuperr  },
+	{ CTC_STATE_SETUPWAIT,	CTC_EVENT_UC_RSRESET,	ctcm_chx_setuperr  },
+	{ CTC_STATE_SETUPWAIT,	CTC_EVENT_TIMER,	ctcm_chx_setmode  },
+	{ CTC_STATE_SETUPWAIT,	CTC_EVENT_IO_ENODEV,	ctcm_chx_iofatal  },
+	{ CTC_STATE_SETUPWAIT,	CTC_EVENT_MC_FAIL,	ctcm_chx_fail  },
+
+	{ CTC_STATE_RXINIT,	CTC_EVENT_STOP,		ctcm_chx_haltio  },
+	{ CTC_STATE_RXINIT,	CTC_EVENT_START,	ctcm_action_nop  },
+	{ CTC_STATE_RXINIT,	CTC_EVENT_FINSTAT,	chx_rxidle  },
+	{ CTC_STATE_RXINIT,	CTC_EVENT_UC_RCRESET,	ctcm_chx_rxiniterr  },
+	{ CTC_STATE_RXINIT,	CTC_EVENT_UC_RSRESET,	ctcm_chx_rxiniterr  },
+	{ CTC_STATE_RXINIT,	CTC_EVENT_TIMER,	ctcm_chx_rxiniterr  },
+	{ CTC_STATE_RXINIT,	CTC_EVENT_ATTNBUSY,	ctcm_chx_rxinitfail  },
+	{ CTC_STATE_RXINIT,	CTC_EVENT_IO_ENODEV,	ctcm_chx_iofatal  },
+	{ CTC_STATE_RXINIT,	CTC_EVENT_UC_ZERO,	chx_firstio  },
+	{ CTC_STATE_RXINIT,	CTC_EVENT_MC_FAIL,	ctcm_chx_fail  },
+
+	{ CTC_STATE_RXIDLE,	CTC_EVENT_STOP,		ctcm_chx_haltio  },
+	{ CTC_STATE_RXIDLE,	CTC_EVENT_START,	ctcm_action_nop  },
+	{ CTC_STATE_RXIDLE,	CTC_EVENT_FINSTAT,	chx_rx  },
+	{ CTC_STATE_RXIDLE,	CTC_EVENT_UC_RCRESET,	ctcm_chx_rxdisc  },
+	{ CTC_STATE_RXIDLE,	CTC_EVENT_IO_ENODEV,	ctcm_chx_iofatal  },
+	{ CTC_STATE_RXIDLE,	CTC_EVENT_MC_FAIL,	ctcm_chx_fail  },
+	{ CTC_STATE_RXIDLE,	CTC_EVENT_UC_ZERO,	chx_rx  },
+
+	{ CTC_STATE_TXINIT,	CTC_EVENT_STOP,		ctcm_chx_haltio  },
+	{ CTC_STATE_TXINIT,	CTC_EVENT_START,	ctcm_action_nop  },
+	{ CTC_STATE_TXINIT,	CTC_EVENT_FINSTAT,	ctcm_chx_txidle  },
+	{ CTC_STATE_TXINIT,	CTC_EVENT_UC_RCRESET,	ctcm_chx_txiniterr  },
+	{ CTC_STATE_TXINIT,	CTC_EVENT_UC_RSRESET,	ctcm_chx_txiniterr  },
+	{ CTC_STATE_TXINIT,	CTC_EVENT_TIMER,	ctcm_chx_txiniterr  },
+	{ CTC_STATE_TXINIT,	CTC_EVENT_IO_ENODEV,	ctcm_chx_iofatal  },
+	{ CTC_STATE_TXINIT,	CTC_EVENT_MC_FAIL,	ctcm_chx_fail  },
+
+	{ CTC_STATE_TXIDLE,	CTC_EVENT_STOP,		ctcm_chx_haltio  },
+	{ CTC_STATE_TXIDLE,	CTC_EVENT_START,	ctcm_action_nop  },
+	{ CTC_STATE_TXIDLE,	CTC_EVENT_FINSTAT,	chx_firstio  },
+	{ CTC_STATE_TXIDLE,	CTC_EVENT_UC_RCRESET,	ctcm_action_nop  },
+	{ CTC_STATE_TXIDLE,	CTC_EVENT_UC_RSRESET,	ctcm_action_nop  },
+	{ CTC_STATE_TXIDLE,	CTC_EVENT_IO_ENODEV,	ctcm_chx_iofatal  },
+	{ CTC_STATE_TXIDLE,	CTC_EVENT_MC_FAIL,	ctcm_chx_fail  },
+
+	{ CTC_STATE_TERM,	CTC_EVENT_STOP,		ctcm_action_nop  },
+	{ CTC_STATE_TERM,	CTC_EVENT_START,	ctcm_chx_restart  },
+	{ CTC_STATE_TERM,	CTC_EVENT_FINSTAT,	ctcm_chx_stopped  },
+	{ CTC_STATE_TERM,	CTC_EVENT_UC_RCRESET,	ctcm_action_nop  },
+	{ CTC_STATE_TERM,	CTC_EVENT_UC_RSRESET,	ctcm_action_nop  },
+	{ CTC_STATE_TERM,	CTC_EVENT_MC_FAIL,	ctcm_chx_fail  },
+
+	{ CTC_STATE_DTERM,	CTC_EVENT_STOP,		ctcm_chx_haltio  },
+	{ CTC_STATE_DTERM,	CTC_EVENT_START,	ctcm_chx_restart  },
+	{ CTC_STATE_DTERM,	CTC_EVENT_FINSTAT,	ctcm_chx_setmode  },
+	{ CTC_STATE_DTERM,	CTC_EVENT_UC_RCRESET,	ctcm_action_nop  },
+	{ CTC_STATE_DTERM,	CTC_EVENT_UC_RSRESET,	ctcm_action_nop  },
+	{ CTC_STATE_DTERM,	CTC_EVENT_MC_FAIL,	ctcm_chx_fail  },
+
+	{ CTC_STATE_TX,		CTC_EVENT_STOP,		ctcm_chx_haltio  },
+	{ CTC_STATE_TX,		CTC_EVENT_START,	ctcm_action_nop  },
+	{ CTC_STATE_TX,		CTC_EVENT_FINSTAT,	chx_txdone  },
+	{ CTC_STATE_TX,		CTC_EVENT_UC_RCRESET,	ctcm_chx_txretry  },
+	{ CTC_STATE_TX,		CTC_EVENT_UC_RSRESET,	ctcm_chx_txretry  },
+	{ CTC_STATE_TX,		CTC_EVENT_TIMER,	ctcm_chx_txretry  },
+	{ CTC_STATE_TX,		CTC_EVENT_IO_ENODEV,	ctcm_chx_iofatal  },
+	{ CTC_STATE_TX,		CTC_EVENT_MC_FAIL,	ctcm_chx_fail  },
+
+	{ CTC_STATE_RXERR,	CTC_EVENT_STOP,		ctcm_chx_haltio  },
+	{ CTC_STATE_TXERR,	CTC_EVENT_STOP,		ctcm_chx_haltio  },
+	{ CTC_STATE_TXERR,	CTC_EVENT_MC_FAIL,	ctcm_chx_fail  },
+	{ CTC_STATE_RXERR,	CTC_EVENT_MC_FAIL,	ctcm_chx_fail  },
+};
+
+int ch_fsm_len = ARRAY_SIZE(ch_fsm);
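+
+/*
+ * The table above is dispatched by the generic fsm code (see fsm.h):
+ * fsm_event() looks up the (current state, event) pair and runs the
+ * registered action. Example: in CTC_STATE_TX, CTC_EVENT_FINSTAT
+ * invokes chx_txdone(), while a pair without an entry performs no
+ * action (fsm_event() returns nonzero).
+ */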
+
+/*
+ * MPC actions for the mpc channel statemachine.
+ * Handling of the MPC protocol requires an extra
+ * statemachine and actions, which are prefixed ctcmpc_ .
+ * The ctc_ch_states and ctc_ch_state_names, as well as
+ * ctc_ch_events and ctc_ch_event_names, share the ctcm definitions,
+ * which are extended by some elements.
+ */
+
+/*
+ * Actions for mpc channel statemachine.
+ */
+
+/**
+ * Normal data has been sent. Free the corresponding
+ * skb (it's in io_queue), reset dev->tbusy and
+ * revert to idle state.
+ *
+ * fi		An instance of a channel statemachine.
+ * event	The event that just happened.
+ * arg		Generic pointer, cast from channel * upon call.
+ */
+static void ctcmpc_chx_txdone(fsm_instance *fi, int event, void *arg)
+{
+	struct channel		*ch = arg;
+	struct net_device	*dev = ch->netdev;
+	struct ctcm_priv	*priv = dev->ml_priv;
+	struct mpc_group	*grp = priv->mpcg;
+	struct sk_buff		*skb;
+	int		first = 1;
+	int		i;
+	__u32		data_space;
+	unsigned long	duration;
+	struct sk_buff	*peekskb;
+	int		rc;
+	struct th_header *header;
+	struct pdu	*p_header;
+	unsigned long done_stamp = jiffies;
+
+	CTCM_PR_DEBUG("Enter %s: %s cp:%i\n",
+			__func__, dev->name, smp_processor_id());
+
+	duration = done_stamp - ch->prof.send_stamp;
+	if (duration > ch->prof.tx_time)
+		ch->prof.tx_time = duration;
+
+	if (ch->irb->scsw.cmd.count != 0)
+		CTCM_DBF_TEXT_(MPC_TRACE, CTC_DBF_DEBUG,
+			"%s(%s): TX not complete, remaining %d bytes",
+			     CTCM_FUNTAIL, dev->name, ch->irb->scsw.cmd.count);
+	fsm_deltimer(&ch->timer);
+	while ((skb = skb_dequeue(&ch->io_queue))) {
+		priv->stats.tx_packets++;
+		priv->stats.tx_bytes += skb->len - TH_HEADER_LENGTH;
+		if (first) {
+			priv->stats.tx_bytes += 2;
+			first = 0;
+		}
+		refcount_dec(&skb->users);
+		dev_kfree_skb_irq(skb);
+	}
+	spin_lock(&ch->collect_lock);
+	clear_normalized_cda(&ch->ccw[4]);
+	if ((ch->collect_len <= 0) || (grp->in_sweep != 0)) {
+		spin_unlock(&ch->collect_lock);
+		fsm_newstate(fi, CTC_STATE_TXIDLE);
+		goto done;
+	}
+
+	if (ctcm_checkalloc_buffer(ch)) {
+		spin_unlock(&ch->collect_lock);
+		goto done;
+	}
+	ch->trans_skb->data = ch->trans_skb_data;
+	skb_reset_tail_pointer(ch->trans_skb);
+	ch->trans_skb->len = 0;
+	if (ch->prof.maxmulti < (ch->collect_len + TH_HEADER_LENGTH))
+		ch->prof.maxmulti = ch->collect_len + TH_HEADER_LENGTH;
+	if (ch->prof.maxcqueue < skb_queue_len(&ch->collect_queue))
+		ch->prof.maxcqueue = skb_queue_len(&ch->collect_queue);
+	i = 0;
+	p_header = NULL;
+	data_space = grp->group_max_buflen - TH_HEADER_LENGTH;
+
+	CTCM_PR_DBGDATA("%s: building trans_skb from collect_q"
+		       " data_space:%04x\n",
+		       __func__, data_space);
+
+	while ((skb = skb_dequeue(&ch->collect_queue))) {
+		skb_put_data(ch->trans_skb, skb->data, skb->len);
+		p_header = (struct pdu *)
+			(skb_tail_pointer(ch->trans_skb) - skb->len);
+		p_header->pdu_flag = 0x00;
+		if (be16_to_cpu(skb->protocol) == ETH_P_SNAP)
+			p_header->pdu_flag |= 0x60;
+		else
+			p_header->pdu_flag |= 0x20;
+
+		CTCM_PR_DBGDATA("%s: trans_skb len:%04x \n",
+				__func__, ch->trans_skb->len);
+		CTCM_PR_DBGDATA("%s: pdu header and data for up"
+				" to 32 bytes sent to vtam\n", __func__);
+		CTCM_D3_DUMP((char *)p_header, min_t(int, skb->len, 32));
+
+		ch->collect_len -= skb->len;
+		data_space -= skb->len;
+		priv->stats.tx_packets++;
+		priv->stats.tx_bytes += skb->len;
+		refcount_dec(&skb->users);
+		dev_kfree_skb_any(skb);
+		peekskb = skb_peek(&ch->collect_queue);
+		/* stop when the queue is drained or the next skb won't fit */
+		if (!peekskb || peekskb->len > data_space)
+			break;
+		i++;
+	}
+	/* p_header points to the last one we handled */
+	if (p_header)
+		p_header->pdu_flag |= PDU_LAST;	/*Say it's the last one*/
+	header = kzalloc(TH_HEADER_LENGTH, gfp_type());
+	if (!header) {
+		spin_unlock(&ch->collect_lock);
+		fsm_event(priv->mpcg->fsm, MPCG_EVENT_INOP, dev);
+		goto done;
+	}
+	header->th_ch_flag = TH_HAS_PDU;  /* Normal data */
+	ch->th_seq_num++;
+	header->th_seq_num = ch->th_seq_num;
+
+	CTCM_PR_DBGDATA("%s: ToVTAM_th_seq= %08x\n" ,
+					__func__, ch->th_seq_num);
+
+	memcpy(skb_push(ch->trans_skb, TH_HEADER_LENGTH), header,
+		TH_HEADER_LENGTH);	/* put the TH on the packet */
+
+	kfree(header);
+
+	CTCM_PR_DBGDATA("%s: trans_skb len:%04x \n",
+		       __func__, ch->trans_skb->len);
+	CTCM_PR_DBGDATA("%s: up-to-50 bytes of trans_skb "
+			"data to vtam from collect_q\n", __func__);
+	CTCM_D3_DUMP((char *)ch->trans_skb->data,
+				min_t(int, ch->trans_skb->len, 50));
+
+	spin_unlock(&ch->collect_lock);
+	clear_normalized_cda(&ch->ccw[1]);
+
+	CTCM_PR_DBGDATA("ccwcda=0x%p data=0x%p\n",
+			(void *)(unsigned long)ch->ccw[1].cda,
+			ch->trans_skb->data);
+	ch->ccw[1].count = ch->max_bufsize;
+
+	if (set_normalized_cda(&ch->ccw[1], ch->trans_skb->data)) {
+		dev_kfree_skb_any(ch->trans_skb);
+		ch->trans_skb = NULL;
+		CTCM_DBF_TEXT_(MPC_TRACE, CTC_DBF_ERROR,
+			"%s: %s: IDAL alloc failed",
+				CTCM_FUNTAIL, ch->id);
+		fsm_event(priv->mpcg->fsm, MPCG_EVENT_INOP, dev);
+		return;
+	}
+
+	CTCM_PR_DBGDATA("ccwcda=0x%p data=0x%p\n",
+			(void *)(unsigned long)ch->ccw[1].cda,
+			ch->trans_skb->data);
+
+	ch->ccw[1].count = ch->trans_skb->len;
+	fsm_addtimer(&ch->timer, CTCM_TIME_5_SEC, CTC_EVENT_TIMER, ch);
+	ch->prof.send_stamp = jiffies;
+	if (do_debug_ccw)
+		ctcmpc_dumpit((char *)&ch->ccw[0], sizeof(struct ccw1) * 3);
+	rc = ccw_device_start(ch->cdev, &ch->ccw[0],
+					(unsigned long)ch, 0xff, 0);
+	ch->prof.doios_multi++;
+	if (rc != 0) {
+		priv->stats.tx_dropped += i;
+		priv->stats.tx_errors += i;
+		fsm_deltimer(&ch->timer);
+		ctcm_ccw_check_rc(ch, rc, "chained TX");
+	}
+done:
+	ctcm_clear_busy(dev);
+	return;
+}
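+
+/*
+ * Note on the MPC wire format built above: the outgoing trans_skb is a
+ * TH header (carrying the incremented th_seq_num) followed by PDUs;
+ * each PDU's pdu_flag encodes the payload type (0x60 for SNAP, 0x20
+ * otherwise) and the last PDU in the block is tagged with PDU_LAST.
+ */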
+
+/**
+ * Got normal data; check it for sanity, queue it up, allocate a new
+ * buffer, trigger the bottom half, and initiate the next read.
+ *
+ * fi		An instance of a channel statemachine.
+ * event	The event that just happened.
+ * arg		Generic pointer, cast from channel * upon call.
+ */
+static void ctcmpc_chx_rx(fsm_instance *fi, int event, void *arg)
+{
+	struct channel		*ch = arg;
+	struct net_device	*dev = ch->netdev;
+	struct ctcm_priv	*priv = dev->ml_priv;
+	struct mpc_group	*grp = priv->mpcg;
+	struct sk_buff		*skb = ch->trans_skb;
+	struct sk_buff		*new_skb;
+	unsigned long		saveflags = 0;	/* avoids compiler warning */
+	int len	= ch->max_bufsize - ch->irb->scsw.cmd.count;
+
+	CTCM_PR_DEBUG("%s: %s: cp:%i %s maxbuf : %04x, len: %04x\n",
+			CTCM_FUNTAIL, dev->name, smp_processor_id(),
+				ch->id, ch->max_bufsize, len);
+	fsm_deltimer(&ch->timer);
+
+	if (skb == NULL) {
+		CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR,
+			"%s(%s): TRANS_SKB = NULL",
+				CTCM_FUNTAIL, dev->name);
+		goto again;
+	}
+
+	if (len < TH_HEADER_LENGTH) {
+		CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR,
+				"%s(%s): packet length %d too short",
+					CTCM_FUNTAIL, dev->name, len);
+		priv->stats.rx_dropped++;
+		priv->stats.rx_length_errors++;
+	} else {
+		/* must have valid th header or game over */
+		__u32	block_len = len;
+		len = TH_HEADER_LENGTH + XID2_LENGTH + 4;
+		new_skb = __dev_alloc_skb(ch->max_bufsize, GFP_ATOMIC);
+
+		if (new_skb == NULL) {
+			CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR,
+				"%s(%s): skb allocation failed",
+						CTCM_FUNTAIL, dev->name);
+			fsm_event(priv->mpcg->fsm, MPCG_EVENT_INOP, dev);
+			goto again;
+		}
+		switch (fsm_getstate(grp->fsm)) {
+		case MPCG_STATE_RESET:
+		case MPCG_STATE_INOP:
+			dev_kfree_skb_any(new_skb);
+			break;
+		case MPCG_STATE_FLOWC:
+		case MPCG_STATE_READY:
+			skb_put_data(new_skb, skb->data, block_len);
+			skb_queue_tail(&ch->io_queue, new_skb);
+			tasklet_schedule(&ch->ch_tasklet);
+			break;
+		default:
+			skb_put_data(new_skb, skb->data, len);
+			skb_queue_tail(&ch->io_queue, new_skb);
+			tasklet_hi_schedule(&ch->ch_tasklet);
+			break;
+		}
+	}
+
+again:
+	switch (fsm_getstate(grp->fsm)) {
+	int rc, dolock;
+	case MPCG_STATE_FLOWC:
+	case MPCG_STATE_READY:
+		if (ctcm_checkalloc_buffer(ch))
+			break;
+		ch->trans_skb->data = ch->trans_skb_data;
+		skb_reset_tail_pointer(ch->trans_skb);
+		ch->trans_skb->len = 0;
+		ch->ccw[1].count = ch->max_bufsize;
+		if (do_debug_ccw)
+			ctcmpc_dumpit((char *)&ch->ccw[0],
+					sizeof(struct ccw1) * 3);
+		dolock = !in_irq();
+		if (dolock)
+			spin_lock_irqsave(
+				get_ccwdev_lock(ch->cdev), saveflags);
+		rc = ccw_device_start(ch->cdev, &ch->ccw[0],
+						(unsigned long)ch, 0xff, 0);
+		if (dolock) /* see remark about conditional locking */
+			spin_unlock_irqrestore(
+				get_ccwdev_lock(ch->cdev), saveflags);
+		if (rc != 0)
+			ctcm_ccw_check_rc(ch, rc, "normal RX");
+	default:
+		break;
+	}
+
+	CTCM_PR_DEBUG("Exit %s: %s, ch=0x%p, id=%s\n",
+			__func__, dev->name, ch, ch->id);
+
+}
+
+/**
+ * MPC variant of chx_firstio: bring the channel into its initial
+ * RX/TX state once a transfer buffer is available.
+ *
+ * fi		An instance of a channel statemachine.
+ * event	The event that just happened.
+ * arg		Generic pointer, cast from channel * upon call.
+ */
+static void ctcmpc_chx_firstio(fsm_instance *fi, int event, void *arg)
+{
+	struct channel		*ch = arg;
+	struct net_device	*dev = ch->netdev;
+	struct ctcm_priv	*priv = dev->ml_priv;
+	struct mpc_group	*gptr = priv->mpcg;
+
+	CTCM_PR_DEBUG("Enter %s: id=%s, ch=0x%p\n",
+				__func__, ch->id, ch);
+
+	CTCM_DBF_TEXT_(MPC_TRACE, CTC_DBF_INFO,
+			"%s: %s: chstate:%i, grpstate:%i, prot:%i\n",
+			CTCM_FUNTAIL, ch->id, fsm_getstate(fi),
+			fsm_getstate(gptr->fsm), ch->protocol);
+
+	if (fsm_getstate(fi) == CTC_STATE_TXIDLE)
+		MPC_DBF_DEV_NAME(TRACE, dev, "remote side issued READ? ");
+
+	fsm_deltimer(&ch->timer);
+	if (ctcm_checkalloc_buffer(ch))
+		goto done;
+
+	switch (fsm_getstate(fi)) {
+	case CTC_STATE_STARTRETRY:
+	case CTC_STATE_SETUPWAIT:
+		if (CHANNEL_DIRECTION(ch->flags) == CTCM_READ) {
+			ctcmpc_chx_rxidle(fi, event, arg);
+		} else {
+			fsm_newstate(fi, CTC_STATE_TXIDLE);
+			fsm_event(priv->fsm, DEV_EVENT_TXUP, dev);
+		}
+		goto done;
+	default:
+		break;
+	}
+
+	fsm_newstate(fi, (CHANNEL_DIRECTION(ch->flags) == CTCM_READ)
+		     ? CTC_STATE_RXINIT : CTC_STATE_TXINIT);
+
+done:
+	CTCM_PR_DEBUG("Exit %s: id=%s, ch=0x%p\n",
+				__func__, ch->id, ch);
+	return;
+}
+
+/**
+ * Got initial data, check it. If OK,
+ * notify device statemachine that we are up and
+ * running.
+ *
+ * fi		An instance of a channel statemachine.
+ * event	The event that just happened.
+ * arg		Generic pointer, cast from channel * upon call.
+ */
+void ctcmpc_chx_rxidle(fsm_instance *fi, int event, void *arg)
+{
+	struct channel *ch = arg;
+	struct net_device *dev = ch->netdev;
+	struct ctcm_priv  *priv = dev->ml_priv;
+	struct mpc_group  *grp = priv->mpcg;
+	int rc;
+	unsigned long saveflags = 0;	/* avoids compiler warning */
+
+	fsm_deltimer(&ch->timer);
+	CTCM_PR_DEBUG("%s: %s: %s: cp:%i, chstate:%i grpstate:%i\n",
+			__func__, ch->id, dev->name, smp_processor_id(),
+				fsm_getstate(fi), fsm_getstate(grp->fsm));
+
+	fsm_newstate(fi, CTC_STATE_RXIDLE);
+	/* XID processing complete */
+
+	switch (fsm_getstate(grp->fsm)) {
+	case MPCG_STATE_FLOWC:
+	case MPCG_STATE_READY:
+		if (ctcm_checkalloc_buffer(ch))
+			goto done;
+		ch->trans_skb->data = ch->trans_skb_data;
+		skb_reset_tail_pointer(ch->trans_skb);
+		ch->trans_skb->len = 0;
+		ch->ccw[1].count = ch->max_bufsize;
+		CTCM_CCW_DUMP((char *)&ch->ccw[0], sizeof(struct ccw1) * 3);
+		if (event == CTC_EVENT_START)
+			/* see remark about conditional locking */
+			spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags);
+		rc = ccw_device_start(ch->cdev, &ch->ccw[0],
+						(unsigned long)ch, 0xff, 0);
+		if (event == CTC_EVENT_START)
+			spin_unlock_irqrestore(
+					get_ccwdev_lock(ch->cdev), saveflags);
+		if (rc != 0) {
+			fsm_newstate(fi, CTC_STATE_RXINIT);
+			ctcm_ccw_check_rc(ch, rc, "initial RX");
+			goto done;
+		}
+		break;
+	default:
+		break;
+	}
+
+	fsm_event(priv->fsm, DEV_EVENT_RXUP, dev);
+done:
+	return;
+}
+
+/*
+ * ctcmpc channel FSM action
+ * called from several points in ctcmpc_ch_fsm
+ * ctcmpc only
+ */
+static void ctcmpc_chx_attn(fsm_instance *fsm, int event, void *arg)
+{
+	struct channel	  *ch     = arg;
+	struct net_device *dev    = ch->netdev;
+	struct ctcm_priv  *priv   = dev->ml_priv;
+	struct mpc_group  *grp = priv->mpcg;
+
+	CTCM_PR_DEBUG("%s(%s): %s(ch=0x%p), cp=%i, ChStat:%s, GrpStat:%s\n",
+		__func__, dev->name, ch->id, ch, smp_processor_id(),
+			fsm_getstate_str(ch->fsm), fsm_getstate_str(grp->fsm));
+
+	switch (fsm_getstate(grp->fsm)) {
+	case MPCG_STATE_XID2INITW:
+		/* ok..start yside xid exchanges */
+		if (!ch->in_mpcgroup)
+			break;
+		if (fsm_getstate(ch->fsm) ==  CH_XID0_PENDING) {
+			fsm_deltimer(&grp->timer);
+			fsm_addtimer(&grp->timer,
+				MPC_XID_TIMEOUT_VALUE,
+				MPCG_EVENT_TIMER, dev);
+			fsm_event(grp->fsm, MPCG_EVENT_XID0DO, ch);
+
+		} else if (fsm_getstate(ch->fsm) < CH_XID7_PENDING1)
+			/* attn rcvd before xid0 processed via bh */
+			fsm_newstate(ch->fsm, CH_XID7_PENDING1);
+		break;
+	case MPCG_STATE_XID2INITX:
+	case MPCG_STATE_XID0IOWAIT:
+	case MPCG_STATE_XID0IOWAIX:
+		/* attn received before xid0 was processed on this channel,
+		 * but mid-xid0 processing is in progress for the group */
+		if (fsm_getstate(ch->fsm) < CH_XID7_PENDING1)
+			fsm_newstate(ch->fsm, CH_XID7_PENDING1);
+		break;
+	case MPCG_STATE_XID7INITW:
+	case MPCG_STATE_XID7INITX:
+	case MPCG_STATE_XID7INITI:
+	case MPCG_STATE_XID7INITZ:
+		switch (fsm_getstate(ch->fsm)) {
+		case CH_XID7_PENDING:
+			fsm_newstate(ch->fsm, CH_XID7_PENDING1);
+			break;
+		case CH_XID7_PENDING2:
+			fsm_newstate(ch->fsm, CH_XID7_PENDING3);
+			break;
+		}
+		fsm_event(grp->fsm, MPCG_EVENT_XID7DONE, dev);
+		break;
+	}
+
+	return;
+}
+
+/*
+ * ctcmpc channel FSM action
+ * called from one point in ctcmpc_ch_fsm
+ * ctcmpc only
+ */
+static void ctcmpc_chx_attnbusy(fsm_instance *fsm, int event, void *arg)
+{
+	struct channel	  *ch     = arg;
+	struct net_device *dev    = ch->netdev;
+	struct ctcm_priv  *priv   = dev->ml_priv;
+	struct mpc_group  *grp    = priv->mpcg;
+
+	CTCM_PR_DEBUG("%s(%s): %s\n  ChState:%s GrpState:%s\n",
+			__func__, dev->name, ch->id,
+			fsm_getstate_str(ch->fsm), fsm_getstate_str(grp->fsm));
+
+	fsm_deltimer(&ch->timer);
+
+	switch (fsm_getstate(grp->fsm)) {
+	case MPCG_STATE_XID0IOWAIT:
+		/* VTAM wants to be primary; start yside XID exchanges.
+		 * Only one attn-busy is received at a time, so the
+		 * state must not change each time. */
+		grp->changed_side = 1;
+		fsm_newstate(grp->fsm, MPCG_STATE_XID2INITW);
+		break;
+	case MPCG_STATE_XID2INITW:
+		if (grp->changed_side == 1) {
+			grp->changed_side = 2;
+			break;
+		}
+		/* The process began via a call to establish_conn,
+		 * so we must report failure instead of reverting
+		 * to the ready-for-xid passive state. */
+		if (grp->estconnfunc)
+			goto done;
+		/* This attnbusy is NOT the result of xside XID
+		 * collisions, so yside must have been triggered
+		 * by an ATTN that was not intended to start XID
+		 * processing. Revert to ready-for-xid and
+		 * wait for an ATTN interrupt to signal XID start. */
+		if (fsm_getstate(ch->fsm) == CH_XID0_INPROGRESS) {
+			fsm_newstate(ch->fsm, CH_XID0_PENDING);
+			fsm_deltimer(&grp->timer);
+			goto done;
+		}
+		fsm_event(grp->fsm, MPCG_EVENT_INOP, dev);
+		goto done;
+	case MPCG_STATE_XID2INITX:
+		/* XID2 was received before ATTN Busy for the second
+		 * channel. Send yside XID for the second channel.
+		 */
+		if (grp->changed_side == 1) {
+			grp->changed_side = 2;
+			break;
+		}
+	case MPCG_STATE_XID0IOWAIX:
+	case MPCG_STATE_XID7INITW:
+	case MPCG_STATE_XID7INITX:
+	case MPCG_STATE_XID7INITI:
+	case MPCG_STATE_XID7INITZ:
+	default:
+		/* Multiple attn-busy indicates we are too far out of sync,
+		 * and they are certainly not being received as part
+		 * of valid MPC group negotiations. */
+		fsm_event(grp->fsm, MPCG_EVENT_INOP, dev);
+		goto done;
+	}
+
+	if (grp->changed_side == 1) {
+		fsm_deltimer(&grp->timer);
+		fsm_addtimer(&grp->timer, MPC_XID_TIMEOUT_VALUE,
+			     MPCG_EVENT_TIMER, dev);
+	}
+	if (ch->in_mpcgroup)
+		fsm_event(grp->fsm, MPCG_EVENT_XID0DO, ch);
+	else
+		CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR,
+			"%s(%s): channel %s not added to group",
+				CTCM_FUNTAIL, dev->name, ch->id);
+
+done:
+	return;
+}
+
+/*
+ * ctcmpc channel FSM action
+ * called from several points in ctcmpc_ch_fsm
+ * ctcmpc only
+ */
+static void ctcmpc_chx_resend(fsm_instance *fsm, int event, void *arg)
+{
+	struct channel	   *ch	   = arg;
+	struct net_device  *dev    = ch->netdev;
+	struct ctcm_priv   *priv   = dev->ml_priv;
+	struct mpc_group   *grp    = priv->mpcg;
+
+	fsm_event(grp->fsm, MPCG_EVENT_XID0DO, ch);
+	return;
+}
+
+/*
+ * ctcmpc channel FSM action
+ * called from several points in ctcmpc_ch_fsm
+ * ctcmpc only
+ */
+static void ctcmpc_chx_send_sweep(fsm_instance *fsm, int event, void *arg)
+{
+	struct channel *ach = arg;
+	struct net_device *dev = ach->netdev;
+	struct ctcm_priv *priv = dev->ml_priv;
+	struct mpc_group *grp = priv->mpcg;
+	struct channel *wch = priv->channel[CTCM_WRITE];
+	struct channel *rch = priv->channel[CTCM_READ];
+	struct sk_buff *skb;
+	struct th_sweep *header;
+	int rc = 0;
+	unsigned long saveflags = 0;
+
+	CTCM_PR_DEBUG("ctcmpc enter: %s(): cp=%i ch=0x%p id=%s\n",
+			__func__, smp_processor_id(), ach, ach->id);
+
+	if (grp->in_sweep == 0)
+		goto done;
+
+	CTCM_PR_DBGDATA("%s: 1: ToVTAM_th_seq= %08x\n",
+				__func__, wch->th_seq_num);
+	CTCM_PR_DBGDATA("%s: 1: FromVTAM_th_seq= %08x\n",
+				__func__, rch->th_seq_num);
+
+	if (fsm_getstate(wch->fsm) != CTC_STATE_TXIDLE) {
+		/* give the previous IO time to complete */
+		fsm_addtimer(&wch->sweep_timer,
+			200, CTC_EVENT_RSWEEP_TIMER, wch);
+		goto done;
+	}
+
+	skb = skb_dequeue(&wch->sweep_queue);
+	if (!skb)
+		goto done;
+
+	if (set_normalized_cda(&wch->ccw[4], skb->data)) {
+		grp->in_sweep = 0;
+		ctcm_clear_busy_do(dev);
+		dev_kfree_skb_any(skb);
+		fsm_event(grp->fsm, MPCG_EVENT_INOP, dev);
+		goto done;
+	} else {
+		refcount_inc(&skb->users);
+		skb_queue_tail(&wch->io_queue, skb);
+	}
+
+	/* send out the sweep */
+	wch->ccw[4].count = skb->len;
+
+	header = (struct th_sweep *)skb->data;
+	switch (header->th.th_ch_flag) {
+	case TH_SWEEP_REQ:
+		grp->sweep_req_pend_num--;
+		break;
+	case TH_SWEEP_RESP:
+		grp->sweep_rsp_pend_num--;
+		break;
+	}
+
+	header->sw.th_last_seq = wch->th_seq_num;
+
+	CTCM_CCW_DUMP((char *)&wch->ccw[3], sizeof(struct ccw1) * 3);
+	CTCM_PR_DBGDATA("%s: sweep packet\n", __func__);
+	CTCM_D3_DUMP((char *)header, TH_SWEEP_LENGTH);
+
+	fsm_addtimer(&wch->timer, CTCM_TIME_5_SEC, CTC_EVENT_TIMER, wch);
+	fsm_newstate(wch->fsm, CTC_STATE_TX);
+
+	spin_lock_irqsave(get_ccwdev_lock(wch->cdev), saveflags);
+	wch->prof.send_stamp = jiffies;
+	rc = ccw_device_start(wch->cdev, &wch->ccw[3],
+					(unsigned long) wch, 0xff, 0);
+	spin_unlock_irqrestore(get_ccwdev_lock(wch->cdev), saveflags);
+
+	if ((grp->sweep_req_pend_num == 0) &&
+	   (grp->sweep_rsp_pend_num == 0)) {
+		grp->in_sweep = 0;
+		rch->th_seq_num = 0x00;
+		wch->th_seq_num = 0x00;
+		ctcm_clear_busy_do(dev);
+	}
+
+	CTCM_PR_DBGDATA("%s: To-/From-VTAM_th_seq = %08x/%08x\n",
+			__func__, wch->th_seq_num, rch->th_seq_num);
+
+	if (rc != 0)
+		ctcm_ccw_check_rc(wch, rc, "send sweep");
+
+done:
+	return;
+}
+
+
+/*
+ * The ctcmpc statemachine for a channel.
+ */
+
+const fsm_node ctcmpc_ch_fsm[] = {
+	{ CTC_STATE_STOPPED,	CTC_EVENT_STOP,		ctcm_action_nop  },
+	{ CTC_STATE_STOPPED,	CTC_EVENT_START,	ctcm_chx_start  },
+	{ CTC_STATE_STOPPED,	CTC_EVENT_IO_ENODEV,	ctcm_chx_iofatal  },
+	{ CTC_STATE_STOPPED,	CTC_EVENT_FINSTAT,	ctcm_action_nop  },
+	{ CTC_STATE_STOPPED,	CTC_EVENT_MC_FAIL,	ctcm_action_nop  },
+
+	{ CTC_STATE_NOTOP,	CTC_EVENT_STOP,		ctcm_chx_stop  },
+	{ CTC_STATE_NOTOP,	CTC_EVENT_START,	ctcm_action_nop  },
+	{ CTC_STATE_NOTOP,	CTC_EVENT_FINSTAT,	ctcm_action_nop  },
+	{ CTC_STATE_NOTOP,	CTC_EVENT_MC_FAIL,	ctcm_action_nop  },
+	{ CTC_STATE_NOTOP,	CTC_EVENT_MC_GOOD,	ctcm_chx_start  },
+	{ CTC_STATE_NOTOP,	CTC_EVENT_UC_RCRESET,	ctcm_chx_stop  },
+	{ CTC_STATE_NOTOP,	CTC_EVENT_UC_RSRESET,	ctcm_chx_stop  },
+	{ CTC_STATE_NOTOP,	CTC_EVENT_IO_ENODEV,	ctcm_chx_iofatal  },
+
+	{ CTC_STATE_STARTWAIT,	CTC_EVENT_STOP,		ctcm_chx_haltio  },
+	{ CTC_STATE_STARTWAIT,	CTC_EVENT_START,	ctcm_action_nop  },
+	{ CTC_STATE_STARTWAIT,	CTC_EVENT_FINSTAT,	ctcm_chx_setmode  },
+	{ CTC_STATE_STARTWAIT,	CTC_EVENT_TIMER,	ctcm_chx_setuperr  },
+	{ CTC_STATE_STARTWAIT,	CTC_EVENT_IO_ENODEV,	ctcm_chx_iofatal  },
+	{ CTC_STATE_STARTWAIT,	CTC_EVENT_MC_FAIL,	ctcm_chx_fail  },
+
+	{ CTC_STATE_STARTRETRY,	CTC_EVENT_STOP,		ctcm_chx_haltio  },
+	{ CTC_STATE_STARTRETRY,	CTC_EVENT_TIMER,	ctcm_chx_setmode  },
+	{ CTC_STATE_STARTRETRY,	CTC_EVENT_FINSTAT,	ctcm_chx_setmode  },
+	{ CTC_STATE_STARTRETRY,	CTC_EVENT_MC_FAIL,	ctcm_chx_fail  },
+	{ CTC_STATE_STARTRETRY,	CTC_EVENT_IO_ENODEV,	ctcm_chx_iofatal  },
+
+	{ CTC_STATE_SETUPWAIT,	CTC_EVENT_STOP,		ctcm_chx_haltio  },
+	{ CTC_STATE_SETUPWAIT,	CTC_EVENT_START,	ctcm_action_nop  },
+	{ CTC_STATE_SETUPWAIT,	CTC_EVENT_FINSTAT,	ctcmpc_chx_firstio  },
+	{ CTC_STATE_SETUPWAIT,	CTC_EVENT_UC_RCRESET,	ctcm_chx_setuperr  },
+	{ CTC_STATE_SETUPWAIT,	CTC_EVENT_UC_RSRESET,	ctcm_chx_setuperr  },
+	{ CTC_STATE_SETUPWAIT,	CTC_EVENT_TIMER,	ctcm_chx_setmode  },
+	{ CTC_STATE_SETUPWAIT,	CTC_EVENT_IO_ENODEV,	ctcm_chx_iofatal  },
+	{ CTC_STATE_SETUPWAIT,	CTC_EVENT_MC_FAIL,	ctcm_chx_fail  },
+
+	{ CTC_STATE_RXINIT,	CTC_EVENT_STOP,		ctcm_chx_haltio  },
+	{ CTC_STATE_RXINIT,	CTC_EVENT_START,	ctcm_action_nop  },
+	{ CTC_STATE_RXINIT,	CTC_EVENT_FINSTAT,	ctcmpc_chx_rxidle  },
+	{ CTC_STATE_RXINIT,	CTC_EVENT_UC_RCRESET,	ctcm_chx_rxiniterr  },
+	{ CTC_STATE_RXINIT,	CTC_EVENT_UC_RSRESET,	ctcm_chx_rxiniterr  },
+	{ CTC_STATE_RXINIT,	CTC_EVENT_TIMER,	ctcm_chx_rxiniterr  },
+	{ CTC_STATE_RXINIT,	CTC_EVENT_ATTNBUSY,	ctcm_chx_rxinitfail  },
+	{ CTC_STATE_RXINIT,	CTC_EVENT_IO_ENODEV,	ctcm_chx_iofatal  },
+	{ CTC_STATE_RXINIT,	CTC_EVENT_UC_ZERO,	ctcmpc_chx_firstio  },
+	{ CTC_STATE_RXINIT,	CTC_EVENT_MC_FAIL,	ctcm_chx_fail  },
+
+	{ CH_XID0_PENDING,	CTC_EVENT_FINSTAT,	ctcm_action_nop  },
+	{ CH_XID0_PENDING,	CTC_EVENT_ATTN,		ctcmpc_chx_attn  },
+	{ CH_XID0_PENDING,	CTC_EVENT_STOP,		ctcm_chx_haltio  },
+	{ CH_XID0_PENDING,	CTC_EVENT_START,	ctcm_action_nop  },
+	{ CH_XID0_PENDING,	CTC_EVENT_IO_ENODEV,	ctcm_chx_iofatal  },
+	{ CH_XID0_PENDING,	CTC_EVENT_MC_FAIL,	ctcm_chx_fail  },
+	{ CH_XID0_PENDING,	CTC_EVENT_UC_RCRESET,	ctcm_chx_setuperr  },
+	{ CH_XID0_PENDING,	CTC_EVENT_UC_RSRESET,	ctcm_chx_setuperr  },
+	{ CH_XID0_PENDING,	CTC_EVENT_ATTNBUSY,	ctcm_chx_iofatal  },
+
+	{ CH_XID0_INPROGRESS,	CTC_EVENT_FINSTAT,	ctcmpc_chx_rx  },
+	{ CH_XID0_INPROGRESS,	CTC_EVENT_ATTN,		ctcmpc_chx_attn  },
+	{ CH_XID0_INPROGRESS,	CTC_EVENT_STOP,		ctcm_chx_haltio  },
+	{ CH_XID0_INPROGRESS,	CTC_EVENT_START,	ctcm_action_nop  },
+	{ CH_XID0_INPROGRESS,	CTC_EVENT_IO_ENODEV,	ctcm_chx_iofatal  },
+	{ CH_XID0_INPROGRESS,	CTC_EVENT_MC_FAIL,	ctcm_chx_fail  },
+	{ CH_XID0_INPROGRESS,	CTC_EVENT_UC_ZERO,	ctcmpc_chx_rx  },
+	{ CH_XID0_INPROGRESS,	CTC_EVENT_UC_RCRESET,	ctcm_chx_setuperr },
+	{ CH_XID0_INPROGRESS,	CTC_EVENT_ATTNBUSY,	ctcmpc_chx_attnbusy  },
+	{ CH_XID0_INPROGRESS,	CTC_EVENT_TIMER,	ctcmpc_chx_resend  },
+	{ CH_XID0_INPROGRESS,	CTC_EVENT_IO_EBUSY,	ctcm_chx_fail  },
+
+	{ CH_XID7_PENDING,	CTC_EVENT_FINSTAT,	ctcmpc_chx_rx  },
+	{ CH_XID7_PENDING,	CTC_EVENT_ATTN,		ctcmpc_chx_attn  },
+	{ CH_XID7_PENDING,	CTC_EVENT_STOP,		ctcm_chx_haltio  },
+	{ CH_XID7_PENDING,	CTC_EVENT_START,	ctcm_action_nop  },
+	{ CH_XID7_PENDING,	CTC_EVENT_IO_ENODEV,	ctcm_chx_iofatal  },
+	{ CH_XID7_PENDING,	CTC_EVENT_MC_FAIL,	ctcm_chx_fail  },
+	{ CH_XID7_PENDING,	CTC_EVENT_UC_ZERO,	ctcmpc_chx_rx  },
+	{ CH_XID7_PENDING,	CTC_EVENT_UC_RCRESET,	ctcm_chx_setuperr  },
+	{ CH_XID7_PENDING,	CTC_EVENT_UC_RSRESET,	ctcm_chx_setuperr  },
+	{ CH_XID7_PENDING,	CTC_EVENT_ATTNBUSY,	ctcm_chx_iofatal  },
+	{ CH_XID7_PENDING,	CTC_EVENT_TIMER,	ctcmpc_chx_resend  },
+	{ CH_XID7_PENDING,	CTC_EVENT_IO_EBUSY,	ctcm_chx_fail  },
+
+	{ CH_XID7_PENDING1,	CTC_EVENT_FINSTAT,	ctcmpc_chx_rx  },
+	{ CH_XID7_PENDING1,	CTC_EVENT_ATTN,		ctcmpc_chx_attn  },
+	{ CH_XID7_PENDING1,	CTC_EVENT_STOP,		ctcm_chx_haltio  },
+	{ CH_XID7_PENDING1,	CTC_EVENT_START,	ctcm_action_nop  },
+	{ CH_XID7_PENDING1,	CTC_EVENT_IO_ENODEV,	ctcm_chx_iofatal  },
+	{ CH_XID7_PENDING1,	CTC_EVENT_MC_FAIL,	ctcm_chx_fail  },
+	{ CH_XID7_PENDING1,	CTC_EVENT_UC_ZERO,	ctcmpc_chx_rx  },
+	{ CH_XID7_PENDING1,	CTC_EVENT_UC_RCRESET,	ctcm_chx_setuperr  },
+	{ CH_XID7_PENDING1,	CTC_EVENT_UC_RSRESET,	ctcm_chx_setuperr  },
+	{ CH_XID7_PENDING1,	CTC_EVENT_ATTNBUSY,	ctcm_chx_iofatal  },
+	{ CH_XID7_PENDING1,	CTC_EVENT_TIMER,	ctcmpc_chx_resend  },
+	{ CH_XID7_PENDING1,	CTC_EVENT_IO_EBUSY,	ctcm_chx_fail  },
+
+	{ CH_XID7_PENDING2,	CTC_EVENT_FINSTAT,	ctcmpc_chx_rx  },
+	{ CH_XID7_PENDING2,	CTC_EVENT_ATTN,		ctcmpc_chx_attn  },
+	{ CH_XID7_PENDING2,	CTC_EVENT_STOP,		ctcm_chx_haltio  },
+	{ CH_XID7_PENDING2,	CTC_EVENT_START,	ctcm_action_nop  },
+	{ CH_XID7_PENDING2,	CTC_EVENT_IO_ENODEV,	ctcm_chx_iofatal  },
+	{ CH_XID7_PENDING2,	CTC_EVENT_MC_FAIL,	ctcm_chx_fail  },
+	{ CH_XID7_PENDING2,	CTC_EVENT_UC_ZERO,	ctcmpc_chx_rx  },
+	{ CH_XID7_PENDING2,	CTC_EVENT_UC_RCRESET,	ctcm_chx_setuperr  },
+	{ CH_XID7_PENDING2,	CTC_EVENT_UC_RSRESET,	ctcm_chx_setuperr  },
+	{ CH_XID7_PENDING2,	CTC_EVENT_ATTNBUSY,	ctcm_chx_iofatal  },
+	{ CH_XID7_PENDING2,	CTC_EVENT_TIMER,	ctcmpc_chx_resend  },
+	{ CH_XID7_PENDING2,	CTC_EVENT_IO_EBUSY,	ctcm_chx_fail  },
+
+	{ CH_XID7_PENDING3,	CTC_EVENT_FINSTAT,	ctcmpc_chx_rx  },
+	{ CH_XID7_PENDING3,	CTC_EVENT_ATTN,		ctcmpc_chx_attn  },
+	{ CH_XID7_PENDING3,	CTC_EVENT_STOP,		ctcm_chx_haltio  },
+	{ CH_XID7_PENDING3,	CTC_EVENT_START,	ctcm_action_nop  },
+	{ CH_XID7_PENDING3,	CTC_EVENT_IO_ENODEV,	ctcm_chx_iofatal  },
+	{ CH_XID7_PENDING3,	CTC_EVENT_MC_FAIL,	ctcm_chx_fail  },
+	{ CH_XID7_PENDING3,	CTC_EVENT_UC_ZERO,	ctcmpc_chx_rx  },
+	{ CH_XID7_PENDING3,	CTC_EVENT_UC_RCRESET,	ctcm_chx_setuperr  },
+	{ CH_XID7_PENDING3,	CTC_EVENT_UC_RSRESET,	ctcm_chx_setuperr  },
+	{ CH_XID7_PENDING3,	CTC_EVENT_ATTNBUSY,	ctcm_chx_iofatal  },
+	{ CH_XID7_PENDING3,	CTC_EVENT_TIMER,	ctcmpc_chx_resend  },
+	{ CH_XID7_PENDING3,	CTC_EVENT_IO_EBUSY,	ctcm_chx_fail  },
+
+	{ CH_XID7_PENDING4,	CTC_EVENT_FINSTAT,	ctcmpc_chx_rx  },
+	{ CH_XID7_PENDING4,	CTC_EVENT_ATTN,		ctcmpc_chx_attn  },
+	{ CH_XID7_PENDING4,	CTC_EVENT_STOP,		ctcm_chx_haltio  },
+	{ CH_XID7_PENDING4,	CTC_EVENT_START,	ctcm_action_nop  },
+	{ CH_XID7_PENDING4,	CTC_EVENT_IO_ENODEV,	ctcm_chx_iofatal  },
+	{ CH_XID7_PENDING4,	CTC_EVENT_MC_FAIL,	ctcm_chx_fail  },
+	{ CH_XID7_PENDING4,	CTC_EVENT_UC_ZERO,	ctcmpc_chx_rx  },
+	{ CH_XID7_PENDING4,	CTC_EVENT_UC_RCRESET,	ctcm_chx_setuperr  },
+	{ CH_XID7_PENDING4,	CTC_EVENT_UC_RSRESET,	ctcm_chx_setuperr  },
+	{ CH_XID7_PENDING4,	CTC_EVENT_ATTNBUSY,	ctcm_chx_iofatal  },
+	{ CH_XID7_PENDING4,	CTC_EVENT_TIMER,	ctcmpc_chx_resend  },
+	{ CH_XID7_PENDING4,	CTC_EVENT_IO_EBUSY,	ctcm_chx_fail  },
+
+	{ CTC_STATE_RXIDLE,	CTC_EVENT_STOP,		ctcm_chx_haltio  },
+	{ CTC_STATE_RXIDLE,	CTC_EVENT_START,	ctcm_action_nop  },
+	{ CTC_STATE_RXIDLE,	CTC_EVENT_FINSTAT,	ctcmpc_chx_rx  },
+	{ CTC_STATE_RXIDLE,	CTC_EVENT_UC_RCRESET,	ctcm_chx_rxdisc  },
+	{ CTC_STATE_RXIDLE,	CTC_EVENT_UC_RSRESET,	ctcm_chx_fail  },
+	{ CTC_STATE_RXIDLE,	CTC_EVENT_IO_ENODEV,	ctcm_chx_iofatal  },
+	{ CTC_STATE_RXIDLE,	CTC_EVENT_MC_FAIL,	ctcm_chx_fail  },
+	{ CTC_STATE_RXIDLE,	CTC_EVENT_UC_ZERO,	ctcmpc_chx_rx  },
+
+	{ CTC_STATE_TXINIT,	CTC_EVENT_STOP,		ctcm_chx_haltio  },
+	{ CTC_STATE_TXINIT,	CTC_EVENT_START,	ctcm_action_nop  },
+	{ CTC_STATE_TXINIT,	CTC_EVENT_FINSTAT,	ctcm_chx_txidle  },
+	{ CTC_STATE_TXINIT,	CTC_EVENT_UC_RCRESET,	ctcm_chx_txiniterr  },
+	{ CTC_STATE_TXINIT,	CTC_EVENT_UC_RSRESET,	ctcm_chx_txiniterr  },
+	{ CTC_STATE_TXINIT,	CTC_EVENT_TIMER,	ctcm_chx_txiniterr  },
+	{ CTC_STATE_TXINIT,	CTC_EVENT_IO_ENODEV,	ctcm_chx_iofatal  },
+	{ CTC_STATE_TXINIT,	CTC_EVENT_MC_FAIL,	ctcm_chx_fail  },
+	{ CTC_STATE_TXINIT,	CTC_EVENT_RSWEEP_TIMER,	ctcmpc_chx_send_sweep },
+
+	{ CTC_STATE_TXIDLE,	CTC_EVENT_STOP,		ctcm_chx_haltio  },
+	{ CTC_STATE_TXIDLE,	CTC_EVENT_START,	ctcm_action_nop  },
+	{ CTC_STATE_TXIDLE,	CTC_EVENT_FINSTAT,	ctcmpc_chx_firstio  },
+	{ CTC_STATE_TXIDLE,	CTC_EVENT_UC_RCRESET,	ctcm_chx_fail  },
+	{ CTC_STATE_TXIDLE,	CTC_EVENT_UC_RSRESET,	ctcm_chx_fail  },
+	{ CTC_STATE_TXIDLE,	CTC_EVENT_IO_ENODEV,	ctcm_chx_iofatal  },
+	{ CTC_STATE_TXIDLE,	CTC_EVENT_MC_FAIL,	ctcm_chx_fail  },
+	{ CTC_STATE_TXIDLE,	CTC_EVENT_RSWEEP_TIMER,	ctcmpc_chx_send_sweep },
+
+	{ CTC_STATE_TERM,	CTC_EVENT_STOP,		ctcm_action_nop  },
+	{ CTC_STATE_TERM,	CTC_EVENT_START,	ctcm_chx_restart  },
+	{ CTC_STATE_TERM,	CTC_EVENT_FINSTAT,	ctcm_chx_stopped  },
+	{ CTC_STATE_TERM,	CTC_EVENT_UC_RCRESET,	ctcm_action_nop  },
+	{ CTC_STATE_TERM,	CTC_EVENT_UC_RSRESET,	ctcm_action_nop  },
+	{ CTC_STATE_TERM,	CTC_EVENT_MC_FAIL,	ctcm_chx_fail  },
+	{ CTC_STATE_TERM,	CTC_EVENT_IO_EBUSY,	ctcm_chx_fail  },
+	{ CTC_STATE_TERM,	CTC_EVENT_IO_ENODEV,	ctcm_chx_iofatal  },
+
+	{ CTC_STATE_DTERM,	CTC_EVENT_STOP,		ctcm_chx_haltio  },
+	{ CTC_STATE_DTERM,	CTC_EVENT_START,	ctcm_chx_restart  },
+	{ CTC_STATE_DTERM,	CTC_EVENT_FINSTAT,	ctcm_chx_setmode  },
+	{ CTC_STATE_DTERM,	CTC_EVENT_UC_RCRESET,	ctcm_action_nop  },
+	{ CTC_STATE_DTERM,	CTC_EVENT_UC_RSRESET,	ctcm_action_nop  },
+	{ CTC_STATE_DTERM,	CTC_EVENT_MC_FAIL,	ctcm_chx_fail  },
+	{ CTC_STATE_DTERM,	CTC_EVENT_IO_ENODEV,	ctcm_chx_iofatal  },
+
+	{ CTC_STATE_TX,		CTC_EVENT_STOP,		ctcm_chx_haltio  },
+	{ CTC_STATE_TX,		CTC_EVENT_START,	ctcm_action_nop  },
+	{ CTC_STATE_TX,		CTC_EVENT_FINSTAT,	ctcmpc_chx_txdone  },
+	{ CTC_STATE_TX,		CTC_EVENT_UC_RCRESET,	ctcm_chx_fail  },
+	{ CTC_STATE_TX,		CTC_EVENT_UC_RSRESET,	ctcm_chx_fail  },
+	{ CTC_STATE_TX,		CTC_EVENT_TIMER,	ctcm_chx_txretry  },
+	{ CTC_STATE_TX,		CTC_EVENT_IO_ENODEV,	ctcm_chx_iofatal  },
+	{ CTC_STATE_TX,		CTC_EVENT_MC_FAIL,	ctcm_chx_fail  },
+	{ CTC_STATE_TX,		CTC_EVENT_RSWEEP_TIMER,	ctcmpc_chx_send_sweep },
+	{ CTC_STATE_TX,		CTC_EVENT_IO_EBUSY,	ctcm_chx_fail  },
+
+	{ CTC_STATE_RXERR,	CTC_EVENT_STOP,		ctcm_chx_haltio  },
+	{ CTC_STATE_TXERR,	CTC_EVENT_STOP,		ctcm_chx_haltio  },
+	{ CTC_STATE_TXERR,	CTC_EVENT_IO_ENODEV,	ctcm_chx_iofatal  },
+	{ CTC_STATE_TXERR,	CTC_EVENT_MC_FAIL,	ctcm_chx_fail  },
+	{ CTC_STATE_RXERR,	CTC_EVENT_MC_FAIL,	ctcm_chx_fail  },
+};
+
+int mpc_ch_fsm_len = ARRAY_SIZE(ctcmpc_ch_fsm);
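
Each entry in the table above is a (state, event, action) triple; on
fsm_event() the generic FSM helper resolves the current state and the
event to at most one action. A hedged sketch of the equivalent linear
lookup (the real fsm.c builds a jump matrix at init_fsm() time; the
fsm_node field names are assumptions based on the driver's fsm.h):

	static void sketch_dispatch(fsm_instance *fi, const fsm_node *tab,
				    int len, int state, int event, void *arg)
	{
		int i;

		for (i = 0; i < len; i++) {
			if (tab[i].cond_state == state &&
			    tab[i].cond_event == event) {
				tab[i].function(fi, event, arg);
				return;
			}
		}
		/* no matching entry: the event is ignored in this state */
	}
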
+
+/*
+ * Actions for interface - statemachine.
+ */
+
+/**
+ * Start up the channels by sending CTC_EVENT_START to each channel.
+ *
+ * fi		An instance of an interface statemachine.
+ * event	The event that just happened.
+ * arg		Generic pointer, cast from struct net_device * upon call.
+ */
+static void dev_action_start(fsm_instance *fi, int event, void *arg)
+{
+	struct net_device *dev = arg;
+	struct ctcm_priv *priv = dev->ml_priv;
+	int direction;
+
+	CTCMY_DBF_DEV_NAME(SETUP, dev, "");
+
+	fsm_deltimer(&priv->restart_timer);
+	fsm_newstate(fi, DEV_STATE_STARTWAIT_RXTX);
+	if (IS_MPC(priv))
+		priv->mpcg->channels_terminating = 0;
+	for (direction = CTCM_READ; direction <= CTCM_WRITE; direction++) {
+		struct channel *ch = priv->channel[direction];
+		fsm_event(ch->fsm, CTC_EVENT_START, ch);
+	}
+}
+
+/**
+ * Shut down the channels by sending CTC_EVENT_STOP to each channel.
+ *
+ * fi		An instance of an interface statemachine.
+ * event	The event that just happened.
+ * arg		Generic pointer, cast from struct net_device * upon call.
+ */
+static void dev_action_stop(fsm_instance *fi, int event, void *arg)
+{
+	int direction;
+	struct net_device *dev = arg;
+	struct ctcm_priv *priv = dev->ml_priv;
+
+	CTCMY_DBF_DEV_NAME(SETUP, dev, "");
+
+	fsm_newstate(fi, DEV_STATE_STOPWAIT_RXTX);
+	for (direction = CTCM_READ; direction <= CTCM_WRITE; direction++) {
+		struct channel *ch = priv->channel[direction];
+		fsm_event(ch->fsm, CTC_EVENT_STOP, ch);
+		ch->th_seq_num = 0x00;
+		CTCM_PR_DEBUG("%s: CH_th_seq= %08x\n",
+				__func__, ch->th_seq_num);
+	}
+	if (IS_MPC(priv))
+		fsm_newstate(priv->mpcg->fsm, MPCG_STATE_RESET);
+}
+
+static void dev_action_restart(fsm_instance *fi, int event, void *arg)
+{
+	int restart_timer;
+	struct net_device *dev = arg;
+	struct ctcm_priv *priv = dev->ml_priv;
+
+	CTCMY_DBF_DEV_NAME(TRACE, dev, "");
+
+	if (IS_MPC(priv)) {
+		restart_timer = CTCM_TIME_1_SEC;
+	} else {
+		restart_timer = CTCM_TIME_5_SEC;
+	}
+	dev_info(&dev->dev, "Restarting device\n");
+
+	dev_action_stop(fi, event, arg);
+	fsm_event(priv->fsm, DEV_EVENT_STOP, dev);
+	if (IS_MPC(priv))
+		fsm_newstate(priv->mpcg->fsm, MPCG_STATE_RESET);
+
+	/* Going back into the start sequence too quickly can result
+	 * in the other side becoming unreachable, due to sense data
+	 * reported when I/O is aborted. */
+	fsm_addtimer(&priv->restart_timer, restart_timer,
+			DEV_EVENT_START, dev);
+}
+
+/**
+ * Called from channel statemachine
+ * when a channel is up and running.
+ *
+ * fi		An instance of an interface statemachine.
+ * event	The event that just happened.
+ * arg		Generic pointer, cast from struct net_device * upon call.
+ */
+static void dev_action_chup(fsm_instance *fi, int event, void *arg)
+{
+	struct net_device *dev = arg;
+	struct ctcm_priv *priv = dev->ml_priv;
+	int dev_stat = fsm_getstate(fi);
+
+	CTCM_DBF_TEXT_(SETUP, CTC_DBF_NOTICE,
+			"%s(%s): priv = %p [%d,%d]\n", CTCM_FUNTAIL,
+				dev->name, dev->ml_priv, dev_stat, event);
+
+	switch (fsm_getstate(fi)) {
+	case DEV_STATE_STARTWAIT_RXTX:
+		if (event == DEV_EVENT_RXUP)
+			fsm_newstate(fi, DEV_STATE_STARTWAIT_TX);
+		else
+			fsm_newstate(fi, DEV_STATE_STARTWAIT_RX);
+		break;
+	case DEV_STATE_STARTWAIT_RX:
+		if (event == DEV_EVENT_RXUP) {
+			fsm_newstate(fi, DEV_STATE_RUNNING);
+			dev_info(&dev->dev,
+				"Connected with remote side\n");
+			ctcm_clear_busy(dev);
+		}
+		break;
+	case DEV_STATE_STARTWAIT_TX:
+		if (event == DEV_EVENT_TXUP) {
+			fsm_newstate(fi, DEV_STATE_RUNNING);
+			dev_info(&dev->dev,
+				"Connected with remote side\n");
+			ctcm_clear_busy(dev);
+		}
+		break;
+	case DEV_STATE_STOPWAIT_TX:
+		if (event == DEV_EVENT_RXUP)
+			fsm_newstate(fi, DEV_STATE_STOPWAIT_RXTX);
+		break;
+	case DEV_STATE_STOPWAIT_RX:
+		if (event == DEV_EVENT_TXUP)
+			fsm_newstate(fi, DEV_STATE_STOPWAIT_RXTX);
+		break;
+	}
+
+	if (IS_MPC(priv)) {
+		if (event == DEV_EVENT_RXUP)
+			mpc_channel_action(priv->channel[CTCM_READ],
+				CTCM_READ, MPC_CHANNEL_ADD);
+		else
+			mpc_channel_action(priv->channel[CTCM_WRITE],
+				CTCM_WRITE, MPC_CHANNEL_ADD);
+	}
+}
+
+/**
+ * Called from device statemachine
+ * when a channel has been shut down.
+ *
+ * fi		An instance of an interface statemachine.
+ * event	The event that just happened.
+ * arg		Generic pointer, cast from struct net_device * upon call.
+ */
+static void dev_action_chdown(fsm_instance *fi, int event, void *arg)
+{
+
+	struct net_device *dev = arg;
+	struct ctcm_priv *priv = dev->ml_priv;
+
+	CTCMY_DBF_DEV_NAME(SETUP, dev, "");
+
+	switch (fsm_getstate(fi)) {
+	case DEV_STATE_RUNNING:
+		if (event == DEV_EVENT_TXDOWN)
+			fsm_newstate(fi, DEV_STATE_STARTWAIT_TX);
+		else
+			fsm_newstate(fi, DEV_STATE_STARTWAIT_RX);
+		break;
+	case DEV_STATE_STARTWAIT_RX:
+		if (event == DEV_EVENT_TXDOWN)
+			fsm_newstate(fi, DEV_STATE_STARTWAIT_RXTX);
+		break;
+	case DEV_STATE_STARTWAIT_TX:
+		if (event == DEV_EVENT_RXDOWN)
+			fsm_newstate(fi, DEV_STATE_STARTWAIT_RXTX);
+		break;
+	case DEV_STATE_STOPWAIT_RXTX:
+		if (event == DEV_EVENT_TXDOWN)
+			fsm_newstate(fi, DEV_STATE_STOPWAIT_RX);
+		else
+			fsm_newstate(fi, DEV_STATE_STOPWAIT_TX);
+		break;
+	case DEV_STATE_STOPWAIT_RX:
+		if (event == DEV_EVENT_RXDOWN)
+			fsm_newstate(fi, DEV_STATE_STOPPED);
+		break;
+	case DEV_STATE_STOPWAIT_TX:
+		if (event == DEV_EVENT_TXDOWN)
+			fsm_newstate(fi, DEV_STATE_STOPPED);
+		break;
+	}
+	if (IS_MPC(priv)) {
+		if (event == DEV_EVENT_RXDOWN)
+			mpc_channel_action(priv->channel[CTCM_READ],
+				CTCM_READ, MPC_CHANNEL_REMOVE);
+		else
+			mpc_channel_action(priv->channel[CTCM_WRITE],
+				CTCM_WRITE, MPC_CHANNEL_REMOVE);
+	}
+}
+
+const fsm_node dev_fsm[] = {
+	{ DEV_STATE_STOPPED,        DEV_EVENT_START,   dev_action_start   },
+	{ DEV_STATE_STOPWAIT_RXTX,  DEV_EVENT_START,   dev_action_start   },
+	{ DEV_STATE_STOPWAIT_RXTX,  DEV_EVENT_RXDOWN,  dev_action_chdown  },
+	{ DEV_STATE_STOPWAIT_RXTX,  DEV_EVENT_TXDOWN,  dev_action_chdown  },
+	{ DEV_STATE_STOPWAIT_RXTX,  DEV_EVENT_RESTART, dev_action_restart },
+	{ DEV_STATE_STOPWAIT_RX,    DEV_EVENT_START,   dev_action_start   },
+	{ DEV_STATE_STOPWAIT_RX,    DEV_EVENT_RXUP,    dev_action_chup    },
+	{ DEV_STATE_STOPWAIT_RX,    DEV_EVENT_TXUP,    dev_action_chup    },
+	{ DEV_STATE_STOPWAIT_RX,    DEV_EVENT_RXDOWN,  dev_action_chdown  },
+	{ DEV_STATE_STOPWAIT_RX,    DEV_EVENT_RESTART, dev_action_restart },
+	{ DEV_STATE_STOPWAIT_TX,    DEV_EVENT_START,   dev_action_start   },
+	{ DEV_STATE_STOPWAIT_TX,    DEV_EVENT_RXUP,    dev_action_chup    },
+	{ DEV_STATE_STOPWAIT_TX,    DEV_EVENT_TXUP,    dev_action_chup    },
+	{ DEV_STATE_STOPWAIT_TX,    DEV_EVENT_TXDOWN,  dev_action_chdown  },
+	{ DEV_STATE_STOPWAIT_TX,    DEV_EVENT_RESTART, dev_action_restart },
+	{ DEV_STATE_STARTWAIT_RXTX, DEV_EVENT_STOP,    dev_action_stop    },
+	{ DEV_STATE_STARTWAIT_RXTX, DEV_EVENT_RXUP,    dev_action_chup    },
+	{ DEV_STATE_STARTWAIT_RXTX, DEV_EVENT_TXUP,    dev_action_chup    },
+	{ DEV_STATE_STARTWAIT_RXTX, DEV_EVENT_RXDOWN,  dev_action_chdown  },
+	{ DEV_STATE_STARTWAIT_RXTX, DEV_EVENT_TXDOWN,  dev_action_chdown  },
+	{ DEV_STATE_STARTWAIT_RXTX, DEV_EVENT_RESTART, dev_action_restart },
+	{ DEV_STATE_STARTWAIT_TX,   DEV_EVENT_STOP,    dev_action_stop    },
+	{ DEV_STATE_STARTWAIT_TX,   DEV_EVENT_RXUP,    dev_action_chup    },
+	{ DEV_STATE_STARTWAIT_TX,   DEV_EVENT_TXUP,    dev_action_chup    },
+	{ DEV_STATE_STARTWAIT_TX,   DEV_EVENT_RXDOWN,  dev_action_chdown  },
+	{ DEV_STATE_STARTWAIT_TX,   DEV_EVENT_RESTART, dev_action_restart },
+	{ DEV_STATE_STARTWAIT_RX,   DEV_EVENT_STOP,    dev_action_stop    },
+	{ DEV_STATE_STARTWAIT_RX,   DEV_EVENT_RXUP,    dev_action_chup    },
+	{ DEV_STATE_STARTWAIT_RX,   DEV_EVENT_TXUP,    dev_action_chup    },
+	{ DEV_STATE_STARTWAIT_RX,   DEV_EVENT_TXDOWN,  dev_action_chdown  },
+	{ DEV_STATE_STARTWAIT_RX,   DEV_EVENT_RESTART, dev_action_restart },
+	{ DEV_STATE_RUNNING,        DEV_EVENT_STOP,    dev_action_stop    },
+	{ DEV_STATE_RUNNING,        DEV_EVENT_RXDOWN,  dev_action_chdown  },
+	{ DEV_STATE_RUNNING,        DEV_EVENT_TXDOWN,  dev_action_chdown  },
+	{ DEV_STATE_RUNNING,        DEV_EVENT_TXUP,    ctcm_action_nop    },
+	{ DEV_STATE_RUNNING,        DEV_EVENT_RXUP,    ctcm_action_nop    },
+	{ DEV_STATE_RUNNING,        DEV_EVENT_RESTART, dev_action_restart },
+};
+
+int dev_fsm_len = ARRAY_SIZE(dev_fsm);
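
Tracing a normal interface bring-up through dev_fsm above, as a sketch:
fi stands for the device FSM instance (priv->fsm) and dev for the
net_device, as elsewhere in this file.

	fsm_event(fi, DEV_EVENT_START, dev); /* STOPPED -> STARTWAIT_RXTX (dev_action_start) */
	fsm_event(fi, DEV_EVENT_RXUP, dev);  /* STARTWAIT_RXTX -> STARTWAIT_TX (dev_action_chup) */
	fsm_event(fi, DEV_EVENT_TXUP, dev);  /* STARTWAIT_TX -> RUNNING (dev_action_chup) */
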
+
+/* --- This is the END my friend --- */
+
diff --git a/drivers/s390/net/ctcm_fsms.h b/drivers/s390/net/ctcm_fsms.h
new file mode 100644
index 0000000..2257372
--- /dev/null
+++ b/drivers/s390/net/ctcm_fsms.h
@@ -0,0 +1,357 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright IBM Corp. 2001, 2007
+ * Authors: 	Fritz Elfert (felfert@millenux.com)
+ * 		Peter Tiedemann (ptiedem@de.ibm.com)
+ * 	MPC additions :
+ *		Belinda Thompson (belindat@us.ibm.com)
+ *		Andy Richter (richtera@us.ibm.com)
+ */
+#ifndef _CTCM_FSMS_H_
+#define _CTCM_FSMS_H_
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <linux/interrupt.h>
+#include <linux/timer.h>
+#include <linux/bitops.h>
+
+#include <linux/signal.h>
+#include <linux/string.h>
+
+#include <linux/ip.h>
+#include <linux/if_arp.h>
+#include <linux/tcp.h>
+#include <linux/skbuff.h>
+#include <linux/ctype.h>
+#include <net/dst.h>
+
+#include <linux/io.h>
+#include <asm/ccwdev.h>
+#include <asm/ccwgroup.h>
+#include <linux/uaccess.h>
+
+#include <asm/idals.h>
+
+#include "fsm.h"
+#include "ctcm_main.h"
+
+/*
+ * Definitions for the channel statemachine(s) for ctc and ctcmpc
+ *
+ * To allow better kerntyping, prefix-less definitions for channel states
+ * and channel events have been replaced:
+ * ch_event... -> ctc_ch_event...
+ * CH_EVENT... -> CTC_EVENT...
+ * ch_state... -> ctc_ch_state...
+ * CH_STATE... -> CTC_STATE...
+ */
+/*
+ * Events of the channel statemachine(s) for ctc and ctcmpc
+ */
+enum ctc_ch_events {
+	/*
+	 * Events, representing return code of
+	 * I/O operations (ccw_device_start, ccw_device_halt et al.)
+	 */
+	CTC_EVENT_IO_SUCCESS,
+	CTC_EVENT_IO_EBUSY,
+	CTC_EVENT_IO_ENODEV,
+	CTC_EVENT_IO_UNKNOWN,
+
+	CTC_EVENT_ATTNBUSY,
+	CTC_EVENT_ATTN,
+	CTC_EVENT_BUSY,
+	/*
+	 * Events, representing unit-check
+	 */
+	CTC_EVENT_UC_RCRESET,
+	CTC_EVENT_UC_RSRESET,
+	CTC_EVENT_UC_TXTIMEOUT,
+	CTC_EVENT_UC_TXPARITY,
+	CTC_EVENT_UC_HWFAIL,
+	CTC_EVENT_UC_RXPARITY,
+	CTC_EVENT_UC_ZERO,
+	CTC_EVENT_UC_UNKNOWN,
+	/*
+	 * Events, representing subchannel-check
+	 */
+	CTC_EVENT_SC_UNKNOWN,
+	/*
+	 * Events, representing machine checks
+	 */
+	CTC_EVENT_MC_FAIL,
+	CTC_EVENT_MC_GOOD,
+	/*
+	 * Event, representing normal IRQ
+	 */
+	CTC_EVENT_IRQ,
+	CTC_EVENT_FINSTAT,
+	/*
+	 * Event, representing timer expiry.
+	 */
+	CTC_EVENT_TIMER,
+	/*
+	 * Events, representing commands from upper levels.
+	 */
+	CTC_EVENT_START,
+	CTC_EVENT_STOP,
+	CTC_NR_EVENTS,
+	/*
+	 * additional MPC events
+	 */
+	CTC_EVENT_SEND_XID = CTC_NR_EVENTS,
+	CTC_EVENT_RSWEEP_TIMER,
+	/*
+	 * MUST always be the last element!
+	 */
+	CTC_MPC_NR_EVENTS,
+};
+
+/*
+ * States of the channel statemachine(s) for ctc and ctcmpc.
+ */
+enum ctc_ch_states {
+	/*
+	 * Channel not assigned to any device,
+	 * initial state, direction invalid
+	 */
+	CTC_STATE_IDLE,
+	/*
+	 * Channel assigned but not operating
+	 */
+	CTC_STATE_STOPPED,
+	CTC_STATE_STARTWAIT,
+	CTC_STATE_STARTRETRY,
+	CTC_STATE_SETUPWAIT,
+	CTC_STATE_RXINIT,
+	CTC_STATE_TXINIT,
+	CTC_STATE_RX,
+	CTC_STATE_TX,
+	CTC_STATE_RXIDLE,
+	CTC_STATE_TXIDLE,
+	CTC_STATE_RXERR,
+	CTC_STATE_TXERR,
+	CTC_STATE_TERM,
+	CTC_STATE_DTERM,
+	CTC_STATE_NOTOP,
+	CTC_NR_STATES,     /* MUST be the last element of non-expanded states */
+	/*
+	 * additional MPC states
+	 */
+	CH_XID0_PENDING = CTC_NR_STATES,
+	CH_XID0_INPROGRESS,
+	CH_XID7_PENDING,
+	CH_XID7_PENDING1,
+	CH_XID7_PENDING2,
+	CH_XID7_PENDING3,
+	CH_XID7_PENDING4,
+	CTC_MPC_NR_STATES, /* MUST be the last element of expanded mpc states */
+};
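
Because the MPC-only states and events are appended after CTC_NR_STATES
and CTC_NR_EVENTS, one FSM instance sized with the extended counts covers
the combined table. A hedged sketch of creating the MPC channel FSM; the
init_fsm() signature is an assumption based on the driver's fsm.h:

	fsm_instance *fi = init_fsm("mpc_ch", ctc_ch_state_names,
				    ctc_ch_event_names, CTC_MPC_NR_STATES,
				    CTC_MPC_NR_EVENTS, ctcmpc_ch_fsm,
				    mpc_ch_fsm_len, GFP_KERNEL);
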
+
+extern const char *ctc_ch_event_names[];
+
+extern const char *ctc_ch_state_names[];
+
+void ctcm_ccw_check_rc(struct channel *ch, int rc, char *msg);
+void ctcm_purge_skb_queue(struct sk_buff_head *q);
+void fsm_action_nop(fsm_instance *fi, int event, void *arg);
+
+/*
+ * ----- non-static actions for ctcm channel statemachine -----
+ *
+ */
+void ctcm_chx_txidle(fsm_instance *fi, int event, void *arg);
+
+/*
+ * ----- FSM (state/event/action) of the ctcm channel statemachine -----
+ */
+extern const fsm_node ch_fsm[];
+extern int ch_fsm_len;
+
+
+/*
+ * ----- non-static actions for ctcmpc channel statemachine ----
+ *
+ */
+/* shared :
+void ctcm_chx_txidle(fsm_instance * fi, int event, void *arg);
+ */
+void ctcmpc_chx_rxidle(fsm_instance *fi, int event, void *arg);
+
+/*
+ * ----- FSM (state/event/action) of the ctcmpc channel statemachine -----
+ */
+extern const fsm_node ctcmpc_ch_fsm[];
+extern int mpc_ch_fsm_len;
+
+/*
+ * Definitions for the device interface statemachine for ctc and mpc
+ */
+
+/*
+ * States of the device interface statemachine.
+ */
+enum dev_states {
+	DEV_STATE_STOPPED,
+	DEV_STATE_STARTWAIT_RXTX,
+	DEV_STATE_STARTWAIT_RX,
+	DEV_STATE_STARTWAIT_TX,
+	DEV_STATE_STOPWAIT_RXTX,
+	DEV_STATE_STOPWAIT_RX,
+	DEV_STATE_STOPWAIT_TX,
+	DEV_STATE_RUNNING,
+	/*
+	 * MUST always be the last element!
+	 */
+	CTCM_NR_DEV_STATES
+};
+
+extern const char *dev_state_names[];
+
+/*
+ * Events of the device interface statemachine.
+ * ctcm and ctcmpc
+ */
+enum dev_events {
+	DEV_EVENT_START,
+	DEV_EVENT_STOP,
+	DEV_EVENT_RXUP,
+	DEV_EVENT_TXUP,
+	DEV_EVENT_RXDOWN,
+	DEV_EVENT_TXDOWN,
+	DEV_EVENT_RESTART,
+	/*
+	 * MUST always be the last element!
+	 */
+	CTCM_NR_DEV_EVENTS
+};
+
+extern const char *dev_event_names[];
+
+/*
+ * Actions for the device interface statemachine.
+ * ctc and ctcmpc
+ */
+/*
+static void dev_action_start(fsm_instance * fi, int event, void *arg);
+static void dev_action_stop(fsm_instance * fi, int event, void *arg);
+static void dev_action_restart(fsm_instance *fi, int event, void *arg);
+static void dev_action_chup(fsm_instance * fi, int event, void *arg);
+static void dev_action_chdown(fsm_instance * fi, int event, void *arg);
+*/
+
+/*
+ * The (state/event/action) fsm table of the device interface statemachine.
+ * ctcm and ctcmpc
+ */
+extern const fsm_node dev_fsm[];
+extern int dev_fsm_len;
+
+
+/*
+ * Definitions for the MPC Group statemachine
+ */
+
+/*
+ * MPC Group Station FSM States
+
+State Name		When In This State
+======================	=======================================
+MPCG_STATE_RESET	Initial State When Driver Loaded
+			We receive and send NOTHING
+
+MPCG_STATE_INOP         INOP Received.
+			Group level non-recoverable error
+
+MPCG_STATE_READY	XID exchanges for at least 1 write and
+			1 read channel have completed.
+			Group is ready for data transfer.
+
+States from ctc_mpc_alloc_channel
+==============================================================
+MPCG_STATE_XID2INITW	Awaiting XID2(0) Initiation
+			      ATTN from other side will start
+			      XID negotiations.
+			      Y-side protocol only.
+
+MPCG_STATE_XID2INITX	XID2(0) negotiations are in progress.
+			      At least 1, but not all, XID2(0)'s
+			      have been received from partner.
+
+MPCG_STATE_XID7INITW	XID2(0) complete
+			      No XID2(7)'s have yet been received.
+			      XID2(7) negotiations pending.
+
+MPCG_STATE_XID7INITX	XID2(7) negotiations in progress.
+			      At least 1, but not all, XID2(7)'s
+			      have been received from partner.
+
+MPCG_STATE_XID7INITF	XID2(7) negotiations complete.
+			      Transitioning to READY.
+
+MPCG_STATE_READY	      Ready for Data Transfer.
+
+
+States from ctc_mpc_establish_connectivity call
+==============================================================
+MPCG_STATE_XID0IOWAIT	Initiating XID2(0) negotiations.
+			      X-side protocol only.
+			      ATTN-BUSY from other side will convert
+			      this to Y-side protocol and the
+			      ctc_mpc_alloc_channel flow will begin.
+
+MPCG_STATE_XID0IOWAIX	XID2(0) negotiations are in progress.
+			      At least 1, but not all, XID2(0)'s
+			      have been received from partner.
+
+MPCG_STATE_XID7INITI	XID2(0) complete
+			      No XID2(7)'s have yet been received.
+			      XID2(7) negotiations pending.
+
+MPCG_STATE_XID7INITZ	XID2(7) negotiations in progress.
+			      At least 1, but not all, XID2(7)'s
+			      have been received from partner.
+
+MPCG_STATE_XID7INITF	XID2(7) negotiations complete.
+			      Transitioning to READY.
+
+MPCG_STATE_READY	      Ready for Data Transfer.
+
+*/
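
Condensed, the passive (y-side) path described above runs:

	RESET -> XID2INITW  (ctc_mpc_alloc_channel; partner's ATTN starts XID)
	      -> XID2INITX  (first XID2(0) received)
	      -> XID7INITW  (all XID2(0) exchanges complete)
	      -> XID7INITX  (first XID2(7) received)
	      -> XID7INITF  (all XID2(7) exchanges complete)
	      -> READY

The active (x-side) path from ctc_mpc_establish_connectivity has the same
shape, with XID0IOWAIT/XID0IOWAIX and XID7INITI/XID7INITZ in place of the
four intermediate states.
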
+
+enum mpcg_events {
+	MPCG_EVENT_INOP,
+	MPCG_EVENT_DISCONC,
+	MPCG_EVENT_XID0DO,
+	MPCG_EVENT_XID2,
+	MPCG_EVENT_XID2DONE,
+	MPCG_EVENT_XID7DONE,
+	MPCG_EVENT_TIMER,
+	MPCG_EVENT_DOIO,
+	MPCG_NR_EVENTS,
+};
+
+enum mpcg_states {
+	MPCG_STATE_RESET,
+	MPCG_STATE_INOP,
+	MPCG_STATE_XID2INITW,
+	MPCG_STATE_XID2INITX,
+	MPCG_STATE_XID7INITW,
+	MPCG_STATE_XID7INITX,
+	MPCG_STATE_XID0IOWAIT,
+	MPCG_STATE_XID0IOWAIX,
+	MPCG_STATE_XID7INITI,
+	MPCG_STATE_XID7INITZ,
+	MPCG_STATE_XID7INITF,
+	MPCG_STATE_FLOWC,
+	MPCG_STATE_READY,
+	MPCG_NR_STATES,
+};
+
+#endif
+/* --- This is the END my friend --- */
diff --git a/drivers/s390/net/ctcm_main.c b/drivers/s390/net/ctcm_main.c
new file mode 100644
index 0000000..7617d21
--- /dev/null
+++ b/drivers/s390/net/ctcm_main.c
@@ -0,0 +1,1871 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright IBM Corp. 2001, 2009
+ * Author(s):
+ *	Original CTC driver(s):
+ *		Fritz Elfert (felfert@millenux.com)
+ *		Dieter Wellerdiek (wel@de.ibm.com)
+ *		Martin Schwidefsky (schwidefsky@de.ibm.com)
+ *		Denis Joseph Barrow (barrow_dj@yahoo.com)
+ *		Jochen Roehrig (roehrig@de.ibm.com)
+ *		Cornelia Huck <cornelia.huck@de.ibm.com>
+ *	MPC additions:
+ *		Belinda Thompson (belindat@us.ibm.com)
+ *		Andy Richter (richtera@us.ibm.com)
+ *	Revived by:
+ *		Peter Tiedemann (ptiedem@de.ibm.com)
+ */
+
+#undef DEBUG
+#undef DEBUGDATA
+#undef DEBUGCCW
+
+#define KMSG_COMPONENT "ctcm"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <linux/interrupt.h>
+#include <linux/timer.h>
+#include <linux/bitops.h>
+
+#include <linux/signal.h>
+#include <linux/string.h>
+
+#include <linux/ip.h>
+#include <linux/if_arp.h>
+#include <linux/tcp.h>
+#include <linux/skbuff.h>
+#include <linux/ctype.h>
+#include <net/dst.h>
+
+#include <linux/io.h>
+#include <asm/ccwdev.h>
+#include <asm/ccwgroup.h>
+#include <linux/uaccess.h>
+
+#include <asm/idals.h>
+
+#include "ctcm_fsms.h"
+#include "ctcm_main.h"
+
+/* Some common global variables */
+
+/**
+ * The root device for ctcm group devices
+ */
+static struct device *ctcm_root_dev;
+
+/*
+ * Linked list of all detected channels.
+ */
+struct channel *channels;
+
+/**
+ * Unpack a just-received skb and hand it over to
+ * upper layers.
+ *
+ *  ch		The channel where this skb has been received.
+ *  pskb	The received skb.
+ */
+void ctcm_unpack_skb(struct channel *ch, struct sk_buff *pskb)
+{
+	struct net_device *dev = ch->netdev;
+	struct ctcm_priv *priv = dev->ml_priv;
+	__u16 len = *((__u16 *) pskb->data);
+
+	skb_put(pskb, 2 + LL_HEADER_LENGTH);
+	skb_pull(pskb, 2);
+	pskb->dev = dev;
+	pskb->ip_summed = CHECKSUM_UNNECESSARY;
+	while (len > 0) {
+		struct sk_buff *skb;
+		int skblen;
+		struct ll_header *header = (struct ll_header *)pskb->data;
+
+		skb_pull(pskb, LL_HEADER_LENGTH);
+		if ((ch->protocol == CTCM_PROTO_S390) &&
+		    (header->type != ETH_P_IP)) {
+			if (!(ch->logflags & LOG_FLAG_ILLEGALPKT)) {
+				ch->logflags |= LOG_FLAG_ILLEGALPKT;
+				/*
+				 * Check the packet type only when sticking
+				 * strictly to the S/390 protocol of OS/390,
+				 * which supports IP only; otherwise any
+				 * packet type is allowed.
+				 */
+				CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR,
+					"%s(%s): Illegal packet type 0x%04x"
+					" - dropping",
+					CTCM_FUNTAIL, dev->name, header->type);
+			}
+			priv->stats.rx_dropped++;
+			priv->stats.rx_frame_errors++;
+			return;
+		}
+		pskb->protocol = cpu_to_be16(header->type);
+		if ((header->length <= LL_HEADER_LENGTH) ||
+		    (len <= LL_HEADER_LENGTH)) {
+			if (!(ch->logflags & LOG_FLAG_ILLEGALSIZE)) {
+				CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR,
+					"%s(%s): Illegal packet size %d(%d,%d)"
+					"- dropping",
+					CTCM_FUNTAIL, dev->name,
+					header->length, dev->mtu, len);
+				ch->logflags |= LOG_FLAG_ILLEGALSIZE;
+			}
+
+			priv->stats.rx_dropped++;
+			priv->stats.rx_length_errors++;
+			return;
+		}
+		header->length -= LL_HEADER_LENGTH;
+		len -= LL_HEADER_LENGTH;
+		if ((header->length > skb_tailroom(pskb)) ||
+		    (header->length > len)) {
+			if (!(ch->logflags & LOG_FLAG_OVERRUN)) {
+				CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR,
+					"%s(%s): Packet size %d (overrun)"
+					" - dropping", CTCM_FUNTAIL,
+						dev->name, header->length);
+				ch->logflags |= LOG_FLAG_OVERRUN;
+			}
+
+			priv->stats.rx_dropped++;
+			priv->stats.rx_length_errors++;
+			return;
+		}
+		skb_put(pskb, header->length);
+		skb_reset_mac_header(pskb);
+		len -= header->length;
+		skb = dev_alloc_skb(pskb->len);
+		if (!skb) {
+			if (!(ch->logflags & LOG_FLAG_NOMEM)) {
+				CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR,
+					"%s(%s): MEMORY allocation error",
+						CTCM_FUNTAIL, dev->name);
+				ch->logflags |= LOG_FLAG_NOMEM;
+			}
+			priv->stats.rx_dropped++;
+			return;
+		}
+		skb_copy_from_linear_data(pskb, skb_put(skb, pskb->len),
+					  pskb->len);
+		skb_reset_mac_header(skb);
+		skb->dev = pskb->dev;
+		skb->protocol = pskb->protocol;
+		pskb->ip_summed = CHECKSUM_UNNECESSARY;
+		skblen = skb->len;
+		/*
+		 * reset logflags
+		 */
+		ch->logflags = 0;
+		priv->stats.rx_packets++;
+		priv->stats.rx_bytes += skblen;
+		netif_rx_ni(skb);
+		if (len > 0) {
+			skb_pull(pskb, header->length);
+			if (skb_tailroom(pskb) < LL_HEADER_LENGTH) {
+				CTCM_DBF_DEV_NAME(TRACE, dev,
+					"Overrun in ctcm_unpack_skb");
+				ch->logflags |= LOG_FLAG_OVERRUN;
+				return;
+			}
+			skb_put(pskb, LL_HEADER_LENGTH);
+		}
+	}
+}
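
For orientation, the plain-CTC block format that ctcm_unpack_skb() walks
(and that ctcm_transmit_skb() below builds) is a 16-bit total block
length, including its own two bytes, followed by one or more
(ll_header, payload) units. A hedged sketch of assembling a
single-payload block; buf, payload and payload_len are illustrative:

	struct ll_header hdr = {
		.length = LL_HEADER_LENGTH + payload_len, /* this unit's length   */
		.type   = ETH_P_IP,                       /* upper-layer protocol */
		.unused = 0,
	};
	__u16 block_len = 2 + LL_HEADER_LENGTH + payload_len;

	memcpy(buf, &block_len, 2);              /* leading block length */
	memcpy(buf + 2, &hdr, LL_HEADER_LENGTH); /* link-level header    */
	memcpy(buf + 2 + LL_HEADER_LENGTH, payload, payload_len);
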
+
+/**
+ * Release a specific channel in the channel list.
+ *
+ *  ch		Pointer to channel struct to be released.
+ */
+static void channel_free(struct channel *ch)
+{
+	CTCM_DBF_TEXT_(SETUP, CTC_DBF_INFO, "%s(%s)", CTCM_FUNTAIL, ch->id);
+	ch->flags &= ~CHANNEL_FLAGS_INUSE;
+	fsm_newstate(ch->fsm, CTC_STATE_IDLE);
+}
+
+/**
+ * Remove a specific channel in the channel list.
+ *
+ *  ch		Pointer to channel struct to be released.
+ */
+static void channel_remove(struct channel *ch)
+{
+	struct channel **c = &channels;
+	char chid[CTCM_ID_SIZE+1];
+	int ok = 0;
+
+	if (ch == NULL)
+		return;
+
+	strncpy(chid, ch->id, CTCM_ID_SIZE);
+
+	channel_free(ch);
+	while (*c) {
+		if (*c == ch) {
+			*c = ch->next;
+			fsm_deltimer(&ch->timer);
+			if (IS_MPC(ch))
+				fsm_deltimer(&ch->sweep_timer);
+
+			kfree_fsm(ch->fsm);
+			clear_normalized_cda(&ch->ccw[4]);
+			if (ch->trans_skb != NULL) {
+				clear_normalized_cda(&ch->ccw[1]);
+				dev_kfree_skb_any(ch->trans_skb);
+			}
+			if (IS_MPC(ch)) {
+				tasklet_kill(&ch->ch_tasklet);
+				tasklet_kill(&ch->ch_disc_tasklet);
+				kfree(ch->discontact_th);
+			}
+			kfree(ch->ccw);
+			kfree(ch->irb);
+			kfree(ch);
+			ok = 1;
+			break;
+		}
+		c = &((*c)->next);
+	}
+
+	CTCM_DBF_TEXT_(SETUP, CTC_DBF_INFO, "%s(%s) %s", CTCM_FUNTAIL,
+			chid, ok ? "OK" : "failed");
+}
+
+/**
+ * Get a specific channel from the channel list.
+ *
+ *  type	Type of channel we are interested in.
+ *  id		Id of channel we are interested in.
+ *  direction	Direction we want to use this channel for.
+ *
+ * returns Pointer to a channel or NULL if no matching channel available.
+ */
+static struct channel *channel_get(enum ctcm_channel_types type,
+					char *id, int direction)
+{
+	struct channel *ch = channels;
+
+	while (ch && (strncmp(ch->id, id, CTCM_ID_SIZE) || (ch->type != type)))
+		ch = ch->next;
+	if (!ch) {
+		CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR,
+				"%s(%d, %s, %d) not found in channel list\n",
+				CTCM_FUNTAIL, type, id, direction);
+	} else {
+		if (ch->flags & CHANNEL_FLAGS_INUSE)
+			ch = NULL;
+		else {
+			ch->flags |= CHANNEL_FLAGS_INUSE;
+			ch->flags &= ~CHANNEL_FLAGS_RWMASK;
+			ch->flags |= (direction == CTCM_WRITE)
+			    ? CHANNEL_FLAGS_WRITE : CHANNEL_FLAGS_READ;
+			fsm_newstate(ch->fsm, CTC_STATE_STOPPED);
+		}
+	}
+	return ch;
+}
+
+static long ctcm_check_irb_error(struct ccw_device *cdev, struct irb *irb)
+{
+	if (!IS_ERR(irb))
+		return 0;
+
+	CTCM_DBF_TEXT_(ERROR, CTC_DBF_WARN,
+			"irb error %ld on device %s\n",
+				PTR_ERR(irb), dev_name(&cdev->dev));
+
+	switch (PTR_ERR(irb)) {
+	case -EIO:
+		dev_err(&cdev->dev,
+			"An I/O-error occurred on the CTCM device\n");
+		break;
+	case -ETIMEDOUT:
+		dev_err(&cdev->dev,
+			"An adapter hardware operation timed out\n");
+		break;
+	default:
+		dev_err(&cdev->dev,
+			"An error occurred on the adapter hardware\n");
+	}
+	return PTR_ERR(irb);
+}
+
+
+/**
+ * Check sense of a unit check.
+ *
+ *  ch		The channel, the sense code belongs to.
+ *  sense	The sense code to inspect.
+ */
+static void ccw_unit_check(struct channel *ch, __u8 sense)
+{
+	CTCM_DBF_TEXT_(TRACE, CTC_DBF_DEBUG,
+			"%s(%s): %02x",
+				CTCM_FUNTAIL, ch->id, sense);
+
+	if (sense & SNS0_INTERVENTION_REQ) {
+		if (sense & 0x01) {
+			if (ch->sense_rc != 0x01) {
+				pr_notice(
+					"%s: The communication peer has "
+					"disconnected\n", ch->id);
+				ch->sense_rc = 0x01;
+			}
+			fsm_event(ch->fsm, CTC_EVENT_UC_RCRESET, ch);
+		} else {
+			if (ch->sense_rc != SNS0_INTERVENTION_REQ) {
+				pr_notice(
+					"%s: The remote operating system is "
+					"not available\n", ch->id);
+				ch->sense_rc = SNS0_INTERVENTION_REQ;
+			}
+			fsm_event(ch->fsm, CTC_EVENT_UC_RSRESET, ch);
+		}
+	} else if (sense & SNS0_EQUIPMENT_CHECK) {
+		if (sense & SNS0_BUS_OUT_CHECK) {
+			if (ch->sense_rc != SNS0_BUS_OUT_CHECK) {
+				CTCM_DBF_TEXT_(TRACE, CTC_DBF_WARN,
+					"%s(%s): remote HW error %02x",
+						CTCM_FUNTAIL, ch->id, sense);
+				ch->sense_rc = SNS0_BUS_OUT_CHECK;
+			}
+			fsm_event(ch->fsm, CTC_EVENT_UC_HWFAIL, ch);
+		} else {
+			if (ch->sense_rc != SNS0_EQUIPMENT_CHECK) {
+				CTCM_DBF_TEXT_(TRACE, CTC_DBF_WARN,
+					"%s(%s): remote read parity error %02x",
+						CTCM_FUNTAIL, ch->id, sense);
+				ch->sense_rc = SNS0_EQUIPMENT_CHECK;
+			}
+			fsm_event(ch->fsm, CTC_EVENT_UC_RXPARITY, ch);
+		}
+	} else if (sense & SNS0_BUS_OUT_CHECK) {
+		if (ch->sense_rc != SNS0_BUS_OUT_CHECK) {
+			CTCM_DBF_TEXT_(TRACE, CTC_DBF_WARN,
+				"%s(%s): BUS OUT error %02x",
+					CTCM_FUNTAIL, ch->id, sense);
+			ch->sense_rc = SNS0_BUS_OUT_CHECK;
+		}
+		if (sense & 0x04)	/* data-streaming timeout */
+			fsm_event(ch->fsm, CTC_EVENT_UC_TXTIMEOUT, ch);
+		else			/* Data-transfer parity error */
+			fsm_event(ch->fsm, CTC_EVENT_UC_TXPARITY, ch);
+	} else if (sense & SNS0_CMD_REJECT) {
+		if (ch->sense_rc != SNS0_CMD_REJECT) {
+			CTCM_DBF_TEXT_(TRACE, CTC_DBF_WARN,
+				"%s(%s): Command rejected",
+						CTCM_FUNTAIL, ch->id);
+			ch->sense_rc = SNS0_CMD_REJECT;
+		}
+	} else if (sense == 0) {
+		CTCM_DBF_TEXT_(TRACE, CTC_DBF_WARN,
+			"%s(%s): Unit check ZERO",
+					CTCM_FUNTAIL, ch->id);
+		fsm_event(ch->fsm, CTC_EVENT_UC_ZERO, ch);
+	} else {
+		CTCM_DBF_TEXT_(TRACE, CTC_DBF_WARN,
+			"%s(%s): Unit check code %02x unknown",
+					CTCM_FUNTAIL, ch->id, sense);
+		fsm_event(ch->fsm, CTC_EVENT_UC_UNKNOWN, ch);
+	}
+}
+
+int ctcm_ch_alloc_buffer(struct channel *ch)
+{
+	clear_normalized_cda(&ch->ccw[1]);
+	ch->trans_skb = __dev_alloc_skb(ch->max_bufsize, GFP_ATOMIC | GFP_DMA);
+	if (ch->trans_skb == NULL) {
+		CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR,
+			"%s(%s): %s trans_skb allocation error",
+			CTCM_FUNTAIL, ch->id,
+			(CHANNEL_DIRECTION(ch->flags) == CTCM_READ) ?
+				"RX" : "TX");
+		return -ENOMEM;
+	}
+
+	ch->ccw[1].count = ch->max_bufsize;
+	if (set_normalized_cda(&ch->ccw[1], ch->trans_skb->data)) {
+		dev_kfree_skb(ch->trans_skb);
+		ch->trans_skb = NULL;
+		CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR,
+			"%s(%s): %s set norm_cda failed",
+			CTCM_FUNTAIL, ch->id,
+			(CHANNEL_DIRECTION(ch->flags) == CTCM_READ) ?
+				"RX" : "TX");
+		return -ENOMEM;
+	}
+
+	ch->ccw[1].count = 0;
+	ch->trans_skb_data = ch->trans_skb->data;
+	ch->flags &= ~CHANNEL_FLAGS_BUFSIZE_CHANGED;
+	return 0;
+}
+
+/*
+ * Interface API for upper network layers
+ */
+
+/**
+ * Open an interface.
+ * Called from generic network layer when ifconfig up is run.
+ *
+ *  dev		Pointer to interface struct.
+ *
+ * returns 0 (this function currently never fails).
+ */
+int ctcm_open(struct net_device *dev)
+{
+	struct ctcm_priv *priv = dev->ml_priv;
+
+	CTCMY_DBF_DEV_NAME(SETUP, dev, "");
+	if (!IS_MPC(priv))
+		fsm_event(priv->fsm,	DEV_EVENT_START, dev);
+	return 0;
+}
+
+/**
+ * Close an interface.
+ * Called from generic network layer when ifconfig down is run.
+ *
+ *  dev		Pointer to interface struct.
+ *
+ * returns 0 (this function currently never fails).
+ */
+int ctcm_close(struct net_device *dev)
+{
+	struct ctcm_priv *priv = dev->ml_priv;
+
+	CTCMY_DBF_DEV_NAME(SETUP, dev, "");
+	if (!IS_MPC(priv))
+		fsm_event(priv->fsm, DEV_EVENT_STOP, dev);
+	return 0;
+}
+
+
+/**
+ * Transmit a packet.
+ * This is a helper function for ctcm_tx().
+ *
+ *  ch		Channel to be used for sending.
+ *  skb		Pointer to struct sk_buff of packet to send.
+ *            The linklevel header has already been set up
+ *            by ctcm_tx().
+ *
+ * returns 0 on success, -ERRNO on failure.
+ */
+static int ctcm_transmit_skb(struct channel *ch, struct sk_buff *skb)
+{
+	unsigned long saveflags;
+	struct ll_header header;
+	int rc = 0;
+	__u16 block_len;
+	int ccw_idx;
+	struct sk_buff *nskb;
+	unsigned long hi;
+
+	/* We need to acquire the lock for testing the state;
+	 * otherwise an IRQ could change the state to
+	 * TXIDLE after the test but before acquiring the lock.
+	 */
+	spin_lock_irqsave(&ch->collect_lock, saveflags);
+	if (fsm_getstate(ch->fsm) != CTC_STATE_TXIDLE) {
+		int l = skb->len + LL_HEADER_LENGTH;
+
+		if (ch->collect_len + l > ch->max_bufsize - 2) {
+			spin_unlock_irqrestore(&ch->collect_lock, saveflags);
+			return -EBUSY;
+		} else {
+			refcount_inc(&skb->users);
+			header.length = l;
+			header.type = be16_to_cpu(skb->protocol);
+			header.unused = 0;
+			memcpy(skb_push(skb, LL_HEADER_LENGTH), &header,
+			       LL_HEADER_LENGTH);
+			skb_queue_tail(&ch->collect_queue, skb);
+			ch->collect_len += l;
+		}
+		spin_unlock_irqrestore(&ch->collect_lock, saveflags);
+		goto done;
+	}
+	spin_unlock_irqrestore(&ch->collect_lock, saveflags);
+	/*
+	 * Protect the skb against being freed by upper
+	 * layers.
+	 */
+	refcount_inc(&skb->users);
+	ch->prof.txlen += skb->len;
+	header.length = skb->len + LL_HEADER_LENGTH;
+	header.type = be16_to_cpu(skb->protocol);
+	header.unused = 0;
+	memcpy(skb_push(skb, LL_HEADER_LENGTH), &header, LL_HEADER_LENGTH);
+	block_len = skb->len + 2;
+	*((__u16 *)skb_push(skb, 2)) = block_len;
+
+	/*
+	 * IDAL support in CTCM is broken, so we have to
+	 * care about skb's above 2G ourselves.
+	 */
+	hi = ((unsigned long)skb_tail_pointer(skb) + LL_HEADER_LENGTH) >> 31;
+	if (hi) {
+		nskb = alloc_skb(skb->len, GFP_ATOMIC | GFP_DMA);
+		if (!nskb) {
+			refcount_dec(&skb->users);
+			skb_pull(skb, LL_HEADER_LENGTH + 2);
+			ctcm_clear_busy(ch->netdev);
+			return -ENOMEM;
+		} else {
+			skb_put_data(nskb, skb->data, skb->len);
+			refcount_inc(&nskb->users);
+			refcount_dec(&skb->users);
+			dev_kfree_skb_irq(skb);
+			skb = nskb;
+		}
+	}
+
+	ch->ccw[4].count = block_len;
+	if (set_normalized_cda(&ch->ccw[4], skb->data)) {
+		/*
+		 * idal allocation failed, try via copying to
+		 * trans_skb. trans_skb usually has a pre-allocated
+		 * idal.
+		 */
+		if (ctcm_checkalloc_buffer(ch)) {
+			/*
+			 * Remove our header. It gets added
+			 * again on retransmit.
+			 */
+			refcount_dec(&skb->users);
+			skb_pull(skb, LL_HEADER_LENGTH + 2);
+			ctcm_clear_busy(ch->netdev);
+			return -ENOMEM;
+		}
+
+		skb_reset_tail_pointer(ch->trans_skb);
+		ch->trans_skb->len = 0;
+		ch->ccw[1].count = skb->len;
+		skb_copy_from_linear_data(skb,
+				skb_put(ch->trans_skb, skb->len), skb->len);
+		refcount_dec(&skb->users);
+		dev_kfree_skb_irq(skb);
+		ccw_idx = 0;
+	} else {
+		skb_queue_tail(&ch->io_queue, skb);
+		ccw_idx = 3;
+	}
+	if (do_debug_ccw)
+		ctcmpc_dumpit((char *)&ch->ccw[ccw_idx],
+					sizeof(struct ccw1) * 3);
+	ch->retry = 0;
+	fsm_newstate(ch->fsm, CTC_STATE_TX);
+	fsm_addtimer(&ch->timer, CTCM_TIME_5_SEC, CTC_EVENT_TIMER, ch);
+	spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags);
+	ch->prof.send_stamp = jiffies;
+	rc = ccw_device_start(ch->cdev, &ch->ccw[ccw_idx],
+					(unsigned long)ch, 0xff, 0);
+	spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), saveflags);
+	if (ccw_idx == 3)
+		ch->prof.doios_single++;
+	if (rc != 0) {
+		fsm_deltimer(&ch->timer);
+		ctcm_ccw_check_rc(ch, rc, "single skb TX");
+		if (ccw_idx == 3)
+			skb_dequeue_tail(&ch->io_queue);
+		/*
+		 * Remove our header. It gets added
+		 * again on retransmit.
+		 */
+		skb_pull(skb, LL_HEADER_LENGTH + 2);
+	} else if (ccw_idx == 0) {
+		struct net_device *dev = ch->netdev;
+		struct ctcm_priv *priv = dev->ml_priv;
+		priv->stats.tx_packets++;
+		priv->stats.tx_bytes += skb->len - LL_HEADER_LENGTH;
+	}
+done:
+	ctcm_clear_busy(ch->netdev);
+	return rc;
+}
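
The "hi" test above deserves a note: CCW data addresses used by this
driver are limited to 31 bits, so any buffer whose end would lie at or
above the 2 GiB line must first be copied into a freshly allocated
GFP_DMA skb. Restated minimally:

	unsigned long end = (unsigned long)skb_tail_pointer(skb) + LL_HEADER_LENGTH;
	int above_2g = end >> 31;	/* nonzero iff end >= 2^31 */
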
+
+static void ctcmpc_send_sweep_req(struct channel *rch)
+{
+	struct net_device *dev = rch->netdev;
+	struct ctcm_priv *priv;
+	struct mpc_group *grp;
+	struct th_sweep *header;
+	struct sk_buff *sweep_skb;
+	struct channel *ch;
+	/* int rc = 0; */
+
+	priv = dev->ml_priv;
+	grp = priv->mpcg;
+	ch = priv->channel[CTCM_WRITE];
+
+	/* sweep processing is not complete until response and request */
+	/* has completed for all read channels in group		       */
+	if (grp->in_sweep == 0) {
+		grp->in_sweep = 1;
+		grp->sweep_rsp_pend_num = grp->active_channels[CTCM_READ];
+		grp->sweep_req_pend_num = grp->active_channels[CTCM_READ];
+	}
+
+	sweep_skb = __dev_alloc_skb(MPC_BUFSIZE_DEFAULT, GFP_ATOMIC|GFP_DMA);
+
+	if (sweep_skb == NULL) {
+		/* rc = -ENOMEM; */
+		goto nomem;
+	}
+
+	header = kmalloc(TH_SWEEP_LENGTH, gfp_type());
+
+	if (!header) {
+		dev_kfree_skb_any(sweep_skb);
+		/* rc = -ENOMEM; */
+		goto nomem;
+	}
+
+	header->th.th_seg	= 0x00;
+	header->th.th_ch_flag	= TH_SWEEP_REQ;  /* 0x0f */
+	header->th.th_blk_flag	= 0x00;
+	header->th.th_is_xid	= 0x00;
+	header->th.th_seq_num	= 0x00;
+	header->sw.th_last_seq	= ch->th_seq_num;
+
+	skb_put_data(sweep_skb, header, TH_SWEEP_LENGTH);
+
+	kfree(header);
+
+	netif_trans_update(dev);
+	skb_queue_tail(&ch->sweep_queue, sweep_skb);
+
+	fsm_addtimer(&ch->sweep_timer, 100, CTC_EVENT_RSWEEP_TIMER, ch);
+
+	return;
+
+nomem:
+	grp->in_sweep = 0;
+	ctcm_clear_busy(dev);
+	fsm_event(grp->fsm, MPCG_EVENT_INOP, dev);
+
+	return;
+}
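
For contrast with the request built above, a sweep response would use the
same struct th_sweep layout with TH_SWEEP_RESP as the channel flag; the
driver's actual response path lives outside this function, so this is
only a hedged sketch:

	struct th_sweep rsp = {
		.th.th_seg      = 0x00,
		.th.th_ch_flag  = TH_SWEEP_RESP,
		.th.th_blk_flag = 0x00,
		.th.th_is_xid   = 0x00,
		.th.th_seq_num  = 0x00,
		.sw.th_last_seq = ch->th_seq_num,
	};
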
+
+/*
+ * MPC mode version of transmit_skb
+ */
+static int ctcmpc_transmit_skb(struct channel *ch, struct sk_buff *skb)
+{
+	struct pdu *p_header;
+	struct net_device *dev = ch->netdev;
+	struct ctcm_priv *priv = dev->ml_priv;
+	struct mpc_group *grp = priv->mpcg;
+	struct th_header *header;
+	struct sk_buff *nskb;
+	int rc = 0;
+	int ccw_idx;
+	unsigned long hi;
+	unsigned long saveflags = 0;	/* avoids compiler warning */
+
+	CTCM_PR_DEBUG("Enter %s: %s, cp=%i ch=0x%p id=%s state=%s\n",
+			__func__, dev->name, smp_processor_id(), ch,
+					ch->id, fsm_getstate_str(ch->fsm));
+
+	if ((fsm_getstate(ch->fsm) != CTC_STATE_TXIDLE) || grp->in_sweep) {
+		spin_lock_irqsave(&ch->collect_lock, saveflags);
+		refcount_inc(&skb->users);
+		p_header = kmalloc(PDU_HEADER_LENGTH, gfp_type());
+
+		if (!p_header) {
+			spin_unlock_irqrestore(&ch->collect_lock, saveflags);
+			goto nomem_exit;
+		}
+
+		p_header->pdu_offset = skb->len;
+		p_header->pdu_proto = 0x01;
+		p_header->pdu_flag = 0x00;
+		if (be16_to_cpu(skb->protocol) == ETH_P_SNAP) {
+			p_header->pdu_flag |= PDU_FIRST | PDU_CNTL;
+		} else {
+			p_header->pdu_flag |= PDU_FIRST;
+		}
+		p_header->pdu_seq = 0;
+		memcpy(skb_push(skb, PDU_HEADER_LENGTH), p_header,
+		       PDU_HEADER_LENGTH);
+
+		CTCM_PR_DEBUG("%s(%s): Put on collect_q - skb len: %04x\n"
+				"pdu header and data for up to 32 bytes:\n",
+				__func__, dev->name, skb->len);
+		CTCM_D3_DUMP((char *)skb->data, min_t(int, 32, skb->len));
+
+		skb_queue_tail(&ch->collect_queue, skb);
+		ch->collect_len += skb->len;
+		kfree(p_header);
+
+		spin_unlock_irqrestore(&ch->collect_lock, saveflags);
+		goto done;
+	}
+
+	/*
+	 * Protect the skb against being freed by upper
+	 * layers.
+	 */
+	refcount_inc(&skb->users);
+
+	/*
+	 * IDAL support in CTCM is broken, so we have to
+	 * care about skb's above 2G ourselves.
+	 */
+	hi = ((unsigned long)skb->tail + TH_HEADER_LENGTH) >> 31;
+	if (hi) {
+		nskb = __dev_alloc_skb(skb->len, GFP_ATOMIC | GFP_DMA);
+		if (!nskb)
+			goto nomem_exit;
+		skb_put_data(nskb, skb->data, skb->len);
+		refcount_inc(&nskb->users);
+		refcount_dec(&skb->users);
+		dev_kfree_skb_irq(skb);
+		skb = nskb;
+	}
+
+	p_header = kmalloc(PDU_HEADER_LENGTH, gfp_type());
+
+	if (!p_header)
+		goto nomem_exit;
+
+	p_header->pdu_offset = skb->len;
+	p_header->pdu_proto = 0x01;
+	p_header->pdu_flag = 0x00;
+	p_header->pdu_seq = 0;
+	if (be16_to_cpu(skb->protocol) == ETH_P_SNAP) {
+		p_header->pdu_flag |= PDU_FIRST | PDU_CNTL;
+	} else {
+		p_header->pdu_flag |= PDU_FIRST;
+	}
+	memcpy(skb_push(skb, PDU_HEADER_LENGTH), p_header, PDU_HEADER_LENGTH);
+
+	kfree(p_header);
+
+	if (ch->collect_len > 0) {
+		spin_lock_irqsave(&ch->collect_lock, saveflags);
+		skb_queue_tail(&ch->collect_queue, skb);
+		ch->collect_len += skb->len;
+		skb = skb_dequeue(&ch->collect_queue);
+		ch->collect_len -= skb->len;
+		spin_unlock_irqrestore(&ch->collect_lock, saveflags);
+	}
+
+	p_header = (struct pdu *)skb->data;
+	p_header->pdu_flag |= PDU_LAST;
+
+	ch->prof.txlen += skb->len - PDU_HEADER_LENGTH;
+
+	header = kmalloc(TH_HEADER_LENGTH, gfp_type());
+	if (!header)
+		goto nomem_exit;
+
+	header->th_seg = 0x00;
+	header->th_ch_flag = TH_HAS_PDU;  /* Normal data */
+	header->th_blk_flag = 0x00;
+	header->th_is_xid = 0x00;          /* Just data here */
+	ch->th_seq_num++;
+	header->th_seq_num = ch->th_seq_num;
+
+	CTCM_PR_DBGDATA("%s(%s) ToVTAM_th_seq= %08x\n" ,
+		       __func__, dev->name, ch->th_seq_num);
+
+	/* put the TH on the packet */
+	memcpy(skb_push(skb, TH_HEADER_LENGTH), header, TH_HEADER_LENGTH);
+
+	kfree(header);
+
+	CTCM_PR_DBGDATA("%s(%s): skb len: %04x\n - pdu header and data for "
+			"up to 32 bytes sent to vtam:\n",
+				__func__, dev->name, skb->len);
+	CTCM_D3_DUMP((char *)skb->data, min_t(int, 32, skb->len));
+
+	ch->ccw[4].count = skb->len;
+	if (set_normalized_cda(&ch->ccw[4], skb->data)) {
+		/*
+		 * idal allocation failed, try via copying to trans_skb.
+		 * trans_skb usually has a pre-allocated idal.
+		 */
+		if (ctcm_checkalloc_buffer(ch)) {
+			/*
+			 * Remove our header.
+			 * It gets added again on retransmit.
+			 */
+			goto nomem_exit;
+		}
+
+		skb_reset_tail_pointer(ch->trans_skb);
+		ch->trans_skb->len = 0;
+		ch->ccw[1].count = skb->len;
+		skb_put_data(ch->trans_skb, skb->data, skb->len);
+		refcount_dec(&skb->users);
+		dev_kfree_skb_irq(skb);
+		ccw_idx = 0;
+		CTCM_PR_DBGDATA("%s(%s): trans_skb len: %04x\n"
+				"up to 32 bytes sent to vtam:\n",
+				__func__, dev->name, ch->trans_skb->len);
+		CTCM_D3_DUMP((char *)ch->trans_skb->data,
+				min_t(int, 32, ch->trans_skb->len));
+	} else {
+		skb_queue_tail(&ch->io_queue, skb);
+		ccw_idx = 3;
+	}
+	ch->retry = 0;
+	fsm_newstate(ch->fsm, CTC_STATE_TX);
+	fsm_addtimer(&ch->timer, CTCM_TIME_5_SEC, CTC_EVENT_TIMER, ch);
+
+	if (do_debug_ccw)
+		ctcmpc_dumpit((char *)&ch->ccw[ccw_idx],
+					sizeof(struct ccw1) * 3);
+
+	spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags);
+	ch->prof.send_stamp = jiffies;
+	rc = ccw_device_start(ch->cdev, &ch->ccw[ccw_idx],
+					(unsigned long)ch, 0xff, 0);
+	spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), saveflags);
+	if (ccw_idx == 3)
+		ch->prof.doios_single++;
+	if (rc != 0) {
+		fsm_deltimer(&ch->timer);
+		ctcm_ccw_check_rc(ch, rc, "single skb TX");
+		if (ccw_idx == 3)
+			skb_dequeue_tail(&ch->io_queue);
+	} else if (ccw_idx == 0) {
+		priv->stats.tx_packets++;
+		priv->stats.tx_bytes += skb->len - TH_HEADER_LENGTH;
+	}
+	if (ch->th_seq_num > 0xf0000000)	/* Chosen at random. */
+		ctcmpc_send_sweep_req(ch);
+
+	goto done;
+nomem_exit:
+	CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_CRIT,
+			"%s(%s): MEMORY allocation ERROR\n",
+			CTCM_FUNTAIL, ch->id);
+	rc = -ENOMEM;
+	refcount_dec(&skb->users);
+	dev_kfree_skb_any(skb);
+	fsm_event(priv->mpcg->fsm, MPCG_EVENT_INOP, dev);
+done:
+	CTCM_PR_DEBUG("Exit %s(%s)\n", __func__, dev->name);
+	return rc;
+}
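
Summarizing the framing that ctcmpc_transmit_skb() assembles: a TH
(transport header) precedes a PDU header, which precedes the data. A
hedged sketch of the two headers for a single, non-SNAP PDU; field names
are those used above, while payload_len and ch are illustrative:

	/* on-wire order: [struct th_header][struct pdu][payload] */
	struct pdu pdu_hdr = {
		.pdu_offset = payload_len,          /* length of the data part */
		.pdu_proto  = 0x01,
		.pdu_flag   = PDU_FIRST | PDU_LAST, /* PDU_CNTL added for SNAP */
		.pdu_seq    = 0,
	};
	struct th_header th = {
		.th_seg      = 0x00,
		.th_ch_flag  = TH_HAS_PDU,          /* normal data          */
		.th_blk_flag = 0x00,
		.th_is_xid   = 0x00,
		.th_seq_num  = ch->th_seq_num + 1,  /* bumped once per send */
	};
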
+
+/**
+ * Start transmission of a packet.
+ * Called from generic network device layer.
+ *
+ *  skb		Pointer to buffer containing the packet.
+ *  dev		Pointer to interface struct.
+ *
+ * returns 0 if packet consumed, !0 if packet rejected.
+ *         Note: If we return !0, then the packet is freed by
+ *               the generic network layer.
+ */
+/* first merge version - leaving both functions separated */
+static int ctcm_tx(struct sk_buff *skb, struct net_device *dev)
+{
+	struct ctcm_priv *priv = dev->ml_priv;
+
+	if (skb == NULL) {
+		CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR,
+				"%s(%s): NULL sk_buff passed",
+					CTCM_FUNTAIL, dev->name);
+		priv->stats.tx_dropped++;
+		return NETDEV_TX_OK;
+	}
+	if (skb_headroom(skb) < (LL_HEADER_LENGTH + 2)) {
+		CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR,
+			"%s(%s): Got sk_buff with head room < %ld bytes",
+			CTCM_FUNTAIL, dev->name, LL_HEADER_LENGTH + 2);
+		dev_kfree_skb(skb);
+		priv->stats.tx_dropped++;
+		return NETDEV_TX_OK;
+	}
+
+	/*
+	 * If channels are not running, try to restart them
+	 * and throw away the packet.
+	 */
+	if (fsm_getstate(priv->fsm) != DEV_STATE_RUNNING) {
+		fsm_event(priv->fsm, DEV_EVENT_START, dev);
+		dev_kfree_skb(skb);
+		priv->stats.tx_dropped++;
+		priv->stats.tx_errors++;
+		priv->stats.tx_carrier_errors++;
+		return NETDEV_TX_OK;
+	}
+
+	if (ctcm_test_and_set_busy(dev))
+		return NETDEV_TX_BUSY;
+
+	netif_trans_update(dev);
+	if (ctcm_transmit_skb(priv->channel[CTCM_WRITE], skb) != 0)
+		return NETDEV_TX_BUSY;
+	return NETDEV_TX_OK;
+}
+
+/* unmerged MPC variant of ctcm_tx */
+static int ctcmpc_tx(struct sk_buff *skb, struct net_device *dev)
+{
+	int len = 0;
+	struct ctcm_priv *priv = dev->ml_priv;
+	struct mpc_group *grp  = priv->mpcg;
+	struct sk_buff *newskb = NULL;
+
+	/*
+	 * Some sanity checks ...
+	 */
+	if (skb == NULL) {
+		CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR,
+			"%s(%s): NULL sk_buff passed",
+					CTCM_FUNTAIL, dev->name);
+		priv->stats.tx_dropped++;
+		goto done;
+	}
+	if (skb_headroom(skb) < (TH_HEADER_LENGTH + PDU_HEADER_LENGTH)) {
+		CTCM_DBF_TEXT_(MPC_TRACE, CTC_DBF_ERROR,
+			"%s(%s): Got sk_buff with head room < %ld bytes",
+			CTCM_FUNTAIL, dev->name,
+				TH_HEADER_LENGTH + PDU_HEADER_LENGTH);
+
+		CTCM_D3_DUMP((char *)skb->data, min_t(int, 32, skb->len));
+
+		len = skb->len + TH_HEADER_LENGTH + PDU_HEADER_LENGTH;
+		newskb = __dev_alloc_skb(len, gfp_type() | GFP_DMA);
+
+		if (!newskb) {
+			CTCM_DBF_TEXT_(MPC_TRACE, CTC_DBF_ERROR,
+				"%s: %s: __dev_alloc_skb failed",
+						__func__, dev->name);
+
+			dev_kfree_skb_any(skb);
+			priv->stats.tx_dropped++;
+			priv->stats.tx_errors++;
+			priv->stats.tx_carrier_errors++;
+			fsm_event(grp->fsm, MPCG_EVENT_INOP, dev);
+			goto done;
+		}
+		newskb->protocol = skb->protocol;
+		skb_reserve(newskb, TH_HEADER_LENGTH + PDU_HEADER_LENGTH);
+		skb_put_data(newskb, skb->data, skb->len);
+		dev_kfree_skb_any(skb);
+		skb = newskb;
+	}
+
+	/*
+	 * If the channels are not running, notify anybody about
+	 * the link failure and throw away the packet.
+	 */
+	if ((fsm_getstate(priv->fsm) != DEV_STATE_RUNNING) ||
+	   (fsm_getstate(grp->fsm) <  MPCG_STATE_XID2INITW)) {
+		dev_kfree_skb_any(skb);
+		CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR,
+			"%s(%s): inactive MPCGROUP - dropped",
+					CTCM_FUNTAIL, dev->name);
+		priv->stats.tx_dropped++;
+		priv->stats.tx_errors++;
+		priv->stats.tx_carrier_errors++;
+		goto done;
+	}
+
+	if (ctcm_test_and_set_busy(dev)) {
+		CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR,
+			"%s(%s): device busy - dropped",
+					CTCM_FUNTAIL, dev->name);
+		dev_kfree_skb_any(skb);
+		priv->stats.tx_dropped++;
+		priv->stats.tx_errors++;
+		priv->stats.tx_carrier_errors++;
+		fsm_event(grp->fsm, MPCG_EVENT_INOP, dev);
+		goto done;
+	}
+
+	netif_trans_update(dev);
+	if (ctcmpc_transmit_skb(priv->channel[CTCM_WRITE], skb) != 0) {
+		CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR,
+			"%s(%s): device error - dropped",
+					CTCM_FUNTAIL, dev->name);
+		dev_kfree_skb_any(skb);
+		priv->stats.tx_dropped++;
+		priv->stats.tx_errors++;
+		priv->stats.tx_carrier_errors++;
+		ctcm_clear_busy(dev);
+		fsm_event(grp->fsm, MPCG_EVENT_INOP, dev);
+		goto done;
+	}
+	ctcm_clear_busy(dev);
+done:
+	if (do_debug)
+		MPC_DBF_DEV_NAME(TRACE, dev, "exit");
+
+	return NETDEV_TX_OK;	/* the skb was consumed or freed above */
+}
+
+
+/**
+ * Set the MTU of an interface.
+ *
+ *  dev		Pointer to the interface struct.
+ *  new_mtu	The new MTU to use for this interface.
+ *
+ * returns 0 on success, -EINVAL if the new MTU does not fit into the
+ *         channel's buffer size. The generic range check (576..65527)
+ *         is done by the core via dev->min_mtu/max_mtu. If VM is on
+ *         the remote side, the maximum usable MTU is 32760, but this
+ *         is not checked here.
+ */
+static int ctcm_change_mtu(struct net_device *dev, int new_mtu)
+{
+	struct ctcm_priv *priv;
+	int max_bufsize;
+
+	priv = dev->ml_priv;
+	max_bufsize = priv->channel[CTCM_READ]->max_bufsize;
+
+	if (IS_MPC(priv)) {
+		if (new_mtu > max_bufsize - TH_HEADER_LENGTH)
+			return -EINVAL;
+		dev->hard_header_len = TH_HEADER_LENGTH + PDU_HEADER_LENGTH;
+	} else {
+		if (new_mtu > max_bufsize - LL_HEADER_LENGTH - 2)
+			return -EINVAL;
+		dev->hard_header_len = LL_HEADER_LENGTH + 2;
+	}
+	dev->mtu = new_mtu;
+	return 0;
+}
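+
+/*
+ * Worked example for the non-MPC case, assuming the default buffer
+ * size (CTCM_BUFSIZE_DEFAULT = 32768) and the 6 byte struct ll_header:
+ * the largest MTU accepted above is 32768 - 6 - 2 = 32760, which is
+ * also the VM limit mentioned in the comment.
+ */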
+
+/**
+ * Returns interface statistics of a device.
+ *
+ *  dev		Pointer to interface struct.
+ *
+ * returns Pointer to stats struct of this interface.
+ */
+static struct net_device_stats *ctcm_stats(struct net_device *dev)
+{
+	return &((struct ctcm_priv *)dev->ml_priv)->stats;
+}
+
+static void ctcm_free_netdevice(struct net_device *dev)
+{
+	struct ctcm_priv *priv;
+	struct mpc_group *grp;
+
+	CTCM_DBF_TEXT_(SETUP, CTC_DBF_INFO,
+			"%s(%s)", CTCM_FUNTAIL, dev->name);
+	priv = dev->ml_priv;
+	if (priv) {
+		grp = priv->mpcg;
+		if (grp) {
+			if (grp->fsm)
+				kfree_fsm(grp->fsm);
+			if (grp->xid_skb)
+				dev_kfree_skb(grp->xid_skb);
+			if (grp->rcvd_xid_skb)
+				dev_kfree_skb(grp->rcvd_xid_skb);
+			tasklet_kill(&grp->mpc_tasklet2);
+			kfree(grp);
+			priv->mpcg = NULL;
+		}
+		if (priv->fsm) {
+			kfree_fsm(priv->fsm);
+			priv->fsm = NULL;
+		}
+		kfree(priv->xid);
+		priv->xid = NULL;
+		/*
+		 * Note: kfree(priv) is done in ctcm_remove_device(),
+		 * the counterpart of the allocating ctcm_probe_device().
+		 */
+	}
+#ifdef MODULE
+	free_netdev(dev);
+#endif
+}
+
+struct mpc_group *ctcmpc_init_mpc_group(struct ctcm_priv *priv);
+
+static const struct net_device_ops ctcm_netdev_ops = {
+	.ndo_open		= ctcm_open,
+	.ndo_stop		= ctcm_close,
+	.ndo_get_stats		= ctcm_stats,
+	.ndo_change_mtu		= ctcm_change_mtu,
+	.ndo_start_xmit		= ctcm_tx,
+};
+
+static const struct net_device_ops ctcm_mpc_netdev_ops = {
+	.ndo_open		= ctcm_open,
+	.ndo_stop		= ctcm_close,
+	.ndo_get_stats		= ctcm_stats,
+	.ndo_change_mtu		= ctcm_change_mtu,
+	.ndo_start_xmit		= ctcmpc_tx,
+};
+
+static void ctcm_dev_setup(struct net_device *dev)
+{
+	dev->type = ARPHRD_SLIP;
+	dev->tx_queue_len = 100;
+	dev->flags = IFF_POINTOPOINT | IFF_NOARP;
+	dev->min_mtu = 576;
+	dev->max_mtu = 65527;
+}
+
+/*
+ * Initialize everything of the net device except the name and the
+ * channel structs.
+ */
+static struct net_device *ctcm_init_netdevice(struct ctcm_priv *priv)
+{
+	struct net_device *dev;
+	struct mpc_group *grp;
+	if (!priv)
+		return NULL;
+
+	if (IS_MPC(priv))
+		dev = alloc_netdev(0, MPC_DEVICE_GENE, NET_NAME_UNKNOWN,
+				   ctcm_dev_setup);
+	else
+		dev = alloc_netdev(0, CTC_DEVICE_GENE, NET_NAME_UNKNOWN,
+				   ctcm_dev_setup);
+
+	if (!dev) {
+		CTCM_DBF_TEXT_(ERROR, CTC_DBF_CRIT,
+			"%s: MEMORY allocation ERROR",
+			CTCM_FUNTAIL);
+		return NULL;
+	}
+	dev->ml_priv = priv;
+	priv->fsm = init_fsm("ctcmdev", dev_state_names, dev_event_names,
+				CTCM_NR_DEV_STATES, CTCM_NR_DEV_EVENTS,
+				dev_fsm, dev_fsm_len, GFP_KERNEL);
+	if (priv->fsm == NULL) {
+		CTCMY_DBF_DEV(SETUP, dev, "init_fsm error");
+		free_netdev(dev);
+		return NULL;
+	}
+	fsm_newstate(priv->fsm, DEV_STATE_STOPPED);
+	fsm_settimer(priv->fsm, &priv->restart_timer);
+
+	if (IS_MPC(priv)) {
+		/*  MPC Group Initializations  */
+		grp = ctcmpc_init_mpc_group(priv);
+		if (grp == NULL) {
+			MPC_DBF_DEV(SETUP, dev, "init_mpc_group error");
+			free_netdev(dev);
+			return NULL;
+		}
+		tasklet_init(&grp->mpc_tasklet2,
+				mpc_group_ready, (unsigned long)dev);
+		dev->mtu = MPC_BUFSIZE_DEFAULT -
+				TH_HEADER_LENGTH - PDU_HEADER_LENGTH;
+
+		dev->netdev_ops = &ctcm_mpc_netdev_ops;
+		dev->hard_header_len = TH_HEADER_LENGTH + PDU_HEADER_LENGTH;
+		priv->buffer_size = MPC_BUFSIZE_DEFAULT;
+	} else {
+		dev->mtu = CTCM_BUFSIZE_DEFAULT - LL_HEADER_LENGTH - 2;
+		dev->netdev_ops = &ctcm_netdev_ops;
+		dev->hard_header_len = LL_HEADER_LENGTH + 2;
+	}
+
+	CTCMY_DBF_DEV(SETUP, dev, "finished");
+
+	return dev;
+}
+
+/**
+ * Main IRQ handler.
+ *
+ *  cdev	The ccw_device the interrupt is for.
+ *  intparm	interruption parameter.
+ *  irb		interruption response block.
+ */
+static void ctcm_irq_handler(struct ccw_device *cdev,
+				unsigned long intparm, struct irb *irb)
+{
+	struct channel		*ch;
+	struct net_device	*dev;
+	struct ctcm_priv	*priv;
+	struct ccwgroup_device	*cgdev;
+	int cstat;
+	int dstat;
+
+	CTCM_DBF_TEXT_(TRACE, CTC_DBF_DEBUG,
+		"Enter %s(%s)", CTCM_FUNTAIL, dev_name(&cdev->dev));
+
+	if (ctcm_check_irb_error(cdev, irb))
+		return;
+
+	cgdev = dev_get_drvdata(&cdev->dev);
+
+	cstat = irb->scsw.cmd.cstat;
+	dstat = irb->scsw.cmd.dstat;
+
+	/* Check for unsolicited interrupts. */
+	if (cgdev == NULL) {
+		CTCM_DBF_TEXT_(TRACE, CTC_DBF_ERROR,
+			"%s(%s) unsolicited irq: c-%02x d-%02x\n",
+			CTCM_FUNTAIL, dev_name(&cdev->dev), cstat, dstat);
+		dev_warn(&cdev->dev,
+			"The adapter received a non-specific IRQ\n");
+		return;
+	}
+
+	priv = dev_get_drvdata(&cgdev->dev);
+
+	/* Try to extract channel from driver data. */
+	if (priv->channel[CTCM_READ]->cdev == cdev)
+		ch = priv->channel[CTCM_READ];
+	else if (priv->channel[CTCM_WRITE]->cdev == cdev)
+		ch = priv->channel[CTCM_WRITE];
+	else {
+		dev_err(&cdev->dev,
+			"%s: Internal error: Can't determine channel for "
+			"interrupt device %s\n",
+			__func__, dev_name(&cdev->dev));
+		/* Explain: inconsistent internal structures */
+		return;
+	}
+
+	dev = ch->netdev;
+	if (dev == NULL) {
+		dev_err(&cdev->dev,
+			"%s Internal error: net_device is NULL, ch = 0x%p\n",
+			__func__, ch);
+		/* Explain: inconsistent internal structures */
+		return;
+	}
+
+	/* Copy interruption response block. */
+	memcpy(ch->irb, irb, sizeof(struct irb));
+
+	/* Issue error message and return on subchannel error code */
+	if (irb->scsw.cmd.cstat) {
+		fsm_event(ch->fsm, CTC_EVENT_SC_UNKNOWN, ch);
+		CTCM_DBF_TEXT_(TRACE, CTC_DBF_WARN,
+			"%s(%s): sub-ch check %s: cs=%02x ds=%02x",
+				CTCM_FUNTAIL, dev->name, ch->id, cstat, dstat);
+		dev_warn(&cdev->dev,
+				"A check occurred on the subchannel\n");
+		return;
+	}
+
+	/* Check the reason-code of a unit check */
+	if (irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK) {
+		if ((irb->ecw[0] & ch->sense_rc) == 0)
+			/* print it only once */
+			CTCM_DBF_TEXT_(TRACE, CTC_DBF_WARN,
+				"%s(%s): sense=%02x, ds=%02x",
+				CTCM_FUNTAIL, ch->id, irb->ecw[0], dstat);
+		ccw_unit_check(ch, irb->ecw[0]);
+		return;
+	}
+	if (irb->scsw.cmd.dstat & DEV_STAT_BUSY) {
+		if (irb->scsw.cmd.dstat & DEV_STAT_ATTENTION)
+			fsm_event(ch->fsm, CTC_EVENT_ATTNBUSY, ch);
+		else
+			fsm_event(ch->fsm, CTC_EVENT_BUSY, ch);
+		return;
+	}
+	if (irb->scsw.cmd.dstat & DEV_STAT_ATTENTION) {
+		fsm_event(ch->fsm, CTC_EVENT_ATTN, ch);
+		return;
+	}
+	if ((irb->scsw.cmd.stctl & SCSW_STCTL_SEC_STATUS) ||
+	    (irb->scsw.cmd.stctl == SCSW_STCTL_STATUS_PEND) ||
+	    (irb->scsw.cmd.stctl ==
+	     (SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND)))
+		fsm_event(ch->fsm, CTC_EVENT_FINSTAT, ch);
+	else
+		fsm_event(ch->fsm, CTC_EVENT_IRQ, ch);
+}
+
+static const struct device_type ctcm_devtype = {
+	.name = "ctcm",
+	.groups = ctcm_attr_groups,
+};
+
+/**
+ * Allocate the ctcm private data and add the ctcm specific
+ * attributes to a freshly added ccwgroup device.
+ *
+ *  cgdev	pointer to the ccwgroup_device just added
+ *
+ * returns 0 on success, !0 on failure.
+ */
+static int ctcm_probe_device(struct ccwgroup_device *cgdev)
+{
+	struct ctcm_priv *priv;
+
+	CTCM_DBF_TEXT_(SETUP, CTC_DBF_INFO,
+			"%s %p",
+			__func__, cgdev);
+
+	if (!get_device(&cgdev->dev))
+		return -ENODEV;
+
+	priv = kzalloc(sizeof(struct ctcm_priv), GFP_KERNEL);
+	if (!priv) {
+		CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR,
+			"%s: memory allocation failure",
+			CTCM_FUNTAIL);
+		put_device(&cgdev->dev);
+		return -ENOMEM;
+	}
+	priv->buffer_size = CTCM_BUFSIZE_DEFAULT;
+	cgdev->cdev[0]->handler = ctcm_irq_handler;
+	cgdev->cdev[1]->handler = ctcm_irq_handler;
+	dev_set_drvdata(&cgdev->dev, priv);
+	cgdev->dev.type = &ctcm_devtype;
+
+	return 0;
+}
+
+/**
+ * Add a new channel to the list of channels.
+ * Keeps the channel list sorted.
+ *
+ *  cdev	The ccw_device to be added.
+ *  type	The type class of the new channel.
+ *  priv	Points to the private data of the ccwgroup_device.
+ *
+ * returns 0 on success, !0 on error.
+ */
+static int add_channel(struct ccw_device *cdev, enum ctcm_channel_types type,
+				struct ctcm_priv *priv)
+{
+	struct channel **c = &channels;
+	struct channel *ch;
+	int ccw_num;
+	int rc = 0;
+
+	CTCM_DBF_TEXT_(SETUP, CTC_DBF_INFO,
+		"%s(%s), type %d, proto %d",
+			__func__, dev_name(&cdev->dev),	type, priv->protocol);
+
+	ch = kzalloc(sizeof(struct channel), GFP_KERNEL);
+	if (ch == NULL)
+		return -ENOMEM;
+
+	ch->protocol = priv->protocol;
+	if (IS_MPC(priv)) {
+		ch->discontact_th = kzalloc(TH_HEADER_LENGTH, gfp_type());
+		if (ch->discontact_th == NULL)
+			goto nomem_return;
+
+		ch->discontact_th->th_blk_flag = TH_DISCONTACT;
+		tasklet_init(&ch->ch_disc_tasklet,
+			mpc_action_send_discontact, (unsigned long)ch);
+
+		tasklet_init(&ch->ch_tasklet, ctcmpc_bh, (unsigned long)ch);
+		ch->max_bufsize = (MPC_BUFSIZE_DEFAULT - 35);
+		ccw_num = 17;
+	} else {
+		ccw_num = 8;
+	}
+
+	ch->ccw = kcalloc(ccw_num, sizeof(struct ccw1), GFP_KERNEL | GFP_DMA);
+	if (ch->ccw == NULL)
+		goto nomem_return;
+
+	ch->cdev = cdev;
+	snprintf(ch->id, CTCM_ID_SIZE, "ch-%s", dev_name(&cdev->dev));
+	ch->type = type;
+
+	/*
+	 * "static" ccws are used in the following way:
+	 *
+	 * ccw[0..2] (Channel program for generic I/O):
+	 *           0: prepare
+	 *           1: read or write (depending on direction) with fixed
+	 *              buffer (idal allocated once when buffer is allocated)
+	 *           2: nop
+	 * ccw[3..5] (Channel program for direct write of packets)
+	 *           3: prepare
+	 *           4: write (idal allocated on every write).
+	 *           5: nop
+	 * ccw[6..7] (Channel program for initial channel setup):
+	 *           6: set extended mode
+	 *           7: nop
+	 *
+	 * ch->ccw[0..5] are initialized in ch_action_start because
+	 * the channel's direction is yet unknown here.
+	 *
+	 * ccw[8..14] are used for the XID2 negotiations (MPC only),
+	 * on either side of the exchange:
+	 *    X side XID2 processing
+	 *           8: write control
+	 *           9: write th
+	 *          10: write XID
+	 *          11: read th from secondary
+	 *          12: read XID from secondary
+	 *          13: read 4 byte ID
+	 *          14: nop
+	 *    Y side XID processing
+	 *           8: sense
+	 *           9: read th
+	 *          10: read XID
+	 *          11: write th
+	 *          12: write XID
+	 *          13: write 4 byte ID
+	 *          14: nop
+	 *
+	 * ccw[15..16] are a double noop, working around VM timing issues
+	 * which otherwise result in an unrecoverable busy condition on
+	 * the channel:
+	 *          15: nop
+	 *          16: nop
+	 */
+	ch->ccw[6].cmd_code	= CCW_CMD_SET_EXTENDED;
+	ch->ccw[6].flags	= CCW_FLAG_SLI;
+
+	ch->ccw[7].cmd_code	= CCW_CMD_NOOP;
+	ch->ccw[7].flags	= CCW_FLAG_SLI;
+
+	if (IS_MPC(priv)) {
+		ch->ccw[15].cmd_code = CCW_CMD_WRITE;
+		ch->ccw[15].flags    = CCW_FLAG_SLI | CCW_FLAG_CC;
+		ch->ccw[15].count    = TH_HEADER_LENGTH;
+		ch->ccw[15].cda      = virt_to_phys(ch->discontact_th);
+
+		ch->ccw[16].cmd_code = CCW_CMD_NOOP;
+		ch->ccw[16].flags    = CCW_FLAG_SLI;
+
+		ch->fsm = init_fsm(ch->id, ctc_ch_state_names,
+				ctc_ch_event_names, CTC_MPC_NR_STATES,
+				CTC_MPC_NR_EVENTS, ctcmpc_ch_fsm,
+				mpc_ch_fsm_len, GFP_KERNEL);
+	} else {
+		ch->fsm = init_fsm(ch->id, ctc_ch_state_names,
+				ctc_ch_event_names, CTC_NR_STATES,
+				CTC_NR_EVENTS, ch_fsm,
+				ch_fsm_len, GFP_KERNEL);
+	}
+	if (ch->fsm == NULL)
+		goto nomem_return;
+
+	fsm_newstate(ch->fsm, CTC_STATE_IDLE);
+
+	ch->irb = kzalloc(sizeof(struct irb), GFP_KERNEL);
+	if (ch->irb == NULL)
+		goto nomem_return;
+
+	while (*c && ctcm_less_than((*c)->id, ch->id))
+		c = &(*c)->next;
+
+	if (*c && (!strncmp((*c)->id, ch->id, CTCM_ID_SIZE))) {
+		CTCM_DBF_TEXT_(SETUP, CTC_DBF_INFO,
+				"%s (%s) already in list, using old entry",
+				__func__, (*c)->id);
+
+		goto free_return;
+	}
+
+	spin_lock_init(&ch->collect_lock);
+
+	fsm_settimer(ch->fsm, &ch->timer);
+	skb_queue_head_init(&ch->io_queue);
+	skb_queue_head_init(&ch->collect_queue);
+
+	if (IS_MPC(priv)) {
+		fsm_settimer(ch->fsm, &ch->sweep_timer);
+		skb_queue_head_init(&ch->sweep_queue);
+	}
+	ch->next = *c;
+	*c = ch;
+	return 0;
+
+nomem_return:
+	rc = -ENOMEM;
+
+free_return:	/* note that all channel pointers are 0 or valid */
+	kfree(ch->ccw);
+	kfree(ch->discontact_th);
+	kfree_fsm(ch->fsm);
+	kfree(ch->irb);
+	kfree(ch);
+	return rc;
+}
+
+/*
+ * Return type of a detected device.
+ */
+static enum ctcm_channel_types get_channel_type(struct ccw_device_id *id)
+{
+	enum ctcm_channel_types type;
+	type = (enum ctcm_channel_types)id->driver_info;
+
+	if (type == ctcm_channel_type_ficon)
+		type = ctcm_channel_type_escon;
+
+	return type;
+}
+
+/**
+ * Setup an interface.
+ *
+ *  cgdev	Device to be setup.
+ *
+ * returns 0 on success, !0 on failure.
+ */
+static int ctcm_new_device(struct ccwgroup_device *cgdev)
+{
+	char read_id[CTCM_ID_SIZE];
+	char write_id[CTCM_ID_SIZE];
+	int direction;
+	enum ctcm_channel_types type;
+	struct ctcm_priv *priv;
+	struct net_device *dev;
+	struct ccw_device *cdev0;
+	struct ccw_device *cdev1;
+	struct channel *readc;
+	struct channel *writec;
+	int ret;
+	int result;
+
+	priv = dev_get_drvdata(&cgdev->dev);
+	if (!priv) {
+		result = -ENODEV;
+		goto out_err_result;
+	}
+
+	cdev0 = cgdev->cdev[0];
+	cdev1 = cgdev->cdev[1];
+
+	type = get_channel_type(&cdev0->id);
+
+	snprintf(read_id, CTCM_ID_SIZE, "ch-%s", dev_name(&cdev0->dev));
+	snprintf(write_id, CTCM_ID_SIZE, "ch-%s", dev_name(&cdev1->dev));
+
+	ret = add_channel(cdev0, type, priv);
+	if (ret) {
+		result = ret;
+		goto out_err_result;
+	}
+	ret = add_channel(cdev1, type, priv);
+	if (ret) {
+		result = ret;
+		goto out_remove_channel1;
+	}
+
+	ret = ccw_device_set_online(cdev0);
+	if (ret != 0) {
+		CTCM_DBF_TEXT_(TRACE, CTC_DBF_NOTICE,
+			"%s(%s) set_online rc=%d",
+				CTCM_FUNTAIL, read_id, ret);
+		result = -EIO;
+		goto out_remove_channel2;
+	}
+
+	ret = ccw_device_set_online(cdev1);
+	if (ret != 0) {
+		CTCM_DBF_TEXT_(TRACE, CTC_DBF_NOTICE,
+			"%s(%s) set_online rc=%d",
+				CTCM_FUNTAIL, write_id, ret);
+
+		result = -EIO;
+		goto out_ccw1;
+	}
+
+	dev = ctcm_init_netdevice(priv);
+	if (dev == NULL) {
+		result = -ENODEV;
+		goto out_ccw2;
+	}
+
+	for (direction = CTCM_READ; direction <= CTCM_WRITE; direction++) {
+		priv->channel[direction] =
+			channel_get(type, direction == CTCM_READ ?
+				read_id : write_id, direction);
+		if (priv->channel[direction] == NULL) {
+			if (direction == CTCM_WRITE)
+				channel_free(priv->channel[CTCM_READ]);
+			result = -ENODEV;
+			goto out_dev;
+		}
+		priv->channel[direction]->netdev = dev;
+		priv->channel[direction]->protocol = priv->protocol;
+		priv->channel[direction]->max_bufsize = priv->buffer_size;
+	}
+	/* sysfs magic */
+	SET_NETDEV_DEV(dev, &cgdev->dev);
+
+	if (register_netdev(dev)) {
+		result = -ENODEV;
+		goto out_dev;
+	}
+
+	strlcpy(priv->fsm->name, dev->name, sizeof(priv->fsm->name));
+
+	dev_info(&dev->dev,
+		"setup OK : r/w = %s/%s, protocol : %d\n",
+			priv->channel[CTCM_READ]->id,
+			priv->channel[CTCM_WRITE]->id, priv->protocol);
+
+	CTCM_DBF_TEXT_(SETUP, CTC_DBF_INFO,
+		"setup(%s) OK : r/w = %s/%s, protocol : %d", dev->name,
+			priv->channel[CTCM_READ]->id,
+			priv->channel[CTCM_WRITE]->id, priv->protocol);
+
+	return 0;
+out_dev:
+	ctcm_free_netdevice(dev);
+out_ccw2:
+	ccw_device_set_offline(cgdev->cdev[1]);
+out_ccw1:
+	ccw_device_set_offline(cgdev->cdev[0]);
+out_remove_channel2:
+	readc = channel_get(type, read_id, CTCM_READ);
+	channel_remove(readc);
+out_remove_channel1:
+	writec = channel_get(type, write_id, CTCM_WRITE);
+	channel_remove(writec);
+out_err_result:
+	return result;
+}
+
+/**
+ * Shutdown an interface.
+ *
+ *  cgdev	Device to be shut down.
+ *
+ * returns 0 on success, !0 on failure.
+ */
+static int ctcm_shutdown_device(struct ccwgroup_device *cgdev)
+{
+	struct ctcm_priv *priv;
+	struct net_device *dev;
+
+	priv = dev_get_drvdata(&cgdev->dev);
+	if (!priv)
+		return -ENODEV;
+
+	if (priv->channel[CTCM_READ]) {
+		dev = priv->channel[CTCM_READ]->netdev;
+		CTCM_DBF_DEV(SETUP, dev, "");
+		/* Close the device */
+		ctcm_close(dev);
+		dev->flags &= ~IFF_RUNNING;
+		channel_free(priv->channel[CTCM_READ]);
+	} else {
+		dev = NULL;
+	}
+
+	if (priv->channel[CTCM_WRITE])
+		channel_free(priv->channel[CTCM_WRITE]);
+
+	if (dev) {
+		unregister_netdev(dev);
+		ctcm_free_netdevice(dev);
+	}
+
+	if (priv->fsm)
+		kfree_fsm(priv->fsm);
+
+	ccw_device_set_offline(cgdev->cdev[1]);
+	ccw_device_set_offline(cgdev->cdev[0]);
+	channel_remove(priv->channel[CTCM_READ]);
+	channel_remove(priv->channel[CTCM_WRITE]);
+	priv->channel[CTCM_READ] = priv->channel[CTCM_WRITE] = NULL;
+
+	return 0;
+}
+
+static void ctcm_remove_device(struct ccwgroup_device *cgdev)
+{
+	struct ctcm_priv *priv = dev_get_drvdata(&cgdev->dev);
+
+	CTCM_DBF_TEXT_(SETUP, CTC_DBF_INFO,
+			"removing device %p, proto : %d",
+			cgdev, priv->protocol);
+
+	if (cgdev->state == CCWGROUP_ONLINE)
+		ctcm_shutdown_device(cgdev);
+	dev_set_drvdata(&cgdev->dev, NULL);
+	kfree(priv);
+	put_device(&cgdev->dev);
+}
+
+static int ctcm_pm_suspend(struct ccwgroup_device *gdev)
+{
+	struct ctcm_priv *priv = dev_get_drvdata(&gdev->dev);
+
+	if (gdev->state == CCWGROUP_OFFLINE)
+		return 0;
+	netif_device_detach(priv->channel[CTCM_READ]->netdev);
+	ctcm_close(priv->channel[CTCM_READ]->netdev);
+	if (!wait_event_timeout(priv->fsm->wait_q,
+	    fsm_getstate(priv->fsm) == DEV_STATE_STOPPED, CTCM_TIME_5_SEC)) {
+		netif_device_attach(priv->channel[CTCM_READ]->netdev);
+		return -EBUSY;
+	}
+	ccw_device_set_offline(gdev->cdev[1]);
+	ccw_device_set_offline(gdev->cdev[0]);
+	return 0;
+}
+
+static int ctcm_pm_resume(struct ccwgroup_device *gdev)
+{
+	struct ctcm_priv *priv = dev_get_drvdata(&gdev->dev);
+	int rc;
+
+	if (gdev->state == CCWGROUP_OFFLINE)
+		return 0;
+	rc = ccw_device_set_online(gdev->cdev[1]);
+	if (rc)
+		goto err_out;
+	rc = ccw_device_set_online(gdev->cdev[0]);
+	if (rc)
+		goto err_out;
+	ctcm_open(priv->channel[CTCM_READ]->netdev);
+err_out:
+	netif_device_attach(priv->channel[CTCM_READ]->netdev);
+	return rc;
+}
+
+static struct ccw_device_id ctcm_ids[] = {
+	{CCW_DEVICE(0x3088, 0x08), .driver_info = ctcm_channel_type_parallel},
+	{CCW_DEVICE(0x3088, 0x1e), .driver_info = ctcm_channel_type_ficon},
+	{CCW_DEVICE(0x3088, 0x1f), .driver_info = ctcm_channel_type_escon},
+	{},
+};
+MODULE_DEVICE_TABLE(ccw, ctcm_ids);
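+
+/*
+ * All three entries are control unit type 0x3088: model 0x08 is a
+ * CTC/A (parallel channel), model 0x1e a FICON and model 0x1f an
+ * ESCON channel, matching the driver_info values above.
+ */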
+
+static struct ccw_driver ctcm_ccw_driver = {
+	.driver = {
+		.owner	= THIS_MODULE,
+		.name	= "ctcm",
+	},
+	.ids	= ctcm_ids,
+	.probe	= ccwgroup_probe_ccwdev,
+	.remove	= ccwgroup_remove_ccwdev,
+	.int_class = IRQIO_CTC,
+};
+
+static struct ccwgroup_driver ctcm_group_driver = {
+	.driver = {
+		.owner	= THIS_MODULE,
+		.name	= CTC_DRIVER_NAME,
+	},
+	.ccw_driver  = &ctcm_ccw_driver,
+	.setup	     = ctcm_probe_device,
+	.remove      = ctcm_remove_device,
+	.set_online  = ctcm_new_device,
+	.set_offline = ctcm_shutdown_device,
+	.freeze	     = ctcm_pm_suspend,
+	.thaw	     = ctcm_pm_resume,
+	.restore     = ctcm_pm_resume,
+};
+
+static ssize_t group_store(struct device_driver *ddrv, const char *buf,
+			   size_t count)
+{
+	int err;
+
+	err = ccwgroup_create_dev(ctcm_root_dev, &ctcm_group_driver, 2, buf);
+	return err ? err : count;
+}
+static DRIVER_ATTR_WO(group);
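+
+/*
+ * Illustrative use from user space (the bus ids are examples): writing
+ *
+ *	echo 0.0.f5f0,0.0.f5f1 > /sys/bus/ccwgroup/drivers/ctcm/group
+ *
+ * pairs the two CCW devices into one ctcm group device.
+ */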
+
+static struct attribute *ctcm_drv_attrs[] = {
+	&driver_attr_group.attr,
+	NULL,
+};
+static struct attribute_group ctcm_drv_attr_group = {
+	.attrs = ctcm_drv_attrs,
+};
+static const struct attribute_group *ctcm_drv_attr_groups[] = {
+	&ctcm_drv_attr_group,
+	NULL,
+};
+
+/*
+ * Module related routines
+ */
+
+/*
+ * Prepare to be unloaded. Free IRQs and release all resources.
+ * This is called just before this module is unloaded. It is
+ * not called if the usage count is !0, so we don't need to check
+ * for that.
+ */
+static void __exit ctcm_exit(void)
+{
+	ccwgroup_driver_unregister(&ctcm_group_driver);
+	ccw_driver_unregister(&ctcm_ccw_driver);
+	root_device_unregister(ctcm_root_dev);
+	ctcm_unregister_dbf_views();
+	pr_info("CTCM driver unloaded\n");
+}
+
+/*
+ * Print Banner.
+ */
+static void print_banner(void)
+{
+	pr_info("CTCM driver initialized\n");
+}
+
+/**
+ * Initialize module.
+ * This is called just after the module is loaded.
+ *
+ * returns 0 on success, !0 on error.
+ */
+static int __init ctcm_init(void)
+{
+	int ret;
+
+	channels = NULL;
+
+	ret = ctcm_register_dbf_views();
+	if (ret)
+		goto out_err;
+	ctcm_root_dev = root_device_register("ctcm");
+	ret = PTR_ERR_OR_ZERO(ctcm_root_dev);
+	if (ret)
+		goto register_err;
+	ret = ccw_driver_register(&ctcm_ccw_driver);
+	if (ret)
+		goto ccw_err;
+	ctcm_group_driver.driver.groups = ctcm_drv_attr_groups;
+	ret = ccwgroup_driver_register(&ctcm_group_driver);
+	if (ret)
+		goto ccwgroup_err;
+	print_banner();
+	return 0;
+
+ccwgroup_err:
+	ccw_driver_unregister(&ctcm_ccw_driver);
+ccw_err:
+	root_device_unregister(ctcm_root_dev);
+register_err:
+	ctcm_unregister_dbf_views();
+out_err:
+	pr_err("%s / Initializing the ctcm device driver failed, ret = %d\n",
+		__func__, ret);
+	return ret;
+}
+
+module_init(ctcm_init);
+module_exit(ctcm_exit);
+
+MODULE_AUTHOR("Peter Tiedemann <ptiedem@de.ibm.com>");
+MODULE_DESCRIPTION("Network driver for S/390 CTC + CTCMPC (SNA)");
+MODULE_LICENSE("GPL");
+
diff --git a/drivers/s390/net/ctcm_main.h b/drivers/s390/net/ctcm_main.h
new file mode 100644
index 0000000..16bdf23
--- /dev/null
+++ b/drivers/s390/net/ctcm_main.h
@@ -0,0 +1,316 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ *	Copyright IBM Corp. 2001, 2007
+ *	Authors:	Fritz Elfert (felfert@millenux.com)
+ *			Peter Tiedemann (ptiedem@de.ibm.com)
+ */
+
+#ifndef _CTCM_MAIN_H_
+#define _CTCM_MAIN_H_
+
+#include <asm/ccwdev.h>
+#include <asm/ccwgroup.h>
+
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+
+#include "fsm.h"
+#include "ctcm_dbug.h"
+#include "ctcm_mpc.h"
+
+#define CTC_DRIVER_NAME	"ctcm"
+#define CTC_DEVICE_NAME	"ctc"
+#define MPC_DEVICE_NAME	"mpc"
+#define CTC_DEVICE_GENE	CTC_DEVICE_NAME "%d"
+#define MPC_DEVICE_GENE	MPC_DEVICE_NAME "%d"
+
+#define CHANNEL_FLAGS_READ	0
+#define CHANNEL_FLAGS_WRITE	1
+#define CHANNEL_FLAGS_INUSE	2
+#define CHANNEL_FLAGS_BUFSIZE_CHANGED	4
+#define CHANNEL_FLAGS_FAILED	8
+#define CHANNEL_FLAGS_WAITIRQ	16
+#define CHANNEL_FLAGS_RWMASK	1
+#define CHANNEL_DIRECTION(f) ((f) & CHANNEL_FLAGS_RWMASK)
+
+#define LOG_FLAG_ILLEGALPKT	1
+#define LOG_FLAG_ILLEGALSIZE	2
+#define LOG_FLAG_OVERRUN	4
+#define LOG_FLAG_NOMEM		8
+
+#define ctcm_pr_debug(fmt, arg...) printk(KERN_DEBUG fmt, ##arg)
+
+#define CTCM_PR_DEBUG(fmt, arg...) \
+	do { \
+		if (do_debug) \
+			printk(KERN_DEBUG fmt, ##arg); \
+	} while (0)
+
+#define	CTCM_PR_DBGDATA(fmt, arg...) \
+	do { \
+		if (do_debug_data) \
+			printk(KERN_DEBUG fmt, ##arg); \
+	} while (0)
+
+#define	CTCM_D3_DUMP(buf, len) \
+	do { \
+		if (do_debug_data) \
+			ctcmpc_dumpit(buf, len); \
+	} while (0)
+
+#define	CTCM_CCW_DUMP(buf, len) \
+	do { \
+		if (do_debug_ccw) \
+			ctcmpc_dumpit(buf, len); \
+	} while (0)
+
+/**
+ * Enum for classifying detected devices
+ */
+enum ctcm_channel_types {
+	/* Device is not a channel  */
+	ctcm_channel_type_none,
+
+	/* Device is a CTC/A */
+	ctcm_channel_type_parallel,
+
+	/* Device is a FICON channel */
+	ctcm_channel_type_ficon,
+
+	/* Device is an ESCON channel */
+	ctcm_channel_type_escon
+};
+
+/*
+ * CCW commands, used in this driver.
+ */
+#define CCW_CMD_WRITE		0x01
+#define CCW_CMD_READ		0x02
+#define CCW_CMD_NOOP		0x03
+#define CCW_CMD_TIC		0x08
+#define CCW_CMD_SENSE_CMD	0x14
+#define CCW_CMD_WRITE_CTL	0x17
+#define CCW_CMD_SET_EXTENDED	0xc3
+#define CCW_CMD_PREPARE		0xe3
+
+#define CTCM_PROTO_S390		0
+#define CTCM_PROTO_LINUX	1
+#define CTCM_PROTO_LINUX_TTY	2
+#define CTCM_PROTO_OS390	3
+#define CTCM_PROTO_MPC		4
+#define CTCM_PROTO_MAX		4
+
+#define CTCM_BUFSIZE_LIMIT	65535
+#define CTCM_BUFSIZE_DEFAULT	32768
+#define MPC_BUFSIZE_DEFAULT	CTCM_BUFSIZE_LIMIT
+
+#define CTCM_TIME_1_SEC		1000
+#define CTCM_TIME_5_SEC		5000
+#define CTCM_TIME_10_SEC	10000
+
+#define CTCM_INITIAL_BLOCKLEN	2
+
+#define CTCM_READ		0
+#define CTCM_WRITE		1
+
+#define CTCM_ID_SIZE		(20 + 3)
+
+struct ctcm_profile {
+	unsigned long maxmulti;
+	unsigned long maxcqueue;
+	unsigned long doios_single;
+	unsigned long doios_multi;
+	unsigned long txlen;
+	unsigned long tx_time;
+	unsigned long send_stamp;
+};
+
+/*
+ * Definition of one channel
+ */
+struct channel {
+	struct channel *next;
+	char id[CTCM_ID_SIZE];
+	struct ccw_device *cdev;
+	/*
+	 * Type of this channel.
+	 * CTC/A or ESCON for valid channels.
+	 */
+	enum ctcm_channel_types type;
+	/*
+	 * Misc. flags. See CHANNEL_FLAGS_... above.
+	 */
+	__u32 flags;
+	__u16 protocol;		/* protocol of this channel (4 = MPC) */
+	/*
+	 * I/O and irq related stuff
+	 */
+	struct ccw1 *ccw;
+	struct irb *irb;
+	/*
+	 * RX/TX buffer size
+	 */
+	int max_bufsize;
+	struct sk_buff *trans_skb;	/* transmit/receive buffer */
+	struct sk_buff_head io_queue;	/* universal I/O queue */
+	struct tasklet_struct ch_tasklet;	/* MPC ONLY */
+	/*
+	 * TX queue for collecting skb's during busy.
+	 */
+	struct sk_buff_head collect_queue;
+	/*
+	 * Amount of data in collect_queue.
+	 */
+	int collect_len;
+	/*
+	 * spinlock for collect_queue and collect_len
+	 */
+	spinlock_t collect_lock;
+	/*
+	 * Timer for detecting unresponsive
+	 * I/O operations.
+	 */
+	fsm_timer timer;
+	/* MPC ONLY section begin */
+	__u32	th_seq_num;	/* SNA TH seq number */
+	__u8	th_seg;
+	__u32	pdu_seq;
+	struct sk_buff		*xid_skb;
+	char			*xid_skb_data;
+	struct th_header	*xid_th;
+	struct xid2		*xid;
+	char			*xid_id;
+	struct th_header	*rcvd_xid_th;
+	struct xid2		*rcvd_xid;
+	char			*rcvd_xid_id;
+	__u8			in_mpcgroup;
+	fsm_timer		sweep_timer;
+	struct sk_buff_head	sweep_queue;
+	struct th_header	*discontact_th;
+	struct tasklet_struct	ch_disc_tasklet;
+	/* MPC ONLY section end */
+
+	int retry;		/* retry counter for misc. operations */
+	fsm_instance *fsm;	/* finite state machine of this channel */
+	struct net_device *netdev;	/* corresponding net_device */
+	struct ctcm_profile prof;
+	__u8 *trans_skb_data;
+	__u16 logflags;
+	__u8  sense_rc; /* last unit check sense code report control */
+};
+
+struct ctcm_priv {
+	struct net_device_stats	stats;
+	unsigned long	tbusy;
+
+	/* The MPC group struct of this interface */
+	struct	mpc_group	*mpcg;	/* MPC only */
+	struct	xid2		*xid;	/* MPC only */
+
+	/* The finite state machine of this interface */
+	fsm_instance *fsm;
+
+	/* The protocol of this device */
+	__u16 protocol;
+
+	/* Timer for restarting after I/O Errors */
+	fsm_timer	restart_timer;
+
+	int buffer_size;	/* ctc only */
+
+	struct channel *channel[2];
+};
+
+int ctcm_open(struct net_device *dev);
+int ctcm_close(struct net_device *dev);
+
+extern const struct attribute_group *ctcm_attr_groups[];
+
+/*
+ * Compatibility helpers for busy handling
+ * of network devices.
+ */
+static inline void ctcm_clear_busy_do(struct net_device *dev)
+{
+	clear_bit(0, &(((struct ctcm_priv *)dev->ml_priv)->tbusy));
+	netif_wake_queue(dev);
+}
+
+static inline void ctcm_clear_busy(struct net_device *dev)
+{
+	struct mpc_group *grp;
+	grp = ((struct ctcm_priv *)dev->ml_priv)->mpcg;
+
+	if (!(grp && grp->in_sweep))
+		ctcm_clear_busy_do(dev);
+}
+
+static inline int ctcm_test_and_set_busy(struct net_device *dev)
+{
+	netif_stop_queue(dev);
+	return test_and_set_bit(0,
+			&(((struct ctcm_priv *)dev->ml_priv)->tbusy));
+}
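+
+/*
+ * Typical xmit-path usage of the two helpers above (cf. ctcm_tx() and
+ * ctcmpc_tx() in ctcm_main.c):
+ *
+ *	if (ctcm_test_and_set_busy(dev))
+ *		return NETDEV_TX_BUSY;
+ *	... hand the skb over to the write channel ...
+ *	ctcm_clear_busy(dev);
+ */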
+
+extern int loglevel;
+extern struct channel *channels;
+
+void ctcm_unpack_skb(struct channel *ch, struct sk_buff *pskb);
+
+/*
+ * Functions related to setup and device detection.
+ */
+
+static inline int ctcm_less_than(char *id1, char *id2)
+{
+	unsigned long dev1, dev2;
+
+	id1 = id1 + 5;
+	id2 = id2 + 5;
+
+	dev1 = simple_strtoul(id1, &id1, 16);
+	dev2 = simple_strtoul(id2, &id2, 16);
+
+	return (dev1 < dev2);
+}
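+
+/*
+ * Note: the fixed offset of 5 assumes channel ids of the form
+ * "ch-b.s.dddd" as built by add_channel(), so parsing starts right
+ * after the "ch-b." prefix; simple_strtoul() then consumes hex digits
+ * up to the first non-hex character.
+ */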
+
+int ctcm_ch_alloc_buffer(struct channel *ch);
+
+static inline int ctcm_checkalloc_buffer(struct channel *ch)
+{
+	if (ch->trans_skb == NULL)
+		return ctcm_ch_alloc_buffer(ch);
+	if (ch->flags & CHANNEL_FLAGS_BUFSIZE_CHANGED) {
+		dev_kfree_skb(ch->trans_skb);
+		return ctcm_ch_alloc_buffer(ch);
+	}
+	return 0;
+}
+
+struct mpc_group *ctcmpc_init_mpc_group(struct ctcm_priv *priv);
+
+/* test if protocol attribute (of struct ctcm_priv or struct channel)
+ * has MPC protocol setting. Type is not checked
+ */
+#define IS_MPC(p) ((p)->protocol == CTCM_PROTO_MPC)
+
+/* test if struct ctcm_priv of struct net_device has MPC protocol setting */
+#define IS_MPCDEV(dev) IS_MPC((struct ctcm_priv *)(dev)->ml_priv)
+
+static inline gfp_t gfp_type(void)
+{
+	return in_interrupt() ? GFP_ATOMIC : GFP_KERNEL;
+}
+
+/*
+ * Definition of our link level header.
+ */
+struct ll_header {
+	__u16 length;
+	__u16 type;
+	__u16 unused;
+};
+#define LL_HEADER_LENGTH (sizeof(struct ll_header))
+
+#endif
diff --git a/drivers/s390/net/ctcm_mpc.c b/drivers/s390/net/ctcm_mpc.c
new file mode 100644
index 0000000..e02f295
--- /dev/null
+++ b/drivers/s390/net/ctcm_mpc.c
@@ -0,0 +1,2155 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ *	Copyright IBM Corp. 2004, 2007
+ *	Authors:	Belinda Thompson (belindat@us.ibm.com)
+ *			Andy Richter (richtera@us.ibm.com)
+ *			Peter Tiedemann (ptiedem@de.ibm.com)
+ */
+
+/*
+	This module exports functions to be used by CCS:
+	EXPORT_SYMBOL(ctc_mpc_alloc_channel);
+	EXPORT_SYMBOL(ctc_mpc_establish_connectivity);
+	EXPORT_SYMBOL(ctc_mpc_dealloc_ch);
+	EXPORT_SYMBOL(ctc_mpc_flow_control);
+*/
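+
+/*
+ * Illustrative call sequence for an upper-layer exploiter (the
+ * callbacks and the port number are hypothetical; the real CCS code
+ * is not part of this driver):
+ *
+ *	static void chan_cb(int port_num, int max_buflen)
+ *	{ ... channels are available ... }
+ *
+ *	static void conn_cb(int port_num, int rc, int max_buflen)
+ *	{ ... rc == 0 means the XID exchange completed ... }
+ *
+ *	ctc_mpc_alloc_channel(0, chan_cb);
+ *	ctc_mpc_establish_connectivity(0, conn_cb);
+ *	...
+ *	ctc_mpc_flow_control(0, 1);	- stop inbound flow
+ *	ctc_mpc_flow_control(0, 0);	- resume
+ *	ctc_mpc_dealloc_ch(0);
+ */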
+
+#undef DEBUG
+#undef DEBUGDATA
+#undef DEBUGCCW
+
+#define KMSG_COMPONENT "ctcm"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <linux/interrupt.h>
+#include <linux/timer.h>
+#include <linux/sched.h>
+
+#include <linux/signal.h>
+#include <linux/string.h>
+#include <linux/proc_fs.h>
+
+#include <linux/ip.h>
+#include <linux/if_arp.h>
+#include <linux/tcp.h>
+#include <linux/skbuff.h>
+#include <linux/ctype.h>
+#include <linux/netdevice.h>
+#include <net/dst.h>
+
+#include <linux/io.h>		/* instead of <asm/io.h> ok ? */
+#include <asm/ccwdev.h>
+#include <asm/ccwgroup.h>
+#include <linux/bitops.h>	/* instead of <asm/bitops.h> ok ? */
+#include <linux/uaccess.h>	/* instead of <asm/uaccess.h> ok ? */
+#include <linux/wait.h>
+#include <linux/moduleparam.h>
+#include <asm/idals.h>
+
+#include "ctcm_main.h"
+#include "ctcm_mpc.h"
+#include "ctcm_fsms.h"
+
+static const struct xid2 init_xid = {
+	.xid2_type_id	=	XID_FM2,
+	.xid2_len	=	0x45,
+	.xid2_adj_id	=	0,
+	.xid2_rlen	=	0x31,
+	.xid2_resv1	=	0,
+	.xid2_flag1	=	0,
+	.xid2_fmtt	=	0,
+	.xid2_flag4	=	0x80,
+	.xid2_resv2	=	0,
+	.xid2_tgnum	=	0,
+	.xid2_sender_id	=	0,
+	.xid2_flag2	=	0,
+	.xid2_option	=	XID2_0,
+	.xid2_resv3	=	"\x00",
+	.xid2_resv4	=	0,
+	.xid2_dlc_type	=	XID2_READ_SIDE,
+	.xid2_resv5	=	0,
+	.xid2_mpc_flag	=	0,
+	.xid2_resv6	=	0,
+	.xid2_buf_len	=	(MPC_BUFSIZE_DEFAULT - 35),
+};
+
+static const struct th_header thnorm = {
+	.th_seg		=	0x00,
+	.th_ch_flag	=	TH_IS_XID,
+	.th_blk_flag	=	TH_DATA_IS_XID,
+	.th_is_xid	=	0x01,
+	.th_seq_num	=	0x00000000,
+};
+
+static const struct th_header thdummy = {
+	.th_seg		=	0x00,
+	.th_ch_flag	=	0x00,
+	.th_blk_flag	=	TH_DATA_IS_XID,
+	.th_is_xid	=	0x01,
+	.th_seq_num	=	0x00000000,
+};
+
+static void ctcmpc_unpack_skb(struct channel *ch, struct sk_buff *pskb);
+
+/*
+ * MPC Group state machine actions (static prototypes)
+ */
+static void mpc_action_nop(fsm_instance *fsm, int event, void *arg);
+static void mpc_action_go_ready(fsm_instance *fsm, int event, void *arg);
+static void mpc_action_go_inop(fsm_instance *fi, int event, void *arg);
+static void mpc_action_timeout(fsm_instance *fi, int event, void *arg);
+static int  mpc_validate_xid(struct mpcg_info *mpcginfo);
+static void mpc_action_yside_xid(fsm_instance *fsm, int event, void *arg);
+static void mpc_action_doxid0(fsm_instance *fsm, int event, void *arg);
+static void mpc_action_doxid7(fsm_instance *fsm, int event, void *arg);
+static void mpc_action_xside_xid(fsm_instance *fsm, int event, void *arg);
+static void mpc_action_rcvd_xid0(fsm_instance *fsm, int event, void *arg);
+static void mpc_action_rcvd_xid7(fsm_instance *fsm, int event, void *arg);
+
+#ifdef DEBUGDATA
+/*
+ * Dump a buffer in a hex/ASCII format, 16 bytes per line, collapsing
+ * runs of identical lines.
+ */
+void ctcmpc_dumpit(char *buf, int len)
+{
+	__u32	ct, sw, rm, dup;
+	char	*ptr, *rptr;
+	char	tbuf[82], tdup[82];
+	char	addr[22];
+	char	boff[12];
+	char	bhex[82], duphex[82];
+	char	basc[40];
+
+	sw  = 0;
+	rptr = ptr = buf;
+	rm  = 16;
+	duphex[0] = 0x00;
+	dup = 0;
+
+	for (ct = 0; ct < len; ct++, ptr++, rptr++) {
+		if (sw == 0) {
+			sprintf(addr, "%16.16llx", (__u64)rptr);
+
+			sprintf(boff, "%4.4X", (__u32)ct);
+			bhex[0] = '\0';
+			basc[0] = '\0';
+		}
+		if ((sw == 4) || (sw == 12))
+			strcat(bhex, " ");
+		if (sw == 8)
+			strcat(bhex, "	");
+
+		sprintf(tbuf, "%2.2llX", (__u64)*ptr);
+
+		tbuf[2] = '\0';
+		strcat(bhex, tbuf);
+		if ((0 != isprint(*ptr)) && (*ptr >= 0x20))
+			basc[sw] = *ptr;
+		else
+			basc[sw] = '.';
+
+		basc[sw+1] = '\0';
+		sw++;
+		rm--;
+		if (sw != 16)
+			continue;
+		if ((strcmp(duphex, bhex)) != 0) {
+			if (dup != 0) {
+				sprintf(tdup,
+					"Duplicate as above to %s", addr);
+				ctcm_pr_debug("		       --- %s ---\n",
+						tdup);
+			}
+			ctcm_pr_debug("   %s (+%s) : %s  [%s]\n",
+					addr, boff, bhex, basc);
+			dup = 0;
+			strcpy(duphex, bhex);
+		} else
+			dup++;
+
+		sw = 0;
+		rm = 16;
+	}  /* endfor */
+
+	if (sw != 0) {
+		for ( ; rm > 0; rm--, sw++) {
+			if ((sw == 4) || (sw == 12))
+				strcat(bhex, " ");
+			if (sw == 8)
+				strcat(bhex, "	");
+			strcat(bhex, "	");
+			strcat(basc, " ");
+		}
+		if (dup != 0) {
+			sprintf(tdup, "Duplicate as above to %s", addr);
+			ctcm_pr_debug("		       --- %s ---\n", tdup);
+		}
+		ctcm_pr_debug("   %s (+%s) : %s  [%s]\n",
+					addr, boff, bhex, basc);
+	} else {
+		if (dup >= 1) {
+			sprintf(tdup, "Duplicate as above to %s", addr);
+			ctcm_pr_debug("		       --- %s ---\n", tdup);
+		}
+		if (dup != 0) {
+			ctcm_pr_debug("   %s (+%s) : %s  [%s]\n",
+				addr, boff, bhex, basc);
+		}
+	}
+
+	return;
+
+}   /*	 end of ctcmpc_dumpit  */
+#endif
+
+#ifdef DEBUGDATA
+/*
+ * Dump header and first 32 bytes of an sk_buff for debugging purposes.
+ *
+ * skb		The sk_buff to dump.
+ * offset	Offset relative to skb-data, where to start the dump.
+ */
+void ctcmpc_dump_skb(struct sk_buff *skb, int offset)
+{
+	__u8 *p = skb->data;
+	struct th_header *header;
+	struct pdu *pheader;
+	int bl = skb->len;
+	int i;
+
+	if (p == NULL)
+		return;
+
+	p += offset;
+	header = (struct th_header *)p;
+
+	ctcm_pr_debug("dump:\n");
+	ctcm_pr_debug("skb len=%d \n", skb->len);
+	if (skb->len > 2) {
+		switch (header->th_ch_flag) {
+		case TH_HAS_PDU:
+			break;
+		case 0x00:
+		case TH_IS_XID:
+			if ((header->th_blk_flag == TH_DATA_IS_XID) &&
+			    (header->th_is_xid == 0x01))
+				goto dumpth;
+			/* fall through */
+		case TH_SWEEP_REQ:
+			goto dumpth;
+		case TH_SWEEP_RESP:
+			goto dumpth;
+		default:
+			break;
+		}
+
+		pheader = (struct pdu *)p;
+		ctcm_pr_debug("pdu->offset: %d hex: %04x\n",
+			       pheader->pdu_offset, pheader->pdu_offset);
+		ctcm_pr_debug("pdu->flag  : %02x\n", pheader->pdu_flag);
+		ctcm_pr_debug("pdu->proto : %02x\n", pheader->pdu_proto);
+		ctcm_pr_debug("pdu->seq   : %02x\n", pheader->pdu_seq);
+		goto dumpdata;
+
+dumpth:
+		ctcm_pr_debug("th->seg     : %02x\n", header->th_seg);
+		ctcm_pr_debug("th->ch      : %02x\n", header->th_ch_flag);
+		ctcm_pr_debug("th->blk_flag: %02x\n", header->th_blk_flag);
+		ctcm_pr_debug("th->type    : %s\n",
+			       (header->th_is_xid) ? "DATA" : "XID");
+		ctcm_pr_debug("th->seqnum  : %04x\n", header->th_seq_num);
+
+	}
+dumpdata:
+	if (bl > 32)
+		bl = 32;
+	ctcm_pr_debug("data: ");
+	for (i = 0; i < bl; i++)
+		ctcm_pr_debug("%02x%s", *p++, (i % 16) ? " " : "\n");
+	ctcm_pr_debug("\n");
+}
+#endif
+
+static struct net_device *ctcmpc_get_dev(int port_num)
+{
+	char device[20];
+	struct net_device *dev;
+	struct ctcm_priv *priv;
+
+	sprintf(device, "%s%i", MPC_DEVICE_NAME, port_num);
+
+	dev = __dev_get_by_name(&init_net, device);
+
+	if (dev == NULL) {
+		CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR,
+			"%s: Device not found by name: %s",
+					CTCM_FUNTAIL, device);
+		return NULL;
+	}
+	priv = dev->ml_priv;
+	if (priv == NULL) {
+		CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR,
+			"%s(%s): dev->ml_priv is NULL",
+					CTCM_FUNTAIL, device);
+		return NULL;
+	}
+	if (priv->mpcg == NULL) {
+		CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR,
+			"%s(%s): priv->mpcg is NULL",
+					CTCM_FUNTAIL, device);
+		return NULL;
+	}
+	return dev;
+}
+
+/*
+ * ctc_mpc_alloc_channel
+ *	(exported interface)
+ *
+ * Device Initialization :
+ *	ACTPATH  driven IO operations
+ */
+int ctc_mpc_alloc_channel(int port_num, void (*callback)(int, int))
+{
+	struct net_device *dev;
+	struct mpc_group *grp;
+	struct ctcm_priv *priv;
+
+	dev = ctcmpc_get_dev(port_num);
+	if (dev == NULL)
+		return 1;
+	priv = dev->ml_priv;
+	grp = priv->mpcg;
+
+	grp->allochanfunc = callback;
+	grp->port_num = port_num;
+	grp->port_persist = 1;
+
+	CTCM_DBF_TEXT_(MPC_SETUP, CTC_DBF_INFO,
+			"%s(%s): state=%s",
+			CTCM_FUNTAIL, dev->name, fsm_getstate_str(grp->fsm));
+
+	switch (fsm_getstate(grp->fsm)) {
+	case MPCG_STATE_INOP:
+		/* Group is in the process of terminating */
+		grp->alloc_called = 1;
+		break;
+	case MPCG_STATE_RESET:
+		/*
+		 * The MPC group will transition to state
+		 * MPCG_STATE_XID2INITW once the minimum of one read
+		 * and one write channel has been activated.
+		 */
+		/*fsm_newstate(grp->fsm, MPCG_STATE_XID2INITW);*/
+		if (callback)
+			grp->send_qllc_disc = 1;
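+		/* fall through */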
+	case MPCG_STATE_XID0IOWAIT:
+		fsm_deltimer(&grp->timer);
+		grp->outstanding_xid2 = 0;
+		grp->outstanding_xid7 = 0;
+		grp->outstanding_xid7_p2 = 0;
+		grp->saved_xid2 = NULL;
+		if (callback)
+			ctcm_open(dev);
+		fsm_event(priv->fsm, DEV_EVENT_START, dev);
+		break;
+	case MPCG_STATE_READY:
+		/* XID exchanges completed after PORT was activated */
+		/* Link station already active			    */
+		/* Maybe timing issue...retry callback		    */
+		grp->allocchan_callback_retries++;
+		if (grp->allocchan_callback_retries < 4) {
+			if (grp->allochanfunc)
+				grp->allochanfunc(grp->port_num,
+						  grp->group_max_buflen);
+		} else {
+			/* there are problems...bail out	    */
+			/* there may be a state mismatch so restart */
+			fsm_event(grp->fsm, MPCG_EVENT_INOP, dev);
+			grp->allocchan_callback_retries = 0;
+		}
+		break;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(ctc_mpc_alloc_channel);
+
+/*
+ * ctc_mpc_establish_connectivity
+ *	(exported interface)
+ */
+void ctc_mpc_establish_connectivity(int port_num,
+				void (*callback)(int, int, int))
+{
+	struct net_device *dev;
+	struct mpc_group *grp;
+	struct ctcm_priv *priv;
+	struct channel *rch, *wch;
+
+	dev = ctcmpc_get_dev(port_num);
+	if (dev == NULL)
+		return;
+	priv = dev->ml_priv;
+	grp = priv->mpcg;
+	rch = priv->channel[CTCM_READ];
+	wch = priv->channel[CTCM_WRITE];
+
+	CTCM_DBF_TEXT_(MPC_SETUP, CTC_DBF_INFO,
+			"%s(%s): state=%s",
+			CTCM_FUNTAIL, dev->name, fsm_getstate_str(grp->fsm));
+
+	grp->estconnfunc = callback;
+	grp->port_num = port_num;
+
+	switch (fsm_getstate(grp->fsm)) {
+	case MPCG_STATE_READY:
+		/* XID exchanges completed after PORT was activated */
+		/* Link station already active			    */
+		/* Maybe timing issue...retry callback		    */
+		fsm_deltimer(&grp->timer);
+		grp->estconn_callback_retries++;
+		if (grp->estconn_callback_retries < 4) {
+			if (grp->estconnfunc) {
+				grp->estconnfunc(grp->port_num, 0,
+						grp->group_max_buflen);
+				grp->estconnfunc = NULL;
+			}
+		} else {
+			/* there are problems...bail out	 */
+			fsm_event(grp->fsm, MPCG_EVENT_INOP, dev);
+			grp->estconn_callback_retries = 0;
+		}
+		break;
+	case MPCG_STATE_INOP:
+	case MPCG_STATE_RESET:
+		/*
+		 * The MPC group is not ready to start XID: the minimum
+		 * of one read and one write channel has not been
+		 * acquired yet.
+		 */
+
+		CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR,
+			"%s(%s): REJECTED - inactive channels",
+					CTCM_FUNTAIL, dev->name);
+		if (grp->estconnfunc) {
+			grp->estconnfunc(grp->port_num, -1, 0);
+			grp->estconnfunc = NULL;
+		}
+		break;
+	case MPCG_STATE_XID2INITW:
+		/*
+		 * Alloc channel was called, but no XID exchange has
+		 * occurred yet: initiate the x-side XID exchange, after
+		 * making sure y-side XID0 processing has not started.
+		 */
+
+		if ((fsm_getstate(rch->fsm) > CH_XID0_PENDING) ||
+			(fsm_getstate(wch->fsm) > CH_XID0_PENDING)) {
+			CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR,
+				"%s(%s): ABORT - PASSIVE XID",
+					CTCM_FUNTAIL, dev->name);
+			break;
+		}
+		grp->send_qllc_disc = 1;
+		fsm_newstate(grp->fsm, MPCG_STATE_XID0IOWAIT);
+		fsm_deltimer(&grp->timer);
+		fsm_addtimer(&grp->timer, MPC_XID_TIMEOUT_VALUE,
+						MPCG_EVENT_TIMER, dev);
+		grp->outstanding_xid7 = 0;
+		grp->outstanding_xid7_p2 = 0;
+		grp->saved_xid2 = NULL;
+		if ((rch->in_mpcgroup) &&
+				(fsm_getstate(rch->fsm) == CH_XID0_PENDING))
+			fsm_event(grp->fsm, MPCG_EVENT_XID0DO, rch);
+		else {
+			CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR,
+				"%s(%s): RX-%s not ready for ACTIVE XID0",
+					CTCM_FUNTAIL, dev->name, rch->id);
+			if (grp->estconnfunc) {
+				grp->estconnfunc(grp->port_num, -1, 0);
+				grp->estconnfunc = NULL;
+			}
+			fsm_deltimer(&grp->timer);
+			goto done;
+		}
+		if ((wch->in_mpcgroup) &&
+				(fsm_getstate(wch->fsm) == CH_XID0_PENDING))
+			fsm_event(grp->fsm, MPCG_EVENT_XID0DO, wch);
+		else {
+			CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR,
+				"%s(%s): WX-%s not ready for ACTIVE XID0",
+					CTCM_FUNTAIL, dev->name, wch->id);
+			if (grp->estconnfunc) {
+				grp->estconnfunc(grp->port_num, -1, 0);
+				grp->estconnfunc = NULL;
+			}
+			fsm_deltimer(&grp->timer);
+			goto done;
+		}
+		break;
+	case MPCG_STATE_XID0IOWAIT:
+		/* already in active XID negotiations */
+	default:
+		break;
+	}
+
+done:
+	CTCM_PR_DEBUG("Exit %s()\n", __func__);
+	return;
+}
+EXPORT_SYMBOL(ctc_mpc_establish_connectivity);
+
+/*
+ * ctc_mpc_dealloc_ch
+ *	(exported interface)
+ */
+void ctc_mpc_dealloc_ch(int port_num)
+{
+	struct net_device *dev;
+	struct ctcm_priv *priv;
+	struct mpc_group *grp;
+
+	dev = ctcmpc_get_dev(port_num);
+	if (dev == NULL)
+		return;
+	priv = dev->ml_priv;
+	grp = priv->mpcg;
+
+	CTCM_DBF_TEXT_(MPC_SETUP, CTC_DBF_DEBUG,
+			"%s: %s: refcount = %d\n",
+			CTCM_FUNTAIL, dev->name, netdev_refcnt_read(dev));
+
+	fsm_deltimer(&priv->restart_timer);
+	grp->channels_terminating = 0;
+	fsm_deltimer(&grp->timer);
+	grp->allochanfunc = NULL;
+	grp->estconnfunc = NULL;
+	grp->port_persist = 0;
+	grp->send_qllc_disc = 0;
+	fsm_event(grp->fsm, MPCG_EVENT_INOP, dev);
+
+	ctcm_close(dev);
+	return;
+}
+EXPORT_SYMBOL(ctc_mpc_dealloc_ch);
+
+/*
+ * ctc_mpc_flow_control
+ *	(exported interface)
+ */
+void ctc_mpc_flow_control(int port_num, int flowc)
+{
+	struct ctcm_priv *priv;
+	struct mpc_group *grp;
+	struct net_device *dev;
+	struct channel *rch;
+	int mpcg_state;
+
+	dev = ctcmpc_get_dev(port_num);
+	if (dev == NULL)
+		return;
+	priv = dev->ml_priv;
+	grp = priv->mpcg;
+
+	CTCM_DBF_TEXT_(MPC_TRACE, CTC_DBF_DEBUG,
+			"%s: %s: flowc = %d",
+				CTCM_FUNTAIL, dev->name, flowc);
+
+	rch = priv->channel[CTCM_READ];
+
+	mpcg_state = fsm_getstate(grp->fsm);
+	switch (flowc) {
+	case 1:
+		if (mpcg_state == MPCG_STATE_FLOWC)
+			break;
+		if (mpcg_state == MPCG_STATE_READY) {
+			if (grp->flow_off_called == 1)
+				grp->flow_off_called = 0;
+			else
+				fsm_newstate(grp->fsm, MPCG_STATE_FLOWC);
+			break;
+		}
+		break;
+	case 0:
+		if (mpcg_state == MPCG_STATE_FLOWC) {
+			fsm_newstate(grp->fsm, MPCG_STATE_READY);
+			/* ensure any data that has accumulated */
+			/* on the io_queue will now be sent	*/
+			tasklet_schedule(&rch->ch_tasklet);
+		}
+		/* possible race condition			*/
+		if (mpcg_state == MPCG_STATE_READY) {
+			grp->flow_off_called = 1;
+			break;
+		}
+		break;
+	}
+
+}
+EXPORT_SYMBOL(ctc_mpc_flow_control);
+
+static int mpc_send_qllc_discontact(struct net_device *);
+
+/*
+ * Helper of ctcmpc_unpack_skb: a sweep response arrived. Once all
+ * outstanding sweep requests and responses are accounted for, both
+ * channels' TH sequence numbers restart at zero and the device's
+ * busy state is lifted.
+ */
+static void mpc_rcvd_sweep_resp(struct mpcg_info *mpcginfo)
+{
+	struct channel	  *rch = mpcginfo->ch;
+	struct net_device *dev = rch->netdev;
+	struct ctcm_priv   *priv = dev->ml_priv;
+	struct mpc_group  *grp = priv->mpcg;
+	struct channel	  *ch = priv->channel[CTCM_WRITE];
+
+	CTCM_PR_DEBUG("%s: ch=0x%p id=%s\n", __func__, ch, ch->id);
+	CTCM_D3_DUMP((char *)mpcginfo->sweep, TH_SWEEP_LENGTH);
+
+	grp->sweep_rsp_pend_num--;
+
+	if ((grp->sweep_req_pend_num == 0) &&
+			(grp->sweep_rsp_pend_num == 0)) {
+		fsm_deltimer(&ch->sweep_timer);
+		grp->in_sweep = 0;
+		rch->th_seq_num = 0x00;
+		ch->th_seq_num = 0x00;
+		ctcm_clear_busy_do(dev);
+	}
+
+	kfree(mpcginfo);
+
+	return;
+
+}
+
+/*
+ * Helper of mpc_rcvd_sweep_req (itself a helper of ctcmpc_unpack_skb):
+ * builds a TH_SWEEP_RESP frame carrying the write channel's last TH
+ * sequence number and queues it on the sweep queue.
+ */
+static void ctcmpc_send_sweep_resp(struct channel *rch)
+{
+	struct net_device *dev = rch->netdev;
+	struct ctcm_priv *priv = dev->ml_priv;
+	struct mpc_group *grp = priv->mpcg;
+	struct th_sweep *header;
+	struct sk_buff *sweep_skb;
+	struct channel *ch  = priv->channel[CTCM_WRITE];
+
+	CTCM_PR_DEBUG("%s: ch=0x%p id=%s\n", __func__, rch, rch->id);
+
+	sweep_skb = __dev_alloc_skb(MPC_BUFSIZE_DEFAULT, GFP_ATOMIC | GFP_DMA);
+	if (sweep_skb == NULL) {
+		CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR,
+			"%s(%s): sweep_skb allocation ERROR\n",
+			CTCM_FUNTAIL, rch->id);
+		goto done;
+	}
+
+	header = kmalloc(sizeof(struct th_sweep), gfp_type());
+
+	if (!header) {
+		dev_kfree_skb_any(sweep_skb);
+		goto done;
+	}
+
+	header->th.th_seg	= 0x00;
+	header->th.th_ch_flag	= TH_SWEEP_RESP;
+	header->th.th_blk_flag	= 0x00;
+	header->th.th_is_xid	= 0x00;
+	header->th.th_seq_num	= 0x00;
+	header->sw.th_last_seq	= ch->th_seq_num;
+
+	skb_put_data(sweep_skb, header, TH_SWEEP_LENGTH);
+
+	kfree(header);
+
+	netif_trans_update(dev);
+	skb_queue_tail(&ch->sweep_queue, sweep_skb);
+
+	fsm_addtimer(&ch->sweep_timer, 100, CTC_EVENT_RSWEEP_TIMER, ch);
+
+	return;
+
+done:
+	grp->in_sweep = 0;
+	ctcm_clear_busy_do(dev);
+	fsm_event(grp->fsm, MPCG_EVENT_INOP, dev);
+
+	return;
+}
+
+/*
+ * helper function of ctcmpc_unpack_skb
+ */
+static void mpc_rcvd_sweep_req(struct mpcg_info *mpcginfo)
+{
+	struct channel	  *rch     = mpcginfo->ch;
+	struct net_device *dev     = rch->netdev;
+	struct ctcm_priv  *priv = dev->ml_priv;
+	struct mpc_group  *grp  = priv->mpcg;
+	struct channel	  *ch	   = priv->channel[CTCM_WRITE];
+
+	if (do_debug)
+		CTCM_DBF_TEXT_(MPC_TRACE, CTC_DBF_DEBUG,
+			" %s(): ch=0x%p id=%s\n", __func__, ch, ch->id);
+
+	if (grp->in_sweep == 0) {
+		grp->in_sweep = 1;
+		ctcm_test_and_set_busy(dev);
+		grp->sweep_req_pend_num = grp->active_channels[CTCM_READ];
+		grp->sweep_rsp_pend_num = grp->active_channels[CTCM_READ];
+	}
+
+	CTCM_D3_DUMP((char *)mpcginfo->sweep, TH_SWEEP_LENGTH);
+
+	grp->sweep_req_pend_num--;
+	ctcmpc_send_sweep_resp(ch);
+	kfree(mpcginfo);
+	return;
+}
+
+/*
+ * MPC Group Station FSM definitions
+ */
+static const char *mpcg_event_names[] = {
+	[MPCG_EVENT_INOP]	= "INOP Condition",
+	[MPCG_EVENT_DISCONC]	= "Discontact Received",
+	[MPCG_EVENT_XID0DO]	= "Channel Active - Start XID",
+	[MPCG_EVENT_XID2]	= "XID2 Received",
+	[MPCG_EVENT_XID2DONE]	= "XID0 Complete",
+	[MPCG_EVENT_XID7DONE]	= "XID7 Complete",
+	[MPCG_EVENT_TIMER]	= "XID Setup Timer",
+	[MPCG_EVENT_DOIO]	= "XID DoIO",
+};
+
+static const char *mpcg_state_names[] = {
+	[MPCG_STATE_RESET]	= "Reset",
+	[MPCG_STATE_INOP]	= "INOP",
+	[MPCG_STATE_XID2INITW]	= "Passive XID- XID0 Pending Start",
+	[MPCG_STATE_XID2INITX]	= "Passive XID- XID0 Pending Complete",
+	[MPCG_STATE_XID7INITW]	= "Passive XID- XID7 Pending P1 Start",
+	[MPCG_STATE_XID7INITX]	= "Passive XID- XID7 Pending P2 Complete",
+	[MPCG_STATE_XID0IOWAIT]	= "Active  XID- XID0 Pending Start",
+	[MPCG_STATE_XID0IOWAIX]	= "Active  XID- XID0 Pending Complete",
+	[MPCG_STATE_XID7INITI]	= "Active  XID- XID7 Pending Start",
+	[MPCG_STATE_XID7INITZ]	= "Active  XID- XID7 Pending Complete ",
+	[MPCG_STATE_XID7INITF]	= "XID        - XID7 Complete ",
+	[MPCG_STATE_FLOWC]	= "FLOW CONTROL ON",
+	[MPCG_STATE_READY]	= "READY",
+};
+
+/*
+ * The MPC Group Station FSM state/event/action table
+ */
+static const fsm_node mpcg_fsm[] = {
+	{ MPCG_STATE_RESET,	MPCG_EVENT_INOP,	mpc_action_go_inop    },
+	{ MPCG_STATE_INOP,	MPCG_EVENT_INOP,	mpc_action_nop        },
+	{ MPCG_STATE_FLOWC,	MPCG_EVENT_INOP,	mpc_action_go_inop    },
+
+	{ MPCG_STATE_READY,	MPCG_EVENT_DISCONC,	mpc_action_discontact },
+	{ MPCG_STATE_READY,	MPCG_EVENT_INOP,	mpc_action_go_inop    },
+
+	{ MPCG_STATE_XID2INITW,	MPCG_EVENT_XID0DO,	mpc_action_doxid0     },
+	{ MPCG_STATE_XID2INITW,	MPCG_EVENT_XID2,	mpc_action_rcvd_xid0  },
+	{ MPCG_STATE_XID2INITW,	MPCG_EVENT_INOP,	mpc_action_go_inop    },
+	{ MPCG_STATE_XID2INITW,	MPCG_EVENT_TIMER,	mpc_action_timeout    },
+	{ MPCG_STATE_XID2INITW,	MPCG_EVENT_DOIO,	mpc_action_yside_xid  },
+
+	{ MPCG_STATE_XID2INITX,	MPCG_EVENT_XID0DO,	mpc_action_doxid0     },
+	{ MPCG_STATE_XID2INITX,	MPCG_EVENT_XID2,	mpc_action_rcvd_xid0  },
+	{ MPCG_STATE_XID2INITX,	MPCG_EVENT_INOP,	mpc_action_go_inop    },
+	{ MPCG_STATE_XID2INITX,	MPCG_EVENT_TIMER,	mpc_action_timeout    },
+	{ MPCG_STATE_XID2INITX,	MPCG_EVENT_DOIO,	mpc_action_yside_xid  },
+
+	{ MPCG_STATE_XID7INITW,	MPCG_EVENT_XID2DONE,	mpc_action_doxid7     },
+	{ MPCG_STATE_XID7INITW,	MPCG_EVENT_DISCONC,	mpc_action_discontact },
+	{ MPCG_STATE_XID7INITW,	MPCG_EVENT_XID2,	mpc_action_rcvd_xid7  },
+	{ MPCG_STATE_XID7INITW,	MPCG_EVENT_INOP,	mpc_action_go_inop    },
+	{ MPCG_STATE_XID7INITW,	MPCG_EVENT_TIMER,	mpc_action_timeout    },
+	{ MPCG_STATE_XID7INITW,	MPCG_EVENT_XID7DONE,	mpc_action_doxid7     },
+	{ MPCG_STATE_XID7INITW,	MPCG_EVENT_DOIO,	mpc_action_yside_xid  },
+
+	{ MPCG_STATE_XID7INITX,	MPCG_EVENT_DISCONC,	mpc_action_discontact },
+	{ MPCG_STATE_XID7INITX,	MPCG_EVENT_XID2,	mpc_action_rcvd_xid7  },
+	{ MPCG_STATE_XID7INITX,	MPCG_EVENT_INOP,	mpc_action_go_inop    },
+	{ MPCG_STATE_XID7INITX,	MPCG_EVENT_XID7DONE,	mpc_action_doxid7     },
+	{ MPCG_STATE_XID7INITX,	MPCG_EVENT_TIMER,	mpc_action_timeout    },
+	{ MPCG_STATE_XID7INITX,	MPCG_EVENT_DOIO,	mpc_action_yside_xid  },
+
+	{ MPCG_STATE_XID0IOWAIT, MPCG_EVENT_XID0DO,	mpc_action_doxid0     },
+	{ MPCG_STATE_XID0IOWAIT, MPCG_EVENT_DISCONC,	mpc_action_discontact },
+	{ MPCG_STATE_XID0IOWAIT, MPCG_EVENT_XID2,	mpc_action_rcvd_xid0  },
+	{ MPCG_STATE_XID0IOWAIT, MPCG_EVENT_INOP,	mpc_action_go_inop    },
+	{ MPCG_STATE_XID0IOWAIT, MPCG_EVENT_TIMER,	mpc_action_timeout    },
+	{ MPCG_STATE_XID0IOWAIT, MPCG_EVENT_DOIO,	mpc_action_xside_xid  },
+
+	{ MPCG_STATE_XID0IOWAIX, MPCG_EVENT_XID0DO,	mpc_action_doxid0     },
+	{ MPCG_STATE_XID0IOWAIX, MPCG_EVENT_DISCONC,	mpc_action_discontact },
+	{ MPCG_STATE_XID0IOWAIX, MPCG_EVENT_XID2,	mpc_action_rcvd_xid0  },
+	{ MPCG_STATE_XID0IOWAIX, MPCG_EVENT_INOP,	mpc_action_go_inop    },
+	{ MPCG_STATE_XID0IOWAIX, MPCG_EVENT_TIMER,	mpc_action_timeout    },
+	{ MPCG_STATE_XID0IOWAIX, MPCG_EVENT_DOIO,	mpc_action_xside_xid  },
+
+	{ MPCG_STATE_XID7INITI,	MPCG_EVENT_XID2DONE,	mpc_action_doxid7     },
+	{ MPCG_STATE_XID7INITI,	MPCG_EVENT_XID2,	mpc_action_rcvd_xid7  },
+	{ MPCG_STATE_XID7INITI,	MPCG_EVENT_DISCONC,	mpc_action_discontact },
+	{ MPCG_STATE_XID7INITI,	MPCG_EVENT_INOP,	mpc_action_go_inop    },
+	{ MPCG_STATE_XID7INITI,	MPCG_EVENT_TIMER,	mpc_action_timeout    },
+	{ MPCG_STATE_XID7INITI,	MPCG_EVENT_XID7DONE,	mpc_action_doxid7     },
+	{ MPCG_STATE_XID7INITI,	MPCG_EVENT_DOIO,	mpc_action_xside_xid  },
+
+	{ MPCG_STATE_XID7INITZ,	MPCG_EVENT_XID2,	mpc_action_rcvd_xid7  },
+	{ MPCG_STATE_XID7INITZ,	MPCG_EVENT_XID7DONE,	mpc_action_doxid7     },
+	{ MPCG_STATE_XID7INITZ,	MPCG_EVENT_DISCONC,	mpc_action_discontact },
+	{ MPCG_STATE_XID7INITZ,	MPCG_EVENT_INOP,	mpc_action_go_inop    },
+	{ MPCG_STATE_XID7INITZ,	MPCG_EVENT_TIMER,	mpc_action_timeout    },
+	{ MPCG_STATE_XID7INITZ,	MPCG_EVENT_DOIO,	mpc_action_xside_xid  },
+
+	{ MPCG_STATE_XID7INITF,	MPCG_EVENT_INOP,	mpc_action_go_inop    },
+	{ MPCG_STATE_XID7INITF,	MPCG_EVENT_XID7DONE,	mpc_action_go_ready   },
+};
+
+static int mpcg_fsm_len = ARRAY_SIZE(mpcg_fsm);
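+
+/*
+ * The table above is passed to init_fsm() when the MPC group is
+ * created and is consumed by fsm_event(): for example,
+ * fsm_event(grp->fsm, MPCG_EVENT_XID0DO, ch) runs the action
+ * registered for that event in the group's current state, if any.
+ */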
+
+/*
+ * MPC Group Station FSM action
+ * CTCM_PROTO_MPC only
+ */
+static void mpc_action_go_ready(fsm_instance *fsm, int event, void *arg)
+{
+	struct net_device *dev = arg;
+	struct ctcm_priv *priv = dev->ml_priv;
+	struct mpc_group *grp = priv->mpcg;
+
+	if (grp == NULL) {
+		CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR,
+			"%s(%s): No MPC group",
+				CTCM_FUNTAIL, dev->name);
+		return;
+	}
+
+	fsm_deltimer(&grp->timer);
+
+	if (grp->saved_xid2->xid2_flag2 == 0x40) {
+		priv->xid->xid2_flag2 = 0x00;
+		if (grp->estconnfunc) {
+			grp->estconnfunc(grp->port_num, 1,
+					grp->group_max_buflen);
+			grp->estconnfunc = NULL;
+		} else if (grp->allochanfunc)
+			grp->send_qllc_disc = 1;
+
+		fsm_event(grp->fsm, MPCG_EVENT_INOP, dev);
+		CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR,
+				"%s(%s): fails",
+					CTCM_FUNTAIL, dev->name);
+		return;
+	}
+
+	grp->port_persist = 1;
+	grp->out_of_sequence = 0;
+	grp->estconn_called = 0;
+
+	tasklet_hi_schedule(&grp->mpc_tasklet2);
+
+	return;
+}
+
+/*
+ * helper of ctcm_init_netdevice
+ * CTCM_PROTO_MPC only
+ */
+void mpc_group_ready(unsigned long adev)
+{
+	struct net_device *dev = (struct net_device *)adev;
+	struct ctcm_priv *priv = dev->ml_priv;
+	struct mpc_group *grp = priv->mpcg;
+	struct channel *ch = NULL;
+
+	if (grp == NULL) {
+		CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR,
+			"%s(%s): No MPC group",
+				CTCM_FUNTAIL, dev->name);
+		return;
+	}
+
+	CTCM_DBF_TEXT_(MPC_SETUP, CTC_DBF_NOTICE,
+		"%s: %s: GROUP TRANSITIONED TO READY, maxbuf = %d\n",
+			CTCM_FUNTAIL, dev->name, grp->group_max_buflen);
+
+	fsm_newstate(grp->fsm, MPCG_STATE_READY);
+
+	/* Put up a read on the channel */
+	ch = priv->channel[CTCM_READ];
+	ch->pdu_seq = 0;
+	CTCM_PR_DBGDATA("ctcmpc: %s() ToDCM_pdu_seq= %08x\n" ,
+			__func__, ch->pdu_seq);
+
+	ctcmpc_chx_rxidle(ch->fsm, CTC_EVENT_START, ch);
+	/* Put the write channel in idle state */
+	ch = priv->channel[CTCM_WRITE];
+	if (ch->collect_len > 0) {
+		spin_lock(&ch->collect_lock);
+		ctcm_purge_skb_queue(&ch->collect_queue);
+		ch->collect_len = 0;
+		spin_unlock(&ch->collect_lock);
+	}
+	ctcm_chx_txidle(ch->fsm, CTC_EVENT_START, ch);
+	ctcm_clear_busy(dev);
+
+	if (grp->estconnfunc) {
+		grp->estconnfunc(grp->port_num, 0,
+				    grp->group_max_buflen);
+		grp->estconnfunc = NULL;
+	} else if (grp->allochanfunc)
+		grp->allochanfunc(grp->port_num, grp->group_max_buflen);
+
+	grp->send_qllc_disc = 1;
+	grp->changed_side = 0;
+
+	return;
+}
+
+/*
+ * Increment the MPC Group Active Channel Counts
+ * helper of dev_action (called from channel fsm)
+ */
+void mpc_channel_action(struct channel *ch, int direction, int action)
+{
+	struct net_device  *dev  = ch->netdev;
+	struct ctcm_priv   *priv = dev->ml_priv;
+	struct mpc_group   *grp  = priv->mpcg;
+
+	if (grp == NULL) {
+		CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR,
+			"%s(%s): No MPC group",
+				CTCM_FUNTAIL, dev->name);
+		return;
+	}
+
+	CTCM_PR_DEBUG("enter %s: ch=0x%p id=%s\n", __func__, ch, ch->id);
+
+	CTCM_DBF_TEXT_(MPC_TRACE, CTC_DBF_NOTICE,
+		"%s: %i / Grp:%s total_channels=%i, active_channels: "
+		"read=%i, write=%i\n", __func__, action,
+		fsm_getstate_str(grp->fsm), grp->num_channel_paths,
+		grp->active_channels[CTCM_READ],
+		grp->active_channels[CTCM_WRITE]);
+
+	if ((action == MPC_CHANNEL_ADD) && (ch->in_mpcgroup == 0)) {
+		grp->num_channel_paths++;
+		grp->active_channels[direction]++;
+		grp->outstanding_xid2++;
+		ch->in_mpcgroup = 1;
+
+		if (ch->xid_skb != NULL)
+			dev_kfree_skb_any(ch->xid_skb);
+
+		ch->xid_skb = __dev_alloc_skb(MPC_BUFSIZE_DEFAULT,
+					GFP_ATOMIC | GFP_DMA);
+		if (ch->xid_skb == NULL) {
+			CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR,
+				"%s(%s): Couldn't alloc ch xid_skb\n",
+				CTCM_FUNTAIL, dev->name);
+			fsm_event(grp->fsm, MPCG_EVENT_INOP, dev);
+			return;
+		}
+		ch->xid_skb_data = ch->xid_skb->data;
+		ch->xid_th = (struct th_header *)ch->xid_skb->data;
+		skb_put(ch->xid_skb, TH_HEADER_LENGTH);
+		ch->xid = (struct xid2 *)skb_tail_pointer(ch->xid_skb);
+		skb_put(ch->xid_skb, XID2_LENGTH);
+		ch->xid_id = skb_tail_pointer(ch->xid_skb);
+		ch->xid_skb->data = ch->xid_skb_data;
+		skb_reset_tail_pointer(ch->xid_skb);
+		ch->xid_skb->len = 0;
+
+		skb_put_data(ch->xid_skb, grp->xid_skb->data,
+			     grp->xid_skb->len);
+
+		ch->xid->xid2_dlc_type =
+			((CHANNEL_DIRECTION(ch->flags) == CTCM_READ)
+				? XID2_READ_SIDE : XID2_WRITE_SIDE);
+
+		if (CHANNEL_DIRECTION(ch->flags) == CTCM_WRITE)
+			ch->xid->xid2_buf_len = 0x00;
+
+		ch->xid_skb->data = ch->xid_skb_data;
+		skb_reset_tail_pointer(ch->xid_skb);
+		ch->xid_skb->len = 0;
+
+		fsm_newstate(ch->fsm, CH_XID0_PENDING);
+
+		if ((grp->active_channels[CTCM_READ] > 0) &&
+		    (grp->active_channels[CTCM_WRITE] > 0) &&
+			(fsm_getstate(grp->fsm) < MPCG_STATE_XID2INITW)) {
+			fsm_newstate(grp->fsm, MPCG_STATE_XID2INITW);
+			CTCM_DBF_TEXT_(MPC_SETUP, CTC_DBF_NOTICE,
+				"%s: %s: MPC GROUP CHANNELS ACTIVE\n",
+						__func__, dev->name);
+		}
+	} else if ((action == MPC_CHANNEL_REMOVE) &&
+			(ch->in_mpcgroup == 1)) {
+		ch->in_mpcgroup = 0;
+		grp->num_channel_paths--;
+		grp->active_channels[direction]--;
+
+		if (ch->xid_skb != NULL)
+			dev_kfree_skb_any(ch->xid_skb);
+		ch->xid_skb = NULL;
+
+		if (grp->channels_terminating)
+					goto done;
+
+		if (((grp->active_channels[CTCM_READ] == 0) &&
+					(grp->active_channels[CTCM_WRITE] > 0))
+			|| ((grp->active_channels[CTCM_WRITE] == 0) &&
+					(grp->active_channels[CTCM_READ] > 0)))
+			fsm_event(grp->fsm, MPCG_EVENT_INOP, dev);
+	}
+done:
+	CTCM_DBF_TEXT_(MPC_TRACE, CTC_DBF_DEBUG,
+		"exit %s: %i / Grp:%s total_channels=%i, active_channels: "
+		"read=%i, write=%i\n", __func__, action,
+		fsm_getstate_str(grp->fsm), grp->num_channel_paths,
+		grp->active_channels[CTCM_READ],
+		grp->active_channels[CTCM_WRITE]);
+
+	CTCM_PR_DEBUG("exit %s: ch=0x%p id=%s\n", __func__, ch, ch->id);
+}
+
+/**
+ * Unpack a just-received skb and hand it over to
+ * the upper layers.
+ * Special MPC version of unpack_skb.
+ *
+ * ch		The channel where this skb has been received.
+ * pskb		The received skb.
+ */
+static void ctcmpc_unpack_skb(struct channel *ch, struct sk_buff *pskb)
+{
+	struct net_device *dev	= ch->netdev;
+	struct ctcm_priv *priv = dev->ml_priv;
+	struct mpc_group *grp = priv->mpcg;
+	struct pdu *curr_pdu;
+	struct mpcg_info *mpcginfo;
+	struct th_header *header = NULL;
+	struct th_sweep *sweep = NULL;
+	int pdu_last_seen = 0;
+	__u32 new_len;
+	struct sk_buff *skb;
+	int skblen;
+	int sendrc = 0;
+
+	CTCM_PR_DEBUG("ctcmpc enter: %s() %s cp:%i ch:%s\n",
+			__func__, dev->name, smp_processor_id(), ch->id);
+
+	header = (struct th_header *)pskb->data;
+	if ((header->th_seg == 0) &&
+		(header->th_ch_flag == 0) &&
+		(header->th_blk_flag == 0) &&
+		(header->th_seq_num == 0))
+		/* nothing for us */
+		goto done;
+
+	CTCM_PR_DBGDATA("%s: th_header\n", __func__);
+	CTCM_D3_DUMP((char *)header, TH_HEADER_LENGTH);
+	CTCM_PR_DBGDATA("%s: pskb len: %04x \n", __func__, pskb->len);
+
+	pskb->dev = dev;
+	pskb->ip_summed = CHECKSUM_UNNECESSARY;
+	skb_pull(pskb, TH_HEADER_LENGTH);
+
+	if (likely(header->th_ch_flag == TH_HAS_PDU)) {
+		CTCM_PR_DBGDATA("%s: came into th_has_pdu\n", __func__);
+		if ((fsm_getstate(grp->fsm) == MPCG_STATE_FLOWC) ||
+		   ((fsm_getstate(grp->fsm) == MPCG_STATE_READY) &&
+		    (header->th_seq_num != ch->th_seq_num + 1) &&
+		    (ch->th_seq_num != 0))) {
+			/*
+			 * This is NOT the next segment: we are not the
+			 * correct race winner, go away and let someone
+			 * else win.  BUT this only applies once XID
+			 * negotiation is done.
+			 */
+			grp->out_of_sequence += 1;
+			__skb_push(pskb, TH_HEADER_LENGTH);
+			skb_queue_tail(&ch->io_queue, pskb);
+			CTCM_PR_DBGDATA("%s: th_seq_num expect:%08x "
+					"got:%08x\n", __func__,
+				ch->th_seq_num + 1, header->th_seq_num);
+
+			return;
+		}
+		grp->out_of_sequence = 0;
+		ch->th_seq_num = header->th_seq_num;
+
+		CTCM_PR_DBGDATA("ctcmpc: %s() FromVTAM_th_seq=%08x\n",
+					__func__, ch->th_seq_num);
+
+		if (unlikely(fsm_getstate(grp->fsm) != MPCG_STATE_READY))
+					goto done;
+		while ((pskb->len > 0) && !pdu_last_seen) {
+			curr_pdu = (struct pdu *)pskb->data;
+
+			CTCM_PR_DBGDATA("%s: pdu_header\n", __func__);
+			CTCM_D3_DUMP((char *)pskb->data, PDU_HEADER_LENGTH);
+			CTCM_PR_DBGDATA("%s: pskb len: %04x \n",
+						__func__, pskb->len);
+
+			skb_pull(pskb, PDU_HEADER_LENGTH);
+
+			if (curr_pdu->pdu_flag & PDU_LAST)
+				pdu_last_seen = 1;
+			if (curr_pdu->pdu_flag & PDU_CNTL)
+				pskb->protocol = htons(ETH_P_SNAP);
+			else
+				pskb->protocol = htons(ETH_P_SNA_DIX);
+
+			if ((pskb->len <= 0) || (pskb->len > ch->max_bufsize)) {
+				CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR,
+					"%s(%s): Dropping packet with "
+					"illegal siize %d",
+					CTCM_FUNTAIL, dev->name, pskb->len);
+
+				priv->stats.rx_dropped++;
+				priv->stats.rx_length_errors++;
+					goto done;
+			}
+			skb_reset_mac_header(pskb);
+			new_len = curr_pdu->pdu_offset;
+			CTCM_PR_DBGDATA("%s: new_len: %04x \n",
+						__func__, new_len);
+			if ((new_len == 0) || (new_len > pskb->len)) {
+				/* should never happen		    */
+				/* pskb len must be hosed...bail out */
+				CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR,
+					"%s(%s): non valid pdu_offset: %04x",
+					/* "data may be lost", */
+					CTCM_FUNTAIL, dev->name, new_len);
+				goto done;
+			}
+			skb = __dev_alloc_skb(new_len+4, GFP_ATOMIC);
+
+			if (!skb) {
+				CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR,
+					"%s(%s): MEMORY allocation error",
+						CTCM_FUNTAIL, dev->name);
+				priv->stats.rx_dropped++;
+				fsm_event(grp->fsm, MPCG_EVENT_INOP, dev);
+						goto done;
+			}
+			skb_put_data(skb, pskb->data, new_len);
+
+			skb_reset_mac_header(skb);
+			skb->dev = pskb->dev;
+			skb->protocol = pskb->protocol;
+			skb->ip_summed = CHECKSUM_UNNECESSARY;
+			*((__u32 *) skb_push(skb, 4)) = ch->pdu_seq;
+			ch->pdu_seq++;
+
+			if (do_debug_data) {
+				ctcm_pr_debug("%s: ToDCM_pdu_seq= %08x\n",
+						__func__, ch->pdu_seq);
+				ctcm_pr_debug("%s: skb:%0lx "
+					"skb len: %d \n", __func__,
+					(unsigned long)skb, skb->len);
+				ctcm_pr_debug("%s: up to 32 bytes "
+					"of pdu_data sent\n", __func__);
+				ctcmpc_dump32((char *)skb->data, skb->len);
+			}
+
+			skblen = skb->len;
+			sendrc = netif_rx(skb);
+			priv->stats.rx_packets++;
+			priv->stats.rx_bytes += skblen;
+			skb_pull(pskb, new_len); /* point to next PDU */
+		}
+	} else {
+		mpcginfo = kmalloc(sizeof(struct mpcg_info), gfp_type());
+		if (mpcginfo == NULL)
+					goto done;
+
+		mpcginfo->ch = ch;
+		mpcginfo->th = header;
+		mpcginfo->skb = pskb;
+		CTCM_PR_DEBUG("%s: Not PDU - may be control pkt\n",
+					__func__);
+		/* is it a sweep? */
+		sweep = (struct th_sweep *)pskb->data;
+		mpcginfo->sweep = sweep;
+		if (header->th_ch_flag == TH_SWEEP_REQ)
+			mpc_rcvd_sweep_req(mpcginfo);
+		else if (header->th_ch_flag == TH_SWEEP_RESP)
+			mpc_rcvd_sweep_resp(mpcginfo);
+		else if (header->th_blk_flag == TH_DATA_IS_XID) {
+			struct xid2 *thisxid = (struct xid2 *)pskb->data;
+			skb_pull(pskb, XID2_LENGTH);
+			mpcginfo->xid = thisxid;
+			fsm_event(grp->fsm, MPCG_EVENT_XID2, mpcginfo);
+		} else if (header->th_blk_flag == TH_DISCONTACT)
+			fsm_event(grp->fsm, MPCG_EVENT_DISCONC, mpcginfo);
+		else if (header->th_seq_num != 0) {
+			CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR,
+				"%s(%s): control pkt expected\n",
+						CTCM_FUNTAIL, dev->name);
+			priv->stats.rx_dropped++;
+			/* mpcginfo only used for non-data transfers */
+			kfree(mpcginfo);
+			if (do_debug_data)
+				ctcmpc_dump_skb(pskb, -8);
+		}
+	}
+done:
+
+	dev_kfree_skb_any(pskb);
+	if (sendrc == NET_RX_DROP) {
+		dev_warn(&dev->dev,
+			"The network backlog for %s is exceeded, "
+			"package dropped\n", __func__);
+		fsm_event(grp->fsm, MPCG_EVENT_INOP, dev);
+	}
+
+	CTCM_PR_DEBUG("exit %s: %s: ch=0x%p id=%s\n",
+			__func__, dev->name, ch, ch->id);
+}
+
+/**
+ * tasklet helper for mpc's skb unpacking.
+ *
+ * ch		The channel to work on.
+ * Allow flow control back pressure to occur here.
+ * Throttling the channel back can result in excessive
+ * channel inactivity and deactivation of the channel by the system.
+ */
+void ctcmpc_bh(unsigned long thischan)
+{
+	struct channel	  *ch	= (struct channel *)thischan;
+	struct sk_buff	  *skb;
+	struct net_device *dev	= ch->netdev;
+	struct ctcm_priv  *priv	= dev->ml_priv;
+	struct mpc_group  *grp	= priv->mpcg;
+
+	CTCM_PR_DEBUG("%s cp:%i enter:  %s() %s\n",
+	       dev->name, smp_processor_id(), __func__, ch->id);
+	/* caller has requested driver to throttle back */
+	while ((fsm_getstate(grp->fsm) != MPCG_STATE_FLOWC) &&
+			(skb = skb_dequeue(&ch->io_queue))) {
+		ctcmpc_unpack_skb(ch, skb);
+		if (grp->out_of_sequence > 20) {
+			/* assume data loss has occurred if */
+			/* missing seq_num for extended     */
+			/* period of time		    */
+			grp->out_of_sequence = 0;
+			fsm_event(grp->fsm, MPCG_EVENT_INOP, dev);
+			break;
+		}
+		if (skb == skb_peek(&ch->io_queue))
+			break;
+	}
+	CTCM_PR_DEBUG("exit %s: %s: ch=0x%p id=%s\n",
+			__func__, dev->name, ch, ch->id);
+	return;
+}
+
+/*
+ *  MPC Group Initializations
+ */
+struct mpc_group *ctcmpc_init_mpc_group(struct ctcm_priv *priv)
+{
+	struct mpc_group *grp;
+
+	CTCM_DBF_TEXT_(MPC_SETUP, CTC_DBF_INFO,
+			"Enter %s(%p)", CTCM_FUNTAIL, priv);
+
+	grp = kzalloc(sizeof(struct mpc_group), GFP_KERNEL);
+	if (grp == NULL)
+		return NULL;
+
+	grp->fsm = init_fsm("mpcg", mpcg_state_names, mpcg_event_names,
+			MPCG_NR_STATES, MPCG_NR_EVENTS, mpcg_fsm,
+			mpcg_fsm_len, GFP_KERNEL);
+	if (grp->fsm == NULL) {
+		kfree(grp);
+		return NULL;
+	}
+
+	fsm_newstate(grp->fsm, MPCG_STATE_RESET);
+	fsm_settimer(grp->fsm, &grp->timer);
+
+	grp->xid_skb =
+		 __dev_alloc_skb(MPC_BUFSIZE_DEFAULT, GFP_ATOMIC | GFP_DMA);
+	if (grp->xid_skb == NULL) {
+		kfree_fsm(grp->fsm);
+		kfree(grp);
+		return NULL;
+	}
+	/*  base xid for all channels in group  */
+	grp->xid_skb_data = grp->xid_skb->data;
+	grp->xid_th = (struct th_header *)grp->xid_skb->data;
+	skb_put_data(grp->xid_skb, &thnorm, TH_HEADER_LENGTH);
+
+	grp->xid = (struct xid2 *)skb_tail_pointer(grp->xid_skb);
+	skb_put_data(grp->xid_skb, &init_xid, XID2_LENGTH);
+	grp->xid->xid2_adj_id = jiffies | 0xfff00000;
+	grp->xid->xid2_sender_id = jiffies;
+
+	grp->xid_id = skb_tail_pointer(grp->xid_skb);
+	skb_put_data(grp->xid_skb, "VTAM", 4);
+
+	grp->rcvd_xid_skb =
+		__dev_alloc_skb(MPC_BUFSIZE_DEFAULT, GFP_ATOMIC|GFP_DMA);
+	if (grp->rcvd_xid_skb == NULL) {
+		kfree_fsm(grp->fsm);
+		dev_kfree_skb(grp->xid_skb);
+		kfree(grp);
+		return NULL;
+	}
+	grp->rcvd_xid_data = grp->rcvd_xid_skb->data;
+	grp->rcvd_xid_th = (struct th_header *)grp->rcvd_xid_skb->data;
+	skb_put_data(grp->rcvd_xid_skb, &thnorm, TH_HEADER_LENGTH);
+	grp->saved_xid2 = NULL;
+	priv->xid = grp->xid;
+	priv->mpcg = grp;
+	return grp;
+}
+
+/*
+ * The MPC Group Station FSM
+ */
+
+/*
+ * MPC Group Station FSM actions
+ * CTCM_PROTO_MPC only
+ */
+
+/**
+ * NOP action for statemachines
+ */
+static void mpc_action_nop(fsm_instance *fi, int event, void *arg)
+{
+}
+
+/*
+ * invoked when the device transitions to dev_stopped
+ * MPC will stop each individual channel if a single XID failure
+ * occurs, or will initiate stopping of all channels if a GROUP
+ * level failure occurs.
+ */
+static void mpc_action_go_inop(fsm_instance *fi, int event, void *arg)
+{
+	struct net_device  *dev = arg;
+	struct ctcm_priv    *priv;
+	struct mpc_group *grp;
+	struct channel *wch;
+
+	CTCM_PR_DEBUG("Enter %s: %s\n",	__func__, dev->name);
+
+	priv  = dev->ml_priv;
+	grp =  priv->mpcg;
+	grp->flow_off_called = 0;
+	fsm_deltimer(&grp->timer);
+	if (grp->channels_terminating)
+			return;
+
+	grp->channels_terminating = 1;
+	grp->saved_state = fsm_getstate(grp->fsm);
+	fsm_newstate(grp->fsm, MPCG_STATE_INOP);
+	if (grp->saved_state > MPCG_STATE_XID7INITF)
+		CTCM_DBF_TEXT_(MPC_TRACE, CTC_DBF_NOTICE,
+			"%s(%s): MPC GROUP INOPERATIVE",
+				CTCM_FUNTAIL, dev->name);
+	if ((grp->saved_state != MPCG_STATE_RESET) ||
+		/* dealloc_channel has been called */
+		(grp->port_persist == 0))
+		fsm_deltimer(&priv->restart_timer);
+
+	wch = priv->channel[CTCM_WRITE];
+
+	switch (grp->saved_state) {
+	case MPCG_STATE_RESET:
+	case MPCG_STATE_INOP:
+	case MPCG_STATE_XID2INITW:
+	case MPCG_STATE_XID0IOWAIT:
+	case MPCG_STATE_XID2INITX:
+	case MPCG_STATE_XID7INITW:
+	case MPCG_STATE_XID7INITX:
+	case MPCG_STATE_XID0IOWAIX:
+	case MPCG_STATE_XID7INITI:
+	case MPCG_STATE_XID7INITZ:
+	case MPCG_STATE_XID7INITF:
+		break;
+	case MPCG_STATE_FLOWC:
+	case MPCG_STATE_READY:
+	default:
+		tasklet_hi_schedule(&wch->ch_disc_tasklet);
+	}
+
+	grp->xid2_tgnum = 0;
+	grp->group_max_buflen = 0;	/* min of all received */
+	grp->outstanding_xid2 = 0;
+	grp->outstanding_xid7 = 0;
+	grp->outstanding_xid7_p2 = 0;
+	grp->saved_xid2 = NULL;
+	grp->xidnogood = 0;
+	grp->changed_side = 0;
+
+	grp->rcvd_xid_skb->data = grp->rcvd_xid_data;
+	skb_reset_tail_pointer(grp->rcvd_xid_skb);
+	grp->rcvd_xid_skb->len = 0;
+	grp->rcvd_xid_th = (struct th_header *)grp->rcvd_xid_skb->data;
+	skb_put_data(grp->rcvd_xid_skb, &thnorm, TH_HEADER_LENGTH);
+
+	if (grp->send_qllc_disc == 1) {
+		grp->send_qllc_disc = 0;
+		mpc_send_qllc_discontact(dev);
+	}
+
+	/* DO NOT issue DEV_EVENT_STOP directly out of this code */
+	/* This can result in INOP of VTAM PU due to halting of  */
+	/* outstanding IO which causes a sense to be returned	 */
+	/* Only about 3 senses are allowed and then IOS/VTAM will*/
+	/* become unreachable without manual intervention	 */
+	if ((grp->port_persist == 1) || (grp->alloc_called)) {
+		grp->alloc_called = 0;
+		fsm_deltimer(&priv->restart_timer);
+		fsm_addtimer(&priv->restart_timer, 500, DEV_EVENT_RESTART, dev);
+		fsm_newstate(grp->fsm, MPCG_STATE_RESET);
+		if (grp->saved_state > MPCG_STATE_XID7INITF)
+			CTCM_DBF_TEXT_(MPC_TRACE, CTC_DBF_ALWAYS,
+				"%s(%s): MPC GROUP RECOVERY SCHEDULED",
+					CTCM_FUNTAIL, dev->name);
+	} else {
+		fsm_deltimer(&priv->restart_timer);
+		fsm_addtimer(&priv->restart_timer, 500, DEV_EVENT_STOP, dev);
+		fsm_newstate(grp->fsm, MPCG_STATE_RESET);
+		CTCM_DBF_TEXT_(MPC_TRACE, CTC_DBF_ALWAYS,
+			"%s(%s): NO MPC GROUP RECOVERY ATTEMPTED",
+						CTCM_FUNTAIL, dev->name);
+	}
+}
+
+/**
+ * Handle mpc group  action timeout.
+ * MPC Group Station FSM action
+ * CTCM_PROTO_MPC only
+ *
+ * fi		An instance of an mpc_group fsm.
+ * event	The event that just happened.
+ * arg		Generic pointer, cast from net_device * upon call.
+ */
+static void mpc_action_timeout(fsm_instance *fi, int event, void *arg)
+{
+	struct net_device *dev = arg;
+	struct ctcm_priv *priv;
+	struct mpc_group *grp;
+	struct channel *wch;
+	struct channel *rch;
+
+	priv = dev->ml_priv;
+	grp = priv->mpcg;
+	wch = priv->channel[CTCM_WRITE];
+	rch = priv->channel[CTCM_READ];
+
+	switch (fsm_getstate(grp->fsm)) {
+	case MPCG_STATE_XID2INITW:
+		/* Unless there is outstanding IO on the  */
+		/* channel just return and wait for ATTN  */
+		/* interrupt to begin XID negotiations	  */
+		if ((fsm_getstate(rch->fsm) == CH_XID0_PENDING) &&
+		   (fsm_getstate(wch->fsm) == CH_XID0_PENDING))
+			break;
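+		/* fall through */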
+	default:
+		fsm_event(grp->fsm, MPCG_EVENT_INOP, dev);
+	}
+
+	CTCM_DBF_TEXT_(MPC_TRACE, CTC_DBF_DEBUG,
+			"%s: dev=%s exit",
+			CTCM_FUNTAIL, dev->name);
+	return;
+}
+
+/*
+ * MPC Group Station FSM action
+ * CTCM_PROTO_MPC only
+ */
+void mpc_action_discontact(fsm_instance *fi, int event, void *arg)
+{
+	struct mpcg_info   *mpcginfo   = arg;
+	struct channel	   *ch	       = mpcginfo->ch;
+	struct net_device  *dev;
+	struct ctcm_priv   *priv;
+	struct mpc_group   *grp;
+
+	if (ch) {
+		dev = ch->netdev;
+		if (dev) {
+			priv = dev->ml_priv;
+			if (priv) {
+				CTCM_DBF_TEXT_(MPC_TRACE, CTC_DBF_NOTICE,
+					"%s: %s: %s\n",
+					CTCM_FUNTAIL, dev->name, ch->id);
+				grp = priv->mpcg;
+				grp->send_qllc_disc = 1;
+				fsm_event(grp->fsm, MPCG_EVENT_INOP, dev);
+			}
+		}
+	}
+
+	return;
+}
+
+/*
+ * MPC Group Station - not part of FSM
+ * CTCM_PROTO_MPC only
+ * called from add_channel in ctcm_main.c
+ */
+void mpc_action_send_discontact(unsigned long thischan)
+{
+	int rc;
+	struct channel	*ch = (struct channel *)thischan;
+	unsigned long	saveflags = 0;
+
+	spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags);
+	rc = ccw_device_start(ch->cdev, &ch->ccw[15],
+					(unsigned long)ch, 0xff, 0);
+	spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), saveflags);
+
+	if (rc != 0) {
+		ctcm_ccw_check_rc(ch, rc, (char *)__func__);
+	}
+
+	return;
+}
+
+
+/*
+ * helper function of mpc FSM
+ * CTCM_PROTO_MPC only
+ * mpc_action_rcvd_xid7
+ */
+static int mpc_validate_xid(struct mpcg_info *mpcginfo)
+{
+	struct channel	   *ch	 = mpcginfo->ch;
+	struct net_device  *dev  = ch->netdev;
+	struct ctcm_priv   *priv = dev->ml_priv;
+	struct mpc_group   *grp  = priv->mpcg;
+	struct xid2	   *xid  = mpcginfo->xid;
+	int	rc	 = 0;
+	__u64	our_id   = 0;
+	__u64   their_id = 0;
+	int	len = TH_HEADER_LENGTH + PDU_HEADER_LENGTH;
+
+	CTCM_PR_DEBUG("Enter %s: xid=%p\n", __func__, xid);
+
+	if (xid == NULL) {
+		rc = 1;
+		/* XID REJECTED: xid == NULL */
+		CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR,
+			"%s(%s): xid = NULL",
+				CTCM_FUNTAIL, ch->id);
+			goto done;
+	}
+
+	CTCM_D3_DUMP((char *)xid, XID2_LENGTH);
+
+	/* the received direction should be the opposite of ours */
+	if (((CHANNEL_DIRECTION(ch->flags) == CTCM_READ) ? XID2_WRITE_SIDE :
+				XID2_READ_SIDE) != xid->xid2_dlc_type) {
+		rc = 2;
+		/* XID REJECTED: r/w channel pairing mismatch */
+		CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR,
+			"%s(%s): r/w channel pairing mismatch",
+				CTCM_FUNTAIL, ch->id);
+			goto done;
+	}
+
+	if (xid->xid2_dlc_type == XID2_READ_SIDE) {
+		CTCM_PR_DEBUG("%s: grpmaxbuf:%d xid2buflen:%d\n", __func__,
+				grp->group_max_buflen, xid->xid2_buf_len);
+
+		if (grp->group_max_buflen == 0 || grp->group_max_buflen >
+						xid->xid2_buf_len - len)
+			grp->group_max_buflen = xid->xid2_buf_len - len;
+	}
+
+	if (grp->saved_xid2 == NULL) {
+		grp->saved_xid2 =
+			(struct xid2 *)skb_tail_pointer(grp->rcvd_xid_skb);
+
+		skb_put_data(grp->rcvd_xid_skb, xid, XID2_LENGTH);
+		grp->rcvd_xid_skb->data = grp->rcvd_xid_data;
+
+		skb_reset_tail_pointer(grp->rcvd_xid_skb);
+		grp->rcvd_xid_skb->len = 0;
+
+		/* convert two 32 bit numbers into 1 64 bit for id compare */
+		our_id = (__u64)priv->xid->xid2_adj_id;
+		our_id = our_id << 32;
+		our_id = our_id + priv->xid->xid2_sender_id;
+		their_id = (__u64)xid->xid2_adj_id;
+		their_id = their_id << 32;
+		their_id = their_id + xid->xid2_sender_id;
+		/* the lower id assumes the xside role */
+		if (our_id < their_id) {
+			grp->roll = XSIDE;
+			CTCM_DBF_TEXT_(MPC_TRACE, CTC_DBF_NOTICE,
+				"%s(%s): WE HAVE LOW ID - TAKE XSIDE",
+					CTCM_FUNTAIL, ch->id);
+		} else {
+			grp->roll = YSIDE;
+			CTCM_DBF_TEXT_(MPC_TRACE, CTC_DBF_NOTICE,
+				"%s(%s): WE HAVE HIGH ID - TAKE YSIDE",
+					CTCM_FUNTAIL, ch->id);
+		}
+
+	} else {
+		if (xid->xid2_flag4 != grp->saved_xid2->xid2_flag4) {
+			rc = 3;
+			/* XID REJECTED: xid flag byte4 mismatch */
+			CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR,
+				"%s(%s): xid flag byte4 mismatch",
+					CTCM_FUNTAIL, ch->id);
+		}
+		if (xid->xid2_flag2 == 0x40) {
+			rc = 4;
+			/* XID REJECTED - xid NOGOOD */
+			CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR,
+				"%s(%s): xid NOGOOD",
+					CTCM_FUNTAIL, ch->id);
+		}
+		if (xid->xid2_adj_id != grp->saved_xid2->xid2_adj_id) {
+			rc = 5;
+			/* XID REJECTED - Adjacent Station ID Mismatch */
+			CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR,
+				"%s(%s): Adjacent Station ID Mismatch",
+					CTCM_FUNTAIL, ch->id);
+		}
+		if (xid->xid2_sender_id != grp->saved_xid2->xid2_sender_id) {
+			rc = 6;
+			/* XID REJECTED - Sender Address Mismatch */
+			CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR,
+				"%s(%s): Sender Address Mismatch",
+					CTCM_FUNTAIL, ch->id);
+		}
+	}
+done:
+	if (rc) {
+		dev_warn(&dev->dev,
+			"The XID used in the MPC protocol is not valid, "
+			"rc = %d\n", rc);
+		priv->xid->xid2_flag2 = 0x40;
+		grp->saved_xid2->xid2_flag2 = 0x40;
+	}
+
+	return rc;
+}
+
+/*
+ * MPC Group Station FSM action
+ * CTCM_PROTO_MPC only
+ */
+static void mpc_action_side_xid(fsm_instance *fsm, void *arg, int side)
+{
+	struct channel *ch = arg;
+	int rc = 0;
+	int gotlock = 0;
+	unsigned long saveflags = 0;	/* avoids compiler warning with
+					   spin_unlock_irqrestore */
+
+	CTCM_PR_DEBUG("Enter %s: cp=%i ch=0x%p id=%s\n",
+			__func__, smp_processor_id(), ch, ch->id);
+
+	if (ctcm_checkalloc_buffer(ch))
+					goto done;
+
+	/*
+	 * skb data-buffer referencing:
+	 */
+	ch->trans_skb->data = ch->trans_skb_data;
+	skb_reset_tail_pointer(ch->trans_skb);
+	ch->trans_skb->len = 0;
+	/* the state set up by the previous 3 statements is NOT always
+	 * already in place after ctcm_checkalloc_buffer,
+	 * because the trans_skb may have been reused
+	 */
+	memset(ch->trans_skb->data, 0, 16);
+	ch->rcvd_xid_th =  (struct th_header *)ch->trans_skb_data;
+	/* check is main purpose here: */
+	skb_put(ch->trans_skb, TH_HEADER_LENGTH);
+	ch->rcvd_xid = (struct xid2 *)skb_tail_pointer(ch->trans_skb);
+	/* check is main purpose here: */
+	skb_put(ch->trans_skb, XID2_LENGTH);
+	ch->rcvd_xid_id = skb_tail_pointer(ch->trans_skb);
+	/* cleanup back to startpoint */
+	ch->trans_skb->data = ch->trans_skb_data;
+	skb_reset_tail_pointer(ch->trans_skb);
+	ch->trans_skb->len = 0;
+
+	/* non-checking rewrite of above skb data-buffer referencing: */
+	/*
+	memset(ch->trans_skb->data, 0, 16);
+	ch->rcvd_xid_th =  (struct th_header *)ch->trans_skb_data;
+	ch->rcvd_xid = (struct xid2 *)(ch->trans_skb_data + TH_HEADER_LENGTH);
+	ch->rcvd_xid_id = ch->trans_skb_data + TH_HEADER_LENGTH + XID2_LENGTH;
+	 */
+
+	ch->ccw[8].flags	= CCW_FLAG_SLI | CCW_FLAG_CC;
+	ch->ccw[8].count	= 0;
+	ch->ccw[8].cda		= 0x00;
+
+	if (!(ch->xid_th && ch->xid && ch->xid_id))
+		CTCM_DBF_TEXT_(MPC_TRACE, CTC_DBF_INFO,
+			"%s(%s): xid_th=%p, xid=%p, xid_id=%p",
+			CTCM_FUNTAIL, ch->id, ch->xid_th, ch->xid, ch->xid_id);
+
+	if (side == XSIDE) {
+		/* mpc_action_xside_xid */
+		if (ch->xid_th == NULL)
+				goto done;
+		ch->ccw[9].cmd_code	= CCW_CMD_WRITE;
+		ch->ccw[9].flags	= CCW_FLAG_SLI | CCW_FLAG_CC;
+		ch->ccw[9].count	= TH_HEADER_LENGTH;
+		ch->ccw[9].cda		= virt_to_phys(ch->xid_th);
+
+		if (ch->xid == NULL)
+				goto done;
+		ch->ccw[10].cmd_code	= CCW_CMD_WRITE;
+		ch->ccw[10].flags	= CCW_FLAG_SLI | CCW_FLAG_CC;
+		ch->ccw[10].count	= XID2_LENGTH;
+		ch->ccw[10].cda		= virt_to_phys(ch->xid);
+
+		ch->ccw[11].cmd_code	= CCW_CMD_READ;
+		ch->ccw[11].flags	= CCW_FLAG_SLI | CCW_FLAG_CC;
+		ch->ccw[11].count	= TH_HEADER_LENGTH;
+		ch->ccw[11].cda		= virt_to_phys(ch->rcvd_xid_th);
+
+		ch->ccw[12].cmd_code	= CCW_CMD_READ;
+		ch->ccw[12].flags	= CCW_FLAG_SLI | CCW_FLAG_CC;
+		ch->ccw[12].count	= XID2_LENGTH;
+		ch->ccw[12].cda		= virt_to_phys(ch->rcvd_xid);
+
+		ch->ccw[13].cmd_code	= CCW_CMD_READ;
+		ch->ccw[13].cda		= virt_to_phys(ch->rcvd_xid_id);
+
+	} else { /* side == YSIDE : mpc_action_yside_xid */
+		ch->ccw[9].cmd_code	= CCW_CMD_READ;
+		ch->ccw[9].flags	= CCW_FLAG_SLI | CCW_FLAG_CC;
+		ch->ccw[9].count	= TH_HEADER_LENGTH;
+		ch->ccw[9].cda		= virt_to_phys(ch->rcvd_xid_th);
+
+		ch->ccw[10].cmd_code	= CCW_CMD_READ;
+		ch->ccw[10].flags	= CCW_FLAG_SLI | CCW_FLAG_CC;
+		ch->ccw[10].count	= XID2_LENGTH;
+		ch->ccw[10].cda		= virt_to_phys(ch->rcvd_xid);
+
+		if (ch->xid_th == NULL)
+				goto done;
+		ch->ccw[11].cmd_code	= CCW_CMD_WRITE;
+		ch->ccw[11].flags	= CCW_FLAG_SLI | CCW_FLAG_CC;
+		ch->ccw[11].count	= TH_HEADER_LENGTH;
+		ch->ccw[11].cda		= virt_to_phys(ch->xid_th);
+
+		if (ch->xid == NULL)
+				goto done;
+		ch->ccw[12].cmd_code	= CCW_CMD_WRITE;
+		ch->ccw[12].flags	= CCW_FLAG_SLI | CCW_FLAG_CC;
+		ch->ccw[12].count	= XID2_LENGTH;
+		ch->ccw[12].cda		= virt_to_phys(ch->xid);
+
+		if (ch->xid_id == NULL)
+				goto done;
+		ch->ccw[13].cmd_code	= CCW_CMD_WRITE;
+		ch->ccw[13].cda		= virt_to_phys(ch->xid_id);
+
+	}
+	ch->ccw[13].flags	= CCW_FLAG_SLI | CCW_FLAG_CC;
+	ch->ccw[13].count	= 4;
+
+	ch->ccw[14].cmd_code	= CCW_CMD_NOOP;
+	ch->ccw[14].flags	= CCW_FLAG_SLI;
+	ch->ccw[14].count	= 0;
+	ch->ccw[14].cda		= 0;
+
+	CTCM_CCW_DUMP((char *)&ch->ccw[8], sizeof(struct ccw1) * 7);
+	CTCM_D3_DUMP((char *)ch->xid_th, TH_HEADER_LENGTH);
+	CTCM_D3_DUMP((char *)ch->xid, XID2_LENGTH);
+	CTCM_D3_DUMP((char *)ch->xid_id, 4);
+
+	if (!in_irq()) {
+			 /* Such conditional locking is a known problem for
+			  * sparse, whose static analysis cannot follow it
+			  * deterministically.  Warnings should be ignored here. */
+		spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags);
+		gotlock = 1;
+	}
+
+	fsm_addtimer(&ch->timer, 5000, CTC_EVENT_TIMER, ch);
+	rc = ccw_device_start(ch->cdev, &ch->ccw[8],
+				(unsigned long)ch, 0xff, 0);
+
+	if (gotlock)	/* see remark above about conditional locking */
+		spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), saveflags);
+
+	if (rc != 0) {
+		ctcm_ccw_check_rc(ch, rc,
+				(side == XSIDE) ? "x-side XID" : "y-side XID");
+	}
+
+done:
+	CTCM_PR_DEBUG("Exit %s: ch=0x%p id=%s\n",
+				__func__, ch, ch->id);
+	return;
+}
+
+/*
+ * MPC Group Station FSM action
+ * CTCM_PROTO_MPC only
+ */
+static void mpc_action_xside_xid(fsm_instance *fsm, int event, void *arg)
+{
+	mpc_action_side_xid(fsm, arg, XSIDE);
+}
+
+/*
+ * MPC Group Station FSM action
+ * CTCM_PROTO_MPC only
+ */
+static void mpc_action_yside_xid(fsm_instance *fsm, int event, void *arg)
+{
+	mpc_action_side_xid(fsm, arg, YSIDE);
+}
+
+/*
+ * MPC Group Station FSM action
+ * CTCM_PROTO_MPC only
+ */
+static void mpc_action_doxid0(fsm_instance *fsm, int event, void *arg)
+{
+	struct channel	   *ch   = arg;
+	struct net_device  *dev  = ch->netdev;
+	struct ctcm_priv   *priv = dev->ml_priv;
+	struct mpc_group   *grp  = priv->mpcg;
+
+	CTCM_PR_DEBUG("Enter %s: cp=%i ch=0x%p id=%s\n",
+			__func__, smp_processor_id(), ch, ch->id);
+
+	if (ch->xid == NULL) {
+		CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR,
+			"%s(%s): ch->xid == NULL",
+				CTCM_FUNTAIL, dev->name);
+		return;
+	}
+
+	fsm_newstate(ch->fsm, CH_XID0_INPROGRESS);
+
+	ch->xid->xid2_option =	XID2_0;
+
+	switch (fsm_getstate(grp->fsm)) {
+	case MPCG_STATE_XID2INITW:
+	case MPCG_STATE_XID2INITX:
+		ch->ccw[8].cmd_code = CCW_CMD_SENSE_CMD;
+		break;
+	case MPCG_STATE_XID0IOWAIT:
+	case MPCG_STATE_XID0IOWAIX:
+		ch->ccw[8].cmd_code = CCW_CMD_WRITE_CTL;
+		break;
+	}
+
+	fsm_event(grp->fsm, MPCG_EVENT_DOIO, ch);
+
+	return;
+}
+
+/*
+ * MPC Group Station FSM action
+ * CTCM_PROTO_MPC only
+ */
+static void mpc_action_doxid7(fsm_instance *fsm, int event, void *arg)
+{
+	struct net_device *dev = arg;
+	struct ctcm_priv  *priv = dev->ml_priv;
+	struct mpc_group  *grp  = NULL;
+	int direction;
+	int send = 0;
+
+	if (priv)
+		grp = priv->mpcg;
+	if (grp == NULL)
+		return;
+
+	for (direction = CTCM_READ; direction <= CTCM_WRITE; direction++) {
+		struct channel *ch = priv->channel[direction];
+		struct xid2 *thisxid = ch->xid;
+		ch->xid_skb->data = ch->xid_skb_data;
+		skb_reset_tail_pointer(ch->xid_skb);
+		ch->xid_skb->len = 0;
+		thisxid->xid2_option = XID2_7;
+		send = 0;
+
+		/* xid7 phase 1 */
+		if (grp->outstanding_xid7_p2 > 0) {
+			if (grp->roll == YSIDE) {
+				if (fsm_getstate(ch->fsm) == CH_XID7_PENDING1) {
+					fsm_newstate(ch->fsm, CH_XID7_PENDING2);
+					ch->ccw[8].cmd_code = CCW_CMD_SENSE_CMD;
+					skb_put_data(ch->xid_skb, &thdummy,
+						     TH_HEADER_LENGTH);
+					send = 1;
+				}
+			} else if (fsm_getstate(ch->fsm) < CH_XID7_PENDING2) {
+					fsm_newstate(ch->fsm, CH_XID7_PENDING2);
+					ch->ccw[8].cmd_code = CCW_CMD_WRITE_CTL;
+					skb_put_data(ch->xid_skb, &thnorm,
+						     TH_HEADER_LENGTH);
+					send = 1;
+			}
+		} else {
+			/* xid7 phase 2 */
+			if (grp->roll == YSIDE) {
+				if (fsm_getstate(ch->fsm) < CH_XID7_PENDING4) {
+					fsm_newstate(ch->fsm, CH_XID7_PENDING4);
+					skb_put_data(ch->xid_skb, &thnorm,
+						     TH_HEADER_LENGTH);
+					ch->ccw[8].cmd_code = CCW_CMD_WRITE_CTL;
+					send = 1;
+				}
+			} else if (fsm_getstate(ch->fsm) == CH_XID7_PENDING3) {
+				fsm_newstate(ch->fsm, CH_XID7_PENDING4);
+				ch->ccw[8].cmd_code = CCW_CMD_SENSE_CMD;
+				skb_put_data(ch->xid_skb, &thdummy,
+					     TH_HEADER_LENGTH);
+				send = 1;
+			}
+		}
+
+		if (send)
+			fsm_event(grp->fsm, MPCG_EVENT_DOIO, ch);
+	}
+
+	return;
+}
+
+/*
+ * MPC Group Station FSM action
+ * CTCM_PROTO_MPC only
+ */
+static void mpc_action_rcvd_xid0(fsm_instance *fsm, int event, void *arg)
+{
+	struct mpcg_info   *mpcginfo  = arg;
+	struct channel	   *ch   = mpcginfo->ch;
+	struct net_device  *dev  = ch->netdev;
+	struct ctcm_priv   *priv = dev->ml_priv;
+	struct mpc_group   *grp  = priv->mpcg;
+
+	CTCM_PR_DEBUG("%s: ch-id:%s xid2:%i xid7:%i xidt_p2:%i \n",
+			__func__, ch->id, grp->outstanding_xid2,
+			grp->outstanding_xid7, grp->outstanding_xid7_p2);
+
+	if (fsm_getstate(ch->fsm) < CH_XID7_PENDING)
+		fsm_newstate(ch->fsm, CH_XID7_PENDING);
+
+	grp->outstanding_xid2--;
+	grp->outstanding_xid7++;
+	grp->outstanding_xid7_p2++;
+
+	/* must change state before validating xid to */
+	/* properly handle interim interrupts received*/
+	switch (fsm_getstate(grp->fsm)) {
+	case MPCG_STATE_XID2INITW:
+		fsm_newstate(grp->fsm, MPCG_STATE_XID2INITX);
+		mpc_validate_xid(mpcginfo);
+		break;
+	case MPCG_STATE_XID0IOWAIT:
+		fsm_newstate(grp->fsm, MPCG_STATE_XID0IOWAIX);
+		mpc_validate_xid(mpcginfo);
+		break;
+	case MPCG_STATE_XID2INITX:
+		if (grp->outstanding_xid2 == 0) {
+			fsm_newstate(grp->fsm, MPCG_STATE_XID7INITW);
+			mpc_validate_xid(mpcginfo);
+			fsm_event(grp->fsm, MPCG_EVENT_XID2DONE, dev);
+		}
+		break;
+	case MPCG_STATE_XID0IOWAIX:
+		if (grp->outstanding_xid2 == 0) {
+			fsm_newstate(grp->fsm, MPCG_STATE_XID7INITI);
+			mpc_validate_xid(mpcginfo);
+			fsm_event(grp->fsm, MPCG_EVENT_XID2DONE, dev);
+		}
+		break;
+	}
+	kfree(mpcginfo);
+
+	CTCM_PR_DEBUG("ctcmpc:%s() %s xid2:%i xid7:%i xidt_p2:%i \n",
+		__func__, ch->id, grp->outstanding_xid2,
+		grp->outstanding_xid7, grp->outstanding_xid7_p2);
+	CTCM_PR_DEBUG("ctcmpc:%s() %s grpstate: %s chanstate: %s \n",
+		__func__, ch->id,
+		fsm_getstate_str(grp->fsm), fsm_getstate_str(ch->fsm));
+	return;
+}
+
+
+/*
+ * MPC Group Station FSM action
+ * CTCM_PROTO_MPC only
+ */
+static void mpc_action_rcvd_xid7(fsm_instance *fsm, int event, void *arg)
+{
+	struct mpcg_info   *mpcginfo   = arg;
+	struct channel	   *ch	       = mpcginfo->ch;
+	struct net_device  *dev        = ch->netdev;
+	struct ctcm_priv   *priv    = dev->ml_priv;
+	struct mpc_group   *grp     = priv->mpcg;
+
+	CTCM_PR_DEBUG("Enter %s: cp=%i ch=0x%p id=%s\n",
+		__func__, smp_processor_id(), ch, ch->id);
+	CTCM_PR_DEBUG("%s: outstanding_xid7: %i, outstanding_xid7_p2: %i\n",
+		__func__, grp->outstanding_xid7, grp->outstanding_xid7_p2);
+
+	grp->outstanding_xid7--;
+	ch->xid_skb->data = ch->xid_skb_data;
+	skb_reset_tail_pointer(ch->xid_skb);
+	ch->xid_skb->len = 0;
+
+	switch (fsm_getstate(grp->fsm)) {
+	case MPCG_STATE_XID7INITI:
+		fsm_newstate(grp->fsm, MPCG_STATE_XID7INITZ);
+		mpc_validate_xid(mpcginfo);
+		break;
+	case MPCG_STATE_XID7INITW:
+		fsm_newstate(grp->fsm, MPCG_STATE_XID7INITX);
+		mpc_validate_xid(mpcginfo);
+		break;
+	case MPCG_STATE_XID7INITZ:
+	case MPCG_STATE_XID7INITX:
+		if (grp->outstanding_xid7 == 0) {
+			if (grp->outstanding_xid7_p2 > 0) {
+				grp->outstanding_xid7 =
+					grp->outstanding_xid7_p2;
+				grp->outstanding_xid7_p2 = 0;
+			} else
+				fsm_newstate(grp->fsm, MPCG_STATE_XID7INITF);
+
+			mpc_validate_xid(mpcginfo);
+			fsm_event(grp->fsm, MPCG_EVENT_XID7DONE, dev);
+			break;
+		}
+		mpc_validate_xid(mpcginfo);
+		break;
+	}
+	kfree(mpcginfo);
+	return;
+}
+
+/*
+ * mpc_action helper of an MPC Group Station FSM action
+ * CTCM_PROTO_MPC only
+ */
+static int mpc_send_qllc_discontact(struct net_device *dev)
+{
+	__u32	new_len	= 0;
+	struct sk_buff   *skb;
+	struct qllc      *qllcptr;
+	struct ctcm_priv *priv = dev->ml_priv;
+	struct mpc_group *grp = priv->mpcg;
+
+	CTCM_PR_DEBUG("%s: GROUP STATE: %s\n",
+		__func__, mpcg_state_names[grp->saved_state]);
+
+	switch (grp->saved_state) {
+	/*
+	 * establish conn callback function is
+	 * preferred method to report failure
+	 */
+	case MPCG_STATE_XID0IOWAIT:
+	case MPCG_STATE_XID0IOWAIX:
+	case MPCG_STATE_XID7INITI:
+	case MPCG_STATE_XID7INITZ:
+	case MPCG_STATE_XID2INITW:
+	case MPCG_STATE_XID2INITX:
+	case MPCG_STATE_XID7INITW:
+	case MPCG_STATE_XID7INITX:
+		if (grp->estconnfunc) {
+			grp->estconnfunc(grp->port_num, -1, 0);
+			grp->estconnfunc = NULL;
+			break;
+		}
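+		/* fall through to send a QLLC DISC when no callback is set */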
+	case MPCG_STATE_FLOWC:
+	case MPCG_STATE_READY:
+		grp->send_qllc_disc = 2;
+		new_len = sizeof(struct qllc);
+		qllcptr = kzalloc(new_len, gfp_type() | GFP_DMA);
+		if (qllcptr == NULL) {
+			CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR,
+				"%s(%s): qllcptr allocation error",
+						CTCM_FUNTAIL, dev->name);
+			return -ENOMEM;
+		}
+
+		qllcptr->qllc_address = 0xcc;
+		qllcptr->qllc_commands = 0x03;
+
+		skb = __dev_alloc_skb(new_len, GFP_ATOMIC);
+
+		if (skb == NULL) {
+			CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR,
+				"%s(%s): skb allocation error",
+						CTCM_FUNTAIL, dev->name);
+			priv->stats.rx_dropped++;
+			kfree(qllcptr);
+			return -ENOMEM;
+		}
+
+		skb_put_data(skb, qllcptr, new_len);
+		kfree(qllcptr);
+
+		if (skb_headroom(skb) < 4) {
+			CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR,
+				"%s(%s): skb_headroom error",
+						CTCM_FUNTAIL, dev->name);
+			dev_kfree_skb_any(skb);
+			return -ENOMEM;
+		}
+
+		*((__u32 *)skb_push(skb, 4)) =
+			priv->channel[CTCM_READ]->pdu_seq;
+		priv->channel[CTCM_READ]->pdu_seq++;
+		CTCM_PR_DBGDATA("ctcmpc: %s ToDCM_pdu_seq= %08x\n",
+				__func__, priv->channel[CTCM_READ]->pdu_seq);
+
+		/* receipt of CC03 resets anticipated sequence number on
+		      receiving side */
+		priv->channel[CTCM_READ]->pdu_seq = 0x00;
+		skb_reset_mac_header(skb);
+		skb->dev = dev;
+		skb->protocol = htons(ETH_P_SNAP);
+		skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+		CTCM_D3_DUMP(skb->data, (sizeof(struct qllc) + 4));
+
+		netif_rx(skb);
+		break;
+	default:
+		break;
+
+	}
+
+	return 0;
+}
+/* --- This is the END my friend --- */
+
diff --git a/drivers/s390/net/ctcm_mpc.h b/drivers/s390/net/ctcm_mpc.h
new file mode 100644
index 0000000..441d7b2
--- /dev/null
+++ b/drivers/s390/net/ctcm_mpc.h
@@ -0,0 +1,239 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright IBM Corp. 2007
+ * Authors:	Peter Tiedemann (ptiedem@de.ibm.com)
+ *
+ * 	MPC additions:
+ *		Belinda Thompson (belindat@us.ibm.com)
+ *		Andy Richter (richtera@us.ibm.com)
+ */
+
+#ifndef _CTC_MPC_H_
+#define _CTC_MPC_H_
+
+#include <linux/interrupt.h>
+#include <linux/skbuff.h>
+#include "fsm.h"
+
+/*
+ * MPC external interface
+ * Note that ctc_mpc_xyz are called with a lock on ................
+ */
+
+/*  port_number is the MPC device number: 0, 1, 2, etc.; mpc2 is port_number 2 */
+
+/*  passive open: just wait for the XID2 exchange */
+extern int ctc_mpc_alloc_channel(int port,
+		void (*callback)(int port_num, int max_write_size));
+/* active open: allocate, then send XID2 */
+extern void ctc_mpc_establish_connectivity(int port,
+		void (*callback)(int port_num, int rc, int max_write_size));
+
+extern void ctc_mpc_dealloc_ch(int port);
+extern void ctc_mpc_flow_control(int port, int flowc);
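+
+/*
+ * Illustrative calling sequence (a sketch for this comment only; the
+ * callback functions below are invented and are not part of the
+ * driver):
+ *
+ *	static void conn_done(int port_num, int rc, int max_write_size)
+ *	{
+ *		if (rc == 0)
+ *			pr_info("mpc%d ready, max write %d\n",
+ *				port_num, max_write_size);
+ *	}
+ *
+ *	static void alloc_done(int port_num, int max_write_size)
+ *	{
+ *		ctc_mpc_establish_connectivity(port_num, conn_done);
+ *	}
+ *
+ *	ctc_mpc_alloc_channel(2, alloc_done);	passive open on mpc2
+ *	...
+ *	ctc_mpc_dealloc_ch(2);			when done
+ */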
+
+/*
+ * other MPC Group prototypes and structures
+ */
+
+#define ETH_P_SNA_DIX	0x80D5
+
+/*
+ * Declaration of an XID2
+ */
+#define ALLZEROS 0x0000000000000000
+
+#define XID_FM2		0x20
+#define XID2_0		0x00
+#define XID2_7		0x07
+#define XID2_WRITE_SIDE 0x04
+#define XID2_READ_SIDE	0x05
+
+struct xid2 {
+	__u8	xid2_type_id;
+	__u8	xid2_len;
+	__u32	xid2_adj_id;
+	__u8	xid2_rlen;
+	__u8	xid2_resv1;
+	__u8	xid2_flag1;
+	__u8	xid2_fmtt;
+	__u8	xid2_flag4;
+	__u16	xid2_resv2;
+	__u8	xid2_tgnum;
+	__u32	xid2_sender_id;
+	__u8	xid2_flag2;
+	__u8	xid2_option;
+	char  xid2_resv3[8];
+	__u16	xid2_resv4;
+	__u8	xid2_dlc_type;
+	__u16	xid2_resv5;
+	__u8	xid2_mpc_flag;
+	__u8	xid2_resv6;
+	__u16	xid2_buf_len;
+	char xid2_buffer[255 - (13 * sizeof(__u8) +
+				2 * sizeof(__u32) +
+				4 * sizeof(__u16) +
+				8 * sizeof(char))];
+} __attribute__ ((packed));
+
+#define XID2_LENGTH  (sizeof(struct xid2))
+
+struct th_header {
+	__u8	th_seg;
+	__u8	th_ch_flag;
+#define TH_HAS_PDU	0xf0
+#define TH_IS_XID	0x01
+#define TH_SWEEP_REQ	0xfe
+#define TH_SWEEP_RESP	0xff
+	__u8	th_blk_flag;
+#define TH_DATA_IS_XID	0x80
+#define TH_RETRY	0x40
+#define TH_DISCONTACT	0xc0
+#define TH_SEG_BLK	0x20
+#define TH_LAST_SEG	0x10
+#define TH_PDU_PART	0x08
+	__u8	th_is_xid;	/* is 0x01 if this is XID  */
+	__u32	th_seq_num;
+} __attribute__ ((packed));
+
+struct th_addon {
+	__u32	th_last_seq;
+	__u32	th_resvd;
+} __attribute__ ((packed));
+
+struct th_sweep {
+	struct th_header th;
+	struct th_addon sw;
+} __attribute__ ((packed));
+
+#define TH_HEADER_LENGTH (sizeof(struct th_header))
+#define TH_SWEEP_LENGTH (sizeof(struct th_sweep))
+
+#define PDU_LAST	0x80
+#define PDU_CNTL	0x40
+#define PDU_FIRST	0x20
+
+struct pdu {
+	__u32	pdu_offset;
+	__u8	pdu_flag;
+	__u8	pdu_proto;   /*  0x01 is APPN SNA  */
+	__u16	pdu_seq;
+} __attribute__ ((packed));
+
+#define PDU_HEADER_LENGTH  (sizeof(struct pdu))
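+
+/*
+ * As handled by ctcmpc_unpack_skb() in ctcm_mpc.c, a received block
+ * carries one th_header followed by a chain of PDUs, each a pdu
+ * header plus pdu_offset bytes of data; PDU_LAST flags the final PDU:
+ *
+ *	| th_header | pdu | data ... | pdu | data (PDU_LAST set) |
+ */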
+
+struct qllc {
+	__u8	qllc_address;
+#define QLLC_REQ	0xFF
+#define QLLC_RESP	0x00
+	__u8	qllc_commands;
+#define QLLC_DISCONNECT 0x53
+#define QLLC_UNSEQACK	0x73
+#define QLLC_SETMODE	0x93
+#define QLLC_EXCHID	0xBF
+} __attribute__ ((packed));
+
+
+/*
+ * Definition of one MPC group
+ */
+
+#define MAX_MPCGCHAN		10
+#define MPC_XID_TIMEOUT_VALUE	10000
+#define MPC_CHANNEL_ADD		0
+#define MPC_CHANNEL_REMOVE	1
+#define MPC_CHANNEL_ATTN	2
+#define XSIDE	1
+#define YSIDE	0
+
+struct mpcg_info {
+	struct sk_buff	*skb;
+	struct channel	*ch;
+	struct xid2	*xid;
+	struct th_sweep	*sweep;
+	struct th_header *th;
+};
+
+struct mpc_group {
+	struct tasklet_struct mpc_tasklet;
+	struct tasklet_struct mpc_tasklet2;
+	int	changed_side;
+	int	saved_state;
+	int	channels_terminating;
+	int	out_of_sequence;
+	int	flow_off_called;
+	int	port_num;
+	int	port_persist;
+	int	alloc_called;
+	__u32	xid2_adj_id;
+	__u8	xid2_tgnum;
+	__u32	xid2_sender_id;
+	int	num_channel_paths;
+	int	active_channels[2];
+	__u16	group_max_buflen;
+	int	outstanding_xid2;
+	int	outstanding_xid7;
+	int	outstanding_xid7_p2;
+	int	sweep_req_pend_num;
+	int	sweep_rsp_pend_num;
+	struct sk_buff	*xid_skb;
+	char		*xid_skb_data;
+	struct th_header *xid_th;
+	struct xid2	*xid;
+	char		*xid_id;
+	struct th_header *rcvd_xid_th;
+	struct sk_buff	*rcvd_xid_skb;
+	char		*rcvd_xid_data;
+	__u8		in_sweep;
+	__u8		roll;
+	struct xid2	*saved_xid2;
+	void 		(*allochanfunc)(int, int);
+	int		allocchan_callback_retries;
+	void 		(*estconnfunc)(int, int, int);
+	int		estconn_callback_retries;
+	int		estconn_called;
+	int		xidnogood;
+	int		send_qllc_disc;
+	fsm_timer	timer;
+	fsm_instance	*fsm; /* group xid fsm */
+};
+
+#ifdef DEBUGDATA
+void ctcmpc_dumpit(char *buf, int len);
+#else
+static inline void ctcmpc_dumpit(char *buf, int len)
+{
+}
+#endif
+
+#ifdef DEBUGDATA
+/*
+ * Dump header and first 16 bytes of an sk_buff for debugging purposes.
+ *
+ * skb	 The struct sk_buff to dump.
+ * offset Offset relative to skb-data, where to start the dump.
+ */
+void ctcmpc_dump_skb(struct sk_buff *skb, int offset);
+#else
+static inline void ctcmpc_dump_skb(struct sk_buff *skb, int offset)
+{}
+#endif
+
+static inline void ctcmpc_dump32(char *buf, int len)
+{
+	if (len < 32)
+		ctcmpc_dumpit(buf, len);
+	else
+		ctcmpc_dumpit(buf, 32);
+}
+
+int ctcmpc_open(struct net_device *);
+void ctcm_ccw_check_rc(struct channel *, int, char *);
+void mpc_group_ready(unsigned long adev);
+void mpc_channel_action(struct channel *ch, int direction, int action);
+void mpc_action_send_discontact(unsigned long thischan);
+void mpc_action_discontact(fsm_instance *fi, int event, void *arg);
+void ctcmpc_bh(unsigned long thischan);
+#endif
+/* --- This is the END my friend --- */
diff --git a/drivers/s390/net/ctcm_sysfs.c b/drivers/s390/net/ctcm_sysfs.c
new file mode 100644
index 0000000..ded1930
--- /dev/null
+++ b/drivers/s390/net/ctcm_sysfs.c
@@ -0,0 +1,209 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright IBM Corp. 2007, 2007
+ * Authors:	Peter Tiedemann (ptiedem@de.ibm.com)
+ *
+ */
+
+#undef DEBUG
+#undef DEBUGDATA
+#undef DEBUGCCW
+
+#define KMSG_COMPONENT "ctcm"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <linux/device.h>
+#include <linux/sysfs.h>
+#include <linux/slab.h>
+#include "ctcm_main.h"
+
+/*
+ * sysfs attributes
+ */
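+
+/*
+ * Example usage (illustrative only; the sysfs path and the bus ID
+ * 0.0.f000 are assumptions, not taken from this file):
+ *
+ *	cat /sys/bus/ccwgroup/devices/0.0.f000/buffer
+ *	echo 32768 > /sys/bus/ccwgroup/devices/0.0.f000/buffer
+ */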
+
+static ssize_t ctcm_buffer_show(struct device *dev,
+				struct device_attribute *attr, char *buf)
+{
+	struct ctcm_priv *priv = dev_get_drvdata(dev);
+
+	if (!priv)
+		return -ENODEV;
+	return sprintf(buf, "%d\n", priv->buffer_size);
+}
+
+static ssize_t ctcm_buffer_write(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct net_device *ndev;
+	unsigned int bs1;
+	struct ctcm_priv *priv = dev_get_drvdata(dev);
+	int rc;
+
+	/* check priv before dereferencing it */
+	if (!(priv && priv->channel[CTCM_READ] &&
+	      priv->channel[CTCM_READ]->netdev)) {
+		CTCM_DBF_TEXT(SETUP, CTC_DBF_ERROR, "bfnondev");
+		return -ENODEV;
+	}
+	ndev = priv->channel[CTCM_READ]->netdev;
+
+	rc = kstrtouint(buf, 0, &bs1);
+	if (rc)
+		goto einval;
+	if (bs1 > CTCM_BUFSIZE_LIMIT)
+					goto einval;
+	if (bs1 < (576 + LL_HEADER_LENGTH + 2))
+					goto einval;
+	priv->buffer_size = bs1;	/* just to overwrite the default */
+
+	if ((ndev->flags & IFF_RUNNING) &&
+	    (bs1 < (ndev->mtu + LL_HEADER_LENGTH + 2)))
+					goto einval;
+
+	priv->channel[CTCM_READ]->max_bufsize = bs1;
+	priv->channel[CTCM_WRITE]->max_bufsize = bs1;
+	if (!(ndev->flags & IFF_RUNNING))
+		ndev->mtu = bs1 - LL_HEADER_LENGTH - 2;
+	priv->channel[CTCM_READ]->flags |= CHANNEL_FLAGS_BUFSIZE_CHANGED;
+	priv->channel[CTCM_WRITE]->flags |= CHANNEL_FLAGS_BUFSIZE_CHANGED;
+
+	CTCM_DBF_DEV(SETUP, ndev, buf);
+	return count;
+
+einval:
+	CTCM_DBF_DEV(SETUP, ndev, "buff_err");
+	return -EINVAL;
+}
+
+static void ctcm_print_statistics(struct ctcm_priv *priv)
+{
+	char *sbuf;
+	char *p;
+
+	if (!priv)
+		return;
+	sbuf = kmalloc(2048, GFP_KERNEL);
+	if (sbuf == NULL)
+		return;
+	p = sbuf;
+
+	p += sprintf(p, "  Device FSM state: %s\n",
+		     fsm_getstate_str(priv->fsm));
+	p += sprintf(p, "  RX channel FSM state: %s\n",
+		     fsm_getstate_str(priv->channel[CTCM_READ]->fsm));
+	p += sprintf(p, "  TX channel FSM state: %s\n",
+		     fsm_getstate_str(priv->channel[CTCM_WRITE]->fsm));
+	p += sprintf(p, "  Max. TX buffer used: %ld\n",
+		     priv->channel[WRITE]->prof.maxmulti);
+	p += sprintf(p, "  Max. chained SKBs: %ld\n",
+		     priv->channel[WRITE]->prof.maxcqueue);
+	p += sprintf(p, "  TX single write ops: %ld\n",
+		     priv->channel[WRITE]->prof.doios_single);
+	p += sprintf(p, "  TX multi write ops: %ld\n",
+		     priv->channel[WRITE]->prof.doios_multi);
+	p += sprintf(p, "  Netto bytes written: %ld\n",
+		     priv->channel[WRITE]->prof.txlen);
+	p += sprintf(p, "  Max. TX IO-time: %u\n",
+		     jiffies_to_usecs(priv->channel[WRITE]->prof.tx_time));
+
+	printk(KERN_INFO "Statistics for %s:\n%s",
+				priv->channel[CTCM_WRITE]->netdev->name, sbuf);
+	kfree(sbuf);
+	return;
+}
+
+static ssize_t stats_show(struct device *dev,
+			  struct device_attribute *attr, char *buf)
+{
+	struct ccwgroup_device *gdev = to_ccwgroupdev(dev);
+	struct ctcm_priv *priv = dev_get_drvdata(dev);
+
+	if (!priv || gdev->state != CCWGROUP_ONLINE)
+		return -ENODEV;
+	ctcm_print_statistics(priv);
+	return sprintf(buf, "0\n");
+}
+
+static ssize_t stats_write(struct device *dev, struct device_attribute *attr,
+				const char *buf, size_t count)
+{
+	struct ctcm_priv *priv = dev_get_drvdata(dev);
+	if (!priv)
+		return -ENODEV;
+	/* Reset statistics */
+	memset(&priv->channel[CTCM_WRITE]->prof, 0,
+				sizeof(priv->channel[CTCM_WRITE]->prof));
+	return count;
+}
+
+static ssize_t ctcm_proto_show(struct device *dev,
+				struct device_attribute *attr, char *buf)
+{
+	struct ctcm_priv *priv = dev_get_drvdata(dev);
+	if (!priv)
+		return -ENODEV;
+
+	return sprintf(buf, "%d\n", priv->protocol);
+}
+
+static ssize_t ctcm_proto_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	int value, rc;
+	struct ctcm_priv *priv = dev_get_drvdata(dev);
+
+	if (!priv)
+		return -ENODEV;
+	rc = kstrtoint(buf, 0, &value);
+	if (rc ||
+	    !((value == CTCM_PROTO_S390)  ||
+	      (value == CTCM_PROTO_LINUX) ||
+	      (value == CTCM_PROTO_MPC) ||
+	      (value == CTCM_PROTO_OS390)))
+		return -EINVAL;
+	priv->protocol = value;
+	CTCM_DBF_DEV(SETUP, dev, buf);
+
+	return count;
+}
+
+static const char *ctcm_type[] = {
+	"not a channel",
+	"CTC/A",
+	"FICON channel",
+	"ESCON channel",
+	"unknown channel type",
+	"unsupported channel type",
+};
+
+static ssize_t ctcm_type_show(struct device *dev,
+				struct device_attribute *attr, char *buf)
+{
+	struct ccwgroup_device *cgdev;
+
+	cgdev = to_ccwgroupdev(dev);
+	if (!cgdev)
+		return -ENODEV;
+
+	return sprintf(buf, "%s\n",
+			ctcm_type[cgdev->cdev[0]->id.driver_info]);
+}
+
+static DEVICE_ATTR(buffer, 0644, ctcm_buffer_show, ctcm_buffer_write);
+static DEVICE_ATTR(protocol, 0644, ctcm_proto_show, ctcm_proto_store);
+static DEVICE_ATTR(type, 0444, ctcm_type_show, NULL);
+static DEVICE_ATTR(stats, 0644, stats_show, stats_write);
+
+static struct attribute *ctcm_attr[] = {
+	&dev_attr_protocol.attr,
+	&dev_attr_type.attr,
+	&dev_attr_buffer.attr,
+	&dev_attr_stats.attr,
+	NULL,
+};
+
+static struct attribute_group ctcm_attr_group = {
+	.attrs = ctcm_attr,
+};
+const struct attribute_group *ctcm_attr_groups[] = {
+	&ctcm_attr_group,
+	NULL,
+};
diff --git a/drivers/s390/net/fsm.c b/drivers/s390/net/fsm.c
new file mode 100644
index 0000000..eb07862
--- /dev/null
+++ b/drivers/s390/net/fsm.c
@@ -0,0 +1,210 @@
+// SPDX-License-Identifier: GPL-2.0
+/**
+ * A generic FSM based on the FSM used in isdn4linux
+ */
+
+#include "fsm.h"
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/timer.h>
+
+MODULE_AUTHOR("(C) 2000 IBM Corp. by Fritz Elfert (felfert@millenux.com)");
+MODULE_DESCRIPTION("Finite state machine helper functions");
+MODULE_LICENSE("GPL");
+
+fsm_instance *
+init_fsm(char *name, const char **state_names, const char **event_names, int nr_states,
+		int nr_events, const fsm_node *tmpl, int tmpl_len, gfp_t order)
+{
+	int i;
+	fsm_instance *this;
+	fsm_function_t *m;
+	fsm *f;
+
+	this = kzalloc(sizeof(fsm_instance), order);
+	if (this == NULL) {
+		printk(KERN_WARNING
+			"fsm(%s): init_fsm: Couldn't alloc instance\n", name);
+		return NULL;
+	}
+	strlcpy(this->name, name, sizeof(this->name));
+	init_waitqueue_head(&this->wait_q);
+
+	f = kzalloc(sizeof(fsm), order);
+	if (f == NULL) {
+		printk(KERN_WARNING
+			"fsm(%s): init_fsm: Couldn't alloc fsm\n", name);
+		kfree_fsm(this);
+		return NULL;
+	}
+	f->nr_events = nr_events;
+	f->nr_states = nr_states;
+	f->event_names = event_names;
+	f->state_names = state_names;
+	this->f = f;
+
+	m = kcalloc(nr_states*nr_events, sizeof(fsm_function_t), order);
+	if (m == NULL) {
+		printk(KERN_WARNING
+			"fsm(%s): init_fsm: Couldn't alloc jumptable\n", name);
+		kfree_fsm(this);
+		return NULL;
+	}
+	f->jumpmatrix = m;
+
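+	/*
+	 * The jump table is a flat nr_states x nr_events matrix indexed
+	 * as jumpmatrix[nr_states * event + state]; fsm_event() in fsm.h
+	 * performs the matching lookup.
+	 */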
+	for (i = 0; i < tmpl_len; i++) {
+		if ((tmpl[i].cond_state >= nr_states) ||
+		    (tmpl[i].cond_event >= nr_events)   ) {
+			printk(KERN_ERR
+				"fsm(%s): init_fsm: Bad template l=%d st(%ld/%ld) ev(%ld/%ld)\n",
+				name, i, (long)tmpl[i].cond_state, (long)f->nr_states,
+				(long)tmpl[i].cond_event, (long)f->nr_events);
+			kfree_fsm(this);
+			return NULL;
+		} else
+			m[nr_states * tmpl[i].cond_event + tmpl[i].cond_state] =
+				tmpl[i].function;
+	}
+	return this;
+}
+
+void
+kfree_fsm(fsm_instance *this)
+{
+	if (this) {
+		if (this->f) {
+			kfree(this->f->jumpmatrix);
+			kfree(this->f);
+		}
+		kfree(this);
+	} else
+		printk(KERN_WARNING
+			"fsm: kfree_fsm called with NULL argument\n");
+}
+
+#if FSM_DEBUG_HISTORY
+void
+fsm_print_history(fsm_instance *fi)
+{
+	int idx = 0;
+	int i;
+
+	if (fi->history_size >= FSM_HISTORY_SIZE)
+		idx = fi->history_index;
+
+	printk(KERN_DEBUG "fsm(%s): History:\n", fi->name);
+	for (i = 0; i < fi->history_size; i++) {
+		int e = fi->history[idx].event;
+		int s = fi->history[idx++].state;
+		idx %= FSM_HISTORY_SIZE;
+		if (e == -1)
+			printk(KERN_DEBUG "  S=%s\n",
+			       fi->f->state_names[s]);
+		else
+			printk(KERN_DEBUG "  S=%s E=%s\n",
+			       fi->f->state_names[s],
+			       fi->f->event_names[e]);
+	}
+	fi->history_size = fi->history_index = 0;
+}
+
+void
+fsm_record_history(fsm_instance *fi, int state, int event)
+{
+	fi->history[fi->history_index].state = state;
+	fi->history[fi->history_index++].event = event;
+	fi->history_index %= FSM_HISTORY_SIZE;
+	if (fi->history_size < FSM_HISTORY_SIZE)
+		fi->history_size++;
+}
+#endif
+
+const char *
+fsm_getstate_str(fsm_instance *fi)
+{
+	int st = atomic_read(&fi->state);
+	if (st >= fi->f->nr_states)
+		return "Invalid";
+	return fi->f->state_names[st];
+}
+
+static void
+fsm_expire_timer(struct timer_list *t)
+{
+	fsm_timer *this = from_timer(this, t, tl);
+#if FSM_TIMER_DEBUG
+	printk(KERN_DEBUG "fsm(%s): Timer %p expired\n",
+	       this->fi->name, this);
+#endif
+	fsm_event(this->fi, this->expire_event, this->event_arg);
+}
+
+void
+fsm_settimer(fsm_instance *fi, fsm_timer *this)
+{
+	this->fi = fi;
+#if FSM_TIMER_DEBUG
+	printk(KERN_DEBUG "fsm(%s): Create timer %p\n", fi->name,
+	       this);
+#endif
+	timer_setup(&this->tl, fsm_expire_timer, 0);
+}
+
+void
+fsm_deltimer(fsm_timer *this)
+{
+#if FSM_TIMER_DEBUG
+	printk(KERN_DEBUG "fsm(%s): Delete timer %p\n", this->fi->name,
+		this);
+#endif
+	del_timer(&this->tl);
+}
+
+int
+fsm_addtimer(fsm_timer *this, int millisec, int event, void *arg)
+{
+
+#if FSM_TIMER_DEBUG
+	printk(KERN_DEBUG "fsm(%s): Add timer %p %dms\n",
+	       this->fi->name, this, millisec);
+#endif
+
+	timer_setup(&this->tl, fsm_expire_timer, 0);
+	this->expire_event = event;
+	this->event_arg = arg;
+	this->tl.expires = jiffies + (millisec * HZ) / 1000;
+	add_timer(&this->tl);
+	return 0;
+}
+
+/* FIXME: this function is never used, why? */
+void
+fsm_modtimer(fsm_timer *this, int millisec, int event, void *arg)
+{
+
+#if FSM_TIMER_DEBUG
+	printk(KERN_DEBUG "fsm(%s): Restart timer %p %dms\n",
+		this->fi->name, this, millisec);
+#endif
+
+	del_timer(&this->tl);
+	timer_setup(&this->tl, fsm_expire_timer, 0);
+	this->expire_event = event;
+	this->event_arg = arg;
+	this->tl.expires = jiffies + (millisec * HZ) / 1000;
+	add_timer(&this->tl);
+}
+
+EXPORT_SYMBOL(init_fsm);
+EXPORT_SYMBOL(kfree_fsm);
+EXPORT_SYMBOL(fsm_settimer);
+EXPORT_SYMBOL(fsm_deltimer);
+EXPORT_SYMBOL(fsm_addtimer);
+EXPORT_SYMBOL(fsm_modtimer);
+EXPORT_SYMBOL(fsm_getstate_str);
+
+#if FSM_DEBUG_HISTORY
+EXPORT_SYMBOL(fsm_print_history);
+EXPORT_SYMBOL(fsm_record_history);
+#endif
diff --git a/drivers/s390/net/fsm.h b/drivers/s390/net/fsm.h
new file mode 100644
index 0000000..16dc071
--- /dev/null
+++ b/drivers/s390/net/fsm.h
@@ -0,0 +1,266 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _FSM_H_
+#define _FSM_H_
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/timer.h>
+#include <linux/time.h>
+#include <linux/slab.h>
+#include <linux/sched.h>
+#include <linux/string.h>
+#include <linux/atomic.h>
+
+/**
+ * Define this to get debugging messages.
+ */
+#define FSM_DEBUG         0
+
+/**
+ * Define this to get debugging messages for
+ * timer handling.
+ */
+#define FSM_TIMER_DEBUG   0
+
+/**
+ * Define these to record a history of
+ * events/state changes and print it if an
+ * action function is not found.
+ */
+#define FSM_DEBUG_HISTORY 0
+#define FSM_HISTORY_SIZE  40
+
+struct fsm_instance_t;
+
+/**
+ * Definition of an action function, called by an FSM
+ */
+typedef void (*fsm_function_t)(struct fsm_instance_t *, int, void *);
+
+/**
+ * Internal jump table for an FSM
+ */
+typedef struct {
+	fsm_function_t *jumpmatrix;
+	int nr_events;
+	int nr_states;
+	const char **event_names;
+	const char **state_names;
+} fsm;
+
+#if FSM_DEBUG_HISTORY
+/**
+ * Element of State/Event history used for debugging.
+ */
+typedef struct {
+	int state;
+	int event;
+} fsm_history;
+#endif
+
+/**
+ * Representation of an FSM
+ */
+typedef struct fsm_instance_t {
+	fsm *f;
+	atomic_t state;
+	char name[16];
+	void *userdata;
+	int userint;
+	wait_queue_head_t wait_q;
+#if FSM_DEBUG_HISTORY
+	int         history_index;
+	int         history_size;
+	fsm_history history[FSM_HISTORY_SIZE];
+#endif
+} fsm_instance;
+
+/**
+ * Description of a state-event combination
+ */
+typedef struct {
+	int cond_state;
+	int cond_event;
+	fsm_function_t function;
+} fsm_node;
+
+/**
+ * Description of an FSM timer.
+ */
+typedef struct {
+	fsm_instance *fi;
+	struct timer_list tl;
+	int expire_event;
+	void *event_arg;
+} fsm_timer;
+
+/**
+ * Creates an FSM
+ *
+ * @param name        Name of this instance for logging purposes.
+ * @param state_names An array of names for all states for logging purposes.
+ * @param event_names An array of names for all events for logging purposes.
+ * @param nr_states   Number of states for this instance.
+ * @param nr_events   Number of events for this instance.
+ * @param tmpl        An array of fsm_nodes, describing this FSM.
+ * @param tmpl_len    Length of the describing array.
+ * @param order       Parameter for allocation of the FSM data structs.
+ */
+extern fsm_instance *
+init_fsm(char *name, const char **state_names,
+	 const char **event_names,
+	 int nr_states, int nr_events, const fsm_node *tmpl,
+	 int tmpl_len, gfp_t order);
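+
+/*
+ * Minimal usage sketch (illustrative only; the states, events and
+ * handlers below are invented for this comment and do not exist in
+ * the driver):
+ *
+ *	enum { ST_CLOSED, ST_OPEN, NR_STATES };
+ *	enum { EV_OPEN, EV_CLOSE, NR_EVENTS };
+ *	static const char *st_names[] = { "Closed", "Open" };
+ *	static const char *ev_names[] = { "open", "close" };
+ *	static const fsm_node door_fsm[] = {
+ *		{ ST_CLOSED, EV_OPEN,  do_open  },
+ *		{ ST_OPEN,   EV_CLOSE, do_close },
+ *	};
+ *
+ *	fi = init_fsm("door", st_names, ev_names, NR_STATES, NR_EVENTS,
+ *		      door_fsm, ARRAY_SIZE(door_fsm), GFP_KERNEL);
+ *	fsm_newstate(fi, ST_CLOSED);
+ *	fsm_event(fi, EV_OPEN, NULL);
+ *	...
+ *	kfree_fsm(fi);
+ */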
+
+/**
+ * Releases an FSM
+ *
+ * @param fi Pointer to an FSM, previously created with init_fsm.
+ */
+extern void kfree_fsm(fsm_instance *fi);
+
+#if FSM_DEBUG_HISTORY
+extern void
+fsm_print_history(fsm_instance *fi);
+
+extern void
+fsm_record_history(fsm_instance *fi, int state, int event);
+#endif
+
+/**
+ * Emits an event to an FSM.
+ * If an action function is defined for the current state/event combination,
+ * this function is called.
+ *
+ * @param fi    Pointer to FSM which should receive the event.
+ * @param event The event to be delivered.
+ * @param arg   A generic argument, handed to the action function.
+ *
+ * @return      0  on success,
+ *              1  if current state or event is out of range
+ *              !0 if state and event in range, but no action defined.
+ */
+static inline int
+fsm_event(fsm_instance *fi, int event, void *arg)
+{
+	fsm_function_t r;
+	int state = atomic_read(&fi->state);
+
+	if ((state >= fi->f->nr_states) ||
+	    (event >= fi->f->nr_events)) {
+		printk(KERN_ERR "fsm(%s): Invalid state st(%ld/%ld) ev(%d/%ld)\n",
+			fi->name, (long)state, (long)fi->f->nr_states, event,
+			(long)fi->f->nr_events);
+#if FSM_DEBUG_HISTORY
+		fsm_print_history(fi);
+#endif
+		return 1;
+	}
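+	/* jumpmatrix is an nr_events x nr_states table stored row-major by
+	 * event, hence the index below. */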
+	r = fi->f->jumpmatrix[fi->f->nr_states * event + state];
+	if (r) {
+#if FSM_DEBUG
+		printk(KERN_DEBUG "fsm(%s): state %s event %s\n",
+		       fi->name, fi->f->state_names[state],
+		       fi->f->event_names[event]);
+#endif
+#if FSM_DEBUG_HISTORY
+		fsm_record_history(fi, state, event);
+#endif
+		r(fi, event, arg);
+		return 0;
+	} else {
+#if FSM_DEBUG || FSM_DEBUG_HISTORY
+		printk(KERN_DEBUG "fsm(%s): no function for event %s in state %s\n",
+		       fi->name, fi->f->event_names[event],
+		       fi->f->state_names[state]);
+#endif
+#if FSM_DEBUG_HISTORY
+		fsm_print_history(fi);
+#endif
+		return !0;
+	}
+}
+
+/**
+ * Modifies the state of an FSM.
+ * This does <em>not</em> trigger an event or call an action function.
+ *
+ * @param fi       Pointer to FSM
+ * @param newstate The new state for this FSM.
+ */
+static inline void
+fsm_newstate(fsm_instance *fi, int newstate)
+{
+	atomic_set(&fi->state, newstate);
+#if FSM_DEBUG_HISTORY
+	fsm_record_history(fi, newstate, -1);
+#endif
+#if FSM_DEBUG
+	printk(KERN_DEBUG "fsm(%s): New state %s\n", fi->name,
+		fi->f->state_names[newstate]);
+#endif
+	wake_up(&fi->wait_q);
+}
+
+/**
+ * Retrieves the state of an FSM
+ *
+ * @param fi Pointer to FSM
+ *
+ * @return The current state of the FSM.
+ */
+static inline int
+fsm_getstate(fsm_instance *fi)
+{
+	return atomic_read(&fi->state);
+}
+
+/**
+ * Retrieves the name of the state of an FSM
+ *
+ * @param fi Pointer to FSM
+ *
+ * @return The current state of the FSM in a human readable form.
+ */
+extern const char *fsm_getstate_str(fsm_instance *fi);
+
+/**
+ * Initializes a timer for an FSM.
+ * This prepares an fsm_timer for usage with fsm_addtimer.
+ *
+ * @param fi    Pointer to FSM
+ * @param timer The timer to be initialized.
+ */
+extern void fsm_settimer(fsm_instance *fi, fsm_timer *timer);
+
+/**
+ * Clears a pending timer of an FSM instance.
+ *
+ * @param timer The timer to clear.
+ */
+extern void fsm_deltimer(fsm_timer *timer);
+
+/**
+ * Adds and starts a timer to an FSM instance.
+ *
+ * @param timer    The timer to be added. The field fi of that timer
+ *                 must have been set to point to the instance.
+ * @param millisec Duration in milliseconds after which the timer expires.
+ * @param event    Event to trigger when the timer expires.
+ * @param arg      Generic argument, provided to the expiry function.
+ *
+ * @return         0 on success, -1 if the timer is already active.
+ */
+extern int fsm_addtimer(fsm_timer *timer, int millisec, int event, void *arg);
+
+/**
+ * Modifies a timer of an FSM.
+ *
+ * @param timer    The timer to modify.
+ * @param millisec Duration in milliseconds after which the timer expires.
+ * @param event    Event to trigger when the timer expires.
+ * @param arg      Generic argument, provided to the expiry function.
+ */
+extern void fsm_modtimer(fsm_timer *timer, int millisec, int event, void *arg);
+
+#endif /* _FSM_H_ */
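
A minimal usage sketch of the interface above may help; it is illustrative
only (all demo_* names are invented for the example), but the in-tree users
of this helper, such as ctcm and netiucv, follow the same pattern with
larger tables:

	enum { ST_IDLE, ST_RUNNING, NR_STATES };
	enum { EV_START, NR_EVENTS };

	static const char *demo_state_names[] = { "Idle", "Running" };
	static const char *demo_event_names[] = { "Start" };

	/* Action function: switch to the Running state. */
	static void demo_start(fsm_instance *fi, int event, void *arg)
	{
		fsm_newstate(fi, ST_RUNNING);
	}

	/* One node per state/event pair that has an action. */
	static const fsm_node demo_template[] = {
		{ ST_IDLE, EV_START, demo_start },
	};

	static int demo(void)
	{
		fsm_instance *fi;

		fi = init_fsm("demo", demo_state_names, demo_event_names,
			      NR_STATES, NR_EVENTS, demo_template,
			      ARRAY_SIZE(demo_template), GFP_KERNEL);
		if (!fi)
			return -ENOMEM;
		fsm_event(fi, EV_START, NULL);	/* runs demo_start() */
		kfree_fsm(fi);
		return 0;
	}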
diff --git a/drivers/s390/net/ism.h b/drivers/s390/net/ism.h
new file mode 100644
index 0000000..0aab908
--- /dev/null
+++ b/drivers/s390/net/ism.h
@@ -0,0 +1,221 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef S390_ISM_H
+#define S390_ISM_H
+
+#include <linux/spinlock.h>
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <net/smc.h>
+
+#define UTIL_STR_LEN	16
+
+/*
+ * Do not use the first word of the DMB bits to ensure 8 byte aligned access.
+ */
+#define ISM_DMB_WORD_OFFSET	1
+#define ISM_DMB_BIT_OFFSET	(ISM_DMB_WORD_OFFSET * 32)
+#define ISM_NR_DMBS		1920
+
+#define ISM_REG_SBA	0x1
+#define ISM_REG_IEQ	0x2
+#define ISM_READ_GID	0x3
+#define ISM_ADD_VLAN_ID	0x4
+#define ISM_DEL_VLAN_ID	0x5
+#define ISM_SET_VLAN	0x6
+#define ISM_RESET_VLAN	0x7
+#define ISM_QUERY_INFO	0x8
+#define ISM_QUERY_RGID	0x9
+#define ISM_REG_DMB	0xA
+#define ISM_UNREG_DMB	0xB
+#define ISM_SIGNAL_IEQ	0xE
+#define ISM_UNREG_SBA	0x11
+#define ISM_UNREG_IEQ	0x12
+
+#define ISM_ERROR	0xFFFF
+
+struct ism_req_hdr {
+	u32 cmd;
+	u16 : 16;
+	u16 len;
+};
+
+struct ism_resp_hdr {
+	u32 cmd;
+	u16 ret;
+	u16 len;
+};
+
+union ism_reg_sba {
+	struct {
+		struct ism_req_hdr hdr;
+		u64 sba;
+	} request;
+	struct {
+		struct ism_resp_hdr hdr;
+	} response;
+} __aligned(16);
+
+union ism_reg_ieq {
+	struct {
+		struct ism_req_hdr hdr;
+		u64 ieq;
+		u64 len;
+	} request;
+	struct {
+		struct ism_resp_hdr hdr;
+	} response;
+} __aligned(16);
+
+union ism_read_gid {
+	struct {
+		struct ism_req_hdr hdr;
+	} request;
+	struct {
+		struct ism_resp_hdr hdr;
+		u64 gid;
+	} response;
+} __aligned(16);
+
+union ism_qi {
+	struct {
+		struct ism_req_hdr hdr;
+	} request;
+	struct {
+		struct ism_resp_hdr hdr;
+		u32 version;
+		u32 max_len;
+		u64 ism_state;
+		u64 my_gid;
+		u64 sba;
+		u64 ieq;
+		u32 ieq_len;
+		u32 : 32;
+		u32 dmbs_owned;
+		u32 dmbs_used;
+		u32 vlan_required;
+		u32 vlan_nr_ids;
+		u16 vlan_id[64];
+	} response;
+} __aligned(64);
+
+union ism_query_rgid {
+	struct {
+		struct ism_req_hdr hdr;
+		u64 rgid;
+		u32 vlan_valid;
+		u32 vlan_id;
+	} request;
+	struct {
+		struct ism_resp_hdr hdr;
+	} response;
+} __aligned(16);
+
+union ism_reg_dmb {
+	struct {
+		struct ism_req_hdr hdr;
+		u64 dmb;
+		u32 dmb_len;
+		u32 sba_idx;
+		u32 vlan_valid;
+		u32 vlan_id;
+		u64 rgid;
+	} request;
+	struct {
+		struct ism_resp_hdr hdr;
+		u64 dmb_tok;
+	} response;
+} __aligned(32);
+
+union ism_sig_ieq {
+	struct {
+		struct ism_req_hdr hdr;
+		u64 rgid;
+		u32 trigger_irq;
+		u32 event_code;
+		u64 info;
+	} request;
+	struct {
+		struct ism_resp_hdr hdr;
+	} response;
+} __aligned(32);
+
+union ism_unreg_dmb {
+	struct {
+		struct ism_req_hdr hdr;
+		u64 dmb_tok;
+	} request;
+	struct {
+		struct ism_resp_hdr hdr;
+	} response;
+} __aligned(16);
+
+union ism_cmd_simple {
+	struct {
+		struct ism_req_hdr hdr;
+	} request;
+	struct {
+		struct ism_resp_hdr hdr;
+	} response;
+} __aligned(8);
+
+union ism_set_vlan_id {
+	struct {
+		struct ism_req_hdr hdr;
+		u64 vlan_id;
+	} request;
+	struct {
+		struct ism_resp_hdr hdr;
+	} response;
+} __aligned(16);
+
+struct ism_eq_header {
+	u64 idx;
+	u64 ieq_len;
+	u64 entry_len;
+	u64 : 64;
+};
+
+struct ism_eq {
+	struct ism_eq_header header;
+	struct smcd_event entry[15];
+};
+
+struct ism_sba {
+	u32 s : 1;	/* summary bit */
+	u32 e : 1;	/* event bit */
+	u32 : 30;
+	u32 dmb_bits[ISM_NR_DMBS / 32];
+	u32 reserved[3];
+	u16 dmbe_mask[ISM_NR_DMBS];
+};
+
+struct ism_dev {
+	spinlock_t lock;
+	struct pci_dev *pdev;
+	struct smcd_dev *smcd;
+
+	void __iomem *ctl;
+
+	struct ism_sba *sba;
+	dma_addr_t sba_dma_addr;
+	DECLARE_BITMAP(sba_bitmap, ISM_NR_DMBS);
+
+	struct ism_eq *ieq;
+	dma_addr_t ieq_dma_addr;
+
+	int ieq_idx;
+};
+
+#define ISM_CREATE_REQ(dmb, idx, sf, offset)		\
+	((dmb) | (idx) << 24 | (sf) << 23 | (offset))
+
+static inline int __ism_move(struct ism_dev *ism, u64 dmb_req, void *data,
+			     unsigned int size)
+{
+	struct zpci_dev *zdev = to_zpci(ism->pdev);
+	u64 req = ZPCI_CREATE_REQ(zdev->fh, 0, size);
+
+	return zpci_write_block(req, data, dmb_req);
+}
+
+#endif /* S390_ISM_H */
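
As a worked example of the doorbell encoding, consider ISM_CREATE_REQ()
above with made-up values (only the macro itself comes from this header):
a DMB token of 0x80000000, DMBE index 3, signal flag 1 and byte offset 0x10.

	u64 req = ISM_CREATE_REQ(0x80000000ULL, 3, 1, 0x10);

	/*
	 * req = 0x80000000	dmb token, as returned by ISM_REG_DMB
	 *     | 3 << 24	DMBE index
	 *     | 1 << 23	signal flag; ism_move() in ism_drv.c sets it
	 *			only on the final chunk of a transfer
	 *     | 0x10		byte offset into the DMB
	 *     = 0x83800010
	 */

__ism_move() then passes this value as the target of zpci_write_block(),
alongside a request word built from the function handle via ZPCI_CREATE_REQ().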
diff --git a/drivers/s390/net/ism_drv.c b/drivers/s390/net/ism_drv.c
new file mode 100644
index 0000000..8684bce
--- /dev/null
+++ b/drivers/s390/net/ism_drv.c
@@ -0,0 +1,623 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * ISM driver for s390.
+ *
+ * Copyright IBM Corp. 2018
+ */
+#define KMSG_COMPONENT "ism"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/interrupt.h>
+#include <linux/device.h>
+#include <linux/pci.h>
+#include <linux/err.h>
+#include <net/smc.h>
+
+#include <asm/debug.h>
+
+#include "ism.h"
+
+MODULE_DESCRIPTION("ISM driver for s390");
+MODULE_LICENSE("GPL");
+
+#define PCI_DEVICE_ID_IBM_ISM 0x04ED
+#define DRV_NAME "ism"
+
+static const struct pci_device_id ism_device_table[] = {
+	{ PCI_VDEVICE(IBM, PCI_DEVICE_ID_IBM_ISM), 0 },
+	{ 0, }
+};
+MODULE_DEVICE_TABLE(pci, ism_device_table);
+
+static debug_info_t *ism_debug_info;
+
+static int ism_cmd(struct ism_dev *ism, void *cmd)
+{
+	struct ism_req_hdr *req = cmd;
+	struct ism_resp_hdr *resp = cmd;
+
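+	/* Request and response overlay the same buffer: the command is
+	 * copied out to the control area and the response is read back
+	 * over it, so resp->ret is primed with ISM_ERROR first. */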
+	memcpy_toio(ism->ctl + sizeof(*req), req + 1, req->len - sizeof(*req));
+	memcpy_toio(ism->ctl, req, sizeof(*req));
+
+	WRITE_ONCE(resp->ret, ISM_ERROR);
+
+	memcpy_fromio(resp, ism->ctl, sizeof(*resp));
+	if (resp->ret) {
+		debug_text_event(ism_debug_info, 0, "cmd failure");
+		debug_event(ism_debug_info, 0, resp, sizeof(*resp));
+		goto out;
+	}
+	memcpy_fromio(resp + 1, ism->ctl + sizeof(*resp),
+		      resp->len - sizeof(*resp));
+out:
+	return resp->ret;
+}
+
+static int ism_cmd_simple(struct ism_dev *ism, u32 cmd_code)
+{
+	union ism_cmd_simple cmd;
+
+	memset(&cmd, 0, sizeof(cmd));
+	cmd.request.hdr.cmd = cmd_code;
+	cmd.request.hdr.len = sizeof(cmd.request);
+
+	return ism_cmd(ism, &cmd);
+}
+
+static int query_info(struct ism_dev *ism)
+{
+	union ism_qi cmd;
+
+	memset(&cmd, 0, sizeof(cmd));
+	cmd.request.hdr.cmd = ISM_QUERY_INFO;
+	cmd.request.hdr.len = sizeof(cmd.request);
+
+	if (ism_cmd(ism, &cmd))
+		goto out;
+
+	debug_text_event(ism_debug_info, 3, "query info");
+	debug_event(ism_debug_info, 3, &cmd.response, sizeof(cmd.response));
+out:
+	return 0;
+}
+
+static int register_sba(struct ism_dev *ism)
+{
+	union ism_reg_sba cmd;
+	dma_addr_t dma_handle;
+	struct ism_sba *sba;
+
+	sba = dma_zalloc_coherent(&ism->pdev->dev, PAGE_SIZE,
+				  &dma_handle, GFP_KERNEL);
+	if (!sba)
+		return -ENOMEM;
+
+	memset(&cmd, 0, sizeof(cmd));
+	cmd.request.hdr.cmd = ISM_REG_SBA;
+	cmd.request.hdr.len = sizeof(cmd.request);
+	cmd.request.sba = dma_handle;
+
+	if (ism_cmd(ism, &cmd)) {
+		dma_free_coherent(&ism->pdev->dev, PAGE_SIZE, sba, dma_handle);
+		return -EIO;
+	}
+
+	ism->sba = sba;
+	ism->sba_dma_addr = dma_handle;
+
+	return 0;
+}
+
+static int register_ieq(struct ism_dev *ism)
+{
+	union ism_reg_ieq cmd;
+	dma_addr_t dma_handle;
+	struct ism_eq *ieq;
+
+	ieq = dma_zalloc_coherent(&ism->pdev->dev, PAGE_SIZE,
+				  &dma_handle, GFP_KERNEL);
+	if (!ieq)
+		return -ENOMEM;
+
+	memset(&cmd, 0, sizeof(cmd));
+	cmd.request.hdr.cmd = ISM_REG_IEQ;
+	cmd.request.hdr.len = sizeof(cmd.request);
+	cmd.request.ieq = dma_handle;
+	cmd.request.len = sizeof(*ieq);
+
+	if (ism_cmd(ism, &cmd)) {
+		dma_free_coherent(&ism->pdev->dev, PAGE_SIZE, ieq, dma_handle);
+		return -EIO;
+	}
+
+	ism->ieq = ieq;
+	ism->ieq_idx = -1;
+	ism->ieq_dma_addr = dma_handle;
+
+	return 0;
+}
+
+static int unregister_sba(struct ism_dev *ism)
+{
+	if (!ism->sba)
+		return 0;
+
+	if (ism_cmd_simple(ism, ISM_UNREG_SBA))
+		return -EIO;
+
+	dma_free_coherent(&ism->pdev->dev, PAGE_SIZE,
+			  ism->sba, ism->sba_dma_addr);
+
+	ism->sba = NULL;
+	ism->sba_dma_addr = 0;
+
+	return 0;
+}
+
+static int unregister_ieq(struct ism_dev *ism)
+{
+	if (!ism->ieq)
+		return 0;
+
+	if (ism_cmd_simple(ism, ISM_UNREG_IEQ))
+		return -EIO;
+
+	dma_free_coherent(&ism->pdev->dev, PAGE_SIZE,
+			  ism->ieq, ism->ieq_dma_addr);
+
+	ism->ieq = NULL;
+	ism->ieq_dma_addr = 0;
+
+	return 0;
+}
+
+static int ism_read_local_gid(struct ism_dev *ism)
+{
+	union ism_read_gid cmd;
+	int ret;
+
+	memset(&cmd, 0, sizeof(cmd));
+	cmd.request.hdr.cmd = ISM_READ_GID;
+	cmd.request.hdr.len = sizeof(cmd.request);
+
+	ret = ism_cmd(ism, &cmd);
+	if (ret)
+		goto out;
+
+	ism->smcd->local_gid = cmd.response.gid;
+out:
+	return ret;
+}
+
+static int ism_query_rgid(struct smcd_dev *smcd, u64 rgid, u32 vid_valid,
+			  u32 vid)
+{
+	struct ism_dev *ism = smcd->priv;
+	union ism_query_rgid cmd;
+
+	memset(&cmd, 0, sizeof(cmd));
+	cmd.request.hdr.cmd = ISM_QUERY_RGID;
+	cmd.request.hdr.len = sizeof(cmd.request);
+
+	cmd.request.rgid = rgid;
+	cmd.request.vlan_valid = vid_valid;
+	cmd.request.vlan_id = vid;
+
+	return ism_cmd(ism, &cmd);
+}
+
+static void ism_free_dmb(struct ism_dev *ism, struct smcd_dmb *dmb)
+{
+	clear_bit(dmb->sba_idx, ism->sba_bitmap);
+	dma_free_coherent(&ism->pdev->dev, dmb->dmb_len,
+			  dmb->cpu_addr, dmb->dma_addr);
+}
+
+static int ism_alloc_dmb(struct ism_dev *ism, struct smcd_dmb *dmb)
+{
+	unsigned long bit;
+
+	if (PAGE_ALIGN(dmb->dmb_len) > dma_get_max_seg_size(&ism->pdev->dev))
+		return -EINVAL;
+
+	if (!dmb->sba_idx) {
+		bit = find_next_zero_bit(ism->sba_bitmap, ISM_NR_DMBS,
+					 ISM_DMB_BIT_OFFSET);
+		if (bit == ISM_NR_DMBS)
+			return -ENOMEM;
+
+		dmb->sba_idx = bit;
+	}
+	if (dmb->sba_idx < ISM_DMB_BIT_OFFSET ||
+	    test_and_set_bit(dmb->sba_idx, ism->sba_bitmap))
+		return -EINVAL;
+
+	dmb->cpu_addr = dma_zalloc_coherent(&ism->pdev->dev, dmb->dmb_len,
+					    &dmb->dma_addr, GFP_KERNEL |
+					    __GFP_NOWARN | __GFP_NOMEMALLOC |
+					    __GFP_COMP | __GFP_NORETRY);
+	if (!dmb->cpu_addr)
+		clear_bit(dmb->sba_idx, ism->sba_bitmap);
+
+	return dmb->cpu_addr ? 0 : -ENOMEM;
+}
+
+static int ism_register_dmb(struct smcd_dev *smcd, struct smcd_dmb *dmb)
+{
+	struct ism_dev *ism = smcd->priv;
+	union ism_reg_dmb cmd;
+	int ret;
+
+	ret = ism_alloc_dmb(ism, dmb);
+	if (ret)
+		goto out;
+
+	memset(&cmd, 0, sizeof(cmd));
+	cmd.request.hdr.cmd = ISM_REG_DMB;
+	cmd.request.hdr.len = sizeof(cmd.request);
+
+	cmd.request.dmb = dmb->dma_addr;
+	cmd.request.dmb_len = dmb->dmb_len;
+	cmd.request.sba_idx = dmb->sba_idx;
+	cmd.request.vlan_valid = dmb->vlan_valid;
+	cmd.request.vlan_id = dmb->vlan_id;
+	cmd.request.rgid = dmb->rgid;
+
+	ret = ism_cmd(ism, &cmd);
+	if (ret) {
+		ism_free_dmb(ism, dmb);
+		goto out;
+	}
+	dmb->dmb_tok = cmd.response.dmb_tok;
+out:
+	return ret;
+}
+
+static int ism_unregister_dmb(struct smcd_dev *smcd, struct smcd_dmb *dmb)
+{
+	struct ism_dev *ism = smcd->priv;
+	union ism_unreg_dmb cmd;
+	int ret;
+
+	memset(&cmd, 0, sizeof(cmd));
+	cmd.request.hdr.cmd = ISM_UNREG_DMB;
+	cmd.request.hdr.len = sizeof(cmd.request);
+
+	cmd.request.dmb_tok = dmb->dmb_tok;
+
+	ret = ism_cmd(ism, &cmd);
+	if (ret)
+		goto out;
+
+	ism_free_dmb(ism, dmb);
+out:
+	return ret;
+}
+
+static int ism_add_vlan_id(struct smcd_dev *smcd, u64 vlan_id)
+{
+	struct ism_dev *ism = smcd->priv;
+	union ism_set_vlan_id cmd;
+
+	memset(&cmd, 0, sizeof(cmd));
+	cmd.request.hdr.cmd = ISM_ADD_VLAN_ID;
+	cmd.request.hdr.len = sizeof(cmd.request);
+
+	cmd.request.vlan_id = vlan_id;
+
+	return ism_cmd(ism, &cmd);
+}
+
+static int ism_del_vlan_id(struct smcd_dev *smcd, u64 vlan_id)
+{
+	struct ism_dev *ism = smcd->priv;
+	union ism_set_vlan_id cmd;
+
+	memset(&cmd, 0, sizeof(cmd));
+	cmd.request.hdr.cmd = ISM_DEL_VLAN_ID;
+	cmd.request.hdr.len = sizeof(cmd.request);
+
+	cmd.request.vlan_id = vlan_id;
+
+	return ism_cmd(ism, &cmd);
+}
+
+static int ism_set_vlan_required(struct smcd_dev *smcd)
+{
+	return ism_cmd_simple(smcd->priv, ISM_SET_VLAN);
+}
+
+static int ism_reset_vlan_required(struct smcd_dev *smcd)
+{
+	return ism_cmd_simple(smcd->priv, ISM_RESET_VLAN);
+}
+
+static int ism_signal_ieq(struct smcd_dev *smcd, u64 rgid, u32 trigger_irq,
+			  u32 event_code, u64 info)
+{
+	struct ism_dev *ism = smcd->priv;
+	union ism_sig_ieq cmd;
+
+	memset(&cmd, 0, sizeof(cmd));
+	cmd.request.hdr.cmd = ISM_SIGNAL_IEQ;
+	cmd.request.hdr.len = sizeof(cmd.request);
+
+	cmd.request.rgid = rgid;
+	cmd.request.trigger_irq = trigger_irq;
+	cmd.request.event_code = event_code;
+	cmd.request.info = info;
+
+	return ism_cmd(ism, &cmd);
+}
+
+static unsigned int max_bytes(unsigned int start, unsigned int len,
+			      unsigned int boundary)
+{
+	return min(boundary - (start & (boundary - 1)), len);
+}
+
+static int ism_move(struct smcd_dev *smcd, u64 dmb_tok, unsigned int idx,
+		    bool sf, unsigned int offset, void *data, unsigned int size)
+{
+	struct ism_dev *ism = smcd->priv;
+	unsigned int bytes;
+	u64 dmb_req;
+	int ret;
+
+	while (size) {
+		bytes = max_bytes(offset, size, PAGE_SIZE);
+		dmb_req = ISM_CREATE_REQ(dmb_tok, idx, size == bytes ? sf : 0,
+					 offset);
+
+		ret = __ism_move(ism, dmb_req, data, bytes);
+		if (ret)
+			return ret;
+
+		size -= bytes;
+		data += bytes;
+		offset += bytes;
+	}
+
+	return 0;
+}
+
+static void ism_handle_event(struct ism_dev *ism)
+{
+	struct smcd_event *entry;
+
+	while ((ism->ieq_idx + 1) != READ_ONCE(ism->ieq->header.idx)) {
+		if (++(ism->ieq_idx) == ARRAY_SIZE(ism->ieq->entry))
+			ism->ieq_idx = 0;
+
+		entry = &ism->ieq->entry[ism->ieq_idx];
+		debug_event(ism_debug_info, 2, entry, sizeof(*entry));
+		smcd_handle_event(ism->smcd, entry);
+	}
+}
+
+static irqreturn_t ism_handle_irq(int irq, void *data)
+{
+	struct ism_dev *ism = data;
+	unsigned long bit, end;
+	unsigned long *bv;
+
+	bv = (void *) &ism->sba->dmb_bits[ISM_DMB_WORD_OFFSET];
+	end = sizeof(ism->sba->dmb_bits) * BITS_PER_BYTE - ISM_DMB_BIT_OFFSET;
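+	/* The first word of dmb_bits is unused (see ISM_DMB_WORD_OFFSET), so
+	 * bv starts there and every bit number found below is biased by
+	 * ISM_DMB_BIT_OFFSET when indexing dmbe_mask. */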
+
+	spin_lock(&ism->lock);
+	ism->sba->s = 0;
+	barrier();
+	for (bit = 0;;) {
+		bit = find_next_bit_inv(bv, end, bit);
+		if (bit >= end)
+			break;
+
+		clear_bit_inv(bit, bv);
+		ism->sba->dmbe_mask[bit + ISM_DMB_BIT_OFFSET] = 0;
+		barrier();
+		smcd_handle_irq(ism->smcd, bit + ISM_DMB_BIT_OFFSET);
+	}
+
+	if (ism->sba->e) {
+		ism->sba->e = 0;
+		barrier();
+		ism_handle_event(ism);
+	}
+	spin_unlock(&ism->lock);
+	return IRQ_HANDLED;
+}
+
+static const struct smcd_ops ism_ops = {
+	.query_remote_gid = ism_query_rgid,
+	.register_dmb = ism_register_dmb,
+	.unregister_dmb = ism_unregister_dmb,
+	.add_vlan_id = ism_add_vlan_id,
+	.del_vlan_id = ism_del_vlan_id,
+	.set_vlan_required = ism_set_vlan_required,
+	.reset_vlan_required = ism_reset_vlan_required,
+	.signal_event = ism_signal_ieq,
+	.move_data = ism_move,
+};
+
+static int ism_dev_init(struct ism_dev *ism)
+{
+	struct pci_dev *pdev = ism->pdev;
+	int ret;
+
+	ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSI);
+	if (ret <= 0)
+		goto out;
+
+	ret = request_irq(pci_irq_vector(pdev, 0), ism_handle_irq, 0,
+			  pci_name(pdev), ism);
+	if (ret)
+		goto free_vectors;
+
+	ret = register_sba(ism);
+	if (ret)
+		goto free_irq;
+
+	ret = register_ieq(ism);
+	if (ret)
+		goto unreg_sba;
+
+	ret = ism_read_local_gid(ism);
+	if (ret)
+		goto unreg_ieq;
+
+	ret = smcd_register_dev(ism->smcd);
+	if (ret)
+		goto unreg_ieq;
+
+	query_info(ism);
+	return 0;
+
+unreg_ieq:
+	unregister_ieq(ism);
+unreg_sba:
+	unregister_sba(ism);
+free_irq:
+	free_irq(pci_irq_vector(pdev, 0), ism);
+free_vectors:
+	pci_free_irq_vectors(pdev);
+out:
+	return ret;
+}
+
+static int ism_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+	struct ism_dev *ism;
+	int ret;
+
+	ism = kzalloc(sizeof(*ism), GFP_KERNEL);
+	if (!ism)
+		return -ENOMEM;
+
+	spin_lock_init(&ism->lock);
+	dev_set_drvdata(&pdev->dev, ism);
+	ism->pdev = pdev;
+
+	ret = pci_enable_device_mem(pdev);
+	if (ret)
+		goto err;
+
+	ret = pci_request_mem_regions(pdev, DRV_NAME);
+	if (ret)
+		goto err_disable;
+
+	ism->ctl = pci_iomap(pdev, 2, 0);
+	if (!ism->ctl) {
+		ret = -ENOMEM;
+		goto err_resource;
+	}
+
+	ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
+	if (ret)
+		goto err_unmap;
+
+	pci_set_dma_seg_boundary(pdev, SZ_1M - 1);
+	pci_set_dma_max_seg_size(pdev, SZ_1M);
+	pci_set_master(pdev);
+
+	ism->smcd = smcd_alloc_dev(&pdev->dev, dev_name(&pdev->dev), &ism_ops,
+				   ISM_NR_DMBS);
+	if (!ism->smcd) {
+		ret = -ENOMEM;
+		goto err_unmap;
+	}
+
+	ism->smcd->priv = ism;
+	ret = ism_dev_init(ism);
+	if (ret)
+		goto err_free;
+
+	return 0;
+
+err_free:
+	smcd_free_dev(ism->smcd);
+err_unmap:
+	pci_iounmap(pdev, ism->ctl);
+err_resource:
+	pci_release_mem_regions(pdev);
+err_disable:
+	pci_disable_device(pdev);
+err:
+	kfree(ism);
+	dev_set_drvdata(&pdev->dev, NULL);
+	return ret;
+}
+
+static void ism_dev_exit(struct ism_dev *ism)
+{
+	struct pci_dev *pdev = ism->pdev;
+
+	smcd_unregister_dev(ism->smcd);
+	unregister_ieq(ism);
+	unregister_sba(ism);
+	free_irq(pci_irq_vector(pdev, 0), ism);
+	pci_free_irq_vectors(pdev);
+}
+
+static void ism_remove(struct pci_dev *pdev)
+{
+	struct ism_dev *ism = dev_get_drvdata(&pdev->dev);
+
+	ism_dev_exit(ism);
+
+	smcd_free_dev(ism->smcd);
+	pci_iounmap(pdev, ism->ctl);
+	pci_release_mem_regions(pdev);
+	pci_disable_device(pdev);
+	dev_set_drvdata(&pdev->dev, NULL);
+	kfree(ism);
+}
+
+static int ism_suspend(struct device *dev)
+{
+	struct ism_dev *ism = dev_get_drvdata(dev);
+
+	ism_dev_exit(ism);
+	return 0;
+}
+
+static int ism_resume(struct device *dev)
+{
+	struct ism_dev *ism = dev_get_drvdata(dev);
+
+	return ism_dev_init(ism);
+}
+
+static SIMPLE_DEV_PM_OPS(ism_pm_ops, ism_suspend, ism_resume);
+
+static struct pci_driver ism_driver = {
+	.name	  = DRV_NAME,
+	.id_table = ism_device_table,
+	.probe	  = ism_probe,
+	.remove	  = ism_remove,
+	.driver	  = {
+		.pm = &ism_pm_ops,
+	},
+};
+
+static int __init ism_init(void)
+{
+	int ret;
+
+	ism_debug_info = debug_register("ism", 2, 1, 16);
+	if (!ism_debug_info)
+		return -ENODEV;
+
+	debug_register_view(ism_debug_info, &debug_hex_ascii_view);
+	ret = pci_register_driver(&ism_driver);
+	if (ret)
+		debug_unregister(ism_debug_info);
+
+	return ret;
+}
+
+static void __exit ism_exit(void)
+{
+	pci_unregister_driver(&ism_driver);
+	debug_unregister(ism_debug_info);
+}
+
+module_init(ism_init);
+module_exit(ism_exit);
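
The chunking done by ism_move() above is compact enough to get lost; the
following standalone sketch (plain C, illustrative only) reproduces the
max_bytes() arithmetic to show how a transfer straddling a page boundary
is split:

	#include <stdio.h>

	#define PAGE_SIZE 4096u

	static unsigned int max_bytes(unsigned int start, unsigned int len,
				      unsigned int boundary)
	{
		/* room left before the next boundary, capped at len */
		unsigned int room = boundary - (start & (boundary - 1));

		return room < len ? room : len;
	}

	int main(void)
	{
		unsigned int offset = 4000, size = 1000, bytes;

		while (size) {
			bytes = max_bytes(offset, size, PAGE_SIZE);
			printf("chunk at offset %u, %u bytes\n", offset, bytes);
			size -= bytes;
			offset += bytes;
		}
		return 0;
	}

This prints a 96-byte chunk at offset 4000 followed by a 904-byte chunk at
offset 4096, matching the two __ism_move() calls the driver would issue for
such a transfer.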
diff --git a/drivers/s390/net/lcs.c b/drivers/s390/net/lcs.c
new file mode 100644
index 0000000..2d9fe7e
--- /dev/null
+++ b/drivers/s390/net/lcs.c
@@ -0,0 +1,2470 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ *  Linux for S/390 Lan Channel Station Network Driver
+ *
+ *  Copyright IBM Corp. 1999, 2009
+ *  Author(s): Original Code written by
+ *			DJ Barrow <djbarrow@de.ibm.com,barrow_dj@yahoo.com>
+ *	       Rewritten by
+ *			Frank Pavlic <fpavlic@de.ibm.com> and
+ *			Martin Schwidefsky <schwidefsky@de.ibm.com>
+ */
+
+#define KMSG_COMPONENT		"lcs"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <linux/module.h>
+#include <linux/if.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/fddidevice.h>
+#include <linux/inetdevice.h>
+#include <linux/in.h>
+#include <linux/igmp.h>
+#include <linux/delay.h>
+#include <linux/kthread.h>
+#include <linux/slab.h>
+#include <net/arp.h>
+#include <net/ip.h>
+
+#include <asm/debug.h>
+#include <asm/idals.h>
+#include <asm/timex.h>
+#include <linux/device.h>
+#include <asm/ccwgroup.h>
+
+#include "lcs.h"
+
+#if !defined(CONFIG_ETHERNET) && !defined(CONFIG_FDDI)
+#error Cannot compile lcs.c without some net devices switched on.
+#endif
+
+/**
+ * Initialization string for output.
+ */
+static char version[] __initdata = "LCS driver";
+
+/**
+ * The root device for lcs group devices.
+ */
+static struct device *lcs_root_dev;
+
+/**
+ * Some prototypes.
+ */
+static void lcs_tasklet(unsigned long);
+static void lcs_start_kernel_thread(struct work_struct *);
+static void lcs_get_frames_cb(struct lcs_channel *, struct lcs_buffer *);
+#ifdef CONFIG_IP_MULTICAST
+static int lcs_send_delipm(struct lcs_card *, struct lcs_ipm_list *);
+#endif /* CONFIG_IP_MULTICAST */
+static int lcs_recovery(void *ptr);
+
+/**
+ * Debug Facility Stuff
+ */
+static char debug_buffer[255];
+static debug_info_t *lcs_dbf_setup;
+static debug_info_t *lcs_dbf_trace;
+
+/**
+ *  LCS Debug Facility functions
+ */
+static void
+lcs_unregister_debug_facility(void)
+{
+	debug_unregister(lcs_dbf_setup);
+	debug_unregister(lcs_dbf_trace);
+}
+
+static int
+lcs_register_debug_facility(void)
+{
+	lcs_dbf_setup = debug_register("lcs_setup", 2, 1, 8);
+	lcs_dbf_trace = debug_register("lcs_trace", 4, 1, 8);
+	if (lcs_dbf_setup == NULL || lcs_dbf_trace == NULL) {
+		pr_err("Not enough memory for debug facility.\n");
+		lcs_unregister_debug_facility();
+		return -ENOMEM;
+	}
+	debug_register_view(lcs_dbf_setup, &debug_hex_ascii_view);
+	debug_set_level(lcs_dbf_setup, 2);
+	debug_register_view(lcs_dbf_trace, &debug_hex_ascii_view);
+	debug_set_level(lcs_dbf_trace, 2);
+	return 0;
+}
+
+/**
+ * Allocate io buffers.
+ */
+static int
+lcs_alloc_channel(struct lcs_channel *channel)
+{
+	int cnt;
+
+	LCS_DBF_TEXT(2, setup, "ichalloc");
+	for (cnt = 0; cnt < LCS_NUM_BUFFS; cnt++) {
+		/* allocate memory for iobuffer */
+		channel->iob[cnt].data =
+			kzalloc(LCS_IOBUFFERSIZE, GFP_DMA | GFP_KERNEL);
+		if (channel->iob[cnt].data == NULL)
+			break;
+		channel->iob[cnt].state = LCS_BUF_STATE_EMPTY;
+	}
+	if (cnt < LCS_NUM_BUFFS) {
+		/* Not all io buffers could be allocated. */
+		LCS_DBF_TEXT(2, setup, "echalloc");
+		while (cnt-- > 0)
+			kfree(channel->iob[cnt].data);
+		return -ENOMEM;
+	}
+	return 0;
+}
+
+/**
+ * Free io buffers.
+ */
+static void
+lcs_free_channel(struct lcs_channel *channel)
+{
+	int cnt;
+
+	LCS_DBF_TEXT(2, setup, "ichfree");
+	for (cnt = 0; cnt < LCS_NUM_BUFFS; cnt++) {
+		kfree(channel->iob[cnt].data);
+		channel->iob[cnt].data = NULL;
+	}
+}
+
+/*
+ * Cleanup channel.
+ */
+static void
+lcs_cleanup_channel(struct lcs_channel *channel)
+{
+	LCS_DBF_TEXT(3, setup, "cleanch");
+	/* Kill write channel tasklets. */
+	tasklet_kill(&channel->irq_tasklet);
+	/* Free channel buffers. */
+	lcs_free_channel(channel);
+}
+
+/**
+ * LCS free memory for card and channels.
+ */
+static void
+lcs_free_card(struct lcs_card *card)
+{
+	LCS_DBF_TEXT(2, setup, "remcard");
+	LCS_DBF_HEX(2, setup, &card, sizeof(void*));
+	kfree(card);
+}
+
+/**
+ * LCS alloc memory for card and channels
+ */
+static struct lcs_card *
+lcs_alloc_card(void)
+{
+	struct lcs_card *card;
+	int rc;
+
+	LCS_DBF_TEXT(2, setup, "alloclcs");
+
+	card = kzalloc(sizeof(struct lcs_card), GFP_KERNEL | GFP_DMA);
+	if (card == NULL)
+		return NULL;
+	card->lan_type = LCS_FRAME_TYPE_AUTO;
+	card->pkt_seq = 0;
+	card->lancmd_timeout = LCS_LANCMD_TIMEOUT_DEFAULT;
+	/* Allocate io buffers for the read channel. */
+	rc = lcs_alloc_channel(&card->read);
+	if (rc) {
+		LCS_DBF_TEXT(2, setup, "iccwerr");
+		lcs_free_card(card);
+		return NULL;
+	}
+	/* Allocate io buffers for the write channel. */
+	rc = lcs_alloc_channel(&card->write);
+	if (rc) {
+		LCS_DBF_TEXT(2, setup, "iccwerr");
+		lcs_cleanup_channel(&card->read);
+		lcs_free_card(card);
+		return NULL;
+	}
+
+#ifdef CONFIG_IP_MULTICAST
+	INIT_LIST_HEAD(&card->ipm_list);
+#endif
+	LCS_DBF_HEX(2, setup, &card, sizeof(void*));
+	return card;
+}
+
+/*
+ * Setup read channel.
+ */
+static void
+lcs_setup_read_ccws(struct lcs_card *card)
+{
+	int cnt;
+
+	LCS_DBF_TEXT(2, setup, "ireadccw");
+	/* Setup read ccws. */
+	memset(card->read.ccws, 0, sizeof(struct ccw1) * (LCS_NUM_BUFFS + 1));
+	for (cnt = 0; cnt < LCS_NUM_BUFFS; cnt++) {
+		card->read.ccws[cnt].cmd_code = LCS_CCW_READ;
+		card->read.ccws[cnt].count = LCS_IOBUFFERSIZE;
+		card->read.ccws[cnt].flags =
+			CCW_FLAG_CC | CCW_FLAG_SLI | CCW_FLAG_PCI;
+		/*
+		 * Note: we have allocated the buffer with GFP_DMA, so
+		 * we do not need to do set_normalized_cda.
+		 */
+		card->read.ccws[cnt].cda =
+			(__u32) __pa(card->read.iob[cnt].data);
+		((struct lcs_header *)
+		 card->read.iob[cnt].data)->offset = LCS_ILLEGAL_OFFSET;
+		card->read.iob[cnt].callback = lcs_get_frames_cb;
+		card->read.iob[cnt].state = LCS_BUF_STATE_READY;
+		card->read.iob[cnt].count = LCS_IOBUFFERSIZE;
+	}
+	card->read.ccws[0].flags &= ~CCW_FLAG_PCI;
+	card->read.ccws[LCS_NUM_BUFFS - 1].flags &= ~CCW_FLAG_PCI;
+	card->read.ccws[LCS_NUM_BUFFS - 1].flags |= CCW_FLAG_SUSPEND;
+	/* Last ccw is a tic (transfer in channel). */
+	card->read.ccws[LCS_NUM_BUFFS].cmd_code = LCS_CCW_TRANSFER;
+	card->read.ccws[LCS_NUM_BUFFS].cda =
+		(__u32) __pa(card->read.ccws);
+	/* Set initial state of the read channel. */
+	card->read.state = LCS_CH_STATE_INIT;
+
+	card->read.io_idx = 0;
+	card->read.buf_idx = 0;
+}
+
+static void
+lcs_setup_read(struct lcs_card *card)
+{
+	LCS_DBF_TEXT(3, setup, "initread");
+
+	lcs_setup_read_ccws(card);
+	/* Initialize read channel tasklet. */
+	card->read.irq_tasklet.data = (unsigned long) &card->read;
+	card->read.irq_tasklet.func = lcs_tasklet;
+	/* Initialize waitqueue. */
+	init_waitqueue_head(&card->read.wait_q);
+}
+
+/*
+ * Setup write channel.
+ */
+static void
+lcs_setup_write_ccws(struct lcs_card *card)
+{
+	int cnt;
+
+	LCS_DBF_TEXT(3, setup, "iwritccw");
+	/* Setup write ccws. */
+	memset(card->write.ccws, 0, sizeof(struct ccw1) * (LCS_NUM_BUFFS + 1));
+	for (cnt = 0; cnt < LCS_NUM_BUFFS; cnt++) {
+		card->write.ccws[cnt].cmd_code = LCS_CCW_WRITE;
+		card->write.ccws[cnt].count = 0;
+		card->write.ccws[cnt].flags =
+			CCW_FLAG_SUSPEND | CCW_FLAG_CC | CCW_FLAG_SLI;
+		/*
+		 * Note: we have allocated the buffer with GFP_DMA, so
+		 * we do not need to do set_normalized_cda.
+		 */
+		card->write.ccws[cnt].cda =
+			(__u32) __pa(card->write.iob[cnt].data);
+	}
+	/* Last ccw is a tic (transfer in channel). */
+	card->write.ccws[LCS_NUM_BUFFS].cmd_code = LCS_CCW_TRANSFER;
+	card->write.ccws[LCS_NUM_BUFFS].cda =
+		(__u32) __pa(card->write.ccws);
+	/* Set initial state of the write channel. */
+	card->write.state = LCS_CH_STATE_INIT;
+
+	card->write.io_idx = 0;
+	card->write.buf_idx = 0;
+}
+
+static void
+lcs_setup_write(struct lcs_card *card)
+{
+	LCS_DBF_TEXT(3, setup, "initwrit");
+
+	lcs_setup_write_ccws(card);
+	/* Initialize write channel tasklet. */
+	card->write.irq_tasklet.data = (unsigned long) &card->write;
+	card->write.irq_tasklet.func = lcs_tasklet;
+	/* Initialize waitqueue. */
+	init_waitqueue_head(&card->write.wait_q);
+}
+
+static void
+lcs_set_allowed_threads(struct lcs_card *card, unsigned long threads)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&card->mask_lock, flags);
+	card->thread_allowed_mask = threads;
+	spin_unlock_irqrestore(&card->mask_lock, flags);
+	wake_up(&card->wait_q);
+}
+
+static int lcs_threads_running(struct lcs_card *card, unsigned long threads)
+{
+	unsigned long flags;
+	int rc = 0;
+
+	spin_lock_irqsave(&card->mask_lock, flags);
+	rc = (card->thread_running_mask & threads);
+	spin_unlock_irqrestore(&card->mask_lock, flags);
+	return rc;
+}
+
+static int
+lcs_wait_for_threads(struct lcs_card *card, unsigned long threads)
+{
+	return wait_event_interruptible(card->wait_q,
+			lcs_threads_running(card, threads) == 0);
+}
+
+static int lcs_set_thread_start_bit(struct lcs_card *card, unsigned long thread)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&card->mask_lock, flags);
+	if (!(card->thread_allowed_mask & thread) ||
+	    (card->thread_start_mask & thread)) {
+		spin_unlock_irqrestore(&card->mask_lock, flags);
+		return -EPERM;
+	}
+	card->thread_start_mask |= thread;
+	spin_unlock_irqrestore(&card->mask_lock, flags);
+	return 0;
+}
+
+static void
+lcs_clear_thread_running_bit(struct lcs_card *card, unsigned long thread)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&card->mask_lock, flags);
+	card->thread_running_mask &= ~thread;
+	spin_unlock_irqrestore(&card->mask_lock, flags);
+	wake_up(&card->wait_q);
+}
+
+static int __lcs_do_run_thread(struct lcs_card *card, unsigned long thread)
+{
+	unsigned long flags;
+	int rc = 0;
+
+	spin_lock_irqsave(&card->mask_lock, flags);
+	if (card->thread_start_mask & thread) {
+		if ((card->thread_allowed_mask & thread) &&
+		    !(card->thread_running_mask & thread)) {
+			rc = 1;
+			card->thread_start_mask &= ~thread;
+			card->thread_running_mask |= thread;
+		} else
+			rc = -EPERM;
+	}
+	spin_unlock_irqrestore(&card->mask_lock, flags);
+	return rc;
+}
+
+static int
+lcs_do_run_thread(struct lcs_card *card, unsigned long thread)
+{
+	int rc = 0;
+
+	wait_event(card->wait_q,
+		   (rc = __lcs_do_run_thread(card, thread)) >= 0);
+	return rc;
+}
+
+static int
+lcs_do_start_thread(struct lcs_card *card, unsigned long thread)
+{
+	unsigned long flags;
+	int rc = 0;
+
+	spin_lock_irqsave(&card->mask_lock, flags);
+	LCS_DBF_TEXT_(4, trace, "  %02x%02x%02x",
+		      (u8) card->thread_start_mask,
+		      (u8) card->thread_allowed_mask,
+		      (u8) card->thread_running_mask);
+	rc = (card->thread_start_mask & thread);
+	spin_unlock_irqrestore(&card->mask_lock, flags);
+	return rc;
+}
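+
+/*
+ * Thread mask protocol: a requester sets the start bit with
+ * lcs_set_thread_start_bit() and schedules kernel_thread_starter; the
+ * spawned thread claims the work via lcs_do_run_thread(), which moves the
+ * bit from thread_start_mask to thread_running_mask, and releases it with
+ * lcs_clear_thread_running_bit() when done (see
+ * lcs_register_mc_addresses() below).
+ */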
+
+/**
+ * Initialize channels, card and state machines.
+ */
+static void
+lcs_setup_card(struct lcs_card *card)
+{
+	LCS_DBF_TEXT(2, setup, "initcard");
+	LCS_DBF_HEX(2, setup, &card, sizeof(void*));
+
+	lcs_setup_read(card);
+	lcs_setup_write(card);
+	/* Set the card's initial state. */
+	card->state = DEV_STATE_DOWN;
+	card->tx_buffer = NULL;
+	card->tx_emitted = 0;
+
+	init_waitqueue_head(&card->wait_q);
+	spin_lock_init(&card->lock);
+	spin_lock_init(&card->ipm_lock);
+	spin_lock_init(&card->mask_lock);
+#ifdef CONFIG_IP_MULTICAST
+	INIT_LIST_HEAD(&card->ipm_list);
+#endif
+	INIT_LIST_HEAD(&card->lancmd_waiters);
+}
+
+static void lcs_clear_multicast_list(struct lcs_card *card)
+{
+#ifdef	CONFIG_IP_MULTICAST
+	struct lcs_ipm_list *ipm;
+	unsigned long flags;
+
+	/* Free multicast list. */
+	LCS_DBF_TEXT(3, setup, "clmclist");
+	spin_lock_irqsave(&card->ipm_lock, flags);
+	while (!list_empty(&card->ipm_list)) {
+		ipm = list_entry(card->ipm_list.next,
+				 struct lcs_ipm_list, list);
+		list_del(&ipm->list);
+		if (ipm->ipm_state != LCS_IPM_STATE_SET_REQUIRED) {
+			spin_unlock_irqrestore(&card->ipm_lock, flags);
+			lcs_send_delipm(card, ipm);
+			spin_lock_irqsave(&card->ipm_lock, flags);
+		}
+		kfree(ipm);
+	}
+	spin_unlock_irqrestore(&card->ipm_lock, flags);
+#endif
+}
+/**
+ * Clean up channels, card and state machines.
+ */
+static void
+lcs_cleanup_card(struct lcs_card *card)
+{
+	LCS_DBF_TEXT(3, setup, "cleancrd");
+	LCS_DBF_HEX(2, setup, &card, sizeof(void*));
+
+	if (card->dev != NULL)
+		free_netdev(card->dev);
+	/* Cleanup channels. */
+	lcs_cleanup_channel(&card->write);
+	lcs_cleanup_channel(&card->read);
+}
+
+/**
+ * Start channel.
+ */
+static int
+lcs_start_channel(struct lcs_channel *channel)
+{
+	unsigned long flags;
+	int rc;
+
+	LCS_DBF_TEXT_(4, trace, "ssch%s", dev_name(&channel->ccwdev->dev));
+	spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
+	rc = ccw_device_start(channel->ccwdev,
+			      channel->ccws + channel->io_idx, 0, 0,
+			      DOIO_DENY_PREFETCH | DOIO_ALLOW_SUSPEND);
+	if (rc == 0)
+		channel->state = LCS_CH_STATE_RUNNING;
+	spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);
+	if (rc) {
+		LCS_DBF_TEXT_(4, trace, "essh%s",
+			      dev_name(&channel->ccwdev->dev));
+		dev_err(&channel->ccwdev->dev,
+			"Starting an LCS device resulted in an error,"
+			" rc=%d!\n", rc);
+	}
+	return rc;
+}
+
+static int
+lcs_clear_channel(struct lcs_channel *channel)
+{
+	unsigned long flags;
+	int rc;
+
+	LCS_DBF_TEXT(4, trace, "clearch");
+	LCS_DBF_TEXT_(4, trace, "%s", dev_name(&channel->ccwdev->dev));
+	spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
+	rc = ccw_device_clear(channel->ccwdev, (addr_t) channel);
+	spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);
+	if (rc) {
+		LCS_DBF_TEXT_(4, trace, "ecsc%s",
+			      dev_name(&channel->ccwdev->dev));
+		return rc;
+	}
+	wait_event(channel->wait_q, (channel->state == LCS_CH_STATE_CLEARED));
+	channel->state = LCS_CH_STATE_STOPPED;
+	return rc;
+}
+
+/**
+ * Stop channel.
+ */
+static int
+lcs_stop_channel(struct lcs_channel *channel)
+{
+	unsigned long flags;
+	int rc;
+
+	if (channel->state == LCS_CH_STATE_STOPPED)
+		return 0;
+	LCS_DBF_TEXT(4, trace, "haltsch");
+	LCS_DBF_TEXT_(4, trace, "%s", dev_name(&channel->ccwdev->dev));
+	channel->state = LCS_CH_STATE_INIT;
+	spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
+	rc = ccw_device_halt(channel->ccwdev, (addr_t) channel);
+	spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);
+	if (rc) {
+		LCS_DBF_TEXT_(4, trace, "ehsc%s",
+			      dev_name(&channel->ccwdev->dev));
+		return rc;
+	}
+	/* Asynchronous halt initiated. Wait for its completion. */
+	wait_event(channel->wait_q, (channel->state == LCS_CH_STATE_HALTED));
+	lcs_clear_channel(channel);
+	return 0;
+}
+
+/**
+ * start read and write channel
+ */
+static int
+lcs_start_channels(struct lcs_card *card)
+{
+	int rc;
+
+	LCS_DBF_TEXT(2, trace, "chstart");
+	/* start read channel */
+	rc = lcs_start_channel(&card->read);
+	if (rc)
+		return rc;
+	/* start write channel */
+	rc = lcs_start_channel(&card->write);
+	if (rc)
+		lcs_stop_channel(&card->read);
+	return rc;
+}
+
+/**
+ * stop read and write channel
+ */
+static int
+lcs_stop_channels(struct lcs_card *card)
+{
+	LCS_DBF_TEXT(2, trace, "chhalt");
+	lcs_stop_channel(&card->read);
+	lcs_stop_channel(&card->write);
+	return 0;
+}
+
+/**
+ * Get empty buffer.
+ */
+static struct lcs_buffer *
+__lcs_get_buffer(struct lcs_channel *channel)
+{
+	int index;
+
+	LCS_DBF_TEXT(5, trace, "_getbuff");
+	index = channel->io_idx;
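+	/* Scan the ring at most once; LCS_NUM_BUFFS is a power of two, so
+	 * the "& (LCS_NUM_BUFFS - 1)" implements the index wrap-around. */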
+	do {
+		if (channel->iob[index].state == LCS_BUF_STATE_EMPTY) {
+			channel->iob[index].state = LCS_BUF_STATE_LOCKED;
+			return channel->iob + index;
+		}
+		index = (index + 1) & (LCS_NUM_BUFFS - 1);
+	} while (index != channel->io_idx);
+	return NULL;
+}
+
+static struct lcs_buffer *
+lcs_get_buffer(struct lcs_channel *channel)
+{
+	struct lcs_buffer *buffer;
+	unsigned long flags;
+
+	LCS_DBF_TEXT(5, trace, "getbuff");
+	spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
+	buffer = __lcs_get_buffer(channel);
+	spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);
+	return buffer;
+}
+
+/**
+ * Resume channel program if the channel is suspended.
+ */
+static int
+__lcs_resume_channel(struct lcs_channel *channel)
+{
+	int rc;
+
+	if (channel->state != LCS_CH_STATE_SUSPENDED)
+		return 0;
+	if (channel->ccws[channel->io_idx].flags & CCW_FLAG_SUSPEND)
+		return 0;
+	LCS_DBF_TEXT_(5, trace, "rsch%s", dev_name(&channel->ccwdev->dev));
+	rc = ccw_device_resume(channel->ccwdev);
+	if (rc) {
+		LCS_DBF_TEXT_(4, trace, "ersc%s",
+			      dev_name(&channel->ccwdev->dev));
+		dev_err(&channel->ccwdev->dev,
+			"Sending data from the LCS device to the LAN failed"
+			" with rc=%d\n", rc);
+	} else
+		channel->state = LCS_CH_STATE_RUNNING;
+	return rc;
+}
+
+/**
+ * Make a buffer ready for processing.
+ */
+static void __lcs_ready_buffer_bits(struct lcs_channel *channel, int index)
+{
+	int prev, next;
+
+	LCS_DBF_TEXT(5, trace, "rdybits");
+	prev = (index - 1) & (LCS_NUM_BUFFS - 1);
+	next = (index + 1) & (LCS_NUM_BUFFS - 1);
+	/* Check if we may clear the suspend bit of this buffer. */
+	if (channel->ccws[next].flags & CCW_FLAG_SUSPEND) {
+		/* Check if we have to set the PCI bit. */
+		if (!(channel->ccws[prev].flags & CCW_FLAG_SUSPEND))
+			/* Suspend bit of the previous buffer is not set. */
+			channel->ccws[index].flags |= CCW_FLAG_PCI;
+		/* Suspend bit of the next buffer is set. */
+		channel->ccws[index].flags &= ~CCW_FLAG_SUSPEND;
+	}
+}
+
+static int
+lcs_ready_buffer(struct lcs_channel *channel, struct lcs_buffer *buffer)
+{
+	unsigned long flags;
+	int index, rc;
+
+	LCS_DBF_TEXT(5, trace, "rdybuff");
+	BUG_ON(buffer->state != LCS_BUF_STATE_LOCKED &&
+	       buffer->state != LCS_BUF_STATE_PROCESSED);
+	spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
+	buffer->state = LCS_BUF_STATE_READY;
+	index = buffer - channel->iob;
+	/* Set length. */
+	channel->ccws[index].count = buffer->count;
+	/* Check relevant PCI/suspend bits. */
+	__lcs_ready_buffer_bits(channel, index);
+	rc = __lcs_resume_channel(channel);
+	spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);
+	return rc;
+}
+
+/**
+ * Mark the buffer as processed. Take care of the suspend bit
+ * of the previous buffer. This function is called from
+ * interrupt context, so the lock must not be taken.
+ */
+static int
+__lcs_processed_buffer(struct lcs_channel *channel, struct lcs_buffer *buffer)
+{
+	int index, prev, next;
+
+	LCS_DBF_TEXT(5, trace, "prcsbuff");
+	BUG_ON(buffer->state != LCS_BUF_STATE_READY);
+	buffer->state = LCS_BUF_STATE_PROCESSED;
+	index = buffer - channel->iob;
+	prev = (index - 1) & (LCS_NUM_BUFFS - 1);
+	next = (index + 1) & (LCS_NUM_BUFFS - 1);
+	/* Set the suspend bit and clear the PCI bit of this buffer. */
+	channel->ccws[index].flags |= CCW_FLAG_SUSPEND;
+	channel->ccws[index].flags &= ~CCW_FLAG_PCI;
+	/* Check the suspend bit of the previous buffer. */
+	if (channel->iob[prev].state == LCS_BUF_STATE_READY) {
+		/*
+		 * Previous buffer is in state ready. It might have
+		 * happened in lcs_ready_buffer that the suspend bit
+		 * has not been cleared to avoid an endless loop.
+		 * Do it now.
+		 */
+		__lcs_ready_buffer_bits(channel, prev);
+	}
+	/* Clear PCI bit of next buffer. */
+	channel->ccws[next].flags &= ~CCW_FLAG_PCI;
+	return __lcs_resume_channel(channel);
+}
+
+/**
+ * Put a processed buffer back to state empty.
+ */
+static void
+lcs_release_buffer(struct lcs_channel *channel, struct lcs_buffer *buffer)
+{
+	unsigned long flags;
+
+	LCS_DBF_TEXT(5, trace, "relbuff");
+	BUG_ON(buffer->state != LCS_BUF_STATE_LOCKED &&
+	       buffer->state != LCS_BUF_STATE_PROCESSED);
+	spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
+	buffer->state = LCS_BUF_STATE_EMPTY;
+	spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);
+}
+
+/**
+ * Get buffer for a lan command.
+ */
+static struct lcs_buffer *
+lcs_get_lancmd(struct lcs_card *card, int count)
+{
+	struct lcs_buffer *buffer;
+	struct lcs_cmd *cmd;
+
+	LCS_DBF_TEXT(4, trace, "getlncmd");
+	/* Get buffer and wait if none is available. */
+	wait_event(card->write.wait_q,
+		   ((buffer = lcs_get_buffer(&card->write)) != NULL));
+	count += sizeof(struct lcs_header);
+	*(__u16 *)(buffer->data + count) = 0;
+	buffer->count = count + sizeof(__u16);
+	buffer->callback = lcs_release_buffer;
+	cmd = (struct lcs_cmd *) buffer->data;
+	cmd->offset = count;
+	cmd->type = LCS_FRAME_TYPE_CONTROL;
+	cmd->slot = 0;
+	return buffer;
+}
+
+static void
+lcs_get_reply(struct lcs_reply *reply)
+{
+	refcount_inc(&reply->refcnt);
+}
+
+static void
+lcs_put_reply(struct lcs_reply *reply)
+{
+	if (refcount_dec_and_test(&reply->refcnt))
+		kfree(reply);
+}
+
+static struct lcs_reply *
+lcs_alloc_reply(struct lcs_cmd *cmd)
+{
+	struct lcs_reply *reply;
+
+	LCS_DBF_TEXT(4, trace, "getreply");
+
+	reply = kzalloc(sizeof(struct lcs_reply), GFP_ATOMIC);
+	if (!reply)
+		return NULL;
+	refcount_set(&reply->refcnt, 1);
+	reply->sequence_no = cmd->sequence_no;
+	reply->received = 0;
+	reply->rc = 0;
+	init_waitqueue_head(&reply->wait_q);
+
+	return reply;
+}
+
+/**
+ * Notifier function for lancmd replies. Called from read irq.
+ */
+static void
+lcs_notify_lancmd_waiters(struct lcs_card *card, struct lcs_cmd *cmd)
+{
+	struct list_head *l, *n;
+	struct lcs_reply *reply;
+
+	LCS_DBF_TEXT(4, trace, "notiwait");
+	spin_lock(&card->lock);
+	list_for_each_safe(l, n, &card->lancmd_waiters) {
+		reply = list_entry(l, struct lcs_reply, list);
+		if (reply->sequence_no == cmd->sequence_no) {
+			lcs_get_reply(reply);
+			list_del_init(&reply->list);
+			if (reply->callback != NULL)
+				reply->callback(card, cmd);
+			reply->received = 1;
+			reply->rc = cmd->return_code;
+			wake_up(&reply->wait_q);
+			lcs_put_reply(reply);
+			break;
+		}
+	}
+	spin_unlock(&card->lock);
+}
+
+/**
+ * Emit buffer of a lan command.
+ */
+static void
+lcs_lancmd_timeout(struct timer_list *t)
+{
+	struct lcs_reply *reply = from_timer(reply, t, timer);
+	struct lcs_reply *list_reply, *r;
+	unsigned long flags;
+
+	LCS_DBF_TEXT(4, trace, "timeout");
+	spin_lock_irqsave(&reply->card->lock, flags);
+	list_for_each_entry_safe(list_reply, r,
+				 &reply->card->lancmd_waiters, list) {
+		if (reply == list_reply) {
+			lcs_get_reply(reply);
+			list_del_init(&reply->list);
+			spin_unlock_irqrestore(&reply->card->lock, flags);
+			reply->received = 1;
+			reply->rc = -ETIME;
+			wake_up(&reply->wait_q);
+			lcs_put_reply(reply);
+			return;
+		}
+	}
+	spin_unlock_irqrestore(&reply->card->lock, flags);
+}
+
+static int
+lcs_send_lancmd(struct lcs_card *card, struct lcs_buffer *buffer,
+		void (*reply_callback)(struct lcs_card *, struct lcs_cmd *))
+{
+	struct lcs_reply *reply;
+	struct lcs_cmd *cmd;
+	unsigned long flags;
+	int rc;
+
+	LCS_DBF_TEXT(4, trace, "sendcmd");
+	cmd = (struct lcs_cmd *) buffer->data;
+	cmd->return_code = 0;
+	cmd->sequence_no = card->sequence_no++;
+	reply = lcs_alloc_reply(cmd);
+	if (!reply)
+		return -ENOMEM;
+	reply->callback = reply_callback;
+	reply->card = card;
+	spin_lock_irqsave(&card->lock, flags);
+	list_add_tail(&reply->list, &card->lancmd_waiters);
+	spin_unlock_irqrestore(&card->lock, flags);
+
+	buffer->callback = lcs_release_buffer;
+	rc = lcs_ready_buffer(&card->write, buffer);
+	if (rc)
+		return rc;
+	timer_setup(&reply->timer, lcs_lancmd_timeout, 0);
+	mod_timer(&reply->timer, jiffies + HZ * card->lancmd_timeout);
+	wait_event(reply->wait_q, reply->received);
+	del_timer_sync(&reply->timer);
+	LCS_DBF_TEXT_(4, trace, "rc:%d", reply->rc);
+	rc = reply->rc;
+	lcs_put_reply(reply);
+	return rc ? -EIO : 0;
+}
+
+/**
+ * LCS startup command
+ */
+static int
+lcs_send_startup(struct lcs_card *card, __u8 initiator)
+{
+	struct lcs_buffer *buffer;
+	struct lcs_cmd *cmd;
+
+	LCS_DBF_TEXT(2, trace, "startup");
+	buffer = lcs_get_lancmd(card, LCS_STD_CMD_SIZE);
+	cmd = (struct lcs_cmd *) buffer->data;
+	cmd->cmd_code = LCS_CMD_STARTUP;
+	cmd->initiator = initiator;
+	cmd->cmd.lcs_startup.buff_size = LCS_IOBUFFERSIZE;
+	return lcs_send_lancmd(card, buffer, NULL);
+}
+
+/**
+ * LCS shutdown command
+ */
+static int
+lcs_send_shutdown(struct lcs_card *card)
+{
+	struct lcs_buffer *buffer;
+	struct lcs_cmd *cmd;
+
+	LCS_DBF_TEXT(2, trace, "shutdown");
+	buffer = lcs_get_lancmd(card, LCS_STD_CMD_SIZE);
+	cmd = (struct lcs_cmd *) buffer->data;
+	cmd->cmd_code = LCS_CMD_SHUTDOWN;
+	cmd->initiator = LCS_INITIATOR_TCPIP;
+	return lcs_send_lancmd(card, buffer, NULL);
+}
+
+/**
+ * LCS lanstat command
+ */
+static void
+__lcs_lanstat_cb(struct lcs_card *card, struct lcs_cmd *cmd)
+{
+	LCS_DBF_TEXT(2, trace, "statcb");
+	memcpy(card->mac, cmd->cmd.lcs_lanstat_cmd.mac_addr, LCS_MAC_LENGTH);
+}
+
+static int
+lcs_send_lanstat(struct lcs_card *card)
+{
+	struct lcs_buffer *buffer;
+	struct lcs_cmd *cmd;
+
+	LCS_DBF_TEXT(2, trace, "cmdstat");
+	buffer = lcs_get_lancmd(card, LCS_STD_CMD_SIZE);
+	cmd = (struct lcs_cmd *) buffer->data;
+	/* Setup lanstat command. */
+	cmd->cmd_code = LCS_CMD_LANSTAT;
+	cmd->initiator = LCS_INITIATOR_TCPIP;
+	cmd->cmd.lcs_std_cmd.lan_type = card->lan_type;
+	cmd->cmd.lcs_std_cmd.portno = card->portno;
+	return lcs_send_lancmd(card, buffer, __lcs_lanstat_cb);
+}
+
+/**
+ * send stoplan command
+ */
+static int
+lcs_send_stoplan(struct lcs_card *card, __u8 initiator)
+{
+	struct lcs_buffer *buffer;
+	struct lcs_cmd *cmd;
+
+	LCS_DBF_TEXT(2, trace, "cmdstpln");
+	buffer = lcs_get_lancmd(card, LCS_STD_CMD_SIZE);
+	cmd = (struct lcs_cmd *) buffer->data;
+	cmd->cmd_code = LCS_CMD_STOPLAN;
+	cmd->initiator = initiator;
+	cmd->cmd.lcs_std_cmd.lan_type = card->lan_type;
+	cmd->cmd.lcs_std_cmd.portno = card->portno;
+	return lcs_send_lancmd(card, buffer, NULL);
+}
+
+/**
+ * send startlan command
+ */
+static void
+__lcs_send_startlan_cb(struct lcs_card *card, struct lcs_cmd *cmd)
+{
+	LCS_DBF_TEXT(2, trace, "srtlancb");
+	card->lan_type = cmd->cmd.lcs_std_cmd.lan_type;
+	card->portno = cmd->cmd.lcs_std_cmd.portno;
+}
+
+static int
+lcs_send_startlan(struct lcs_card *card, __u8 initiator)
+{
+	struct lcs_buffer *buffer;
+	struct lcs_cmd *cmd;
+
+	LCS_DBF_TEXT(2, trace, "cmdstaln");
+	buffer = lcs_get_lancmd(card, LCS_STD_CMD_SIZE);
+	cmd = (struct lcs_cmd *) buffer->data;
+	cmd->cmd_code = LCS_CMD_STARTLAN;
+	cmd->initiator = initiator;
+	cmd->cmd.lcs_std_cmd.lan_type = card->lan_type;
+	cmd->cmd.lcs_std_cmd.portno = card->portno;
+	return lcs_send_lancmd(card, buffer, __lcs_send_startlan_cb);
+}
+
+#ifdef CONFIG_IP_MULTICAST
+/**
+ * send setipm command (Multicast)
+ */
+static int
+lcs_send_setipm(struct lcs_card *card, struct lcs_ipm_list *ipm_list)
+{
+	struct lcs_buffer *buffer;
+	struct lcs_cmd *cmd;
+
+	LCS_DBF_TEXT(2, trace, "cmdsetim");
+	buffer = lcs_get_lancmd(card, LCS_MULTICAST_CMD_SIZE);
+	cmd = (struct lcs_cmd *) buffer->data;
+	cmd->cmd_code = LCS_CMD_SETIPM;
+	cmd->initiator = LCS_INITIATOR_TCPIP;
+	cmd->cmd.lcs_qipassist.lan_type = card->lan_type;
+	cmd->cmd.lcs_qipassist.portno = card->portno;
+	cmd->cmd.lcs_qipassist.version = 4;
+	cmd->cmd.lcs_qipassist.num_ip_pairs = 1;
+	memcpy(cmd->cmd.lcs_qipassist.lcs_ipass_ctlmsg.ip_mac_pair,
+	       &ipm_list->ipm, sizeof(struct lcs_ip_mac_pair));
+	LCS_DBF_TEXT_(2, trace, "%x", ipm_list->ipm.ip_addr);
+	return lcs_send_lancmd(card, buffer, NULL);
+}
+
+/**
+ * send delipm command (Multicast)
+ */
+static int
+lcs_send_delipm(struct lcs_card *card, struct lcs_ipm_list *ipm_list)
+{
+	struct lcs_buffer *buffer;
+	struct lcs_cmd *cmd;
+
+	LCS_DBF_TEXT(2, trace, "cmddelim");
+	buffer = lcs_get_lancmd(card, LCS_MULTICAST_CMD_SIZE);
+	cmd = (struct lcs_cmd *) buffer->data;
+	cmd->cmd_code = LCS_CMD_DELIPM;
+	cmd->initiator = LCS_INITIATOR_TCPIP;
+	cmd->cmd.lcs_qipassist.lan_type = card->lan_type;
+	cmd->cmd.lcs_qipassist.portno = card->portno;
+	cmd->cmd.lcs_qipassist.version = 4;
+	cmd->cmd.lcs_qipassist.num_ip_pairs = 1;
+	memcpy(cmd->cmd.lcs_qipassist.lcs_ipass_ctlmsg.ip_mac_pair,
+	       &ipm_list->ipm, sizeof(struct lcs_ip_mac_pair));
+	LCS_DBF_TEXT_(2, trace, "%x", ipm_list->ipm.ip_addr);
+	return lcs_send_lancmd(card, buffer, NULL);
+}
+
+/**
+ * check if multicast is supported by LCS
+ */
+static void
+__lcs_check_multicast_cb(struct lcs_card *card, struct lcs_cmd *cmd)
+{
+	LCS_DBF_TEXT(2, trace, "chkmccb");
+	card->ip_assists_supported =
+		cmd->cmd.lcs_qipassist.ip_assists_supported;
+	card->ip_assists_enabled =
+		cmd->cmd.lcs_qipassist.ip_assists_enabled;
+}
+
+static int
+lcs_check_multicast_support(struct lcs_card *card)
+{
+	struct lcs_buffer *buffer;
+	struct lcs_cmd *cmd;
+	int rc;
+
+	LCS_DBF_TEXT(2, trace, "cmdqipa");
+	/* Send query ipassist. */
+	buffer = lcs_get_lancmd(card, LCS_STD_CMD_SIZE);
+	cmd = (struct lcs_cmd *) buffer->data;
+	cmd->cmd_code = LCS_CMD_QIPASSIST;
+	cmd->initiator = LCS_INITIATOR_TCPIP;
+	cmd->cmd.lcs_qipassist.lan_type = card->lan_type;
+	cmd->cmd.lcs_qipassist.portno = card->portno;
+	cmd->cmd.lcs_qipassist.version = 4;
+	cmd->cmd.lcs_qipassist.num_ip_pairs = 1;
+	rc = lcs_send_lancmd(card, buffer, __lcs_check_multicast_cb);
+	if (rc != 0) {
+		pr_err("Query IPAssist failed. Assuming unsupported!\n");
+		return -EOPNOTSUPP;
+	}
+	if (card->ip_assists_supported & LCS_IPASS_MULTICAST_SUPPORT)
+		return 0;
+	return -EOPNOTSUPP;
+}
+
+/**
+ * set or del multicast address on LCS card
+ */
+static void
+lcs_fix_multicast_list(struct lcs_card *card)
+{
+	struct list_head failed_list;
+	struct lcs_ipm_list *ipm, *tmp;
+	unsigned long flags;
+	int rc;
+
+	LCS_DBF_TEXT(4, trace, "fixipm");
+	INIT_LIST_HEAD(&failed_list);
+	spin_lock_irqsave(&card->ipm_lock, flags);
+list_modified:
+	list_for_each_entry_safe(ipm, tmp, &card->ipm_list, list) {
+		switch (ipm->ipm_state) {
+		case LCS_IPM_STATE_SET_REQUIRED:
+			/* del from ipm_list so no one else can tamper with
+			 * this entry */
+			list_del_init(&ipm->list);
+			spin_unlock_irqrestore(&card->ipm_lock, flags);
+			rc = lcs_send_setipm(card, ipm);
+			spin_lock_irqsave(&card->ipm_lock, flags);
+			if (rc) {
+				pr_info("Adding multicast address failed."
+					" Table possibly full!\n");
+				/* store ipm in failed list -> will be added
+				 * to ipm_list again, so a retry will be done
+				 * during the next call of this function */
+				list_add_tail(&ipm->list, &failed_list);
+			} else {
+				ipm->ipm_state = LCS_IPM_STATE_ON_CARD;
+				/* re-insert into ipm_list */
+				list_add_tail(&ipm->list, &card->ipm_list);
+			}
+			goto list_modified;
+		case LCS_IPM_STATE_DEL_REQUIRED:
+			list_del(&ipm->list);
+			spin_unlock_irqrestore(&card->ipm_lock, flags);
+			lcs_send_delipm(card, ipm);
+			spin_lock_irqsave(&card->ipm_lock, flags);
+			kfree(ipm);
+			goto list_modified;
+		case LCS_IPM_STATE_ON_CARD:
+			break;
+		}
+	}
+	/* re-insert all entries from the failed_list into ipm_list */
+	list_for_each_entry_safe(ipm, tmp, &failed_list, list)
+		list_move_tail(&ipm->list, &card->ipm_list);
+
+	spin_unlock_irqrestore(&card->ipm_lock, flags);
+}
+
+/**
+ * get mac address for the relevant Multicast address
+ */
+static void
+lcs_get_mac_for_ipm(__be32 ipm, char *mac, struct net_device *dev)
+{
+	LCS_DBF_TEXT(4, trace, "getmac");
+	ip_eth_mc_map(ipm, mac);
+}
+
+/**
+ * function called by net device to handle multicast address relevant things
+ */
+static void lcs_remove_mc_addresses(struct lcs_card *card,
+				    struct in_device *in4_dev)
+{
+	struct ip_mc_list *im4;
+	struct list_head *l;
+	struct lcs_ipm_list *ipm;
+	unsigned long flags;
+	char buf[MAX_ADDR_LEN];
+
+	LCS_DBF_TEXT(4, trace, "remmclst");
+	spin_lock_irqsave(&card->ipm_lock, flags);
+	list_for_each(l, &card->ipm_list) {
+		ipm = list_entry(l, struct lcs_ipm_list, list);
+		for (im4 = rcu_dereference(in4_dev->mc_list);
+		     im4 != NULL; im4 = rcu_dereference(im4->next_rcu)) {
+			lcs_get_mac_for_ipm(im4->multiaddr, buf, card->dev);
+			if ((ipm->ipm.ip_addr == im4->multiaddr) &&
+			    (memcmp(buf, &ipm->ipm.mac_addr,
+				    LCS_MAC_LENGTH) == 0))
+				break;
+		}
+		if (im4 == NULL)
+			ipm->ipm_state = LCS_IPM_STATE_DEL_REQUIRED;
+	}
+	spin_unlock_irqrestore(&card->ipm_lock, flags);
+}
+
+static struct lcs_ipm_list *lcs_check_addr_entry(struct lcs_card *card,
+						 struct ip_mc_list *im4,
+						 char *buf)
+{
+	struct lcs_ipm_list *tmp, *ipm = NULL;
+	struct list_head *l;
+	unsigned long flags;
+
+	LCS_DBF_TEXT(4, trace, "chkmcent");
+	spin_lock_irqsave(&card->ipm_lock, flags);
+	list_for_each(l, &card->ipm_list) {
+		tmp = list_entry(l, struct lcs_ipm_list, list);
+		if ((tmp->ipm.ip_addr == im4->multiaddr) &&
+		    (memcmp(buf, &tmp->ipm.mac_addr,
+			    LCS_MAC_LENGTH) == 0)) {
+			ipm = tmp;
+			break;
+		}
+	}
+	spin_unlock_irqrestore(&card->ipm_lock, flags);
+	return ipm;
+}
+
+static void lcs_set_mc_addresses(struct lcs_card *card,
+				 struct in_device *in4_dev)
+{
+	struct ip_mc_list *im4;
+	struct lcs_ipm_list *ipm;
+	char buf[MAX_ADDR_LEN];
+	unsigned long flags;
+
+	LCS_DBF_TEXT(4, trace, "setmclst");
+	for (im4 = rcu_dereference(in4_dev->mc_list); im4 != NULL;
+	     im4 = rcu_dereference(im4->next_rcu)) {
+		lcs_get_mac_for_ipm(im4->multiaddr, buf, card->dev);
+		ipm = lcs_check_addr_entry(card, im4, buf);
+		if (ipm != NULL)
+			continue;	/* Address already in list. */
+		ipm = kzalloc(sizeof(struct lcs_ipm_list), GFP_ATOMIC);
+		if (ipm == NULL) {
+			pr_info("Not enough memory to add"
+				" new multicast entry!\n");
+			break;
+		}
+		memcpy(&ipm->ipm.mac_addr, buf, LCS_MAC_LENGTH);
+		ipm->ipm.ip_addr = im4->multiaddr;
+		ipm->ipm_state = LCS_IPM_STATE_SET_REQUIRED;
+		spin_lock_irqsave(&card->ipm_lock, flags);
+		LCS_DBF_HEX(2, trace, &ipm->ipm.ip_addr, 4);
+		list_add(&ipm->list, &card->ipm_list);
+		spin_unlock_irqrestore(&card->ipm_lock, flags);
+	}
+}
+
+static int
+lcs_register_mc_addresses(void *data)
+{
+	struct lcs_card *card;
+	struct in_device *in4_dev;
+
+	card = (struct lcs_card *) data;
+
+	if (!lcs_do_run_thread(card, LCS_SET_MC_THREAD))
+		return 0;
+	LCS_DBF_TEXT(4, trace, "regmulti");
+
+	in4_dev = in_dev_get(card->dev);
+	if (in4_dev == NULL)
+		goto out;
+	rcu_read_lock();
+	lcs_remove_mc_addresses(card, in4_dev);
+	lcs_set_mc_addresses(card, in4_dev);
+	rcu_read_unlock();
+	in_dev_put(in4_dev);
+
+	netif_carrier_off(card->dev);
+	netif_tx_disable(card->dev);
+	wait_event(card->write.wait_q,
+			(card->write.state != LCS_CH_STATE_RUNNING));
+	lcs_fix_multicast_list(card);
+	if (card->state == DEV_STATE_UP) {
+		netif_carrier_on(card->dev);
+		netif_wake_queue(card->dev);
+	}
+out:
+	lcs_clear_thread_running_bit(card, LCS_SET_MC_THREAD);
+	return 0;
+}
+#endif /* CONFIG_IP_MULTICAST */
+
+/**
+ * function called by net device to
+ * handle multicast address relevant things
+ */
+static void
+lcs_set_multicast_list(struct net_device *dev)
+{
+#ifdef CONFIG_IP_MULTICAST
+	struct lcs_card *card;
+
+	LCS_DBF_TEXT(4, trace, "setmulti");
+	card = (struct lcs_card *) dev->ml_priv;
+
+	if (!lcs_set_thread_start_bit(card, LCS_SET_MC_THREAD))
+		schedule_work(&card->kernel_thread_starter);
+#endif /* CONFIG_IP_MULTICAST */
+}
+
+static long
+lcs_check_irb_error(struct ccw_device *cdev, struct irb *irb)
+{
+	if (!IS_ERR(irb))
+		return 0;
+
+	LCS_DBF_TEXT(2, trace, "ckirberr");
+	switch (PTR_ERR(irb)) {
+	case -EIO:
+		dev_warn(&cdev->dev,
+			"An I/O-error occurred on the LCS device\n");
+		LCS_DBF_TEXT_(2, trace, "  rc%d", -EIO);
+		break;
+	case -ETIMEDOUT:
+		dev_warn(&cdev->dev,
+			"A command timed out on the LCS device\n");
+		LCS_DBF_TEXT_(2, trace, "  rc%d", -ETIMEDOUT);
+		break;
+	default:
+		dev_warn(&cdev->dev,
+			"An error occurred on the LCS device, rc=%ld\n",
+			PTR_ERR(irb));
+		LCS_DBF_TEXT(2, trace, "  rc???");
+	}
+	return PTR_ERR(irb);
+}
+
+static int
+lcs_get_problem(struct ccw_device *cdev, struct irb *irb)
+{
+	int dstat, cstat;
+	char *sense;
+
+	sense = (char *) irb->ecw;
+	cstat = irb->scsw.cmd.cstat;
+	dstat = irb->scsw.cmd.dstat;
+
+	if (cstat & (SCHN_STAT_CHN_CTRL_CHK | SCHN_STAT_INTF_CTRL_CHK |
+		     SCHN_STAT_CHN_DATA_CHK | SCHN_STAT_CHAIN_CHECK |
+		     SCHN_STAT_PROT_CHECK   | SCHN_STAT_PROG_CHECK)) {
+		LCS_DBF_TEXT(2, trace, "CGENCHK");
+		return 1;
+	}
+	if (dstat & DEV_STAT_UNIT_CHECK) {
+		if (sense[LCS_SENSE_BYTE_1] &
+		    LCS_SENSE_RESETTING_EVENT) {
+			LCS_DBF_TEXT(2, trace, "REVIND");
+			return 1;
+		}
+		if (sense[LCS_SENSE_BYTE_0] &
+		    LCS_SENSE_CMD_REJECT) {
+			LCS_DBF_TEXT(2, trace, "CMDREJ");
+			return 0;
+		}
+		if ((!sense[LCS_SENSE_BYTE_0]) &&
+		    (!sense[LCS_SENSE_BYTE_1]) &&
+		    (!sense[LCS_SENSE_BYTE_2]) &&
+		    (!sense[LCS_SENSE_BYTE_3])) {
+			LCS_DBF_TEXT(2, trace, "ZEROSEN");
+			return 0;
+		}
+		LCS_DBF_TEXT(2, trace, "DGENCHK");
+		return 1;
+	}
+	return 0;
+}
+
+static void
+lcs_schedule_recovery(struct lcs_card *card)
+{
+	LCS_DBF_TEXT(2, trace, "startrec");
+	if (!lcs_set_thread_start_bit(card, LCS_RECOVERY_THREAD))
+		schedule_work(&card->kernel_thread_starter);
+}
+
+/**
+ * IRQ Handler for LCS channels
+ */
+static void
+lcs_irq(struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
+{
+	struct lcs_card *card;
+	struct lcs_channel *channel;
+	int rc, index;
+	int cstat, dstat;
+
+	if (lcs_check_irb_error(cdev, irb))
+		return;
+
+	card = CARD_FROM_DEV(cdev);
+	if (card->read.ccwdev == cdev)
+		channel = &card->read;
+	else
+		channel = &card->write;
+
+	cstat = irb->scsw.cmd.cstat;
+	dstat = irb->scsw.cmd.dstat;
+	LCS_DBF_TEXT_(5, trace, "Rint%s", dev_name(&cdev->dev));
+	LCS_DBF_TEXT_(5, trace, "%4x%4x", irb->scsw.cmd.cstat,
+		      irb->scsw.cmd.dstat);
+	LCS_DBF_TEXT_(5, trace, "%4x%4x", irb->scsw.cmd.fctl,
+		      irb->scsw.cmd.actl);
+
+	/* Check for channel and device errors presented */
+	rc = lcs_get_problem(cdev, irb);
+	if (rc || (dstat & DEV_STAT_UNIT_EXCEP)) {
+		dev_warn(&cdev->dev,
+			"The LCS device stopped because of an error,"
+			" dstat=0x%X, cstat=0x%X\n", dstat, cstat);
+		if (rc)
+			channel->state = LCS_CH_STATE_ERROR;
+	}
+	if (channel->state == LCS_CH_STATE_ERROR) {
+		lcs_schedule_recovery(card);
+		wake_up(&card->wait_q);
+		return;
+	}
+	/* How far in the ccw chain have we processed? */
+	if ((channel->state != LCS_CH_STATE_INIT) &&
+	    (irb->scsw.cmd.fctl & SCSW_FCTL_START_FUNC) &&
+	    (irb->scsw.cmd.cpa != 0)) {
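+		/*
+		 * irb->scsw.cmd.cpa holds the absolute address of the next
+		 * CCW; converting it back with __va() and subtracting the
+		 * base of the channel's CCW array yields the ring index of
+		 * the buffer the subchannel has advanced to.
+		 */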
+		index = (struct ccw1 *) __va((addr_t) irb->scsw.cmd.cpa)
+			- channel->ccws;
+		if ((irb->scsw.cmd.actl & SCSW_ACTL_SUSPENDED) ||
+		    (irb->scsw.cmd.cstat & SCHN_STAT_PCI))
+			/*
+			 * When suspended or after a PCI the reported cpa
+			 * may point one CCW ahead of the buffer actually
+			 * being worked on; step back one ring slot.
+			 */
+			index = (index - 1) & (LCS_NUM_BUFFS - 1);
+		while (channel->io_idx != index) {
+			__lcs_processed_buffer(channel,
+					       channel->iob + channel->io_idx);
+			channel->io_idx =
+				(channel->io_idx + 1) & (LCS_NUM_BUFFS - 1);
+		}
+	}
+
+	if ((irb->scsw.cmd.dstat & DEV_STAT_DEV_END) ||
+	    (irb->scsw.cmd.dstat & DEV_STAT_CHN_END) ||
+	    (irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK))
+		/* Mark channel as stopped. */
+		channel->state = LCS_CH_STATE_STOPPED;
+	else if (irb->scsw.cmd.actl & SCSW_ACTL_SUSPENDED)
+		/* CCW execution stopped on a suspend bit. */
+		channel->state = LCS_CH_STATE_SUSPENDED;
+	if (irb->scsw.cmd.fctl & SCSW_FCTL_HALT_FUNC) {
+		if (irb->scsw.cmd.cc != 0) {
+			ccw_device_halt(channel->ccwdev, (addr_t) channel);
+			return;
+		}
+		/* The channel has been stopped by halt_IO. */
+		channel->state = LCS_CH_STATE_HALTED;
+	}
+	if (irb->scsw.cmd.fctl & SCSW_FCTL_CLEAR_FUNC)
+		channel->state = LCS_CH_STATE_CLEARED;
+	/* Do the rest in the tasklet. */
+	tasklet_schedule(&channel->irq_tasklet);
+}
+
+/**
+ * Tasklet for IRQ handler
+ */
+static void
+lcs_tasklet(unsigned long data)
+{
+	unsigned long flags;
+	struct lcs_channel *channel;
+	struct lcs_buffer *iob;
+	int buf_idx;
+
+	channel = (struct lcs_channel *) data;
+	LCS_DBF_TEXT_(5, trace, "tlet%s", dev_name(&channel->ccwdev->dev));
+
+	/* Check for processed buffers. */
+	iob = channel->iob;
+	buf_idx = channel->buf_idx;
+	while (iob[buf_idx].state == LCS_BUF_STATE_PROCESSED) {
+		/* Do the callback thing. */
+		if (iob[buf_idx].callback != NULL)
+			iob[buf_idx].callback(channel, iob + buf_idx);
+		buf_idx = (buf_idx + 1) & (LCS_NUM_BUFFS - 1);
+	}
+	channel->buf_idx = buf_idx;
+
+	if (channel->state == LCS_CH_STATE_STOPPED)
+		lcs_start_channel(channel);
+	spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
+	if (channel->state == LCS_CH_STATE_SUSPENDED &&
+	    channel->iob[channel->io_idx].state == LCS_BUF_STATE_READY)
+		__lcs_resume_channel(channel);
+	spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);
+
+	/* Something happened on the channel. Wake up waiters. */
+	wake_up(&channel->wait_q);
+}
+
+/**
+ * Finish current tx buffer and make it ready for transmit.
+ */
+static void
+__lcs_emit_txbuffer(struct lcs_card *card)
+{
+	LCS_DBF_TEXT(5, trace, "emittx");
+	*(__u16 *)(card->tx_buffer->data + card->tx_buffer->count) = 0;
+	card->tx_buffer->count += 2;
+	lcs_ready_buffer(&card->write, card->tx_buffer);
+	card->tx_buffer = NULL;
+	card->tx_emitted++;
+}
+
+/**
+ * Callback for finished tx buffers.
+ */
+static void
+lcs_txbuffer_cb(struct lcs_channel *channel, struct lcs_buffer *buffer)
+{
+	struct lcs_card *card;
+
+	LCS_DBF_TEXT(5, trace, "txbuffcb");
+	/* Put buffer back to pool. */
+	lcs_release_buffer(channel, buffer);
+	card = container_of(channel, struct lcs_card, write);
+	if (netif_queue_stopped(card->dev) && netif_carrier_ok(card->dev))
+		netif_wake_queue(card->dev);
+	spin_lock(&card->lock);
+	card->tx_emitted--;
+	if (card->tx_emitted <= 0 && card->tx_buffer != NULL)
+		/*
+		 * Last running tx buffer has finished. Submit partially
+		 * filled current buffer.
+		 */
+		__lcs_emit_txbuffer(card);
+	spin_unlock(&card->lock);
+}
+
+/**
+ * Packet transmit function called by network stack
+ */
+static int
+__lcs_start_xmit(struct lcs_card *card, struct sk_buff *skb,
+		 struct net_device *dev)
+{
+	struct lcs_header *header;
+	int rc = NETDEV_TX_OK;
+
+	LCS_DBF_TEXT(5, trace, "hardxmit");
+	if (skb == NULL) {
+		card->stats.tx_dropped++;
+		card->stats.tx_errors++;
+		return NETDEV_TX_OK;
+	}
+	if (card->state != DEV_STATE_UP) {
+		dev_kfree_skb(skb);
+		card->stats.tx_dropped++;
+		card->stats.tx_errors++;
+		card->stats.tx_carrier_errors++;
+		return NETDEV_TX_OK;
+	}
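+	/* This driver does not transmit IPv6 frames; drop them silently. */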
+	if (skb->protocol == htons(ETH_P_IPV6)) {
+		dev_kfree_skb(skb);
+		return NETDEV_TX_OK;
+	}
+	netif_stop_queue(card->dev);
+	spin_lock(&card->lock);
+	if (card->tx_buffer != NULL &&
+	    card->tx_buffer->count + sizeof(struct lcs_header) +
+	    skb->len + sizeof(u16) > LCS_IOBUFFERSIZE)
+		/* skb too big for current tx buffer. */
+		__lcs_emit_txbuffer(card);
+	if (card->tx_buffer == NULL) {
+		/* Get new tx buffer */
+		card->tx_buffer = lcs_get_buffer(&card->write);
+		if (card->tx_buffer == NULL) {
+			card->stats.tx_dropped++;
+			rc = NETDEV_TX_BUSY;
+			goto out;
+		}
+		card->tx_buffer->callback = lcs_txbuffer_cb;
+		card->tx_buffer->count = 0;
+	}
+	header = (struct lcs_header *)
+		(card->tx_buffer->data + card->tx_buffer->count);
+	card->tx_buffer->count += skb->len + sizeof(struct lcs_header);
+	header->offset = card->tx_buffer->count;
+	header->type = card->lan_type;
+	header->slot = card->portno;
+	skb_copy_from_linear_data(skb, header + 1, skb->len);
+	spin_unlock(&card->lock);
+	card->stats.tx_bytes += skb->len;
+	card->stats.tx_packets++;
+	dev_kfree_skb(skb);
+	netif_wake_queue(card->dev);
+	spin_lock(&card->lock);
+	if (card->tx_emitted <= 0 && card->tx_buffer != NULL)
+		/* If this is the first tx buffer emit it immediately. */
+		__lcs_emit_txbuffer(card);
+out:
+	spin_unlock(&card->lock);
+	return rc;
+}
+
+static int
+lcs_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+	struct lcs_card *card;
+	int rc;
+
+	LCS_DBF_TEXT(5, trace, "pktxmit");
+	card = (struct lcs_card *) dev->ml_priv;
+	rc = __lcs_start_xmit(card, skb, dev);
+	return rc;
+}
+
+/**
+ * Send STARTLAN (and, in lcs_startlan() below, LANSTAT) commands to
+ * make the LCS device ready.
+ */
+static int
+lcs_startlan_auto(struct lcs_card *card)
+{
+	int rc;
+
+	LCS_DBF_TEXT(2, trace, "strtauto");
+#ifdef CONFIG_ETHERNET
+	card->lan_type = LCS_FRAME_TYPE_ENET;
+	rc = lcs_send_startlan(card, LCS_INITIATOR_TCPIP);
+	if (rc == 0)
+		return 0;
+
+#endif
+#ifdef CONFIG_FDDI
+	card->lan_type = LCS_FRAME_TYPE_FDDI;
+	rc = lcs_send_startlan(card, LCS_INITIATOR_TCPIP);
+	if (rc == 0)
+		return 0;
+#endif
+	return -EIO;
+}
+
+static int
+lcs_startlan(struct lcs_card *card)
+{
+	int rc, i;
+
+	LCS_DBF_TEXT(2, trace, "startlan");
+	rc = 0;
+	if (card->portno != LCS_INVALID_PORT_NO) {
+		if (card->lan_type == LCS_FRAME_TYPE_AUTO)
+			rc = lcs_startlan_auto(card);
+		else
+			rc = lcs_send_startlan(card, LCS_INITIATOR_TCPIP);
+	} else {
+		for (i = 0; i <= 16; i++) {
+			card->portno = i;
+			if (card->lan_type != LCS_FRAME_TYPE_AUTO)
+				rc = lcs_send_startlan(card,
+						       LCS_INITIATOR_TCPIP);
+			else
+				/* autodetecting lan type */
+				rc = lcs_startlan_auto(card);
+			if (rc == 0)
+				break;
+		}
+	}
+	if (rc == 0)
+		return lcs_send_lanstat(card);
+	return rc;
+}
+
+/**
+ * LCS detect function:
+ * set up the channels and make them I/O ready.
+ */
+static int
+lcs_detect(struct lcs_card *card)
+{
+	int rc = 0;
+
+	LCS_DBF_TEXT(2, setup, "lcsdetct");
+	/* start/reset card */
+	if (card->dev)
+		netif_stop_queue(card->dev);
+	rc = lcs_stop_channels(card);
+	if (rc == 0) {
+		rc = lcs_start_channels(card);
+		if (rc == 0) {
+			rc = lcs_send_startup(card, LCS_INITIATOR_TCPIP);
+			if (rc == 0)
+				rc = lcs_startlan(card);
+		}
+	}
+	if (rc == 0) {
+		card->state = DEV_STATE_UP;
+	} else {
+		card->state = DEV_STATE_DOWN;
+		card->write.state = LCS_CH_STATE_INIT;
+		card->read.state =  LCS_CH_STATE_INIT;
+	}
+	return rc;
+}
+
+/**
+ * LCS Stop card
+ */
+static int
+lcs_stopcard(struct lcs_card *card)
+{
+	int rc;
+
+	LCS_DBF_TEXT(3, setup, "stopcard");
+
+	if (card->read.state != LCS_CH_STATE_STOPPED &&
+	    card->write.state != LCS_CH_STATE_STOPPED &&
+	    card->read.state != LCS_CH_STATE_ERROR &&
+	    card->write.state != LCS_CH_STATE_ERROR &&
+	    card->state == DEV_STATE_UP) {
+		lcs_clear_multicast_list(card);
+		/* Proceed with SHUTDOWN even if STOPLAN fails. */
+		rc = lcs_send_stoplan(card, LCS_INITIATOR_TCPIP);
+		rc = lcs_send_shutdown(card);
+	}
+	rc = lcs_stop_channels(card);
+	card->state = DEV_STATE_DOWN;
+
+	return rc;
+}
+
+/**
+ * Kernel thread helper for LGW-initiated commands.
+ */
+static void
+lcs_start_kernel_thread(struct work_struct *work)
+{
+	struct lcs_card *card = container_of(work, struct lcs_card,
+					     kernel_thread_starter);
+	LCS_DBF_TEXT(5, trace, "krnthrd");
+	if (lcs_do_start_thread(card, LCS_RECOVERY_THREAD))
+		kthread_run(lcs_recovery, card, "lcs_recover");
+#ifdef CONFIG_IP_MULTICAST
+	if (lcs_do_start_thread(card, LCS_SET_MC_THREAD))
+		kthread_run(lcs_register_mc_addresses, card, "regipm");
+#endif
+}
+
+/**
+ * Process control frames.
+ */
+static void
+lcs_get_control(struct lcs_card *card, struct lcs_cmd *cmd)
+{
+	LCS_DBF_TEXT(5, trace, "getctrl");
+	if (cmd->initiator == LCS_INITIATOR_LGW) {
+		switch(cmd->cmd_code) {
+		case LCS_CMD_STARTUP:
+		case LCS_CMD_STARTLAN:
+			lcs_schedule_recovery(card);
+			break;
+		case LCS_CMD_STOPLAN:
+			if (card->dev) {
+				pr_warn("Stoplan for %s initiated by LGW\n",
+					card->dev->name);
+				netif_carrier_off(card->dev);
+			}
+			break;
+		default:
+			LCS_DBF_TEXT(5, trace, "noLGWcmd");
+			break;
+		}
+	} else
+		lcs_notify_lancmd_waiters(card, cmd);
+}
+
+/**
+ * Unpack network packet.
+ */
+static void
+lcs_get_skb(struct lcs_card *card, char *skb_data, unsigned int skb_len)
+{
+	struct sk_buff *skb;
+
+	LCS_DBF_TEXT(5, trace, "getskb");
+	if (card->dev == NULL ||
+	    card->state != DEV_STATE_UP)
+		/* The card isn't up. Ignore the packet. */
+		return;
+
+	skb = dev_alloc_skb(skb_len);
+	if (skb == NULL) {
+		dev_err(&card->dev->dev,
+			"Allocating a socket buffer to interface %s failed\n",
+			card->dev->name);
+		card->stats.rx_dropped++;
+		return;
+	}
+	skb_put_data(skb, skb_data, skb_len);
+	skb->protocol =	card->lan_type_trans(skb, card->dev);
+	card->stats.rx_bytes += skb_len;
+	card->stats.rx_packets++;
+	if (skb->protocol == htons(ETH_P_802_2))
+		*((__u32 *)skb->cb) = ++card->pkt_seq;
+	netif_rx(skb);
+}
+
+/**
+ * LCS main routine to get packets and lancmd replies from the buffers
+ */
+static void
+lcs_get_frames_cb(struct lcs_channel *channel, struct lcs_buffer *buffer)
+{
+	struct lcs_card *card;
+	struct lcs_header *lcs_hdr;
+	__u16 offset;
+
+	LCS_DBF_TEXT(5, trace, "lcsgtpkt");
+	lcs_hdr = (struct lcs_header *) buffer->data;
+	if (lcs_hdr->offset == LCS_ILLEGAL_OFFSET) {
+		LCS_DBF_TEXT(4, trace, "-eiogpkt");
+		return;
+	}
+	card = container_of(channel, struct lcs_card, read);
+	offset = 0;
+	while (lcs_hdr->offset != 0) {
+		if (lcs_hdr->offset <= 0 ||
+		    lcs_hdr->offset > LCS_IOBUFFERSIZE ||
+		    lcs_hdr->offset < offset) {
+			/* Offset invalid. */
+			card->stats.rx_length_errors++;
+			card->stats.rx_errors++;
+			return;
+		}
+		/* What kind of frame is it? */
+		if (lcs_hdr->type == LCS_FRAME_TYPE_CONTROL)
+			/* Control frame. */
+			lcs_get_control(card, (struct lcs_cmd *) lcs_hdr);
+		else if (lcs_hdr->type == LCS_FRAME_TYPE_ENET ||
+			 lcs_hdr->type == LCS_FRAME_TYPE_TR ||
+			 lcs_hdr->type == LCS_FRAME_TYPE_FDDI)
+			/* Normal network packet. */
+			lcs_get_skb(card, (char *)(lcs_hdr + 1),
+				    lcs_hdr->offset - offset -
+				    sizeof(struct lcs_header));
+		else
+			/* Unknown frame type. */
+			;	/* FIXME: error message? */
+		/* Proceed to next frame. */
+		offset = lcs_hdr->offset;
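+		/*
+		 * Poison the consumed header so that a buffer handed back
+		 * by the card without fresh data is caught by the
+		 * LCS_ILLEGAL_OFFSET check at the top of this function.
+		 */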
+		lcs_hdr->offset = LCS_ILLEGAL_OFFSET;
+		lcs_hdr = (struct lcs_header *) (buffer->data + offset);
+	}
+	/* The buffer is now empty. Make it ready again. */
+	lcs_ready_buffer(&card->read, buffer);
+}
+
+/**
+ * get network statistics for ifconfig and other user programs
+ */
+static struct net_device_stats *
+lcs_getstats(struct net_device *dev)
+{
+	struct lcs_card *card;
+
+	LCS_DBF_TEXT(4, trace, "netstats");
+	card = (struct lcs_card *) dev->ml_priv;
+	return &card->stats;
+}
+
+/**
+ * Stop the LCS device. Called when the interface is taken down,
+ * e.g. by "ifconfig xxx down".
+ */
+static int
+lcs_stop_device(struct net_device *dev)
+{
+	struct lcs_card *card;
+	int rc;
+
+	LCS_DBF_TEXT(2, trace, "stopdev");
+	card   = (struct lcs_card *) dev->ml_priv;
+	netif_carrier_off(dev);
+	netif_tx_disable(dev);
+	dev->flags &= ~IFF_UP;
+	wait_event(card->write.wait_q,
+		(card->write.state != LCS_CH_STATE_RUNNING));
+	rc = lcs_stopcard(card);
+	if (rc)
+		dev_err(&card->dev->dev,
+			"Shutting down the LCS device failed\n");
+	return rc;
+}
+
+/**
+ * Start the LCS device and make it runnable. Called when the
+ * interface is brought up, e.g. by "ifconfig xxx up".
+ */
+static int
+lcs_open_device(struct net_device *dev)
+{
+	struct lcs_card *card;
+	int rc;
+
+	LCS_DBF_TEXT(2, trace, "opendev");
+	card = (struct lcs_card *) dev->ml_priv;
+	/* initialize statistics */
+	rc = lcs_detect(card);
+	if (rc) {
+		pr_err("Error in opening device!\n");
+	} else {
+		dev->flags |= IFF_UP;
+		netif_carrier_on(dev);
+		netif_wake_queue(dev);
+		card->state = DEV_STATE_UP;
+	}
+	return rc;
+}
+
+/**
+ * Show function for the portno sysfs attribute.
+ */
+static ssize_t
+lcs_portno_show (struct device *dev, struct device_attribute *attr, char *buf)
+{
+        struct lcs_card *card;
+
+	card = dev_get_drvdata(dev);
+
+        if (!card)
+                return 0;
+
+        return sprintf(buf, "%d\n", card->portno);
+}
+
+/**
+ * Store function for the portno sysfs attribute.
+ */
+static ssize_t
+lcs_portno_store(struct device *dev, struct device_attribute *attr,
+		 const char *buf, size_t count)
+{
+	struct lcs_card *card;
+	int rc;
+	s16 value;
+
+	card = dev_get_drvdata(dev);
+	if (!card)
+		return 0;
+
+	rc = kstrtos16(buf, 0, &value);
+	if (rc)
+		return -EINVAL;
+	/* TODO: sanity checks */
+	card->portno = value;
+	if (card->dev)
+		card->dev->dev_port = card->portno;
+
+	return count;
+}
+
+static DEVICE_ATTR(portno, 0644, lcs_portno_show, lcs_portno_store);
+
+static const char *lcs_type[] = {
+	"not a channel",
+	"2216 parallel",
+	"2216 channel",
+	"OSA LCS card",
+	"unknown channel type",
+	"unsupported channel type",
+};
+
+static ssize_t
+lcs_type_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	struct ccwgroup_device *cgdev;
+
+	cgdev = to_ccwgroupdev(dev);
+	if (!cgdev)
+		return -ENODEV;
+
+	return sprintf(buf, "%s\n", lcs_type[cgdev->cdev[0]->id.driver_info]);
+}
+
+static DEVICE_ATTR(type, 0444, lcs_type_show, NULL);
+
+static ssize_t
+lcs_timeout_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	struct lcs_card *card;
+
+	card = dev_get_drvdata(dev);
+
+	return card ? sprintf(buf, "%u\n", card->lancmd_timeout) : 0;
+}
+
+static ssize_t
+lcs_timeout_store(struct device *dev, struct device_attribute *attr,
+		  const char *buf, size_t count)
+{
+	struct lcs_card *card;
+	unsigned int value;
+	int rc;
+
+	card = dev_get_drvdata(dev);
+	if (!card)
+		return 0;
+
+	rc = kstrtouint(buf, 0, &value);
+	if (rc)
+		return -EINVAL;
+	/* TODO: sanity checks */
+	card->lancmd_timeout = value;
+
+	return count;
+}
+
+static DEVICE_ATTR(lancmd_timeout, 0644, lcs_timeout_show, lcs_timeout_store);
+
+static ssize_t
+lcs_dev_recover_store(struct device *dev, struct device_attribute *attr,
+		      const char *buf, size_t count)
+{
+	struct lcs_card *card = dev_get_drvdata(dev);
+	unsigned int value;
+	int rc;
+
+	if (!card)
+		return -EINVAL;
+	if (card->state != DEV_STATE_UP)
+		return -EPERM;
+	rc = kstrtouint(buf, 16, &value);
+	if (rc)
+		return rc;
+	if (value == 1)
+		lcs_schedule_recovery(card);
+	return count;
+}
+
+static DEVICE_ATTR(recover, 0200, NULL, lcs_dev_recover_store);
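+/*
+ * The attributes above are exposed on the ccwgroup device, e.g.
+ * (hypothetical bus ID):
+ *   echo 1 > /sys/bus/ccwgroup/devices/0.0.0800/recover
+ * triggers a recovery of a running device.
+ */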
+
+static struct attribute *lcs_attrs[] = {
+	&dev_attr_portno.attr,
+	&dev_attr_type.attr,
+	&dev_attr_lancmd_timeout.attr,
+	&dev_attr_recover.attr,
+	NULL,
+};
+static struct attribute_group lcs_attr_group = {
+	.attrs = lcs_attrs,
+};
+static const struct attribute_group *lcs_attr_groups[] = {
+	&lcs_attr_group,
+	NULL,
+};
+static const struct device_type lcs_devtype = {
+	.name = "lcs",
+	.groups = lcs_attr_groups,
+};
+
+/**
+ * lcs_probe_device is called on establishing a new ccwgroup_device.
+ */
+static int
+lcs_probe_device(struct ccwgroup_device *ccwgdev)
+{
+	struct lcs_card *card;
+
+	if (!get_device(&ccwgdev->dev))
+		return -ENODEV;
+
+	LCS_DBF_TEXT(2, setup, "add_dev");
+	card = lcs_alloc_card();
+	if (!card) {
+		LCS_DBF_TEXT_(2, setup, "  rc%d", -ENOMEM);
+		put_device(&ccwgdev->dev);
+		return -ENOMEM;
+	}
+	dev_set_drvdata(&ccwgdev->dev, card);
+	ccwgdev->cdev[0]->handler = lcs_irq;
+	ccwgdev->cdev[1]->handler = lcs_irq;
+	card->gdev = ccwgdev;
+	INIT_WORK(&card->kernel_thread_starter, lcs_start_kernel_thread);
+	card->thread_start_mask = 0;
+	card->thread_allowed_mask = 0;
+	card->thread_running_mask = 0;
+	ccwgdev->dev.type = &lcs_devtype;
+
+	return 0;
+}
+
+static int
+lcs_register_netdev(struct ccwgroup_device *ccwgdev)
+{
+	struct lcs_card *card;
+
+	LCS_DBF_TEXT(2, setup, "regnetdv");
+	card = dev_get_drvdata(&ccwgdev->dev);
+	if (card->dev->reg_state != NETREG_UNINITIALIZED)
+		return 0;
+	SET_NETDEV_DEV(card->dev, &ccwgdev->dev);
+	return register_netdev(card->dev);
+}
+
+static const struct net_device_ops lcs_netdev_ops = {
+	.ndo_open		= lcs_open_device,
+	.ndo_stop		= lcs_stop_device,
+	.ndo_get_stats		= lcs_getstats,
+	.ndo_start_xmit		= lcs_start_xmit,
+};
+
+static const struct net_device_ops lcs_mc_netdev_ops = {
+	.ndo_open		= lcs_open_device,
+	.ndo_stop		= lcs_stop_device,
+	.ndo_get_stats		= lcs_getstats,
+	.ndo_start_xmit		= lcs_start_xmit,
+	.ndo_set_rx_mode	= lcs_set_multicast_list,
+};
+
+/**
+ * lcs_new_device will be called by setting the group device online.
+ */
+static int
+lcs_new_device(struct ccwgroup_device *ccwgdev)
+{
+	struct lcs_card *card;
+	struct net_device *dev = NULL;
+	enum lcs_dev_states recover_state;
+	int rc;
+
+	card = dev_get_drvdata(&ccwgdev->dev);
+	if (!card)
+		return -ENODEV;
+
+	LCS_DBF_TEXT(2, setup, "newdev");
+	LCS_DBF_HEX(3, setup, &card, sizeof(void*));
+	card->read.ccwdev  = ccwgdev->cdev[0];
+	card->write.ccwdev = ccwgdev->cdev[1];
+
+	recover_state = card->state;
+	rc = ccw_device_set_online(card->read.ccwdev);
+	if (rc)
+		goto out_err;
+	rc = ccw_device_set_online(card->write.ccwdev);
+	if (rc)
+		goto out_werr;
+
+	LCS_DBF_TEXT(3, setup, "lcsnewdv");
+
+	lcs_setup_card(card);
+	rc = lcs_detect(card);
+	if (rc) {
+		LCS_DBF_TEXT(2, setup, "dtctfail");
+		dev_err(&ccwgdev->dev,
+			"Detecting a network adapter for LCS devices"
+			" failed with rc=%d (0x%x)\n", rc, rc);
+		lcs_stopcard(card);
+		goto out;
+	}
+	if (card->dev) {
+		LCS_DBF_TEXT(2, setup, "samedev");
+		LCS_DBF_HEX(3, setup, &card, sizeof(void*));
+		goto netdev_out;
+	}
+	switch (card->lan_type) {
+#ifdef CONFIG_ETHERNET
+	case LCS_FRAME_TYPE_ENET:
+		card->lan_type_trans = eth_type_trans;
+		dev = alloc_etherdev(0);
+		break;
+#endif
+#ifdef CONFIG_FDDI
+	case LCS_FRAME_TYPE_FDDI:
+		card->lan_type_trans = fddi_type_trans;
+		dev = alloc_fddidev(0);
+		break;
+#endif
+	default:
+		LCS_DBF_TEXT(3, setup, "errinit");
+		pr_err(" Initialization failed\n");
+		goto out;
+	}
+	if (!dev)
+		goto out;
+	card->dev = dev;
+	card->dev->ml_priv = card;
+	card->dev->netdev_ops = &lcs_netdev_ops;
+	card->dev->dev_port = card->portno;
+	memcpy(card->dev->dev_addr, card->mac, LCS_MAC_LENGTH);
+#ifdef CONFIG_IP_MULTICAST
+	if (!lcs_check_multicast_support(card))
+		card->dev->netdev_ops = &lcs_mc_netdev_ops;
+#endif
+netdev_out:
+	lcs_set_allowed_threads(card, 0xffffffff);
+	if (recover_state == DEV_STATE_RECOVER) {
+		lcs_set_multicast_list(card->dev);
+		card->dev->flags |= IFF_UP;
+		netif_carrier_on(card->dev);
+		netif_wake_queue(card->dev);
+		card->state = DEV_STATE_UP;
+	} else {
+		lcs_stopcard(card);
+	}
+
+	if (lcs_register_netdev(ccwgdev) != 0)
+		goto out;
+
+	/* Print out supported assists: IPv6 */
+	pr_info("LCS device %s %s IPv6 support\n", card->dev->name,
+		(card->ip_assists_supported & LCS_IPASS_IPV6_SUPPORT) ?
+		"with" : "without");
+	/* Print out supported assist: Multicast */
+	pr_info("LCS device %s %s Multicast support\n", card->dev->name,
+		(card->ip_assists_supported & LCS_IPASS_MULTICAST_SUPPORT) ?
+		"with" : "without");
+	return 0;
+out:
+	ccw_device_set_offline(card->write.ccwdev);
+out_werr:
+	ccw_device_set_offline(card->read.ccwdev);
+out_err:
+	return -ENODEV;
+}
+
+/**
+ * lcs_shutdown_device, called when setting the group device offline.
+ */
+static int
+__lcs_shutdown_device(struct ccwgroup_device *ccwgdev, int recovery_mode)
+{
+	struct lcs_card *card;
+	enum lcs_dev_states recover_state;
+	int ret = 0, ret2 = 0, ret3 = 0;
+
+	LCS_DBF_TEXT(3, setup, "shtdndev");
+	card = dev_get_drvdata(&ccwgdev->dev);
+	if (!card)
+		return -ENODEV;
+	if (recovery_mode == 0) {
+		lcs_set_allowed_threads(card, 0);
+		if (lcs_wait_for_threads(card, LCS_SET_MC_THREAD))
+			return -ERESTARTSYS;
+	}
+	LCS_DBF_HEX(3, setup, &card, sizeof(void*));
+	recover_state = card->state;
+
+	ret = lcs_stop_device(card->dev);
+	ret2 = ccw_device_set_offline(card->read.ccwdev);
+	ret3 = ccw_device_set_offline(card->write.ccwdev);
+	if (!ret)
+		ret = (ret2) ? ret2 : ret3;
+	if (ret)
+		LCS_DBF_TEXT_(3, setup, "1err:%d", ret);
+	if (recover_state == DEV_STATE_UP)
+		card->state = DEV_STATE_RECOVER;
+	return 0;
+}
+
+static int
+lcs_shutdown_device(struct ccwgroup_device *ccwgdev)
+{
+	return __lcs_shutdown_device(ccwgdev, 0);
+}
+
+/**
+ * Drive LCS recovery after STARTUP and STARTLAN initiated by the
+ * LAN Gateway.
+ */
+static int
+lcs_recovery(void *ptr)
+{
+	struct lcs_card *card;
+	struct ccwgroup_device *gdev;
+	int rc;
+
+	card = (struct lcs_card *) ptr;
+
+	LCS_DBF_TEXT(4, trace, "recover1");
+	if (!lcs_do_run_thread(card, LCS_RECOVERY_THREAD))
+		return 0;
+	LCS_DBF_TEXT(4, trace, "recover2");
+	gdev = card->gdev;
+	dev_warn(&gdev->dev,
+		"A recovery process has been started for the LCS device\n");
+	__lcs_shutdown_device(gdev, 1);
+	rc = lcs_new_device(gdev);
+	if (!rc)
+		pr_info("Device %s successfully recovered!\n",
+			card->dev->name);
+	else
+		pr_info("Device %s could not be recovered!\n",
+			card->dev->name);
+	lcs_clear_thread_running_bit(card, LCS_RECOVERY_THREAD);
+	return 0;
+}
+
+/**
+ * lcs_remove_device, free buffers and card
+ */
+static void
+lcs_remove_device(struct ccwgroup_device *ccwgdev)
+{
+	struct lcs_card *card;
+
+	card = dev_get_drvdata(&ccwgdev->dev);
+	if (!card)
+		return;
+
+	LCS_DBF_TEXT(3, setup, "remdev");
+	LCS_DBF_HEX(3, setup, &card, sizeof(void*));
+	if (ccwgdev->state == CCWGROUP_ONLINE)
+		lcs_shutdown_device(ccwgdev);
+	if (card->dev)
+		unregister_netdev(card->dev);
+	lcs_cleanup_card(card);
+	lcs_free_card(card);
+	dev_set_drvdata(&ccwgdev->dev, NULL);
+	put_device(&ccwgdev->dev);
+}
+
+static int lcs_pm_suspend(struct lcs_card *card)
+{
+	if (card->dev)
+		netif_device_detach(card->dev);
+	lcs_set_allowed_threads(card, 0);
+	lcs_wait_for_threads(card, 0xffffffff);
+	if (card->state != DEV_STATE_DOWN)
+		__lcs_shutdown_device(card->gdev, 1);
+	return 0;
+}
+
+static int lcs_pm_resume(struct lcs_card *card)
+{
+	int rc = 0;
+
+	if (card->state == DEV_STATE_RECOVER)
+		rc = lcs_new_device(card->gdev);
+	if (card->dev)
+		netif_device_attach(card->dev);
+	if (rc) {
+		dev_warn(&card->gdev->dev, "The lcs device driver "
+			"failed to recover the device\n");
+	}
+	return rc;
+}
+
+static int lcs_prepare(struct ccwgroup_device *gdev)
+{
+	return 0;
+}
+
+static void lcs_complete(struct ccwgroup_device *gdev)
+{
+}
+
+static int lcs_freeze(struct ccwgroup_device *gdev)
+{
+	struct lcs_card *card = dev_get_drvdata(&gdev->dev);
+	return lcs_pm_suspend(card);
+}
+
+static int lcs_thaw(struct ccwgroup_device *gdev)
+{
+	struct lcs_card *card = dev_get_drvdata(&gdev->dev);
+	return lcs_pm_resume(card);
+}
+
+static int lcs_restore(struct ccwgroup_device *gdev)
+{
+	struct lcs_card *card = dev_get_drvdata(&gdev->dev);
+	return lcs_pm_resume(card);
+}
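+/*
+ * The hibernation callbacks above all map onto one suspend/resume pair:
+ * freeze shuts the card down (leaving it in DEV_STATE_RECOVER if it was
+ * up), thaw and restore bring it back via lcs_new_device().
+ */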
+
+static struct ccw_device_id lcs_ids[] = {
+	{CCW_DEVICE(0x3088, 0x08), .driver_info = lcs_channel_type_parallel},
+	{CCW_DEVICE(0x3088, 0x1f), .driver_info = lcs_channel_type_2216},
+	{CCW_DEVICE(0x3088, 0x60), .driver_info = lcs_channel_type_osa2},
+	{},
+};
+MODULE_DEVICE_TABLE(ccw, lcs_ids);
+
+static struct ccw_driver lcs_ccw_driver = {
+	.driver = {
+		.owner	= THIS_MODULE,
+		.name	= "lcs",
+	},
+	.ids	= lcs_ids,
+	.probe	= ccwgroup_probe_ccwdev,
+	.remove	= ccwgroup_remove_ccwdev,
+	.int_class = IRQIO_LCS,
+};
+
+/**
+ * LCS ccwgroup driver registration
+ */
+static struct ccwgroup_driver lcs_group_driver = {
+	.driver = {
+		.owner	= THIS_MODULE,
+		.name	= "lcs",
+	},
+	.ccw_driver  = &lcs_ccw_driver,
+	.setup	     = lcs_probe_device,
+	.remove      = lcs_remove_device,
+	.set_online  = lcs_new_device,
+	.set_offline = lcs_shutdown_device,
+	.prepare     = lcs_prepare,
+	.complete    = lcs_complete,
+	.freeze	     = lcs_freeze,
+	.thaw	     = lcs_thaw,
+	.restore     = lcs_restore,
+};
+
+static ssize_t group_store(struct device_driver *ddrv, const char *buf,
+			   size_t count)
+{
+	int err;
+	err = ccwgroup_create_dev(lcs_root_dev, &lcs_group_driver, 2, buf);
+	return err ? err : count;
+}
+static DRIVER_ATTR_WO(group);
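+/*
+ * Writing the bus IDs of a read/write ccw device pair to this driver
+ * attribute creates the LCS group device, e.g. (hypothetical IDs):
+ *   echo 0.0.0800,0.0.0801 > /sys/bus/ccwgroup/drivers/lcs/group
+ */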
+
+static struct attribute *lcs_drv_attrs[] = {
+	&driver_attr_group.attr,
+	NULL,
+};
+static struct attribute_group lcs_drv_attr_group = {
+	.attrs = lcs_drv_attrs,
+};
+static const struct attribute_group *lcs_drv_attr_groups[] = {
+	&lcs_drv_attr_group,
+	NULL,
+};
+
+/**
+ *  LCS Module/Kernel initialization function
+ */
+static int
+__init lcs_init_module(void)
+{
+	int rc;
+
+	pr_info("Loading %s\n", version);
+	rc = lcs_register_debug_facility();
+	if (rc)
+		goto out_err;
+	LCS_DBF_TEXT(0, setup, "lcsinit");
+	lcs_root_dev = root_device_register("lcs");
+	rc = PTR_ERR_OR_ZERO(lcs_root_dev);
+	if (rc)
+		goto register_err;
+	rc = ccw_driver_register(&lcs_ccw_driver);
+	if (rc)
+		goto ccw_err;
+	lcs_group_driver.driver.groups = lcs_drv_attr_groups;
+	rc = ccwgroup_driver_register(&lcs_group_driver);
+	if (rc)
+		goto ccwgroup_err;
+	return 0;
+
+ccwgroup_err:
+	ccw_driver_unregister(&lcs_ccw_driver);
+ccw_err:
+	root_device_unregister(lcs_root_dev);
+register_err:
+	lcs_unregister_debug_facility();
+out_err:
+	pr_err("Initializing the lcs device driver failed\n");
+	return rc;
+}
+
+/**
+ *  LCS module cleanup function
+ */
+static void
+__exit lcs_cleanup_module(void)
+{
+	pr_info("Terminating lcs module.\n");
+	LCS_DBF_TEXT(0, trace, "cleanup");
+	ccwgroup_driver_unregister(&lcs_group_driver);
+	ccw_driver_unregister(&lcs_ccw_driver);
+	root_device_unregister(lcs_root_dev);
+	lcs_unregister_debug_facility();
+}
+
+module_init(lcs_init_module);
+module_exit(lcs_cleanup_module);
+
+MODULE_AUTHOR("Frank Pavlic <fpavlic@de.ibm.com>");
+MODULE_LICENSE("GPL");
+
diff --git a/drivers/s390/net/lcs.h b/drivers/s390/net/lcs.h
new file mode 100644
index 0000000..bd52caa
--- /dev/null
+++ b/drivers/s390/net/lcs.h
@@ -0,0 +1,342 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*lcs.h*/
+
+#include <linux/interrupt.h>
+#include <linux/netdevice.h>
+#include <linux/skbuff.h>
+#include <linux/workqueue.h>
+#include <linux/refcount.h>
+#include <asm/ccwdev.h>
+
+#define LCS_DBF_TEXT(level, name, text) \
+	do { \
+		debug_text_event(lcs_dbf_##name, level, text); \
+	} while (0)
+
+#define LCS_DBF_HEX(level, name, addr, len) \
+	do { \
+		debug_event(lcs_dbf_##name, level, (void *)(addr), len); \
+	} while (0)
+
+#define LCS_DBF_TEXT_(level, name, text...) \
+	do { \
+		if (debug_level_enabled(lcs_dbf_##name, level)) { \
+			sprintf(debug_buffer, text); \
+			debug_text_event(lcs_dbf_##name, level, debug_buffer); \
+		} \
+	} while (0)
+
+/**
+ * Map a ccw device to its lcs_card via the ccwgroup device's drvdata.
+ */
+#define CARD_FROM_DEV(cdev) \
+	(struct lcs_card *) dev_get_drvdata( \
+		&((struct ccwgroup_device *)dev_get_drvdata(&cdev->dev))->dev)
+
+/**
+ * Enum for classifying detected devices.
+ */
+enum lcs_channel_types {
+	/* Device is not a channel  */
+	lcs_channel_type_none,
+
+	/* Device is a 2216 parallel channel */
+	lcs_channel_type_parallel,
+
+	/* Device is a 2216 channel */
+	lcs_channel_type_2216,
+
+	/* Device is an OSA2 card */
+	lcs_channel_type_osa2
+};
+
+/**
+ * CCW commands used in this driver
+ */
+#define LCS_CCW_WRITE		0x01
+#define LCS_CCW_READ		0x02
+#define LCS_CCW_TRANSFER	0x08
+
+/**
+ * LCS device status primitives
+ */
+#define LCS_CMD_STARTLAN	0x01
+#define LCS_CMD_STOPLAN		0x02
+#define LCS_CMD_LANSTAT		0x04
+#define LCS_CMD_STARTUP		0x07
+#define LCS_CMD_SHUTDOWN	0x08
+#define LCS_CMD_QIPASSIST	0xb2
+#define LCS_CMD_SETIPM		0xb4
+#define LCS_CMD_DELIPM		0xb5
+
+#define LCS_INITIATOR_TCPIP	0x00
+#define LCS_INITIATOR_LGW	0x01
+#define LCS_STD_CMD_SIZE	16
+#define LCS_MULTICAST_CMD_SIZE	404
+
+/**
+ * LCS IP assist masks, only used when multicast is switched on
+ */
+/* Not supported by LCS */
+#define LCS_IPASS_ARP_PROCESSING	0x0001
+#define LCS_IPASS_IN_CHECKSUM_SUPPORT	0x0002
+#define LCS_IPASS_OUT_CHECKSUM_SUPPORT	0x0004
+#define LCS_IPASS_IP_FRAG_REASSEMBLY	0x0008
+#define LCS_IPASS_IP_FILTERING		0x0010
+/* Supported by lcs 3172 */
+#define LCS_IPASS_IPV6_SUPPORT		0x0020
+#define LCS_IPASS_MULTICAST_SUPPORT	0x0040
+
+/**
+ * LCS sense byte definitions
+ */
+#define LCS_SENSE_BYTE_0 		0
+#define LCS_SENSE_BYTE_1 		1
+#define LCS_SENSE_BYTE_2 		2
+#define LCS_SENSE_BYTE_3 		3
+#define LCS_SENSE_INTERFACE_DISCONNECT	0x01
+#define LCS_SENSE_EQUIPMENT_CHECK	0x10
+#define LCS_SENSE_BUS_OUT_CHECK		0x20
+#define LCS_SENSE_INTERVENTION_REQUIRED 0x40
+#define LCS_SENSE_CMD_REJECT		0x80
+#define LCS_SENSE_RESETTING_EVENT	0x80
+#define LCS_SENSE_DEVICE_ONLINE		0x20
+
+/**
+ * LCS packet type definitions
+ */
+#define LCS_FRAME_TYPE_CONTROL		0
+#define LCS_FRAME_TYPE_ENET		1
+#define LCS_FRAME_TYPE_TR		2
+#define LCS_FRAME_TYPE_FDDI		7
+#define LCS_FRAME_TYPE_AUTO		-1
+
+/**
+ * Some more definitions; we will sort them later
+ */
+#define LCS_ILLEGAL_OFFSET		0xffff
+#define LCS_IOBUFFERSIZE		0x5000
+#define LCS_NUM_BUFFS			32	/* needs to be power of 2 */
+#define LCS_MAC_LENGTH			6
+#define LCS_INVALID_PORT_NO		-1
+#define LCS_LANCMD_TIMEOUT_DEFAULT      5
+
+/**
+ * Multicast state
+ */
+#define	 LCS_IPM_STATE_SET_REQUIRED	0
+#define	 LCS_IPM_STATE_DEL_REQUIRED	1
+#define	 LCS_IPM_STATE_ON_CARD		2
+
+/**
+ * LCS IP Assist declarations, seemingly only used for multicast.
+ * Alternate names for the checksum assist bits; the remaining assist
+ * bits are already defined identically above.
+ */
+#define LCS_IPASS_INBOUND_CSUM_SUPP	0x0002
+#define LCS_IPASS_OUTBOUND_CSUM_SUPP	0x0004
+
+/**
+ * LCS Buffer states
+ */
+enum lcs_buffer_states {
+	LCS_BUF_STATE_EMPTY,	/* buffer is empty */
+	LCS_BUF_STATE_LOCKED,	/* buffer is locked, don't touch */
+	LCS_BUF_STATE_READY,	/* buffer is ready for read/write */
+	LCS_BUF_STATE_PROCESSED,
+};
+
+/**
+ * LCS Channel State Machine declarations
+ */
+enum lcs_channel_states {
+	LCS_CH_STATE_INIT,
+	LCS_CH_STATE_HALTED,
+	LCS_CH_STATE_STOPPED,
+	LCS_CH_STATE_RUNNING,
+	LCS_CH_STATE_SUSPENDED,
+	LCS_CH_STATE_CLEARED,
+	LCS_CH_STATE_ERROR,
+};
+
+/**
+ * LCS device state machine
+ */
+enum lcs_dev_states {
+	DEV_STATE_DOWN,
+	DEV_STATE_UP,
+	DEV_STATE_RECOVER,
+};
+
+enum lcs_threads {
+	LCS_SET_MC_THREAD 	= 1,
+	LCS_RECOVERY_THREAD 	= 2,
+};
+
+/**
+ * LCS struct declarations
+ */
+struct lcs_header {
+	__u16  offset;
+	__u8   type;
+	__u8   slot;
+}  __attribute__ ((packed));
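+/*
+ * An LCS I/O buffer carries a chain of frames, each introduced by the
+ * header above:
+ *
+ *   | offset | type | slot | payload ... | offset | ... | 0x0000
+ *
+ * Each offset field holds the byte offset of the *next* header within
+ * the buffer; a zero offset terminates the chain.
+ */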
+
+struct lcs_ip_mac_pair {
+	__be32  ip_addr;
+	__u8   mac_addr[LCS_MAC_LENGTH];
+	__u8   reserved[2];
+}  __attribute__ ((packed));
+
+struct lcs_ipm_list {
+	struct list_head list;
+	struct lcs_ip_mac_pair ipm;
+	__u8 ipm_state;
+};
+
+struct lcs_cmd {
+	__u16  offset;
+	__u8   type;
+	__u8   slot;
+	__u8   cmd_code;
+	__u8   initiator;
+	__u16  sequence_no;
+	__u16  return_code;
+	union {
+		struct {
+			__u8   lan_type;
+			__u8   portno;
+			__u16  parameter_count;
+			__u8   operator_flags[3];
+			__u8   reserved[3];
+		} lcs_std_cmd;
+		struct {
+			__u16  unused1;
+			__u16  buff_size;
+			__u8   unused2[6];
+		} lcs_startup;
+		struct {
+			__u8   lan_type;
+			__u8   portno;
+			__u8   unused[10];
+			__u8   mac_addr[LCS_MAC_LENGTH];
+			__u32  num_packets_deblocked;
+			__u32  num_packets_blocked;
+			__u32  num_packets_tx_on_lan;
+			__u32  num_tx_errors_detected;
+			__u32  num_tx_packets_disgarded;
+			__u32  num_packets_rx_from_lan;
+			__u32  num_rx_errors_detected;
+			__u32  num_rx_discarded_nobuffs_avail;
+			__u32  num_rx_packets_too_large;
+		} lcs_lanstat_cmd;
+#ifdef CONFIG_IP_MULTICAST
+		struct {
+			__u8   lan_type;
+			__u8   portno;
+			__u16  num_ip_pairs;
+			__u16  ip_assists_supported;
+			__u16  ip_assists_enabled;
+			__u16  version;
+			struct {
+				struct lcs_ip_mac_pair
+				ip_mac_pair[32];
+				__u32	  response_data;
+			} lcs_ipass_ctlmsg __attribute__ ((packed));
+		} lcs_qipassist __attribute__ ((packed));
+#endif /* CONFIG_IP_MULTICAST */
+	} cmd __attribute__ ((packed));
+}  __attribute__ ((packed));
+
+/**
+ * Forward declarations.
+ */
+struct lcs_card;
+struct lcs_channel;
+
+/**
+ * Definition of an lcs buffer.
+ */
+struct lcs_buffer {
+	enum lcs_buffer_states state;
+	void *data;
+	int count;
+	/* Callback for completion notification. */
+	void (*callback)(struct lcs_channel *, struct lcs_buffer *);
+};
+
+struct lcs_reply {
+	struct list_head list;
+	__u16 sequence_no;
+	refcount_t refcnt;
+	/* Callback for completion notification. */
+	void (*callback)(struct lcs_card *, struct lcs_cmd *);
+	wait_queue_head_t wait_q;
+	struct lcs_card *card;
+	struct timer_list timer;
+	int received;
+	int rc;
+};
+
+/**
+ * Definition of an lcs channel
+ */
+struct lcs_channel {
+	enum lcs_channel_states state;
+	struct ccw_device *ccwdev;
+	struct ccw1 ccws[LCS_NUM_BUFFS + 1];
+	wait_queue_head_t wait_q;
+	struct tasklet_struct irq_tasklet;
+	struct lcs_buffer iob[LCS_NUM_BUFFS];
+	int io_idx;
+	int buf_idx;
+};
+
+/**
+ * definition of the lcs card
+ */
+struct lcs_card {
+	spinlock_t lock;
+	spinlock_t ipm_lock;
+	enum lcs_dev_states state;
+	struct net_device *dev;
+	struct net_device_stats stats;
+	__be16 (*lan_type_trans)(struct sk_buff *skb,
+					 struct net_device *dev);
+	struct ccwgroup_device *gdev;
+	struct lcs_channel read;
+	struct lcs_channel write;
+	struct lcs_buffer *tx_buffer;
+	int tx_emitted;
+	struct list_head lancmd_waiters;
+	int lancmd_timeout;
+
+	struct work_struct kernel_thread_starter;
+	spinlock_t mask_lock;
+	unsigned long thread_start_mask;
+	unsigned long thread_running_mask;
+	unsigned long thread_allowed_mask;
+	wait_queue_head_t wait_q;
+
+#ifdef CONFIG_IP_MULTICAST
+	struct list_head ipm_list;
+#endif
+	__u8 mac[LCS_MAC_LENGTH];
+	__u16 ip_assists_supported;
+	__u16 ip_assists_enabled;
+	__s8 lan_type;
+	__u32 pkt_seq;
+	__u16 sequence_no;
+	__s16 portno;
+	/* Some info copied from probeinfo */
+	u8 device_forced;
+	u8 max_port_no;
+	u8 hint_port_no;
+	s16 port_protocol_no;
+}  __attribute__ ((aligned(8)));
+
diff --git a/drivers/s390/net/netiucv.c b/drivers/s390/net/netiucv.c
new file mode 100644
index 0000000..5ce2424
--- /dev/null
+++ b/drivers/s390/net/netiucv.c
@@ -0,0 +1,2216 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * IUCV network driver
+ *
+ * Copyright IBM Corp. 2001, 2009
+ *
+ * Author(s):
+ *	Original netiucv driver:
+ *		Fritz Elfert (elfert@de.ibm.com, felfert@millenux.com)
+ *	Sysfs integration and all bugs therein:
+ *		Cornelia Huck (cornelia.huck@de.ibm.com)
+ *	PM functions:
+ *		Ursula Braun (ursula.braun@de.ibm.com)
+ *
+ * Documentation used:
+ *  the source of the original IUCV driver by:
+ *    Stefan Hegewald <hegewald@de.ibm.com>
+ *    Hartmut Penner <hpenner@de.ibm.com>
+ *    Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com)
+ *    Martin Schwidefsky (schwidefsky@de.ibm.com)
+ *    Alan Altmark (Alan_Altmark@us.ibm.com)  Sept. 2000
+ */
+
+#define KMSG_COMPONENT "netiucv"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#undef DEBUG
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <linux/interrupt.h>
+#include <linux/timer.h>
+#include <linux/bitops.h>
+
+#include <linux/signal.h>
+#include <linux/string.h>
+#include <linux/device.h>
+
+#include <linux/ip.h>
+#include <linux/if_arp.h>
+#include <linux/tcp.h>
+#include <linux/skbuff.h>
+#include <linux/ctype.h>
+#include <net/dst.h>
+
+#include <asm/io.h>
+#include <linux/uaccess.h>
+#include <asm/ebcdic.h>
+
+#include <net/iucv/iucv.h>
+#include "fsm.h"
+
+MODULE_AUTHOR("(C) 2001 IBM Corporation by Fritz Elfert (felfert@millenux.com)");
+MODULE_DESCRIPTION("Linux for S/390 IUCV network driver");
+
+/**
+ * Debug Facility stuff
+ */
+#define IUCV_DBF_SETUP_NAME "iucv_setup"
+#define IUCV_DBF_SETUP_LEN 64
+#define IUCV_DBF_SETUP_PAGES 2
+#define IUCV_DBF_SETUP_NR_AREAS 1
+#define IUCV_DBF_SETUP_LEVEL 3
+
+#define IUCV_DBF_DATA_NAME "iucv_data"
+#define IUCV_DBF_DATA_LEN 128
+#define IUCV_DBF_DATA_PAGES 2
+#define IUCV_DBF_DATA_NR_AREAS 1
+#define IUCV_DBF_DATA_LEVEL 2
+
+#define IUCV_DBF_TRACE_NAME "iucv_trace"
+#define IUCV_DBF_TRACE_LEN 16
+#define IUCV_DBF_TRACE_PAGES 4
+#define IUCV_DBF_TRACE_NR_AREAS 1
+#define IUCV_DBF_TRACE_LEVEL 3
+
+#define IUCV_DBF_TEXT(name, level, text) \
+	do { \
+		debug_text_event(iucv_dbf_##name, level, text); \
+	} while (0)
+
+#define IUCV_DBF_HEX(name, level, addr, len) \
+	do { \
+		debug_event(iucv_dbf_##name, level, (void *)(addr), len); \
+	} while (0)
+
+DECLARE_PER_CPU(char[256], iucv_dbf_txt_buf);
+
+#define IUCV_DBF_TEXT_(name, level, text...) \
+	do { \
+		if (debug_level_enabled(iucv_dbf_##name, level)) { \
+			char* __buf = get_cpu_var(iucv_dbf_txt_buf); \
+			sprintf(__buf, text); \
+			debug_text_event(iucv_dbf_##name, level, __buf); \
+			put_cpu_var(iucv_dbf_txt_buf); \
+		} \
+	} while (0)
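+/*
+ * The formatted trace text is built in a per-CPU buffer: get_cpu_var()
+ * disables preemption until put_cpu_var(), so the buffer cannot be
+ * reused concurrently on the same CPU.
+ */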
+
+#define IUCV_DBF_SPRINTF(name, level, text...) \
+	do { \
+		debug_sprintf_event(iucv_dbf_trace, level, ##text); \
+	} while (0)
+
+/**
+ * some more debug stuff
+ */
+#define PRINTK_HEADER " iucv: "       /* for debugging */
+
+/* dummy device to make sure netiucv_pm functions are called */
+static struct device *netiucv_dev;
+
+static int netiucv_pm_prepare(struct device *);
+static void netiucv_pm_complete(struct device *);
+static int netiucv_pm_freeze(struct device *);
+static int netiucv_pm_restore_thaw(struct device *);
+
+static const struct dev_pm_ops netiucv_pm_ops = {
+	.prepare = netiucv_pm_prepare,
+	.complete = netiucv_pm_complete,
+	.freeze = netiucv_pm_freeze,
+	.thaw = netiucv_pm_restore_thaw,
+	.restore = netiucv_pm_restore_thaw,
+};
+
+static struct device_driver netiucv_driver = {
+	.owner = THIS_MODULE,
+	.name = "netiucv",
+	.bus  = &iucv_bus,
+	.pm = &netiucv_pm_ops,
+};
+
+static int netiucv_callback_connreq(struct iucv_path *, u8 *, u8 *);
+static void netiucv_callback_connack(struct iucv_path *, u8 *);
+static void netiucv_callback_connrej(struct iucv_path *, u8 *);
+static void netiucv_callback_connsusp(struct iucv_path *, u8 *);
+static void netiucv_callback_connres(struct iucv_path *, u8 *);
+static void netiucv_callback_rx(struct iucv_path *, struct iucv_message *);
+static void netiucv_callback_txdone(struct iucv_path *, struct iucv_message *);
+
+static struct iucv_handler netiucv_handler = {
+	.path_pending	  = netiucv_callback_connreq,
+	.path_complete	  = netiucv_callback_connack,
+	.path_severed	  = netiucv_callback_connrej,
+	.path_quiesced	  = netiucv_callback_connsusp,
+	.path_resumed	  = netiucv_callback_connres,
+	.message_pending  = netiucv_callback_rx,
+	.message_complete = netiucv_callback_txdone
+};
+
+/**
+ * Per connection profiling data
+ */
+struct connection_profile {
+	unsigned long maxmulti;
+	unsigned long maxcqueue;
+	unsigned long doios_single;
+	unsigned long doios_multi;
+	unsigned long txlen;
+	unsigned long tx_time;
+	unsigned long send_stamp;
+	unsigned long tx_pending;
+	unsigned long tx_max_pending;
+};
+
+/**
+ * Representation of one iucv connection
+ */
+struct iucv_connection {
+	struct list_head	  list;
+	struct iucv_path	  *path;
+	struct sk_buff            *rx_buff;
+	struct sk_buff            *tx_buff;
+	struct sk_buff_head       collect_queue;
+	struct sk_buff_head	  commit_queue;
+	spinlock_t                collect_lock;
+	int                       collect_len;
+	int                       max_buffsize;
+	fsm_timer                 timer;
+	fsm_instance              *fsm;
+	struct net_device         *netdev;
+	struct connection_profile prof;
+	char                      userid[9];
+	char			  userdata[17];
+};
+
+/**
+ * Linked list of all connection structs.
+ */
+static LIST_HEAD(iucv_connection_list);
+static DEFINE_RWLOCK(iucv_connection_rwlock);
+
+/**
+ * Representation of event-data for the
+ * connection state machine.
+ */
+struct iucv_event {
+	struct iucv_connection *conn;
+	void                   *data;
+};
+
+/**
+ * Private part of the network device structure
+ */
+struct netiucv_priv {
+	struct net_device_stats stats;
+	unsigned long           tbusy;
+	fsm_instance            *fsm;
+	struct iucv_connection	*conn;
+	struct device           *dev;
+	int			 pm_state;
+};
+
+/**
+ * Link level header for a packet.
+ */
+struct ll_header {
+	u16 next;
+};
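+/*
+ * Each IUCV message carries one or more packets, each preceded by an
+ * ll_header whose "next" field holds the offset of the following
+ * header; a zero "next" terminates the chain (see netiucv_unpack_skb()
+ * and conn_action_txdone()).
+ */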
+
+#define NETIUCV_HDRLEN		 (sizeof(struct ll_header))
+#define NETIUCV_BUFSIZE_MAX	 65537
+#define NETIUCV_BUFSIZE_DEFAULT  NETIUCV_BUFSIZE_MAX
+#define NETIUCV_MTU_MAX          (NETIUCV_BUFSIZE_MAX - NETIUCV_HDRLEN)
+#define NETIUCV_MTU_DEFAULT      9216
+#define NETIUCV_QUEUELEN_DEFAULT 50
+#define NETIUCV_TIMEOUT_5SEC     5000
+
+/**
+ * Compatibility macros for busy handling
+ * of network devices.
+ */
+static void netiucv_clear_busy(struct net_device *dev)
+{
+	struct netiucv_priv *priv = netdev_priv(dev);
+	clear_bit(0, &priv->tbusy);
+	netif_wake_queue(dev);
+}
+
+static int netiucv_test_and_set_busy(struct net_device *dev)
+{
+	struct netiucv_priv *priv = netdev_priv(dev);
+	netif_stop_queue(dev);
+	return test_and_set_bit(0, &priv->tbusy);
+}
+
+static u8 iucvMagic_ascii[16] = {
+	0x30, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+	0x30, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20
+};
+
+static u8 iucvMagic_ebcdic[16] = {
+	0xF0, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40,
+	0xF0, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40
+};
+
+/**
+ * Convert an iucv userId to its printable form
+ * (strip trailing whitespace).
+ *
+ * Returns the printable string (static data!).
+ */
+static char *netiucv_printname(char *name, int len)
+{
+	static char tmp[17];
+	char *p = tmp;
+	memcpy(tmp, name, len);
+	tmp[len] = '\0';
+	while (*p && ((p - tmp) < len) && (!isspace(*p)))
+		p++;
+	*p = '\0';
+	return tmp;
+}
+
+static char *netiucv_printuser(struct iucv_connection *conn)
+{
+	static char tmp_uid[9];
+	static char tmp_udat[17];
+	static char buf[100];
+
+	if (memcmp(conn->userdata, iucvMagic_ebcdic, 16)) {
+		tmp_uid[8] = '\0';
+		tmp_udat[16] = '\0';
+		memcpy(tmp_uid, netiucv_printname(conn->userid, 8), 8);
+		memcpy(tmp_udat, conn->userdata, 16);
+		EBCASC(tmp_udat, 16);
+		memcpy(tmp_udat, netiucv_printname(tmp_udat, 16), 16);
+		sprintf(buf, "%s.%s", tmp_uid, tmp_udat);
+		return buf;
+	} else
+		return netiucv_printname(conn->userid, 8);
+}
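+/*
+ * Note: like netiucv_printname(), netiucv_printuser() returns a pointer
+ * to static storage, so the result must be consumed before the next
+ * call.
+ */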
+
+/**
+ * States of the interface statemachine.
+ */
+enum dev_states {
+	DEV_STATE_STOPPED,
+	DEV_STATE_STARTWAIT,
+	DEV_STATE_STOPWAIT,
+	DEV_STATE_RUNNING,
+	/**
+	 * MUST always be the last element!!
+	 */
+	NR_DEV_STATES
+};
+
+static const char *dev_state_names[] = {
+	"Stopped",
+	"StartWait",
+	"StopWait",
+	"Running",
+};
+
+/**
+ * Events of the interface state machine.
+ */
+enum dev_events {
+	DEV_EVENT_START,
+	DEV_EVENT_STOP,
+	DEV_EVENT_CONUP,
+	DEV_EVENT_CONDOWN,
+	/**
+	 * MUST always be the last element!!
+	 */
+	NR_DEV_EVENTS
+};
+
+static const char *dev_event_names[] = {
+	"Start",
+	"Stop",
+	"Connection up",
+	"Connection down",
+};
+
+/**
+ * Events of the connection state machine
+ */
+enum conn_events {
+	/**
+	 * Events representing callbacks from the
+	 * lowlevel iucv layer
+	 */
+	CONN_EVENT_CONN_REQ,
+	CONN_EVENT_CONN_ACK,
+	CONN_EVENT_CONN_REJ,
+	CONN_EVENT_CONN_SUS,
+	CONN_EVENT_CONN_RES,
+	CONN_EVENT_RX,
+	CONN_EVENT_TXDONE,
+
+	/**
+	 * Events representing error return codes from
+	 * calls to the lowlevel iucv layer (none at present)
+	 */
+
+	/**
+	 * Event representing timer expiry.
+	 */
+	CONN_EVENT_TIMER,
+
+	/**
+	 * Events representing commands from upper levels.
+	 */
+	CONN_EVENT_START,
+	CONN_EVENT_STOP,
+
+	/**
+	 * MUST always be the last element!!
+	 */
+	NR_CONN_EVENTS,
+};
+
+static const char *conn_event_names[] = {
+	"Remote connection request",
+	"Remote connection acknowledge",
+	"Remote connection reject",
+	"Connection suspended",
+	"Connection resumed",
+	"Data received",
+	"Data sent",
+
+	"Timer",
+
+	"Start",
+	"Stop",
+};
+
+/**
+ * States of the connection state machine.
+ */
+enum conn_states {
+	/**
+	 * Connection not assigned to any device,
+	 * initial state, invalid
+	 */
+	CONN_STATE_INVALID,
+
+	/**
+	 * Userid assigned but not operating
+	 */
+	CONN_STATE_STOPPED,
+
+	/**
+	 * Connection registered,
+	 * no connection request sent yet,
+	 * no connection request received
+	 */
+	CONN_STATE_STARTWAIT,
+
+	/**
+	 * Connection registered and connection request sent,
+	 * no acknowledge and no connection request received yet.
+	 */
+	CONN_STATE_SETUPWAIT,
+
+	/**
+	 * Connection up and running idle
+	 */
+	CONN_STATE_IDLE,
+
+	/**
+	 * Data sent, awaiting CONN_EVENT_TXDONE
+	 */
+	CONN_STATE_TX,
+
+	/**
+	 * Error during registration.
+	 */
+	CONN_STATE_REGERR,
+
+	/**
+	 * Error during connection setup.
+	 */
+	CONN_STATE_CONNERR,
+
+	/**
+	 * MUST always be the last element!!
+	 */
+	NR_CONN_STATES,
+};
+
+/* Must match enum conn_states above. */
+static const char *conn_state_names[] = {
+	"Invalid",
+	"Stopped",
+	"StartWait",
+	"SetupWait",
+	"Idle",
+	"TX",
+	"Registration error",
+	"Connect error",
+};
+
+
+/**
+ * Debug Facility Stuff
+ */
+static debug_info_t *iucv_dbf_setup;
+static debug_info_t *iucv_dbf_data;
+static debug_info_t *iucv_dbf_trace;
+
+DEFINE_PER_CPU(char[256], iucv_dbf_txt_buf);
+
+static void iucv_unregister_dbf_views(void)
+{
+	debug_unregister(iucv_dbf_setup);
+	debug_unregister(iucv_dbf_data);
+	debug_unregister(iucv_dbf_trace);
+}
+
+static int iucv_register_dbf_views(void)
+{
+	iucv_dbf_setup = debug_register(IUCV_DBF_SETUP_NAME,
+					IUCV_DBF_SETUP_PAGES,
+					IUCV_DBF_SETUP_NR_AREAS,
+					IUCV_DBF_SETUP_LEN);
+	iucv_dbf_data = debug_register(IUCV_DBF_DATA_NAME,
+				       IUCV_DBF_DATA_PAGES,
+				       IUCV_DBF_DATA_NR_AREAS,
+				       IUCV_DBF_DATA_LEN);
+	iucv_dbf_trace = debug_register(IUCV_DBF_TRACE_NAME,
+					IUCV_DBF_TRACE_PAGES,
+					IUCV_DBF_TRACE_NR_AREAS,
+					IUCV_DBF_TRACE_LEN);
+
+	if ((iucv_dbf_setup == NULL) || (iucv_dbf_data == NULL) ||
+	    (iucv_dbf_trace == NULL)) {
+		iucv_unregister_dbf_views();
+		return -ENOMEM;
+	}
+	debug_register_view(iucv_dbf_setup, &debug_hex_ascii_view);
+	debug_set_level(iucv_dbf_setup, IUCV_DBF_SETUP_LEVEL);
+
+	debug_register_view(iucv_dbf_data, &debug_hex_ascii_view);
+	debug_set_level(iucv_dbf_data, IUCV_DBF_DATA_LEVEL);
+
+	debug_register_view(iucv_dbf_trace, &debug_hex_ascii_view);
+	debug_set_level(iucv_dbf_trace, IUCV_DBF_TRACE_LEVEL);
+
+	return 0;
+}
+
+/*
+ * Callback-wrappers, called from lowlevel iucv layer.
+ */
+
+static void netiucv_callback_rx(struct iucv_path *path,
+				struct iucv_message *msg)
+{
+	struct iucv_connection *conn = path->private;
+	struct iucv_event ev;
+
+	ev.conn = conn;
+	ev.data = msg;
+	fsm_event(conn->fsm, CONN_EVENT_RX, &ev);
+}
+
+static void netiucv_callback_txdone(struct iucv_path *path,
+				    struct iucv_message *msg)
+{
+	struct iucv_connection *conn = path->private;
+	struct iucv_event ev;
+
+	ev.conn = conn;
+	ev.data = msg;
+	fsm_event(conn->fsm, CONN_EVENT_TXDONE, &ev);
+}
+
+static void netiucv_callback_connack(struct iucv_path *path, u8 ipuser[16])
+{
+	struct iucv_connection *conn = path->private;
+
+	fsm_event(conn->fsm, CONN_EVENT_CONN_ACK, conn);
+}
+
+static int netiucv_callback_connreq(struct iucv_path *path, u8 *ipvmid,
+				    u8 *ipuser)
+{
+	struct iucv_connection *conn = path->private;
+	struct iucv_event ev;
+	static char tmp_user[9];
+	static char tmp_udat[17];
+	int rc;
+
+	rc = -EINVAL;
+	memcpy(tmp_user, netiucv_printname(ipvmid, 8), 8);
+	memcpy(tmp_udat, ipuser, 16);
+	EBCASC(tmp_udat, 16);
+	read_lock_bh(&iucv_connection_rwlock);
+	list_for_each_entry(conn, &iucv_connection_list, list) {
+		if (strncmp(ipvmid, conn->userid, 8) ||
+		    strncmp(ipuser, conn->userdata, 16))
+			continue;
+		/* Found a matching connection for this path. */
+		conn->path = path;
+		ev.conn = conn;
+		ev.data = path;
+		fsm_event(conn->fsm, CONN_EVENT_CONN_REQ, &ev);
+		rc = 0;
+	}
+	IUCV_DBF_TEXT_(setup, 2, "Connection requested for %s.%s\n",
+		       tmp_user, netiucv_printname(tmp_udat, 16));
+	read_unlock_bh(&iucv_connection_rwlock);
+	return rc;
+}
+
+static void netiucv_callback_connrej(struct iucv_path *path, u8 *ipuser)
+{
+	struct iucv_connection *conn = path->private;
+
+	fsm_event(conn->fsm, CONN_EVENT_CONN_REJ, conn);
+}
+
+static void netiucv_callback_connsusp(struct iucv_path *path, u8 *ipuser)
+{
+	struct iucv_connection *conn = path->private;
+
+	fsm_event(conn->fsm, CONN_EVENT_CONN_SUS, conn);
+}
+
+static void netiucv_callback_connres(struct iucv_path *path, u8 *ipuser)
+{
+	struct iucv_connection *conn = path->private;
+
+	fsm_event(conn->fsm, CONN_EVENT_CONN_RES, conn);
+}
+
+/**
+ * NOP action for state machines
+ */
+static void netiucv_action_nop(fsm_instance *fi, int event, void *arg)
+{
+}
+
+/*
+ * Actions of the connection statemachine
+ */
+
+/**
+ * netiucv_unpack_skb
+ * @conn: The connection where this skb has been received.
+ * @pskb: The received skb.
+ *
+ * Unpack a just received skb and hand it over to upper layers.
+ * Helper function for conn_action_rx.
+ */
+static void netiucv_unpack_skb(struct iucv_connection *conn,
+			       struct sk_buff *pskb)
+{
+	struct net_device     *dev = conn->netdev;
+	struct netiucv_priv   *privptr = netdev_priv(dev);
+	u16 offset = 0;
+
+	skb_put(pskb, NETIUCV_HDRLEN);
+	pskb->dev = dev;
+	pskb->ip_summed = CHECKSUM_NONE;
+	pskb->protocol = cpu_to_be16(ETH_P_IP);
+
+	while (1) {
+		struct sk_buff *skb;
+		struct ll_header *header = (struct ll_header *) pskb->data;
+
+		if (!header->next)
+			break;
+
+		skb_pull(pskb, NETIUCV_HDRLEN);
+		header->next -= offset;
+		offset += header->next;
+		header->next -= NETIUCV_HDRLEN;
+		if (skb_tailroom(pskb) < header->next) {
+			IUCV_DBF_TEXT_(data, 2, "Illegal next field: %d > %d\n",
+				header->next, skb_tailroom(pskb));
+			return;
+		}
+		skb_put(pskb, header->next);
+		skb_reset_mac_header(pskb);
+		skb = dev_alloc_skb(pskb->len);
+		if (!skb) {
+			IUCV_DBF_TEXT(data, 2,
+				"Out of memory in netiucv_unpack_skb\n");
+			privptr->stats.rx_dropped++;
+			return;
+		}
+		skb_copy_from_linear_data(pskb, skb_put(skb, pskb->len),
+					  pskb->len);
+		skb_reset_mac_header(skb);
+		skb->dev = pskb->dev;
+		skb->protocol = pskb->protocol;
+		pskb->ip_summed = CHECKSUM_UNNECESSARY;
+		privptr->stats.rx_packets++;
+		privptr->stats.rx_bytes += skb->len;
+		/*
+		 * Since receiving is always initiated from a tasklet (in iucv.c),
+		 * we must use netif_rx_ni() instead of netif_rx()
+		 */
+		netif_rx_ni(skb);
+		skb_pull(pskb, header->next);
+		skb_put(pskb, NETIUCV_HDRLEN);
+	}
+}
+
+static void conn_action_rx(fsm_instance *fi, int event, void *arg)
+{
+	struct iucv_event *ev = arg;
+	struct iucv_connection *conn = ev->conn;
+	struct iucv_message *msg = ev->data;
+	struct netiucv_priv *privptr = netdev_priv(conn->netdev);
+	int rc;
+
+	IUCV_DBF_TEXT(trace, 4, __func__);
+
+	if (!conn->netdev) {
+		iucv_message_reject(conn->path, msg);
+		IUCV_DBF_TEXT(data, 2,
+			      "Received data for unlinked connection\n");
+		return;
+	}
+	if (msg->length > conn->max_buffsize) {
+		iucv_message_reject(conn->path, msg);
+		privptr->stats.rx_dropped++;
+		IUCV_DBF_TEXT_(data, 2, "msglen %d > max_buffsize %d\n",
+			       msg->length, conn->max_buffsize);
+		return;
+	}
+	conn->rx_buff->data = conn->rx_buff->head;
+	skb_reset_tail_pointer(conn->rx_buff);
+	conn->rx_buff->len = 0;
+	rc = iucv_message_receive(conn->path, msg, 0, conn->rx_buff->data,
+				  msg->length, NULL);
+	if (rc || msg->length < 5) {
+		privptr->stats.rx_errors++;
+		IUCV_DBF_TEXT_(data, 2, "rc %d from iucv_receive\n", rc);
+		return;
+	}
+	netiucv_unpack_skb(conn, conn->rx_buff);
+}
+
+static void conn_action_txdone(fsm_instance *fi, int event, void *arg)
+{
+	struct iucv_event *ev = arg;
+	struct iucv_connection *conn = ev->conn;
+	struct iucv_message *msg = ev->data;
+	struct iucv_message txmsg;
+	struct netiucv_priv *privptr = NULL;
+	u32 single_flag = msg->tag;
+	u32 txbytes = 0;
+	u32 txpackets = 0;
+	u32 stat_maxcq = 0;
+	struct sk_buff *skb;
+	unsigned long saveflags;
+	struct ll_header header;
+	int rc;
+
+	IUCV_DBF_TEXT(trace, 4, __func__);
+
+	if (!conn || !conn->netdev) {
+		IUCV_DBF_TEXT(data, 2,
+			      "Send confirmation for unlinked connection\n");
+		return;
+	}
+	privptr = netdev_priv(conn->netdev);
+	conn->prof.tx_pending--;
+	if (single_flag) {
+		skb = skb_dequeue(&conn->commit_queue);
+		if (skb) {
+			refcount_dec(&skb->users);
+			if (privptr) {
+				privptr->stats.tx_packets++;
+				privptr->stats.tx_bytes +=
+					(skb->len - NETIUCV_HDRLEN
+						  - NETIUCV_HDRLEN);
+			}
+			dev_kfree_skb_any(skb);
+		}
+	}
+	conn->tx_buff->data = conn->tx_buff->head;
+	skb_reset_tail_pointer(conn->tx_buff);
+	conn->tx_buff->len = 0;
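+	/*
+	 * The confirmed send has freed a pending slot; gather everything
+	 * that has accumulated on the collect queue into one multi-packet
+	 * buffer, each packet prefixed by its ll_header, and send it as a
+	 * single IUCV message.
+	 */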
+	spin_lock_irqsave(&conn->collect_lock, saveflags);
+	while ((skb = skb_dequeue(&conn->collect_queue))) {
+		header.next = conn->tx_buff->len + skb->len + NETIUCV_HDRLEN;
+		skb_put_data(conn->tx_buff, &header, NETIUCV_HDRLEN);
+		skb_copy_from_linear_data(skb,
+					  skb_put(conn->tx_buff, skb->len),
+					  skb->len);
+		txbytes += skb->len;
+		txpackets++;
+		stat_maxcq++;
+		refcount_dec(&skb->users);
+		dev_kfree_skb_any(skb);
+	}
+	if (conn->collect_len > conn->prof.maxmulti)
+		conn->prof.maxmulti = conn->collect_len;
+	conn->collect_len = 0;
+	spin_unlock_irqrestore(&conn->collect_lock, saveflags);
+	if (conn->tx_buff->len == 0) {
+		fsm_newstate(fi, CONN_STATE_IDLE);
+		return;
+	}
+
+	header.next = 0;
+	skb_put_data(conn->tx_buff, &header, NETIUCV_HDRLEN);
+	conn->prof.send_stamp = jiffies;
+	txmsg.class = 0;
+	txmsg.tag = 0;
+	rc = iucv_message_send(conn->path, &txmsg, 0, 0,
+			       conn->tx_buff->data, conn->tx_buff->len);
+	conn->prof.doios_multi++;
+	conn->prof.txlen += conn->tx_buff->len;
+	conn->prof.tx_pending++;
+	if (conn->prof.tx_pending > conn->prof.tx_max_pending)
+		conn->prof.tx_max_pending = conn->prof.tx_pending;
+	if (rc) {
+		conn->prof.tx_pending--;
+		fsm_newstate(fi, CONN_STATE_IDLE);
+		if (privptr)
+			privptr->stats.tx_errors += txpackets;
+		IUCV_DBF_TEXT_(data, 2, "rc %d from iucv_send\n", rc);
+	} else {
+		if (privptr) {
+			privptr->stats.tx_packets += txpackets;
+			privptr->stats.tx_bytes += txbytes;
+		}
+		if (stat_maxcq > conn->prof.maxcqueue)
+			conn->prof.maxcqueue = stat_maxcq;
+	}
+}
+
+static void conn_action_connaccept(fsm_instance *fi, int event, void *arg)
+{
+	struct iucv_event *ev = arg;
+	struct iucv_connection *conn = ev->conn;
+	struct iucv_path *path = ev->data;
+	struct net_device *netdev = conn->netdev;
+	struct netiucv_priv *privptr = netdev_priv(netdev);
+	int rc;
+
+	IUCV_DBF_TEXT(trace, 3, __func__);
+
+	conn->path = path;
+	path->msglim = NETIUCV_QUEUELEN_DEFAULT;
+	path->flags = 0;
+	rc = iucv_path_accept(path, &netiucv_handler, conn->userdata , conn);
+	if (rc) {
+		IUCV_DBF_TEXT_(setup, 2, "rc %d from iucv_accept", rc);
+		return;
+	}
+	fsm_newstate(fi, CONN_STATE_IDLE);
+	netdev->tx_queue_len = conn->path->msglim;
+	fsm_event(privptr->fsm, DEV_EVENT_CONUP, netdev);
+}
+
+static void conn_action_connreject(fsm_instance *fi, int event, void *arg)
+{
+	struct iucv_event *ev = arg;
+	struct iucv_path *path = ev->data;
+
+	IUCV_DBF_TEXT(trace, 3, __func__);
+	iucv_path_sever(path, NULL);
+}
+
+static void conn_action_connack(fsm_instance *fi, int event, void *arg)
+{
+	struct iucv_connection *conn = arg;
+	struct net_device *netdev = conn->netdev;
+	struct netiucv_priv *privptr = netdev_priv(netdev);
+
+	IUCV_DBF_TEXT(trace, 3, __func__);
+	fsm_deltimer(&conn->timer);
+	fsm_newstate(fi, CONN_STATE_IDLE);
+	netdev->tx_queue_len = conn->path->msglim;
+	fsm_event(privptr->fsm, DEV_EVENT_CONUP, netdev);
+}
+
+static void conn_action_conntimsev(fsm_instance *fi, int event, void *arg)
+{
+	struct iucv_connection *conn = arg;
+
+	IUCV_DBF_TEXT(trace, 3, __func__);
+	fsm_deltimer(&conn->timer);
+	iucv_path_sever(conn->path, conn->userdata);
+	fsm_newstate(fi, CONN_STATE_STARTWAIT);
+}
+
+static void conn_action_connsever(fsm_instance *fi, int event, void *arg)
+{
+	struct iucv_connection *conn = arg;
+	struct net_device *netdev = conn->netdev;
+	struct netiucv_priv *privptr = netdev_priv(netdev);
+
+	IUCV_DBF_TEXT(trace, 3, __func__);
+
+	fsm_deltimer(&conn->timer);
+	iucv_path_sever(conn->path, conn->userdata);
+	dev_info(privptr->dev, "The peer z/VM guest %s has closed the "
+			       "connection\n", netiucv_printuser(conn));
+	IUCV_DBF_TEXT(data, 2,
+		      "conn_action_connsever: Remote dropped connection\n");
+	fsm_newstate(fi, CONN_STATE_STARTWAIT);
+	fsm_event(privptr->fsm, DEV_EVENT_CONDOWN, netdev);
+}
+
+static void conn_action_start(fsm_instance *fi, int event, void *arg)
+{
+	struct iucv_connection *conn = arg;
+	struct net_device *netdev = conn->netdev;
+	struct netiucv_priv *privptr = netdev_priv(netdev);
+	int rc;
+
+	IUCV_DBF_TEXT(trace, 3, __func__);
+
+	fsm_newstate(fi, CONN_STATE_STARTWAIT);
+
+	/*
+	 * We must set the state before calling iucv_connect because the
+	 * callback handler could be called at any point after the connection
+	 * request is sent
+	 */
+
+	fsm_newstate(fi, CONN_STATE_SETUPWAIT);
+	conn->path = iucv_path_alloc(NETIUCV_QUEUELEN_DEFAULT, 0, GFP_KERNEL);
+	if (!conn->path) {
+		/* no memory for a new path: wait for an inbound connection */
+		IUCV_DBF_TEXT(setup, 2, "no memory for iucv path\n");
+		fsm_newstate(fi, CONN_STATE_STARTWAIT);
+		return;
+	}
+	IUCV_DBF_TEXT_(setup, 2, "%s: connecting to %s ...\n",
+		netdev->name, netiucv_printuser(conn));
+
+	rc = iucv_path_connect(conn->path, &netiucv_handler, conn->userid,
+			       NULL, conn->userdata, conn);
+	switch (rc) {
+	case 0:
+		netdev->tx_queue_len = conn->path->msglim;
+		fsm_addtimer(&conn->timer, NETIUCV_TIMEOUT_5SEC,
+			     CONN_EVENT_TIMER, conn);
+		return;
+	case 11:
+		dev_warn(privptr->dev,
+			"The IUCV device failed to connect to z/VM guest %s\n",
+			netiucv_printname(conn->userid, 8));
+		fsm_newstate(fi, CONN_STATE_STARTWAIT);
+		break;
+	case 12:
+		dev_warn(privptr->dev,
+			"The IUCV device failed to connect to the peer on z/VM"
+			" guest %s\n", netiucv_printname(conn->userid, 8));
+		fsm_newstate(fi, CONN_STATE_STARTWAIT);
+		break;
+	case 13:
+		dev_err(privptr->dev,
+			"Connecting the IUCV device would exceed the maximum"
+			" number of IUCV connections\n");
+		fsm_newstate(fi, CONN_STATE_CONNERR);
+		break;
+	case 14:
+		dev_err(privptr->dev,
+			"z/VM guest %s has too many IUCV connections"
+			" to connect with the IUCV device\n",
+			netiucv_printname(conn->userid, 8));
+		fsm_newstate(fi, CONN_STATE_CONNERR);
+		break;
+	case 15:
+		dev_err(privptr->dev,
+			"The IUCV device cannot connect to a z/VM guest with no"
+			" IUCV authorization\n");
+		fsm_newstate(fi, CONN_STATE_CONNERR);
+		break;
+	default:
+		dev_err(privptr->dev,
+			"Connecting the IUCV device failed with error %d\n",
+			rc);
+		fsm_newstate(fi, CONN_STATE_CONNERR);
+		break;
+	}
+	IUCV_DBF_TEXT_(setup, 5, "iucv_connect rc is %d\n", rc);
+	kfree(conn->path);
+	conn->path = NULL;
+}
+
+static void netiucv_purge_skb_queue(struct sk_buff_head *q)
+{
+	struct sk_buff *skb;
+
+	while ((skb = skb_dequeue(q))) {
+		refcount_dec(&skb->users);
+		dev_kfree_skb_any(skb);
+	}
+}
+
+static void conn_action_stop(fsm_instance *fi, int event, void *arg)
+{
+	struct iucv_event *ev = arg;
+	struct iucv_connection *conn = ev->conn;
+	struct net_device *netdev = conn->netdev;
+	struct netiucv_priv *privptr = netdev_priv(netdev);
+
+	IUCV_DBF_TEXT(trace, 3, __func__);
+
+	fsm_deltimer(&conn->timer);
+	fsm_newstate(fi, CONN_STATE_STOPPED);
+	netiucv_purge_skb_queue(&conn->collect_queue);
+	if (conn->path) {
+		IUCV_DBF_TEXT(trace, 5, "calling iucv_path_sever\n");
+		iucv_path_sever(conn->path, conn->userdata);
+		kfree(conn->path);
+		conn->path = NULL;
+	}
+	netiucv_purge_skb_queue(&conn->commit_queue);
+	fsm_event(privptr->fsm, DEV_EVENT_CONDOWN, netdev);
+}
+
+static void conn_action_inval(fsm_instance *fi, int event, void *arg)
+{
+	struct iucv_connection *conn = arg;
+	struct net_device *netdev = conn->netdev;
+
+	IUCV_DBF_TEXT_(data, 2, "%s('%s'): conn_action_inval called\n",
+		netdev->name, conn->userid);
+}
+
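+/*
+ * Jump table of the connection statemachine: each entry maps a
+ * (state, event) pair to the action to be executed.
+ */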
+static const fsm_node conn_fsm[] = {
+	{ CONN_STATE_INVALID,   CONN_EVENT_START,    conn_action_inval      },
+	{ CONN_STATE_STOPPED,   CONN_EVENT_START,    conn_action_start      },
+
+	{ CONN_STATE_STOPPED,   CONN_EVENT_STOP,     conn_action_stop       },
+	{ CONN_STATE_STARTWAIT, CONN_EVENT_STOP,     conn_action_stop       },
+	{ CONN_STATE_SETUPWAIT, CONN_EVENT_STOP,     conn_action_stop       },
+	{ CONN_STATE_IDLE,      CONN_EVENT_STOP,     conn_action_stop       },
+	{ CONN_STATE_TX,        CONN_EVENT_STOP,     conn_action_stop       },
+	{ CONN_STATE_REGERR,    CONN_EVENT_STOP,     conn_action_stop       },
+	{ CONN_STATE_CONNERR,   CONN_EVENT_STOP,     conn_action_stop       },
+
+	{ CONN_STATE_STOPPED,   CONN_EVENT_CONN_REQ, conn_action_connreject },
+	{ CONN_STATE_STARTWAIT, CONN_EVENT_CONN_REQ, conn_action_connaccept },
+	{ CONN_STATE_SETUPWAIT, CONN_EVENT_CONN_REQ, conn_action_connaccept },
+	{ CONN_STATE_IDLE,      CONN_EVENT_CONN_REQ, conn_action_connreject },
+	{ CONN_STATE_TX,        CONN_EVENT_CONN_REQ, conn_action_connreject },
+
+	{ CONN_STATE_SETUPWAIT, CONN_EVENT_CONN_ACK, conn_action_connack    },
+	{ CONN_STATE_SETUPWAIT, CONN_EVENT_TIMER,    conn_action_conntimsev },
+
+	{ CONN_STATE_SETUPWAIT, CONN_EVENT_CONN_REJ, conn_action_connsever  },
+	{ CONN_STATE_IDLE,      CONN_EVENT_CONN_REJ, conn_action_connsever  },
+	{ CONN_STATE_TX,        CONN_EVENT_CONN_REJ, conn_action_connsever  },
+
+	{ CONN_STATE_IDLE,      CONN_EVENT_RX,       conn_action_rx         },
+	{ CONN_STATE_TX,        CONN_EVENT_RX,       conn_action_rx         },
+
+	{ CONN_STATE_TX,        CONN_EVENT_TXDONE,   conn_action_txdone     },
+	{ CONN_STATE_IDLE,      CONN_EVENT_TXDONE,   conn_action_txdone     },
+};
+
+static const int CONN_FSM_LEN = ARRAY_SIZE(conn_fsm);
+
+
+/*
+ * Actions for interface - statemachine.
+ */
+
+/**
+ * dev_action_start
+ * @fi: An instance of an interface statemachine.
+ * @event: The event that just happened.
+ * @arg: Generic pointer, cast from struct net_device * upon call.
+ *
+ * Start up the connection by sending CONN_EVENT_START to it.
+ */
+static void dev_action_start(fsm_instance *fi, int event, void *arg)
+{
+	struct net_device   *dev = arg;
+	struct netiucv_priv *privptr = netdev_priv(dev);
+
+	IUCV_DBF_TEXT(trace, 3, __func__);
+
+	fsm_newstate(fi, DEV_STATE_STARTWAIT);
+	fsm_event(privptr->conn->fsm, CONN_EVENT_START, privptr->conn);
+}
+
+/**
+ * Shut down the connection by sending CONN_EVENT_STOP to it.
+ *
+ * @param fi    An instance of an interface statemachine.
+ * @param event The event that just happened.
+ * @param arg   Generic pointer, cast from struct net_device * upon call.
+ */
+static void
+dev_action_stop(fsm_instance *fi, int event, void *arg)
+{
+	struct net_device   *dev = arg;
+	struct netiucv_priv *privptr = netdev_priv(dev);
+	struct iucv_event   ev;
+
+	IUCV_DBF_TEXT(trace, 3, __func__);
+
+	ev.conn = privptr->conn;
+
+	fsm_newstate(fi, DEV_STATE_STOPWAIT);
+	fsm_event(privptr->conn->fsm, CONN_EVENT_STOP, &ev);
+}
+
+/**
+ * Called from connection statemachine
+ * when a connection is up and running.
+ *
+ * @param fi    An instance of an interface statemachine.
+ * @param event The event that just happened.
+ * @param arg   Generic pointer, cast from struct net_device * upon call.
+ */
+static void
+dev_action_connup(fsm_instance *fi, int event, void *arg)
+{
+	struct net_device   *dev = arg;
+	struct netiucv_priv *privptr = netdev_priv(dev);
+
+	IUCV_DBF_TEXT(trace, 3, __func__);
+
+	switch (fsm_getstate(fi)) {
+	case DEV_STATE_STARTWAIT:
+		fsm_newstate(fi, DEV_STATE_RUNNING);
+		dev_info(privptr->dev,
+			"The IUCV device has been connected"
+			" successfully to %s\n",
+			netiucv_printuser(privptr->conn));
+		IUCV_DBF_TEXT(setup, 3, "connection is up and running\n");
+		break;
+	case DEV_STATE_STOPWAIT:
+		IUCV_DBF_TEXT(data, 2,
+			"dev_action_connup: in DEV_STATE_STOPWAIT\n");
+		break;
+	}
+}
+
+/**
+ * Called from connection statemachine
+ * when a connection has been shutdown.
+ *
+ * @param fi    An instance of an interface statemachine.
+ * @param event The event that just happened.
+ * @param arg   Generic pointer, cast from struct net_device * upon call.
+ */
+static void
+dev_action_conndown(fsm_instance *fi, int event, void *arg)
+{
+	IUCV_DBF_TEXT(trace, 3, __func__);
+
+	switch (fsm_getstate(fi)) {
+	case DEV_STATE_RUNNING:
+		fsm_newstate(fi, DEV_STATE_STARTWAIT);
+		break;
+	case DEV_STATE_STOPWAIT:
+		fsm_newstate(fi, DEV_STATE_STOPPED);
+		IUCV_DBF_TEXT(setup, 3, "connection is down\n");
+		break;
+	}
+}
+
+static const fsm_node dev_fsm[] = {
+	{ DEV_STATE_STOPPED,    DEV_EVENT_START,   dev_action_start    },
+
+	{ DEV_STATE_STOPWAIT,   DEV_EVENT_START,   dev_action_start    },
+	{ DEV_STATE_STOPWAIT,   DEV_EVENT_CONDOWN, dev_action_conndown },
+
+	{ DEV_STATE_STARTWAIT,  DEV_EVENT_STOP,    dev_action_stop     },
+	{ DEV_STATE_STARTWAIT,  DEV_EVENT_CONUP,   dev_action_connup   },
+
+	{ DEV_STATE_RUNNING,    DEV_EVENT_STOP,    dev_action_stop     },
+	{ DEV_STATE_RUNNING,    DEV_EVENT_CONDOWN, dev_action_conndown },
+	{ DEV_STATE_RUNNING,    DEV_EVENT_CONUP,   netiucv_action_nop  },
+};
+
+static const int DEV_FSM_LEN = ARRAY_SIZE(dev_fsm);
+
+/**
+ * Transmit a packet.
+ * This is a helper function for netiucv_tx().
+ *
+ * @param conn Connection to be used for sending.
+ * @param skb Pointer to struct sk_buff of packet to send.
+ *            The link level header has already been set up
+ *            by netiucv_tx().
+ *
+ * @return 0 on success, -ERRNO on failure.
+ */
+static int netiucv_transmit_skb(struct iucv_connection *conn,
+				struct sk_buff *skb)
+{
+	struct iucv_message msg;
+	unsigned long saveflags;
+	struct ll_header header;
+	int rc;
+
+	if (fsm_getstate(conn->fsm) != CONN_STATE_IDLE) {
+		int l = skb->len + NETIUCV_HDRLEN;
+
+		spin_lock_irqsave(&conn->collect_lock, saveflags);
+		if (conn->collect_len + l >
+		    (conn->max_buffsize - NETIUCV_HDRLEN)) {
+			rc = -EBUSY;
+			IUCV_DBF_TEXT(data, 2,
+				      "EBUSY from netiucv_transmit_skb\n");
+		} else {
+			refcount_inc(&skb->users);
+			skb_queue_tail(&conn->collect_queue, skb);
+			conn->collect_len += l;
+			rc = 0;
+		}
+		spin_unlock_irqrestore(&conn->collect_lock, saveflags);
+	} else {
+		struct sk_buff *nskb = skb;
+		/**
+		 * Copy the skb to a newly allocated skb in lowmem only if the
+		 * data is located above 2G in memory or tailroom is < 2.
+		 */
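+		/*
+		 * IUCV addresses the send buffer with a 31-bit address;
+		 * this is why data above the 2G line must be bounced into
+		 * a GFP_DMA (below 2G on s390) buffer first.
+		 */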
+		unsigned long hi = ((unsigned long)(skb_tail_pointer(skb) +
+				    NETIUCV_HDRLEN)) >> 31;
+		int copied = 0;
+		if (hi || (skb_tailroom(skb) < 2)) {
+			nskb = alloc_skb(skb->len + NETIUCV_HDRLEN +
+					 NETIUCV_HDRLEN, GFP_ATOMIC | GFP_DMA);
+			if (!nskb) {
+				IUCV_DBF_TEXT(data, 2, "alloc_skb failed\n");
+				rc = -ENOMEM;
+				return rc;
+			} else {
+				skb_reserve(nskb, NETIUCV_HDRLEN);
+				skb_put_data(nskb, skb->data, skb->len);
+			}
+			copied = 1;
+		}
+		/**
+		 * skb now is below 2G and has enough room. Add headers.
+		 */
+		header.next = nskb->len + NETIUCV_HDRLEN;
+		memcpy(skb_push(nskb, NETIUCV_HDRLEN), &header, NETIUCV_HDRLEN);
+		header.next = 0;
+		skb_put_data(nskb, &header, NETIUCV_HDRLEN);
+
+		fsm_newstate(conn->fsm, CONN_STATE_TX);
+		conn->prof.send_stamp = jiffies;
+
+		msg.tag = 1;
+		msg.class = 0;
+		rc = iucv_message_send(conn->path, &msg, 0, 0,
+				       nskb->data, nskb->len);
+		conn->prof.doios_single++;
+		conn->prof.txlen += skb->len;
+		conn->prof.tx_pending++;
+		if (conn->prof.tx_pending > conn->prof.tx_max_pending)
+			conn->prof.tx_max_pending = conn->prof.tx_pending;
+		if (rc) {
+			struct netiucv_priv *privptr;
+			fsm_newstate(conn->fsm, CONN_STATE_IDLE);
+			conn->prof.tx_pending--;
+			privptr = netdev_priv(conn->netdev);
+			if (privptr)
+				privptr->stats.tx_errors++;
+			if (copied)
+				dev_kfree_skb(nskb);
+			else {
+				/**
+				 * Remove our headers. They get added
+				 * again on retransmit.
+				 */
+				skb_pull(skb, NETIUCV_HDRLEN);
+				skb_trim(skb, skb->len - NETIUCV_HDRLEN);
+			}
+			IUCV_DBF_TEXT_(data, 2, "rc %d from iucv_send\n", rc);
+		} else {
+			if (copied)
+				dev_kfree_skb(skb);
+			refcount_inc(&nskb->users);
+			skb_queue_tail(&conn->commit_queue, nskb);
+		}
+	}
+
+	return rc;
+}
+
+/*
+ * Interface API for upper network layers
+ */
+
+/**
+ * Open an interface.
+ * Called from generic network layer when ifconfig up is run.
+ *
+ * @param dev Pointer to interface struct.
+ *
+ * @return always 0 (this function cannot fail).
+ */
+static int netiucv_open(struct net_device *dev)
+{
+	struct netiucv_priv *priv = netdev_priv(dev);
+
+	fsm_event(priv->fsm, DEV_EVENT_START, dev);
+	return 0;
+}
+
+/**
+ * Close an interface.
+ * Called from generic network layer when ifconfig down is run.
+ *
+ * @param dev Pointer to interface struct.
+ *
+ * @return always 0 (this function cannot fail).
+ */
+static int netiucv_close(struct net_device *dev)
+{
+	struct netiucv_priv *priv = netdev_priv(dev);
+
+	fsm_event(priv->fsm, DEV_EVENT_STOP, dev);
+	return 0;
+}
+
+static int netiucv_pm_prepare(struct device *dev)
+{
+	IUCV_DBF_TEXT(trace, 3, __func__);
+	return 0;
+}
+
+static void netiucv_pm_complete(struct device *dev)
+{
+	IUCV_DBF_TEXT(trace, 3, __func__);
+}
+
+/**
+ * netiucv_pm_freeze() - Freeze PM callback
+ * @dev:	netiucv device
+ *
+ * close open netiucv interfaces
+ */
+static int netiucv_pm_freeze(struct device *dev)
+{
+	struct netiucv_priv *priv = dev_get_drvdata(dev);
+	struct net_device *ndev = NULL;
+	int rc = 0;
+
+	IUCV_DBF_TEXT(trace, 3, __func__);
+	if (priv && priv->conn)
+		ndev = priv->conn->netdev;
+	if (!ndev)
+		goto out;
+	netif_device_detach(ndev);
+	priv->pm_state = fsm_getstate(priv->fsm);
+	rc = netiucv_close(ndev);
+out:
+	return rc;
+}
+
+/**
+ * netiucv_pm_restore_thaw() - Thaw and restore PM callback
+ * @dev:	netiucv device
+ *
+ * re-open netiucv interfaces closed during freeze
+ */
+static int netiucv_pm_restore_thaw(struct device *dev)
+{
+	struct netiucv_priv *priv = dev_get_drvdata(dev);
+	struct net_device *ndev = NULL;
+	int rc = 0;
+
+	IUCV_DBF_TEXT(trace, 3, __func__);
+	if (priv && priv->conn)
+		ndev = priv->conn->netdev;
+	if (!ndev)
+		goto out;
+	switch (priv->pm_state) {
+	case DEV_STATE_RUNNING:
+	case DEV_STATE_STARTWAIT:
+		rc = netiucv_open(ndev);
+		break;
+	default:
+		break;
+	}
+	netif_device_attach(ndev);
+out:
+	return rc;
+}
+
+/**
+ * Start transmission of a packet.
+ * Called from generic network device layer.
+ *
+ * @param skb Pointer to buffer containing the packet.
+ * @param dev Pointer to interface struct.
+ *
+ * @return NETDEV_TX_OK if the packet was consumed,
+ *         NETDEV_TX_BUSY if it was rejected.
+ *         Note: On NETDEV_TX_BUSY the packet is not freed here;
+ *               the generic network layer will retry the transmission.
+ */
+static int netiucv_tx(struct sk_buff *skb, struct net_device *dev)
+{
+	struct netiucv_priv *privptr = netdev_priv(dev);
+	int rc;
+
+	IUCV_DBF_TEXT(trace, 4, __func__);
+	/**
+	 * Some sanity checks ...
+	 */
+	if (skb == NULL) {
+		IUCV_DBF_TEXT(data, 2, "netiucv_tx: skb is NULL\n");
+		privptr->stats.tx_dropped++;
+		return NETDEV_TX_OK;
+	}
+	if (skb_headroom(skb) < NETIUCV_HDRLEN) {
+		IUCV_DBF_TEXT(data, 2,
+			"netiucv_tx: skb_headroom < NETIUCV_HDRLEN\n");
+		dev_kfree_skb(skb);
+		privptr->stats.tx_dropped++;
+		return NETDEV_TX_OK;
+	}
+
+	/**
+	 * If connection is not running, try to restart it
+	 * and throw away packet.
+	 */
+	if (fsm_getstate(privptr->fsm) != DEV_STATE_RUNNING) {
+		dev_kfree_skb(skb);
+		privptr->stats.tx_dropped++;
+		privptr->stats.tx_errors++;
+		privptr->stats.tx_carrier_errors++;
+		return NETDEV_TX_OK;
+	}
+
+	if (netiucv_test_and_set_busy(dev)) {
+		IUCV_DBF_TEXT(data, 2, "EBUSY from netiucv_tx\n");
+		return NETDEV_TX_BUSY;
+	}
+	netif_trans_update(dev);
+	rc = netiucv_transmit_skb(privptr->conn, skb);
+	netiucv_clear_busy(dev);
+	return rc ? NETDEV_TX_BUSY : NETDEV_TX_OK;
+}
+
+/**
+ * netiucv_stats
+ * @dev: Pointer to interface struct.
+ *
+ * Returns interface statistics of a device.
+ *
+ * Returns pointer to stats struct of this interface.
+ */
+static struct net_device_stats *netiucv_stats(struct net_device *dev)
+{
+	struct netiucv_priv *priv = netdev_priv(dev);
+
+	IUCV_DBF_TEXT(trace, 5, __func__);
+	return &priv->stats;
+}
+
+/*
+ * attributes in sysfs
+ */
+
+static ssize_t user_show(struct device *dev, struct device_attribute *attr,
+			 char *buf)
+{
+	struct netiucv_priv *priv = dev_get_drvdata(dev);
+
+	IUCV_DBF_TEXT(trace, 5, __func__);
+	return sprintf(buf, "%s\n", netiucv_printuser(priv->conn));
+}
+
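+/*
+ * Parse a "USERID[.userdata]" string as written to the 'user' or
+ * 'connection' sysfs attributes: up to 8 alphanumeric (or '$')
+ * characters of z/VM user ID, optionally followed by a '.' and up to
+ * 16 characters of IUCV user data.  Both parts are upcased and
+ * blank-padded, and the user data is converted to EBCDIC.
+ */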
+static int netiucv_check_user(const char *buf, size_t count, char *username,
+			      char *userdata)
+{
+	const char *p;
+	int i;
+
+	p = strchr(buf, '.');
+	if ((p && ((count > 26) ||
+		   ((p - buf) > 8) ||
+		   (buf + count - p > 18))) ||
+	    (!p && (count > 9))) {
+		IUCV_DBF_TEXT(setup, 2, "conn_write: too long\n");
+		return -EINVAL;
+	}
+
+	for (i = 0, p = buf; i < 8 && *p && *p != '.'; i++, p++) {
+		if (isalnum(*p) || *p == '$') {
+			username[i] = toupper(*p);
+			continue;
+		}
+		if (*p == '\n')
+			/* trailing lf, grr */
+			break;
+		IUCV_DBF_TEXT_(setup, 2,
+			       "conn_write: invalid character %02x\n", *p);
+		return -EINVAL;
+	}
+	while (i < 8)
+		username[i++] = ' ';
+	username[8] = '\0';
+
+	if (*p == '.') {
+		p++;
+		for (i = 0; i < 16 && *p; i++, p++) {
+			if (*p == '\n')
+				break;
+			userdata[i] = toupper(*p);
+		}
+		while (i > 0 && i < 16)
+			userdata[i++] = ' ';
+	} else
+		memcpy(userdata, iucvMagic_ascii, 16);
+	userdata[16] = '\0';
+	ASCEBC(userdata, 16);
+
+	return 0;
+}
+
+static ssize_t user_write(struct device *dev, struct device_attribute *attr,
+			  const char *buf, size_t count)
+{
+	struct netiucv_priv *priv = dev_get_drvdata(dev);
+	struct net_device *ndev = priv->conn->netdev;
+	char	username[9];
+	char	userdata[17];
+	int	rc;
+	struct iucv_connection *cp;
+
+	IUCV_DBF_TEXT(trace, 3, __func__);
+	rc = netiucv_check_user(buf, count, username, userdata);
+	if (rc)
+		return rc;
+
+	if (memcmp(username, priv->conn->userid, 9) &&
+	    (ndev->flags & (IFF_UP | IFF_RUNNING))) {
+		/* username changed while the interface is active. */
+		IUCV_DBF_TEXT(setup, 2, "user_write: device active\n");
+		return -EPERM;
+	}
+	read_lock_bh(&iucv_connection_rwlock);
+	list_for_each_entry(cp, &iucv_connection_list, list) {
+		if (!strncmp(username, cp->userid, 9) &&
+		   !strncmp(userdata, cp->userdata, 17) && cp->netdev != ndev) {
+			read_unlock_bh(&iucv_connection_rwlock);
+			IUCV_DBF_TEXT_(setup, 2, "user_write: Connection to %s "
+				"already exists\n", netiucv_printuser(cp));
+			return -EEXIST;
+		}
+	}
+	read_unlock_bh(&iucv_connection_rwlock);
+	memcpy(priv->conn->userid, username, 9);
+	memcpy(priv->conn->userdata, userdata, 17);
+	return count;
+}
+
+static DEVICE_ATTR(user, 0644, user_show, user_write);
+
+static ssize_t buffer_show(struct device *dev, struct device_attribute *attr,
+			    char *buf)
+{
+	struct netiucv_priv *priv = dev_get_drvdata(dev);
+
+	IUCV_DBF_TEXT(trace, 5, __func__);
+	return sprintf(buf, "%d\n", priv->conn->max_buffsize);
+}
+
+static ssize_t buffer_write(struct device *dev, struct device_attribute *attr,
+			     const char *buf, size_t count)
+{
+	struct netiucv_priv *priv = dev_get_drvdata(dev);
+	struct net_device *ndev = priv->conn->netdev;
+	unsigned int bs1;
+	int rc;
+
+	IUCV_DBF_TEXT(trace, 3, __func__);
+	if (count >= 39)
+		return -EINVAL;
+
+	rc = kstrtouint(buf, 0, &bs1);
+
+	if (rc == -EINVAL) {
+		IUCV_DBF_TEXT_(setup, 2, "buffer_write: invalid char %s\n",
+			buf);
+		return -EINVAL;
+	}
+	if ((rc == -ERANGE) || (bs1 > NETIUCV_BUFSIZE_MAX)) {
+		IUCV_DBF_TEXT_(setup, 2,
+			"buffer_write: buffer size %d too large\n",
+			bs1);
+		return -EINVAL;
+	}
+	if ((ndev->flags & IFF_RUNNING) &&
+	    (bs1 < (ndev->mtu + NETIUCV_HDRLEN + 2))) {
+		IUCV_DBF_TEXT_(setup, 2,
+			"buffer_write: buffer size %d too small\n",
+			bs1);
+		return -EINVAL;
+	}
+	if (bs1 < (576 + NETIUCV_HDRLEN + NETIUCV_HDRLEN)) {
+		IUCV_DBF_TEXT_(setup, 2,
+			"buffer_write: buffer size %d too small\n",
+			bs1);
+		return -EINVAL;
+	}
+
+	priv->conn->max_buffsize = bs1;
+	if (!(ndev->flags & IFF_RUNNING))
+		ndev->mtu = bs1 - NETIUCV_HDRLEN - NETIUCV_HDRLEN;
+
+	return count;
+
+}
+
+static DEVICE_ATTR(buffer, 0644, buffer_show, buffer_write);
+
+static ssize_t dev_fsm_show(struct device *dev, struct device_attribute *attr,
+			     char *buf)
+{
+	struct netiucv_priv *priv = dev_get_drvdata(dev);
+
+	IUCV_DBF_TEXT(trace, 5, __func__);
+	return sprintf(buf, "%s\n", fsm_getstate_str(priv->fsm));
+}
+
+static DEVICE_ATTR(device_fsm_state, 0444, dev_fsm_show, NULL);
+
+static ssize_t conn_fsm_show(struct device *dev,
+			      struct device_attribute *attr, char *buf)
+{
+	struct netiucv_priv *priv = dev_get_drvdata(dev);
+
+	IUCV_DBF_TEXT(trace, 5, __func__);
+	return sprintf(buf, "%s\n", fsm_getstate_str(priv->conn->fsm));
+}
+
+static DEVICE_ATTR(connection_fsm_state, 0444, conn_fsm_show, NULL);
+
+static ssize_t maxmulti_show(struct device *dev,
+			      struct device_attribute *attr, char *buf)
+{
+	struct netiucv_priv *priv = dev_get_drvdata(dev);
+
+	IUCV_DBF_TEXT(trace, 5, __func__);
+	return sprintf(buf, "%ld\n", priv->conn->prof.maxmulti);
+}
+
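+/*
+ * Writing to one of the statistics attributes below resets the
+ * corresponding counter to zero; the written value is ignored.
+ */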
+static ssize_t maxmulti_write(struct device *dev,
+			       struct device_attribute *attr,
+			       const char *buf, size_t count)
+{
+	struct netiucv_priv *priv = dev_get_drvdata(dev);
+
+	IUCV_DBF_TEXT(trace, 4, __func__);
+	priv->conn->prof.maxmulti = 0;
+	return count;
+}
+
+static DEVICE_ATTR(max_tx_buffer_used, 0644, maxmulti_show, maxmulti_write);
+
+static ssize_t maxcq_show(struct device *dev, struct device_attribute *attr,
+			   char *buf)
+{
+	struct netiucv_priv *priv = dev_get_drvdata(dev);
+
+	IUCV_DBF_TEXT(trace, 5, __func__);
+	return sprintf(buf, "%ld\n", priv->conn->prof.maxcqueue);
+}
+
+static ssize_t maxcq_write(struct device *dev, struct device_attribute *attr,
+			    const char *buf, size_t count)
+{
+	struct netiucv_priv *priv = dev_get_drvdata(dev);
+
+	IUCV_DBF_TEXT(trace, 4, __func__);
+	priv->conn->prof.maxcqueue = 0;
+	return count;
+}
+
+static DEVICE_ATTR(max_chained_skbs, 0644, maxcq_show, maxcq_write);
+
+static ssize_t sdoio_show(struct device *dev, struct device_attribute *attr,
+			   char *buf)
+{
+	struct netiucv_priv *priv = dev_get_drvdata(dev);
+
+	IUCV_DBF_TEXT(trace, 5, __func__);
+	return sprintf(buf, "%ld\n", priv->conn->prof.doios_single);
+}
+
+static ssize_t sdoio_write(struct device *dev, struct device_attribute *attr,
+			    const char *buf, size_t count)
+{
+	struct netiucv_priv *priv = dev_get_drvdata(dev);
+
+	IUCV_DBF_TEXT(trace, 4, __func__);
+	priv->conn->prof.doios_single = 0;
+	return count;
+}
+
+static DEVICE_ATTR(tx_single_write_ops, 0644, sdoio_show, sdoio_write);
+
+static ssize_t mdoio_show(struct device *dev, struct device_attribute *attr,
+			   char *buf)
+{
+	struct netiucv_priv *priv = dev_get_drvdata(dev);
+
+	IUCV_DBF_TEXT(trace, 5, __func__);
+	return sprintf(buf, "%ld\n", priv->conn->prof.doios_multi);
+}
+
+static ssize_t mdoio_write(struct device *dev, struct device_attribute *attr,
+			    const char *buf, size_t count)
+{
+	struct netiucv_priv *priv = dev_get_drvdata(dev);
+
+	IUCV_DBF_TEXT(trace, 4, __func__);
+	priv->conn->prof.doios_multi = 0;
+	return count;
+}
+
+static DEVICE_ATTR(tx_multi_write_ops, 0644, mdoio_show, mdoio_write);
+
+static ssize_t txlen_show(struct device *dev, struct device_attribute *attr,
+			   char *buf)
+{
+	struct netiucv_priv *priv = dev_get_drvdata(dev);
+
+	IUCV_DBF_TEXT(trace, 5, __func__);
+	return sprintf(buf, "%ld\n", priv->conn->prof.txlen);
+}
+
+static ssize_t txlen_write(struct device *dev, struct device_attribute *attr,
+			    const char *buf, size_t count)
+{
+	struct netiucv_priv *priv = dev_get_drvdata(dev);
+
+	IUCV_DBF_TEXT(trace, 4, __func__);
+	priv->conn->prof.txlen = 0;
+	return count;
+}
+
+static DEVICE_ATTR(netto_bytes, 0644, txlen_show, txlen_write);
+
+static ssize_t txtime_show(struct device *dev, struct device_attribute *attr,
+			    char *buf)
+{
+	struct netiucv_priv *priv = dev_get_drvdata(dev);
+
+	IUCV_DBF_TEXT(trace, 5, __func__);
+	return sprintf(buf, "%ld\n", priv->conn->prof.tx_time);
+}
+
+static ssize_t txtime_write(struct device *dev, struct device_attribute *attr,
+			     const char *buf, size_t count)
+{
+	struct netiucv_priv *priv = dev_get_drvdata(dev);
+
+	IUCV_DBF_TEXT(trace, 4, __func__);
+	priv->conn->prof.tx_time = 0;
+	return count;
+}
+
+static DEVICE_ATTR(max_tx_io_time, 0644, txtime_show, txtime_write);
+
+static ssize_t txpend_show(struct device *dev, struct device_attribute *attr,
+			    char *buf)
+{
+	struct netiucv_priv *priv = dev_get_drvdata(dev);
+
+	IUCV_DBF_TEXT(trace, 5, __func__);
+	return sprintf(buf, "%ld\n", priv->conn->prof.tx_pending);
+}
+
+static ssize_t txpend_write(struct device *dev, struct device_attribute *attr,
+			     const char *buf, size_t count)
+{
+	struct netiucv_priv *priv = dev_get_drvdata(dev);
+
+	IUCV_DBF_TEXT(trace, 4, __func__);
+	priv->conn->prof.tx_pending = 0;
+	return count;
+}
+
+static DEVICE_ATTR(tx_pending, 0644, txpend_show, txpend_write);
+
+static ssize_t txmpnd_show(struct device *dev, struct device_attribute *attr,
+			    char *buf)
+{
+	struct netiucv_priv *priv = dev_get_drvdata(dev);
+
+	IUCV_DBF_TEXT(trace, 5, __func__);
+	return sprintf(buf, "%ld\n", priv->conn->prof.tx_max_pending);
+}
+
+static ssize_t txmpnd_write(struct device *dev, struct device_attribute *attr,
+			     const char *buf, size_t count)
+{
+	struct netiucv_priv *priv = dev_get_drvdata(dev);
+
+	IUCV_DBF_TEXT(trace, 4, __func__);
+	priv->conn->prof.tx_max_pending = 0;
+	return count;
+}
+
+static DEVICE_ATTR(tx_max_pending, 0644, txmpnd_show, txmpnd_write);
+
+static struct attribute *netiucv_attrs[] = {
+	&dev_attr_buffer.attr,
+	&dev_attr_user.attr,
+	NULL,
+};
+
+static struct attribute_group netiucv_attr_group = {
+	.attrs = netiucv_attrs,
+};
+
+static struct attribute *netiucv_stat_attrs[] = {
+	&dev_attr_device_fsm_state.attr,
+	&dev_attr_connection_fsm_state.attr,
+	&dev_attr_max_tx_buffer_used.attr,
+	&dev_attr_max_chained_skbs.attr,
+	&dev_attr_tx_single_write_ops.attr,
+	&dev_attr_tx_multi_write_ops.attr,
+	&dev_attr_netto_bytes.attr,
+	&dev_attr_max_tx_io_time.attr,
+	&dev_attr_tx_pending.attr,
+	&dev_attr_tx_max_pending.attr,
+	NULL,
+};
+
+static struct attribute_group netiucv_stat_attr_group = {
+	.name  = "stats",
+	.attrs = netiucv_stat_attrs,
+};
+
+static const struct attribute_group *netiucv_attr_groups[] = {
+	&netiucv_stat_attr_group,
+	&netiucv_attr_group,
+	NULL,
+};
+
+static int netiucv_register_device(struct net_device *ndev)
+{
+	struct netiucv_priv *priv = netdev_priv(ndev);
+	struct device *dev = kzalloc(sizeof(struct device), GFP_KERNEL);
+	int ret;
+
+	IUCV_DBF_TEXT(trace, 3, __func__);
+
+	if (dev) {
+		dev_set_name(dev, "net%s", ndev->name);
+		dev->bus = &iucv_bus;
+		dev->parent = iucv_root;
+		dev->groups = netiucv_attr_groups;
+		/*
+		 * The release function could be called after the
+		 * module has been unloaded. Its _only_ task is to
+		 * free the struct. Therefore, we specify kfree()
+		 * directly here. (Probably a little bit obfuscating,
+		 * but legitimate.)
+		 */
+		dev->release = (void (*)(struct device *))kfree;
+		dev->driver = &netiucv_driver;
+	} else
+		return -ENOMEM;
+
+	ret = device_register(dev);
+	if (ret) {
+		put_device(dev);
+		return ret;
+	}
+	priv->dev = dev;
+	dev_set_drvdata(dev, priv);
+	return 0;
+}
+
+static void netiucv_unregister_device(struct device *dev)
+{
+	IUCV_DBF_TEXT(trace, 3, __func__);
+	device_unregister(dev);
+}
+
+/**
+ * Allocate and initialize a new connection structure.
+ * Add it to the list of netiucv connections.
+ */
+static struct iucv_connection *netiucv_new_connection(struct net_device *dev,
+						      char *username,
+						      char *userdata)
+{
+	struct iucv_connection *conn;
+
+	conn = kzalloc(sizeof(*conn), GFP_KERNEL);
+	if (!conn)
+		goto out;
+	skb_queue_head_init(&conn->collect_queue);
+	skb_queue_head_init(&conn->commit_queue);
+	spin_lock_init(&conn->collect_lock);
+	conn->max_buffsize = NETIUCV_BUFSIZE_DEFAULT;
+	conn->netdev = dev;
+
+	conn->rx_buff = alloc_skb(conn->max_buffsize, GFP_KERNEL | GFP_DMA);
+	if (!conn->rx_buff)
+		goto out_conn;
+	conn->tx_buff = alloc_skb(conn->max_buffsize, GFP_KERNEL | GFP_DMA);
+	if (!conn->tx_buff)
+		goto out_rx;
+	conn->fsm = init_fsm("netiucvconn", conn_state_names,
+			     conn_event_names, NR_CONN_STATES,
+			     NR_CONN_EVENTS, conn_fsm, CONN_FSM_LEN,
+			     GFP_KERNEL);
+	if (!conn->fsm)
+		goto out_tx;
+
+	fsm_settimer(conn->fsm, &conn->timer);
+	fsm_newstate(conn->fsm, CONN_STATE_INVALID);
+
+	if (userdata)
+		memcpy(conn->userdata, userdata, 17);
+	if (username) {
+		memcpy(conn->userid, username, 9);
+		fsm_newstate(conn->fsm, CONN_STATE_STOPPED);
+	}
+
+	write_lock_bh(&iucv_connection_rwlock);
+	list_add_tail(&conn->list, &iucv_connection_list);
+	write_unlock_bh(&iucv_connection_rwlock);
+	return conn;
+
+out_tx:
+	kfree_skb(conn->tx_buff);
+out_rx:
+	kfree_skb(conn->rx_buff);
+out_conn:
+	kfree(conn);
+out:
+	return NULL;
+}
+
+/**
+ * Release a connection structure and remove it from the
+ * list of netiucv connections.
+ */
+static void netiucv_remove_connection(struct iucv_connection *conn)
+{
+	IUCV_DBF_TEXT(trace, 3, __func__);
+	write_lock_bh(&iucv_connection_rwlock);
+	list_del_init(&conn->list);
+	write_unlock_bh(&iucv_connection_rwlock);
+	fsm_deltimer(&conn->timer);
+	netiucv_purge_skb_queue(&conn->collect_queue);
+	if (conn->path) {
+		iucv_path_sever(conn->path, conn->userdata);
+		kfree(conn->path);
+		conn->path = NULL;
+	}
+	netiucv_purge_skb_queue(&conn->commit_queue);
+	kfree_fsm(conn->fsm);
+	kfree_skb(conn->rx_buff);
+	kfree_skb(conn->tx_buff);
+}
+
+/**
+ * Release everything of a net device.
+ */
+static void netiucv_free_netdevice(struct net_device *dev)
+{
+	struct netiucv_priv *privptr = netdev_priv(dev);
+
+	IUCV_DBF_TEXT(trace, 3, __func__);
+
+	if (!dev)
+		return;
+
+	if (privptr) {
+		if (privptr->conn)
+			netiucv_remove_connection(privptr->conn);
+		if (privptr->fsm)
+			kfree_fsm(privptr->fsm);
+		privptr->conn = NULL;
+		privptr->fsm = NULL;
+		/* privptr gets freed by free_netdev() */
+	}
+}
+
+/**
+ * Initialize a net device. (Called from kernel in alloc_netdev())
+ */
+static const struct net_device_ops netiucv_netdev_ops = {
+	.ndo_open		= netiucv_open,
+	.ndo_stop		= netiucv_close,
+	.ndo_get_stats		= netiucv_stats,
+	.ndo_start_xmit		= netiucv_tx,
+};
+
+static void netiucv_setup_netdevice(struct net_device *dev)
+{
+	dev->mtu	         = NETIUCV_MTU_DEFAULT;
+	dev->min_mtu		 = 576;
+	dev->max_mtu		 = NETIUCV_MTU_MAX;
+	dev->needs_free_netdev   = true;
+	dev->priv_destructor     = netiucv_free_netdevice;
+	dev->hard_header_len     = NETIUCV_HDRLEN;
+	dev->addr_len            = 0;
+	dev->type                = ARPHRD_SLIP;
+	dev->tx_queue_len        = NETIUCV_QUEUELEN_DEFAULT;
+	dev->flags	         = IFF_POINTOPOINT | IFF_NOARP;
+	dev->netdev_ops		 = &netiucv_netdev_ops;
+}
+
+/**
+ * Allocate and initialize everything of a net device.
+ */
+static struct net_device *netiucv_init_netdevice(char *username, char *userdata)
+{
+	struct netiucv_priv *privptr;
+	struct net_device *dev;
+
+	dev = alloc_netdev(sizeof(struct netiucv_priv), "iucv%d",
+			   NET_NAME_UNKNOWN, netiucv_setup_netdevice);
+	if (!dev)
+		return NULL;
+	rtnl_lock();
+	if (dev_alloc_name(dev, dev->name) < 0)
+		goto out_netdev;
+
+	privptr = netdev_priv(dev);
+	privptr->fsm = init_fsm("netiucvdev", dev_state_names,
+				dev_event_names, NR_DEV_STATES, NR_DEV_EVENTS,
+				dev_fsm, DEV_FSM_LEN, GFP_KERNEL);
+	if (!privptr->fsm)
+		goto out_netdev;
+
+	privptr->conn = netiucv_new_connection(dev, username, userdata);
+	if (!privptr->conn) {
+		IUCV_DBF_TEXT(setup, 2, "NULL from netiucv_new_connection\n");
+		goto out_fsm;
+	}
+	fsm_newstate(privptr->fsm, DEV_STATE_STOPPED);
+	return dev;
+
+out_fsm:
+	kfree_fsm(privptr->fsm);
+out_netdev:
+	rtnl_unlock();
+	free_netdev(dev);
+	return NULL;
+}
+
+static ssize_t connection_store(struct device_driver *drv, const char *buf,
+				size_t count)
+{
+	char username[9];
+	char userdata[17];
+	int rc;
+	struct net_device *dev;
+	struct netiucv_priv *priv;
+	struct iucv_connection *cp;
+
+	IUCV_DBF_TEXT(trace, 3, __func__);
+	rc = netiucv_check_user(buf, count, username, userdata);
+	if (rc)
+		return rc;
+
+	read_lock_bh(&iucv_connection_rwlock);
+	list_for_each_entry(cp, &iucv_connection_list, list) {
+		if (!strncmp(username, cp->userid, 9) &&
+		    !strncmp(userdata, cp->userdata, 17)) {
+			read_unlock_bh(&iucv_connection_rwlock);
+			IUCV_DBF_TEXT_(setup, 2, "conn_write: Connection to %s "
+				"already exists\n", netiucv_printuser(cp));
+			return -EEXIST;
+		}
+	}
+	read_unlock_bh(&iucv_connection_rwlock);
+
+	dev = netiucv_init_netdevice(username, userdata);
+	if (!dev) {
+		IUCV_DBF_TEXT(setup, 2, "NULL from netiucv_init_netdevice\n");
+		return -ENODEV;
+	}
+
+	rc = netiucv_register_device(dev);
+	if (rc) {
+		rtnl_unlock();
+		IUCV_DBF_TEXT_(setup, 2,
+			"ret %d from netiucv_register_device\n", rc);
+		goto out_free_ndev;
+	}
+
+	/* sysfs magic */
+	priv = netdev_priv(dev);
+	SET_NETDEV_DEV(dev, priv->dev);
+
+	rc = register_netdevice(dev);
+	rtnl_unlock();
+	if (rc)
+		goto out_unreg;
+
+	dev_info(priv->dev, "The IUCV interface to %s has been established "
+			    "successfully\n",
+		netiucv_printuser(priv->conn));
+
+	return count;
+
+out_unreg:
+	netiucv_unregister_device(priv->dev);
+out_free_ndev:
+	netiucv_free_netdevice(dev);
+	return rc;
+}
+static DRIVER_ATTR_WO(connection);
+
+static ssize_t remove_store(struct device_driver *drv, const char *buf,
+			    size_t count)
+{
+	struct iucv_connection *cp;
+	struct net_device *ndev;
+	struct netiucv_priv *priv;
+	struct device *dev;
+	char name[IFNAMSIZ];
+	const char *p;
+	int i;
+
+	IUCV_DBF_TEXT(trace, 3, __func__);
+
+	if (count >= IFNAMSIZ)
+		count = IFNAMSIZ - 1;
+
+	for (i = 0, p = buf; i < count && *p; i++, p++) {
+		if (*p == '\n' || *p == ' ')
+			/* trailing lf, grr */
+			break;
+		name[i] = *p;
+	}
+	name[i] = '\0';
+
+	read_lock_bh(&iucv_connection_rwlock);
+	list_for_each_entry(cp, &iucv_connection_list, list) {
+		ndev = cp->netdev;
+		priv = netdev_priv(ndev);
+		dev = priv->dev;
+		if (strncmp(name, ndev->name, count))
+			continue;
+		read_unlock_bh(&iucv_connection_rwlock);
+		if (ndev->flags & (IFF_UP | IFF_RUNNING)) {
+			dev_warn(dev, "The IUCV device is connected"
+				" to %s and cannot be removed\n",
+				priv->conn->userid);
+			IUCV_DBF_TEXT(data, 2, "remove_write: still active\n");
+			return -EPERM;
+		}
+		unregister_netdev(ndev);
+		netiucv_unregister_device(dev);
+		return count;
+	}
+	read_unlock_bh(&iucv_connection_rwlock);
+	IUCV_DBF_TEXT(data, 2, "remove_write: unknown device\n");
+	return -EINVAL;
+}
+static DRIVER_ATTR_WO(remove);
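+/*
+ * Usage sketch for the two driver attributes (paths assume the usual
+ * sysfs layout of the iucv bus):
+ *
+ *   echo "PEERID" > /sys/bus/iucv/drivers/netiucv/connection
+ *   echo "iucv0"  > /sys/bus/iucv/drivers/netiucv/remove
+ */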
+
+static struct attribute *netiucv_drv_attrs[] = {
+	&driver_attr_connection.attr,
+	&driver_attr_remove.attr,
+	NULL,
+};
+
+static struct attribute_group netiucv_drv_attr_group = {
+	.attrs = netiucv_drv_attrs,
+};
+
+static const struct attribute_group *netiucv_drv_attr_groups[] = {
+	&netiucv_drv_attr_group,
+	NULL,
+};
+
+static void netiucv_banner(void)
+{
+	pr_info("driver initialized\n");
+}
+
+static void __exit netiucv_exit(void)
+{
+	struct iucv_connection *cp;
+	struct net_device *ndev;
+	struct netiucv_priv *priv;
+	struct device *dev;
+
+	IUCV_DBF_TEXT(trace, 3, __func__);
+	while (!list_empty(&iucv_connection_list)) {
+		cp = list_entry(iucv_connection_list.next,
+				struct iucv_connection, list);
+		ndev = cp->netdev;
+		priv = netdev_priv(ndev);
+		dev = priv->dev;
+
+		unregister_netdev(ndev);
+		netiucv_unregister_device(dev);
+	}
+
+	device_unregister(netiucv_dev);
+	driver_unregister(&netiucv_driver);
+	iucv_unregister(&netiucv_handler, 1);
+	iucv_unregister_dbf_views();
+
+	pr_info("driver unloaded\n");
+}
+
+static int __init netiucv_init(void)
+{
+	int rc;
+
+	rc = iucv_register_dbf_views();
+	if (rc)
+		goto out;
+	rc = iucv_register(&netiucv_handler, 1);
+	if (rc)
+		goto out_dbf;
+	IUCV_DBF_TEXT(trace, 3, __func__);
+	netiucv_driver.groups = netiucv_drv_attr_groups;
+	rc = driver_register(&netiucv_driver);
+	if (rc) {
+		IUCV_DBF_TEXT_(setup, 2, "ret %d from driver_register\n", rc);
+		goto out_iucv;
+	}
+	/* establish dummy device */
+	netiucv_dev = kzalloc(sizeof(struct device), GFP_KERNEL);
+	if (!netiucv_dev) {
+		rc = -ENOMEM;
+		goto out_driver;
+	}
+	dev_set_name(netiucv_dev, "netiucv");
+	netiucv_dev->bus = &iucv_bus;
+	netiucv_dev->parent = iucv_root;
+	netiucv_dev->release = (void (*)(struct device *))kfree;
+	netiucv_dev->driver = &netiucv_driver;
+	rc = device_register(netiucv_dev);
+	if (rc) {
+		put_device(netiucv_dev);
+		goto out_driver;
+	}
+	netiucv_banner();
+	return rc;
+
+out_driver:
+	driver_unregister(&netiucv_driver);
+out_iucv:
+	iucv_unregister(&netiucv_handler, 1);
+out_dbf:
+	iucv_unregister_dbf_views();
+out:
+	return rc;
+}
+
+module_init(netiucv_init);
+module_exit(netiucv_exit);
+MODULE_LICENSE("GPL");
diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h
new file mode 100644
index 0000000..970654f
--- /dev/null
+++ b/drivers/s390/net/qeth_core.h
@@ -0,0 +1,1068 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ *    Copyright IBM Corp. 2007
+ *    Author(s): Utz Bacher <utz.bacher@de.ibm.com>,
+ *		 Frank Pavlic <fpavlic@de.ibm.com>,
+ *		 Thomas Spatzier <tspat@de.ibm.com>,
+ *		 Frank Blaschka <frank.blaschka@de.ibm.com>
+ */
+
+#ifndef __QETH_CORE_H__
+#define __QETH_CORE_H__
+
+#include <linux/if.h>
+#include <linux/if_arp.h>
+#include <linux/etherdevice.h>
+#include <linux/if_vlan.h>
+#include <linux/ctype.h>
+#include <linux/in6.h>
+#include <linux/bitops.h>
+#include <linux/seq_file.h>
+#include <linux/ethtool.h>
+#include <linux/hashtable.h>
+#include <linux/ip.h>
+#include <linux/refcount.h>
+
+#include <net/ipv6.h>
+#include <net/if_inet6.h>
+#include <net/addrconf.h>
+
+#include <asm/debug.h>
+#include <asm/qdio.h>
+#include <asm/ccwdev.h>
+#include <asm/ccwgroup.h>
+#include <asm/sysinfo.h>
+
+#include "qeth_core_mpc.h"
+
+/**
+ * Debug Facility stuff
+ */
+enum qeth_dbf_names {
+	QETH_DBF_SETUP,
+	QETH_DBF_MSG,
+	QETH_DBF_CTRL,
+	QETH_DBF_INFOS	/* must be last element */
+};
+
+struct qeth_dbf_info {
+	char name[DEBUG_MAX_NAME_LEN];
+	int pages;
+	int areas;
+	int len;
+	int level;
+	struct debug_view *view;
+	debug_info_t *id;
+};
+
+#define QETH_DBF_CTRL_LEN 256
+
+#define QETH_DBF_TEXT(name, level, text) \
+	debug_text_event(qeth_dbf[QETH_DBF_##name].id, level, text)
+
+#define QETH_DBF_HEX(name, level, addr, len) \
+	debug_event(qeth_dbf[QETH_DBF_##name].id, level, (void *)(addr), len)
+
+#define QETH_DBF_MESSAGE(level, text...) \
+	debug_sprintf_event(qeth_dbf[QETH_DBF_MSG].id, level, text)
+
+#define QETH_DBF_TEXT_(name, level, text...) \
+	qeth_dbf_longtext(qeth_dbf[QETH_DBF_##name].id, level, text)
+
+#define QETH_CARD_TEXT(card, level, text) \
+	debug_text_event(card->debug, level, text)
+
+#define QETH_CARD_HEX(card, level, addr, len) \
+	debug_event(card->debug, level, (void *)(addr), len)
+
+#define QETH_CARD_MESSAGE(card, level, text...) \
+	debug_sprintf_event(card->debug, level, text)
+
+#define QETH_CARD_TEXT_(card, level, text...) \
+	qeth_dbf_longtext(card->debug, level, text)
+
+#define SENSE_COMMAND_REJECT_BYTE 0
+#define SENSE_COMMAND_REJECT_FLAG 0x80
+#define SENSE_RESETTING_EVENT_BYTE 1
+#define SENSE_RESETTING_EVENT_FLAG 0x80
+
+/*
+ * Common IO related definitions
+ */
+#define CARD_RDEV(card) card->read.ccwdev
+#define CARD_WDEV(card) card->write.ccwdev
+#define CARD_DDEV(card) card->data.ccwdev
+#define CARD_BUS_ID(card) dev_name(&card->gdev->dev)
+#define CARD_RDEV_ID(card) dev_name(&card->read.ccwdev->dev)
+#define CARD_WDEV_ID(card) dev_name(&card->write.ccwdev->dev)
+#define CARD_DDEV_ID(card) dev_name(&card->data.ccwdev->dev)
+#define CHANNEL_ID(channel) dev_name(&channel->ccwdev->dev)
+
+/**
+ * card stuff
+ */
+struct qeth_perf_stats {
+	unsigned int bufs_rec;
+	unsigned int bufs_sent;
+	unsigned int buf_elements_sent;
+
+	unsigned int skbs_sent_pack;
+	unsigned int bufs_sent_pack;
+
+	unsigned int sc_dp_p;
+	unsigned int sc_p_dp;
+	/* qdio_cq_handler: number of times called, time spent in */
+	__u64 cq_start_time;
+	unsigned int cq_cnt;
+	unsigned int cq_time;
+	/* qdio_input_handler: number of times called, time spent in */
+	__u64 inbound_start_time;
+	unsigned int inbound_cnt;
+	unsigned int inbound_time;
+	/* qeth_send_packet: number of times called, time spent in */
+	__u64 outbound_start_time;
+	unsigned int outbound_cnt;
+	unsigned int outbound_time;
+	/* qdio_output_handler: number of times called, time spent in */
+	__u64 outbound_handler_start_time;
+	unsigned int outbound_handler_cnt;
+	unsigned int outbound_handler_time;
+	/* number of calls to and time spent in do_QDIO for inbound queue */
+	__u64 inbound_do_qdio_start_time;
+	unsigned int inbound_do_qdio_cnt;
+	unsigned int inbound_do_qdio_time;
+	/* number of calls to and time spent in do_QDIO for outbound queues */
+	__u64 outbound_do_qdio_start_time;
+	unsigned int outbound_do_qdio_cnt;
+	unsigned int outbound_do_qdio_time;
+	unsigned int large_send_bytes;
+	unsigned int large_send_cnt;
+	unsigned int sg_skbs_sent;
+	/* initial values when measuring starts */
+	unsigned long initial_rx_packets;
+	unsigned long initial_tx_packets;
+	/* inbound scatter gather data */
+	unsigned int sg_skbs_rx;
+	unsigned int sg_frags_rx;
+	unsigned int sg_alloc_page_rx;
+	unsigned int tx_csum;
+	unsigned int tx_lin;
+	unsigned int tx_linfail;
+	unsigned int rx_csum;
+};
+
+/* Routing stuff */
+struct qeth_routing_info {
+	enum qeth_routing_types type;
+};
+
+/* IPA stuff */
+struct qeth_ipa_info {
+	__u32 supported_funcs;
+	__u32 enabled_funcs;
+};
+
+/* SETBRIDGEPORT stuff */
+enum qeth_sbp_roles {
+	QETH_SBP_ROLE_NONE	= 0,
+	QETH_SBP_ROLE_PRIMARY	= 1,
+	QETH_SBP_ROLE_SECONDARY	= 2,
+};
+
+enum qeth_sbp_states {
+	QETH_SBP_STATE_INACTIVE	= 0,
+	QETH_SBP_STATE_STANDBY	= 1,
+	QETH_SBP_STATE_ACTIVE	= 2,
+};
+
+#define QETH_SBP_HOST_NOTIFICATION 1
+
+struct qeth_sbp_info {
+	__u32 supported_funcs;
+	enum qeth_sbp_roles role;
+	__u32 hostnotification:1;
+	__u32 reflect_promisc:1;
+	__u32 reflect_promisc_primary:1;
+};
+
+struct qeth_vnicc_info {
+	/* supported/currently configured VNICCs; updated in IPA exchanges */
+	u32 sup_chars;
+	u32 cur_chars;
+	/* supported commands: bitmasks which VNICCs support respective cmd */
+	u32 set_char_sup;
+	u32 getset_timeout_sup;
+	/* timeout value for the learning characteristic */
+	u32 learning_timeout;
+	/* characteristics wanted/configured by user */
+	u32 wanted_chars;
+	/* has user explicitly enabled rx_bcast while online? */
+	bool rx_bcast_enabled;
+};
+
+static inline int qeth_is_ipa_supported(struct qeth_ipa_info *ipa,
+		enum qeth_ipa_funcs func)
+{
+	return (ipa->supported_funcs & func);
+}
+
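+/*
+ * An IPA function is reported as enabled only if it is also in the
+ * supported mask, hence the double masking below.
+ */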
+static inline int qeth_is_ipa_enabled(struct qeth_ipa_info *ipa,
+		enum qeth_ipa_funcs func)
+{
+	return (ipa->supported_funcs & ipa->enabled_funcs & func);
+}
+
+#define qeth_adp_supported(c, f) \
+	qeth_is_ipa_supported(&c->options.adp, f)
+#define qeth_adp_enabled(c, f) \
+	qeth_is_ipa_enabled(&c->options.adp, f)
+#define qeth_is_supported(c, f) \
+	qeth_is_ipa_supported(&c->options.ipa4, f)
+#define qeth_is_enabled(c, f) \
+	qeth_is_ipa_enabled(&c->options.ipa4, f)
+#define qeth_is_supported6(c, f) \
+	qeth_is_ipa_supported(&c->options.ipa6, f)
+#define qeth_is_enabled6(c, f) \
+	qeth_is_ipa_enabled(&c->options.ipa6, f)
+#define qeth_is_ipafunc_supported(c, prot, f) \
+	 ((prot == QETH_PROT_IPV6) ? \
+		qeth_is_supported6(c, f) : qeth_is_supported(c, f))
+#define qeth_is_ipafunc_enabled(c, prot, f) \
+	 ((prot == QETH_PROT_IPV6) ? \
+		qeth_is_enabled6(c, f) : qeth_is_enabled(c, f))
+
+#define QETH_IDX_FUNC_LEVEL_OSD		 0x0101
+#define QETH_IDX_FUNC_LEVEL_IQD		 0x4108
+
+#define QETH_BUFSIZE		4096
+#define CCW_CMD_WRITE		0x01
+#define CCW_CMD_READ		0x02
+
+/**
+ * some more defs
+ */
+#define QETH_TX_TIMEOUT		(100 * HZ)
+#define QETH_RCD_TIMEOUT	(60 * HZ)
+#define QETH_RECLAIM_WORK_TIME	HZ
+#define QETH_MAX_PORTNO		15
+
+/*IPv6 address autoconfiguration stuff*/
+#define UNIQUE_ID_IF_CREATE_ADDR_FAILED 0xfffe
+#define UNIQUE_ID_NOT_BY_CARD		0x10000
+
+/*****************************************************************************/
+/* QDIO queue and buffer handling                                            */
+/*****************************************************************************/
+#define QETH_MAX_QUEUES 4
+#define QETH_IN_BUF_SIZE_DEFAULT 65536
+#define QETH_IN_BUF_COUNT_DEFAULT 64
+#define QETH_IN_BUF_COUNT_HSDEFAULT 128
+#define QETH_IN_BUF_COUNT_MIN 8
+#define QETH_IN_BUF_COUNT_MAX 128
+#define QETH_MAX_BUFFER_ELEMENTS(card) ((card)->qdio.in_buf_size >> 12)
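+/* one buffer element per 4 KiB page of the inbound buffer size, hence ">> 12" */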
+#define QETH_IN_BUF_REQUEUE_THRESHOLD(card) \
+		 ((card)->qdio.in_buf_pool.buf_count / 2)
+
+/* buffers we have to be behind before we get a PCI */
+#define QETH_PCI_THRESHOLD_A(card) ((card)->qdio.in_buf_pool.buf_count+1)
+/*enqueued free buffers left before we get a PCI*/
+#define QETH_PCI_THRESHOLD_B(card) 0
+/*not used unless the microcode gets patched*/
+#define QETH_PCI_TIMER_VALUE(card) 3
+
+/* priority queueing */
+#define QETH_PRIOQ_DEFAULT QETH_NO_PRIO_QUEUEING
+#define QETH_DEFAULT_QUEUE    2
+#define QETH_NO_PRIO_QUEUEING 0
+#define QETH_PRIO_Q_ING_PREC  1
+#define QETH_PRIO_Q_ING_TOS   2
+#define QETH_PRIO_Q_ING_SKB   3
+#define QETH_PRIO_Q_ING_VLAN  4
+
+/* Packing */
+#define QETH_LOW_WATERMARK_PACK  2
+#define QETH_HIGH_WATERMARK_PACK 5
+#define QETH_WATERMARK_PACK_FUZZ 1
+
+/* large receive scatter gather copy break */
+#define QETH_RX_SG_CB (PAGE_SIZE >> 1)
+#define QETH_RX_PULL_LEN 256
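+/*
+ * Convention (sketch): inbound frames up to QETH_RX_SG_CB bytes are
+ * copied into a freshly allocated linear skb; larger frames are passed
+ * up as scatter-gather fragments, with roughly QETH_RX_PULL_LEN bytes
+ * pulled into the linear part for header parsing.
+ */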
+
+struct qeth_hdr_layer3 {
+	__u8  id;
+	__u8  flags;
+	__u16 inbound_checksum; /*TSO:__u16 seqno */
+	__u32 token;		/*TSO: __u32 reserved */
+	__u16 length;
+	__u8  vlan_prio;
+	__u8  ext_flags;
+	__u16 vlan_id;
+	__u16 frame_offset;
+	union {
+		/* TX: */
+		u8 ipv6_addr[16];
+		struct ipv4 {
+			u8 res[12];
+			u32 addr;
+		} ipv4;
+		/* RX: */
+		struct rx {
+			u8 res1[2];
+			u8 src_mac[6];
+			u8 res2[4];
+			u16 vlan_id;
+			u8 res3[2];
+		} rx;
+	} next_hop;
+};
+
+struct qeth_hdr_layer2 {
+	__u8 id;
+	__u8 flags[3];
+	__u8 port_no;
+	__u8 hdr_length;
+	__u16 pkt_length;
+	__u16 seq_no;
+	__u16 vlan_id;
+	__u32 reserved;
+	__u8 reserved2[16];
+} __attribute__ ((packed));
+
+struct qeth_hdr_osn {
+	__u8 id;
+	__u8 reserved;
+	__u16 seq_no;
+	__u16 reserved2;
+	__u16 control_flags;
+	__u16 pdu_length;
+	__u8 reserved3[18];
+	__u32 ccid;
+} __attribute__ ((packed));
+
+struct qeth_hdr {
+	union {
+		struct qeth_hdr_layer2 l2;
+		struct qeth_hdr_layer3 l3;
+		struct qeth_hdr_osn    osn;
+	} hdr;
+} __attribute__ ((packed));
+
+/*TCP Segmentation Offload header*/
+struct qeth_hdr_ext_tso {
+	__u16 hdr_tot_len;
+	__u8  imb_hdr_no;
+	__u8  reserved;
+	__u8  hdr_type;
+	__u8  hdr_version;
+	__u16 hdr_len;
+	__u32 payload_len;
+	__u16 mss;
+	__u16 dg_hdr_len;
+	__u8  padding[16];
+} __attribute__ ((packed));
+
+struct qeth_hdr_tso {
+	struct qeth_hdr hdr;	/*hdr->hdr.l3.xxx*/
+	struct qeth_hdr_ext_tso ext;
+} __attribute__ ((packed));
+
+
+/* flags for qeth_hdr.flags */
+#define QETH_HDR_PASSTHRU 0x10
+#define QETH_HDR_IPV6     0x80
+#define QETH_HDR_CAST_MASK 0x07
+enum qeth_cast_flags {
+	QETH_CAST_UNICAST   = 0x06,
+	QETH_CAST_MULTICAST = 0x04,
+	QETH_CAST_BROADCAST = 0x05,
+	QETH_CAST_ANYCAST   = 0x07,
+	QETH_CAST_NOCAST    = 0x00,
+};
+
+enum qeth_layer2_frame_flags {
+	QETH_LAYER2_FLAG_MULTICAST = 0x01,
+	QETH_LAYER2_FLAG_BROADCAST = 0x02,
+	QETH_LAYER2_FLAG_UNICAST   = 0x04,
+	QETH_LAYER2_FLAG_VLAN      = 0x10,
+};
+
+enum qeth_header_ids {
+	QETH_HEADER_TYPE_LAYER3 = 0x01,
+	QETH_HEADER_TYPE_LAYER2 = 0x02,
+	QETH_HEADER_TYPE_TSO	= 0x03,
+	QETH_HEADER_TYPE_OSN    = 0x04,
+};
+/* flags for qeth_hdr.ext_flags */
+#define QETH_HDR_EXT_VLAN_FRAME       0x01
+#define QETH_HDR_EXT_TOKEN_ID         0x02
+#define QETH_HDR_EXT_INCLUDE_VLAN_TAG 0x04
+#define QETH_HDR_EXT_SRC_MAC_ADDR     0x08
+#define QETH_HDR_EXT_CSUM_HDR_REQ     0x10
+#define QETH_HDR_EXT_CSUM_TRANSP_REQ  0x20
+#define QETH_HDR_EXT_UDP	      0x40 /*bit off for TCP*/
+
+enum qeth_qdio_buffer_states {
+	/*
+	 * inbound: read out by driver; owned by hardware in order to be filled
+	 * outbound: owned by driver in order to be filled
+	 */
+	QETH_QDIO_BUF_EMPTY,
+	/*
+	 * inbound: filled by hardware; owned by driver in order to be read out
+	 * outbound: filled by driver; owned by hardware in order to be sent
+	 */
+	QETH_QDIO_BUF_PRIMED,
+	/*
+	 * inbound: not applicable
+	 * outbound: identified to be pending in TPQ
+	 */
+	QETH_QDIO_BUF_PENDING,
+	/*
+	 * inbound: not applicable
+	 * outbound: found in completion queue
+	 */
+	QETH_QDIO_BUF_IN_CQ,
+	/*
+	 * inbound: not applicable
+	 * outbound: handled via transfer pending / completion queue
+	 */
+	QETH_QDIO_BUF_HANDLED_DELAYED,
+};
+
+enum qeth_qdio_info_states {
+	QETH_QDIO_UNINITIALIZED,
+	QETH_QDIO_ALLOCATED,
+	QETH_QDIO_ESTABLISHED,
+	QETH_QDIO_CLEANING
+};
+
+struct qeth_buffer_pool_entry {
+	struct list_head list;
+	struct list_head init_list;
+	void *elements[QDIO_MAX_ELEMENTS_PER_BUFFER];
+};
+
+struct qeth_qdio_buffer_pool {
+	struct list_head entry_list;
+	int buf_count;
+};
+
+struct qeth_qdio_buffer {
+	struct qdio_buffer *buffer;
+	/* the buffer pool entry currently associated to this buffer */
+	struct qeth_buffer_pool_entry *pool_entry;
+	struct sk_buff *rx_skb;
+};
+
+struct qeth_qdio_q {
+	struct qdio_buffer *qdio_bufs[QDIO_MAX_BUFFERS_PER_Q];
+	struct qeth_qdio_buffer bufs[QDIO_MAX_BUFFERS_PER_Q];
+	int next_buf_to_init;
+};
+
+struct qeth_qdio_out_buffer {
+	struct qdio_buffer *buffer;
+	atomic_t state;
+	int next_element_to_fill;
+	struct sk_buff_head skb_list;
+	int is_header[QDIO_MAX_ELEMENTS_PER_BUFFER];
+
+	struct qeth_qdio_out_q *q;
+	struct qeth_qdio_out_buffer *next_pending;
+};
+
+struct qeth_card;
+
+enum qeth_out_q_states {
+	QETH_OUT_Q_UNLOCKED,
+	QETH_OUT_Q_LOCKED,
+	QETH_OUT_Q_LOCKED_FLUSH,
+};
+
+struct qeth_qdio_out_q {
+	struct qdio_buffer *qdio_bufs[QDIO_MAX_BUFFERS_PER_Q];
+	struct qeth_qdio_out_buffer *bufs[QDIO_MAX_BUFFERS_PER_Q];
+	struct qdio_outbuf_state *bufstates; /* convenience pointer */
+	int queue_no;
+	struct qeth_card *card;
+	atomic_t state;
+	int do_pack;
+	/*
+	 * index of buffer to be filled by driver; state EMPTY or PACKING
+	 */
+	int next_buf_to_fill;
+	/*
+	 * number of buffers that are currently filled (PRIMED)
+	 * -> these buffers are hardware-owned
+	 */
+	atomic_t used_buffers;
+	/* indicates whether PCI flag must be set (or if one is outstanding) */
+	atomic_t set_pci_flags_count;
+};
+
+struct qeth_qdio_info {
+	atomic_t state;
+	/* input */
+	int no_in_queues;
+	struct qeth_qdio_q *in_q;
+	struct qeth_qdio_q *c_q;
+	struct qeth_qdio_buffer_pool in_buf_pool;
+	struct qeth_qdio_buffer_pool init_pool;
+	int in_buf_size;
+
+	/* output */
+	int no_out_queues;
+	struct qeth_qdio_out_q **out_qs;
+	struct qdio_outbuf_state *out_bufstates;
+
+	/* priority queueing */
+	int do_prio_queueing;
+	int default_out_queue;
+};
+
+/**
+ * buffer stuff for read channel
+ */
+#define QETH_CMD_BUFFER_NO	8
+
+/**
+ *  channel state machine
+ */
+enum qeth_channel_states {
+	CH_STATE_UP,
+	CH_STATE_DOWN,
+	CH_STATE_ACTIVATING,
+	CH_STATE_HALTED,
+	CH_STATE_STOPPED,
+	CH_STATE_RCD,
+	CH_STATE_RCD_DONE,
+};
+/**
+ * card state machine
+ */
+enum qeth_card_states {
+	CARD_STATE_DOWN,
+	CARD_STATE_HARDSETUP,
+	CARD_STATE_SOFTSETUP,
+	CARD_STATE_UP,
+	CARD_STATE_RECOVER,
+};
+
+/**
+ * Protocol versions
+ */
+enum qeth_prot_versions {
+	QETH_PROT_IPV4 = 0x0004,
+	QETH_PROT_IPV6 = 0x0006,
+};
+
+enum qeth_cmd_buffer_state {
+	BUF_STATE_FREE,
+	BUF_STATE_LOCKED,
+};
+
+enum qeth_cq {
+	QETH_CQ_DISABLED = 0,
+	QETH_CQ_ENABLED = 1,
+	QETH_CQ_NOTAVAILABLE = 2,
+};
+
+struct qeth_ipato {
+	bool enabled;
+	bool invert4;
+	bool invert6;
+	struct list_head entries;
+};
+
+struct qeth_channel;
+
+struct qeth_cmd_buffer {
+	enum qeth_cmd_buffer_state state;
+	struct qeth_channel *channel;
+	unsigned char *data;
+	int rc;
+	void (*callback) (struct qeth_channel *, struct qeth_cmd_buffer *);
+};
+
+static inline struct qeth_ipa_cmd *__ipa_cmd(struct qeth_cmd_buffer *iob)
+{
+	return (struct qeth_ipa_cmd *)(iob->data + IPA_PDU_HEADER_SIZE);
+}
+
+/**
+ * definition of a qeth channel, used for read and write
+ */
+struct qeth_channel {
+	enum qeth_channel_states state;
+	struct ccw1 *ccw;
+	spinlock_t iob_lock;
+	wait_queue_head_t wait_q;
+	struct ccw_device *ccwdev;
+	/* command buffer for control data */
+	struct qeth_cmd_buffer iob[QETH_CMD_BUFFER_NO];
+	atomic_t irq_pending;
+	int io_buf_no;
+};
+
+/**
+ *  OSA card related definitions
+ */
+struct qeth_token {
+	__u32 issuer_rm_w;
+	__u32 issuer_rm_r;
+	__u32 cm_filter_w;
+	__u32 cm_filter_r;
+	__u32 cm_connection_w;
+	__u32 cm_connection_r;
+	__u32 ulp_filter_w;
+	__u32 ulp_filter_r;
+	__u32 ulp_connection_w;
+	__u32 ulp_connection_r;
+};
+
+struct qeth_seqno {
+	__u32 trans_hdr;
+	__u32 pdu_hdr;
+	__u32 pdu_hdr_ack;
+	__u16 ipa;
+	__u32 pkt_seqno;
+};
+
+struct qeth_reply {
+	struct list_head list;
+	wait_queue_head_t wait_q;
+	int (*callback)(struct qeth_card *, struct qeth_reply *,
+		unsigned long);
+	u32 seqno;
+	unsigned long offset;
+	atomic_t received;
+	int rc;
+	void *param;
+	struct qeth_card *card;
+	refcount_t refcnt;
+};
+
+struct qeth_card_blkt {
+	int time_total;
+	int inter_packet;
+	int inter_packet_jumbo;
+};
+
+#define QETH_BROADCAST_WITH_ECHO    0x01
+#define QETH_BROADCAST_WITHOUT_ECHO 0x02
+#define QETH_LAYER2_MAC_READ	    0x01
+#define QETH_LAYER2_MAC_REGISTERED  0x02
+struct qeth_card_info {
+	unsigned short unit_addr2;
+	unsigned short cula;
+	unsigned short chpid;
+	__u16 func_level;
+	char mcl_level[QETH_MCL_LENGTH + 1];
+	int guestlan;
+	int mac_bits;
+	enum qeth_card_types type;
+	enum qeth_link_types link_type;
+	int broadcast_capable;
+	int unique_id;
+	bool layer_enforced;
+	struct qeth_card_blkt blkt;
+	enum qeth_ipa_promisc_modes promisc_mode;
+	__u32 diagass_support;
+	__u32 hwtrap;
+};
+
+struct qeth_card_options {
+	struct qeth_routing_info route4;
+	struct qeth_ipa_info ipa4;
+	struct qeth_ipa_info adp; /* Adapter parameters */
+	struct qeth_routing_info route6;
+	struct qeth_ipa_info ipa6;
+	struct qeth_sbp_info sbp; /* SETBRIDGEPORT options */
+	struct qeth_vnicc_info vnicc; /* VNICC options */
+	int fake_broadcast;
+	int layer2;
+	int performance_stats;
+	int rx_sg_cb;
+	enum qeth_ipa_isolation_modes isolation;
+	enum qeth_ipa_isolation_modes prev_isolation;
+	int sniffer;
+	enum qeth_cq cq;
+	char hsuid[9];
+};
+
+/*
+ * thread bits for qeth_card thread masks
+ */
+enum qeth_threads {
+	QETH_RECOVER_THREAD = 1,
+};
+
+struct qeth_osn_info {
+	int (*assist_cb)(struct net_device *dev, void *data);
+	int (*data_cb)(struct sk_buff *skb);
+};
+
+enum qeth_discipline_id {
+	QETH_DISCIPLINE_UNDETERMINED = -1,
+	QETH_DISCIPLINE_LAYER3 = 0,
+	QETH_DISCIPLINE_LAYER2 = 1,
+};
+
+struct qeth_discipline {
+	const struct device_type *devtype;
+	int (*process_rx_buffer)(struct qeth_card *card, int budget, int *done);
+	int (*recover)(void *ptr);
+	int (*setup) (struct ccwgroup_device *);
+	void (*remove) (struct ccwgroup_device *);
+	int (*set_online) (struct ccwgroup_device *);
+	int (*set_offline) (struct ccwgroup_device *);
+	int (*freeze)(struct ccwgroup_device *);
+	int (*thaw) (struct ccwgroup_device *);
+	int (*restore)(struct ccwgroup_device *);
+	int (*do_ioctl)(struct net_device *dev, struct ifreq *rq, int cmd);
+	int (*control_event_handler)(struct qeth_card *card,
+					struct qeth_ipa_cmd *cmd);
+};
+
+struct qeth_vlan_vid {
+	struct list_head list;
+	unsigned short vid;
+};
+
+enum qeth_addr_disposition {
+	QETH_DISP_ADDR_DELETE = 0,
+	QETH_DISP_ADDR_DO_NOTHING = 1,
+	QETH_DISP_ADDR_ADD = 2,
+};
+
+struct qeth_rx {
+	int b_count;
+	int b_index;
+	struct qdio_buffer_element *b_element;
+	int e_offset;
+	int qdio_err;
+};
+
+struct carrier_info {
+	__u8  card_type;
+	__u16 port_mode;
+	__u32 port_speed;
+};
+
+struct qeth_switch_info {
+	__u32 capabilities;
+	__u32 settings;
+};
+
+#define QETH_NAPI_WEIGHT NAPI_POLL_WEIGHT
+
+struct qeth_card {
+	struct list_head list;
+	enum qeth_card_states state;
+	int lan_online;
+	spinlock_t lock;
+	struct ccwgroup_device *gdev;
+	struct qeth_channel read;
+	struct qeth_channel write;
+	struct qeth_channel data;
+
+	struct net_device *dev;
+	struct net_device_stats stats;
+
+	struct qeth_card_info info;
+	struct qeth_token token;
+	struct qeth_seqno seqno;
+	struct qeth_card_options options;
+
+	wait_queue_head_t wait_q;
+	spinlock_t mclock;
+	unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
+	struct mutex vid_list_mutex;		/* vid_list */
+	struct list_head vid_list;
+	DECLARE_HASHTABLE(mac_htable, 4);
+	DECLARE_HASHTABLE(ip_htable, 4);
+	DECLARE_HASHTABLE(ip_mc_htable, 4);
+	struct work_struct kernel_thread_starter;
+	spinlock_t thread_mask_lock;
+	unsigned long thread_start_mask;
+	unsigned long thread_allowed_mask;
+	unsigned long thread_running_mask;
+	struct task_struct *recovery_task;
+	spinlock_t ip_lock;
+	struct qeth_ipato ipato;
+	struct list_head cmd_waiter_list;
+	/* QDIO buffer handling */
+	struct qeth_qdio_info qdio;
+	struct qeth_perf_stats perf_stats;
+	int read_or_write_problem;
+	struct qeth_osn_info osn_info;
+	struct qeth_discipline *discipline;
+	atomic_t force_alloc_skb;
+	struct service_level qeth_service_level;
+	struct qdio_ssqd_desc ssqd;
+	debug_info_t *debug;
+	struct mutex conf_mutex;
+	struct mutex discipline_mutex;
+	struct napi_struct napi;
+	struct qeth_rx rx;
+	struct delayed_work buffer_reclaim_work;
+	int reclaim_index;
+	struct work_struct close_dev_work;
+};
+
+struct qeth_card_list_struct {
+	struct list_head list;
+	rwlock_t rwlock;
+};
+
+struct qeth_trap_id {
+	__u16 lparnr;
+	char vmname[8];
+	__u8 chpid;
+	__u8 ssid;
+	__u16 devno;
+} __packed;
+
+/* some helper functions */
+#define QETH_CARD_IFNAME(card) (((card)->dev) ? (card)->dev->name : "")
+
+static inline bool qeth_netdev_is_registered(struct net_device *dev)
+{
+	return dev->netdev_ops != NULL;
+}
+
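+/* Zero the first @elements entries of a QDIO buffer, and additionally
+ * clear the flag bytes of elements 14 and 15, which can hold state
+ * independently of how many elements were actually used.
+ */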
+static inline void qeth_scrub_qdio_buffer(struct qdio_buffer *buf,
+					  unsigned int elements)
+{
+	unsigned int i;
+
+	for (i = 0; i < elements; i++)
+		memset(&buf->element[i], 0, sizeof(struct qdio_buffer_element));
+	buf->element[14].sflags = 0;
+	buf->element[15].sflags = 0;
+}
+
+/**
+ * qeth_get_elements_for_range() -	find number of SBALEs to cover range.
+ * @start:				Start of the address range.
+ * @end:				Address after the end of the range.
+ *
+ * Returns the number of pages, and thus QDIO buffer elements, needed to cover
+ * the specified address range.
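+ *
+ * For example, with 4 KiB pages a range from 0x0ffc to 0x2004 touches the
+ * pages at 0x0000, 0x1000 and 0x2000, so PFN_UP(0x2004) - PFN_DOWN(0x0ffc)
+ * yields 3 - 0 = 3 elements.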
+ */
+static inline int qeth_get_elements_for_range(addr_t start, addr_t end)
+{
+	return PFN_UP(end) - PFN_DOWN(start);
+}
+
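+/* Bit 51 of the s390 TOD clock increments once per microsecond, so
+ * shifting the clock value right by 12 bits yields a microsecond
+ * timestamp.
+ */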
+static inline int qeth_get_micros(void)
+{
+	return (int) (get_tod_clock() >> 12);
+}
+
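+/* Derive the IP version (4, 6, or 0 for non-IP traffic) from an skb's
+ * ethertype, looking through a single 802.1Q VLAN tag if present.
+ */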
+static inline int qeth_get_ip_version(struct sk_buff *skb)
+{
+	struct vlan_ethhdr *veth = vlan_eth_hdr(skb);
+	__be16 prot = veth->h_vlan_proto;
+
+	if (prot == htons(ETH_P_8021Q))
+		prot = veth->h_vlan_encapsulated_proto;
+
+	switch (prot) {
+	case htons(ETH_P_IPV6):
+		return 6;
+	case htons(ETH_P_IP):
+		return 4;
+	default:
+		return 0;
+	}
+}
+
+static inline void qeth_rx_csum(struct qeth_card *card, struct sk_buff *skb,
+				u8 flags)
+{
+	if ((card->dev->features & NETIF_F_RXCSUM) &&
+	    (flags & QETH_HDR_EXT_CSUM_TRANSP_REQ)) {
+		skb->ip_summed = CHECKSUM_UNNECESSARY;
+		if (card->options.performance_stats)
+			card->perf_stats.rx_csum++;
+	} else {
+		skb->ip_summed = CHECKSUM_NONE;
+	}
+}
+
+static inline void qeth_tx_csum(struct sk_buff *skb, u8 *flags, int ipv)
+{
+	*flags |= QETH_HDR_EXT_CSUM_TRANSP_REQ;
+	if ((ipv == 4 && ip_hdr(skb)->protocol == IPPROTO_UDP) ||
+	    (ipv == 6 && ipv6_hdr(skb)->nexthdr == IPPROTO_UDP))
+		*flags |= QETH_HDR_EXT_UDP;
+	if (ipv == 4) {
+		/* some HW requires combined L3+L4 csum offload: */
+		*flags |= QETH_HDR_EXT_CSUM_HDR_REQ;
+		ip_hdr(skb)->check = 0;
+	}
+}
+
+static inline void qeth_put_buffer_pool_entry(struct qeth_card *card,
+		struct qeth_buffer_pool_entry *entry)
+{
+	list_add_tail(&entry->list, &card->qdio.in_buf_pool.entry_list);
+}
+
+static inline int qeth_is_diagass_supported(struct qeth_card *card,
+		enum qeth_diags_cmds cmd)
+{
+	return card->info.diagass_support & (__u32)cmd;
+}
+
+int qeth_send_simple_setassparms_prot(struct qeth_card *card,
+				      enum qeth_ipa_funcs ipa_func,
+				      u16 cmd_code, long data,
+				      enum qeth_prot_versions prot);
+/* IPv4 variant */
+static inline int qeth_send_simple_setassparms(struct qeth_card *card,
+					       enum qeth_ipa_funcs ipa_func,
+					       u16 cmd_code, long data)
+{
+	return qeth_send_simple_setassparms_prot(card, ipa_func, cmd_code,
+						 data, QETH_PROT_IPV4);
+}
+
+static inline int qeth_send_simple_setassparms_v6(struct qeth_card *card,
+						  enum qeth_ipa_funcs ipa_func,
+						  u16 cmd_code, long data)
+{
+	return qeth_send_simple_setassparms_prot(card, ipa_func, cmd_code,
+						 data, QETH_PROT_IPV6);
+}
+
+int qeth_get_priority_queue(struct qeth_card *card, struct sk_buff *skb,
+			    int ipv);
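+/* Select the TX queue for an skb: on IQD (HiperSockets) devices all
+ * non-unicast traffic goes to the last queue; otherwise use either the
+ * configured default queue or the priority queue derived from the packet.
+ */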
+static inline struct qeth_qdio_out_q *qeth_get_tx_queue(struct qeth_card *card,
+							struct sk_buff *skb,
+							int ipv, int cast_type)
+{
+	if (IS_IQD(card) && cast_type != RTN_UNICAST)
+		return card->qdio.out_qs[card->qdio.no_out_queues - 1];
+	if (!card->qdio.do_prio_queueing)
+		return card->qdio.out_qs[card->qdio.default_out_queue];
+	return card->qdio.out_qs[qeth_get_priority_queue(card, skb, ipv)];
+}
+
+extern struct qeth_discipline qeth_l2_discipline;
+extern struct qeth_discipline qeth_l3_discipline;
+extern const struct attribute_group *qeth_generic_attr_groups[];
+extern const struct attribute_group *qeth_osn_attr_groups[];
+extern const struct attribute_group qeth_device_attr_group;
+extern const struct attribute_group qeth_device_blkt_group;
+extern const struct device_type qeth_generic_devtype;
+extern struct workqueue_struct *qeth_wq;
+
+int qeth_card_hw_is_reachable(struct qeth_card *);
+const char *qeth_get_cardname_short(struct qeth_card *);
+int qeth_realloc_buffer_pool(struct qeth_card *, int);
+int qeth_core_load_discipline(struct qeth_card *, enum qeth_discipline_id);
+void qeth_core_free_discipline(struct qeth_card *);
+
+/* exports for qeth discipline device drivers */
+extern struct qeth_card_list_struct qeth_core_card_list;
+extern struct kmem_cache *qeth_core_header_cache;
+extern struct qeth_dbf_info qeth_dbf[QETH_DBF_INFOS];
+
+struct net_device *qeth_clone_netdev(struct net_device *orig);
+void qeth_set_recovery_task(struct qeth_card *);
+void qeth_clear_recovery_task(struct qeth_card *);
+void qeth_set_allowed_threads(struct qeth_card *, unsigned long , int);
+int qeth_threads_running(struct qeth_card *, unsigned long);
+int qeth_wait_for_threads(struct qeth_card *, unsigned long);
+int qeth_do_run_thread(struct qeth_card *, unsigned long);
+void qeth_clear_thread_start_bit(struct qeth_card *, unsigned long);
+void qeth_clear_thread_running_bit(struct qeth_card *, unsigned long);
+int qeth_core_hardsetup_card(struct qeth_card *);
+void qeth_print_status_message(struct qeth_card *);
+int qeth_init_qdio_queues(struct qeth_card *);
+int qeth_send_ipa_cmd(struct qeth_card *, struct qeth_cmd_buffer *,
+		  int (*reply_cb)
+		  (struct qeth_card *, struct qeth_reply *, unsigned long),
+		  void *);
+struct qeth_cmd_buffer *qeth_get_ipacmd_buffer(struct qeth_card *,
+			enum qeth_ipa_cmds, enum qeth_prot_versions);
+struct sk_buff *qeth_core_get_next_skb(struct qeth_card *,
+		struct qeth_qdio_buffer *, struct qdio_buffer_element **, int *,
+		struct qeth_hdr **);
+void qeth_schedule_recovery(struct qeth_card *);
+int qeth_poll(struct napi_struct *napi, int budget);
+void qeth_clear_ipacmd_list(struct qeth_card *);
+int qeth_qdio_clear_card(struct qeth_card *, int);
+void qeth_clear_working_pool_list(struct qeth_card *);
+void qeth_clear_cmd_buffers(struct qeth_channel *);
+void qeth_clear_qdio_buffers(struct qeth_card *);
+void qeth_setadp_promisc_mode(struct qeth_card *);
+struct net_device_stats *qeth_get_stats(struct net_device *);
+int qeth_setadpparms_change_macaddr(struct qeth_card *);
+void qeth_tx_timeout(struct net_device *);
+void qeth_prepare_control_data(struct qeth_card *, int,
+				struct qeth_cmd_buffer *);
+void qeth_release_buffer(struct qeth_channel *, struct qeth_cmd_buffer *);
+void qeth_prepare_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob);
+struct qeth_cmd_buffer *qeth_wait_for_buffer(struct qeth_channel *);
+int qeth_query_switch_attributes(struct qeth_card *card,
+				  struct qeth_switch_info *sw_info);
+int qeth_send_control_data(struct qeth_card *, int, struct qeth_cmd_buffer *,
+	int (*reply_cb)(struct qeth_card *, struct qeth_reply*, unsigned long),
+	void *reply_param);
+int qeth_get_elements_no(struct qeth_card *card, struct sk_buff *skb,
+			 int extra_elems, int data_offset);
+int qeth_get_elements_for_frags(struct sk_buff *);
+int qeth_do_send_packet_fast(struct qeth_qdio_out_q *queue, struct sk_buff *skb,
+			     struct qeth_hdr *hdr, unsigned int offset,
+			     unsigned int hd_len);
+int qeth_do_send_packet(struct qeth_card *card, struct qeth_qdio_out_q *queue,
+			struct sk_buff *skb, struct qeth_hdr *hdr,
+			unsigned int offset, unsigned int hd_len,
+			int elements_needed);
+int qeth_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
+int qeth_core_get_sset_count(struct net_device *, int);
+void qeth_core_get_ethtool_stats(struct net_device *,
+				struct ethtool_stats *, u64 *);
+void qeth_core_get_strings(struct net_device *, u32, u8 *);
+void qeth_core_get_drvinfo(struct net_device *, struct ethtool_drvinfo *);
+void qeth_dbf_longtext(debug_info_t *id, int level, char *text, ...);
+int qeth_core_ethtool_get_link_ksettings(struct net_device *netdev,
+					 struct ethtool_link_ksettings *cmd);
+int qeth_set_access_ctrl_online(struct qeth_card *card, int fallback);
+int qeth_hdr_chk_and_bounce(struct sk_buff *, struct qeth_hdr **, int);
+int qeth_configure_cq(struct qeth_card *, enum qeth_cq);
+int qeth_hw_trap(struct qeth_card *, enum qeth_diags_trap_action);
+void qeth_trace_features(struct qeth_card *);
+void qeth_close_dev(struct qeth_card *);
+int qeth_send_setassparms(struct qeth_card *, struct qeth_cmd_buffer *, __u16,
+			  long,
+			  int (*reply_cb)(struct qeth_card *,
+					  struct qeth_reply *, unsigned long),
+			  void *);
+int qeth_setassparms_cb(struct qeth_card *, struct qeth_reply *, unsigned long);
+struct qeth_cmd_buffer *qeth_get_setassparms_cmd(struct qeth_card *,
+						 enum qeth_ipa_funcs,
+						 __u16, __u16,
+						 enum qeth_prot_versions);
+int qeth_set_features(struct net_device *, netdev_features_t);
+void qeth_enable_hw_features(struct net_device *dev);
+netdev_features_t qeth_fix_features(struct net_device *, netdev_features_t);
+netdev_features_t qeth_features_check(struct sk_buff *skb,
+				      struct net_device *dev,
+				      netdev_features_t features);
+int qeth_vm_request_mac(struct qeth_card *card);
+int qeth_add_hw_header(struct qeth_card *card, struct sk_buff *skb,
+		       struct qeth_hdr **hdr, unsigned int hdr_len,
+		       unsigned int proto_len, unsigned int *elements);
+
+/* exports for OSN */
+int qeth_osn_assist(struct net_device *, void *, int);
+int qeth_osn_register(unsigned char *read_dev_no, struct net_device **,
+		int (*assist_cb)(struct net_device *, void *),
+		int (*data_cb)(struct sk_buff *));
+void qeth_osn_deregister(struct net_device *);
+
+#endif /* __QETH_CORE_H__ */
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
new file mode 100644
index 0000000..b03515d
--- /dev/null
+++ b/drivers/s390/net/qeth_core_main.c
@@ -0,0 +1,6674 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ *    Copyright IBM Corp. 2007, 2009
+ *    Author(s): Utz Bacher <utz.bacher@de.ibm.com>,
+ *		 Frank Pavlic <fpavlic@de.ibm.com>,
+ *		 Thomas Spatzier <tspat@de.ibm.com>,
+ *		 Frank Blaschka <frank.blaschka@de.ibm.com>
+ */
+
+#define KMSG_COMPONENT "qeth"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <linux/compat.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/ip.h>
+#include <linux/tcp.h>
+#include <linux/mii.h>
+#include <linux/kthread.h>
+#include <linux/slab.h>
+#include <linux/if_vlan.h>
+#include <linux/netdevice.h>
+#include <linux/netdev_features.h>
+#include <linux/skbuff.h>
+#include <linux/vmalloc.h>
+
+#include <net/iucv/af_iucv.h>
+#include <net/dsfield.h>
+
+#include <asm/ebcdic.h>
+#include <asm/chpid.h>
+#include <asm/io.h>
+#include <asm/sysinfo.h>
+#include <asm/diag.h>
+#include <asm/cio.h>
+#include <asm/ccwdev.h>
+#include <asm/cpcmd.h>
+
+#include "qeth_core.h"
+
+struct qeth_dbf_info qeth_dbf[QETH_DBF_INFOS] = {
+	/* define dbf - Name, Pages, Areas, Maxlen, Level, View, Handle */
+	/*                   N  P  A    M  L  V                      H  */
+	[QETH_DBF_SETUP] = {"qeth_setup",
+				8, 1,   8, 5, &debug_hex_ascii_view, NULL},
+	[QETH_DBF_MSG]	 = {"qeth_msg", 8, 1, 11 * sizeof(long), 3,
+			    &debug_sprintf_view, NULL},
+	[QETH_DBF_CTRL]  = {"qeth_control",
+		8, 1, QETH_DBF_CTRL_LEN, 5, &debug_hex_ascii_view, NULL},
+};
+EXPORT_SYMBOL_GPL(qeth_dbf);
+
+struct qeth_card_list_struct qeth_core_card_list;
+EXPORT_SYMBOL_GPL(qeth_core_card_list);
+struct kmem_cache *qeth_core_header_cache;
+EXPORT_SYMBOL_GPL(qeth_core_header_cache);
+static struct kmem_cache *qeth_qdio_outbuf_cache;
+
+static struct device *qeth_core_root_dev;
+static struct lock_class_key qdio_out_skb_queue_key;
+static struct mutex qeth_mod_mutex;
+
+static void qeth_send_control_data_cb(struct qeth_channel *,
+			struct qeth_cmd_buffer *);
+static struct qeth_cmd_buffer *qeth_get_buffer(struct qeth_channel *);
+static void qeth_free_buffer_pool(struct qeth_card *);
+static int qeth_qdio_establish(struct qeth_card *);
+static void qeth_free_qdio_buffers(struct qeth_card *);
+static void qeth_notify_skbs(struct qeth_qdio_out_q *queue,
+		struct qeth_qdio_out_buffer *buf,
+		enum iucv_tx_notify notification);
+static void qeth_release_skbs(struct qeth_qdio_out_buffer *buf);
+static int qeth_init_qdio_out_buf(struct qeth_qdio_out_q *, int);
+
+struct workqueue_struct *qeth_wq;
+EXPORT_SYMBOL_GPL(qeth_wq);
+
+int qeth_card_hw_is_reachable(struct qeth_card *card)
+{
+	return (card->state == CARD_STATE_SOFTSETUP) ||
+		(card->state == CARD_STATE_UP);
+}
+EXPORT_SYMBOL_GPL(qeth_card_hw_is_reachable);
+
+static void qeth_close_dev_handler(struct work_struct *work)
+{
+	struct qeth_card *card;
+
+	card = container_of(work, struct qeth_card, close_dev_work);
+	QETH_CARD_TEXT(card, 2, "cldevhdl");
+	rtnl_lock();
+	dev_close(card->dev);
+	rtnl_unlock();
+	ccwgroup_set_offline(card->gdev);
+}
+
+void qeth_close_dev(struct qeth_card *card)
+{
+	QETH_CARD_TEXT(card, 2, "cldevsubm");
+	queue_work(qeth_wq, &card->close_dev_work);
+}
+EXPORT_SYMBOL_GPL(qeth_close_dev);
+
+static const char *qeth_get_cardname(struct qeth_card *card)
+{
+	if (card->info.guestlan) {
+		switch (card->info.type) {
+		case QETH_CARD_TYPE_OSD:
+			return " Virtual NIC QDIO";
+		case QETH_CARD_TYPE_IQD:
+			return " Virtual NIC Hiper";
+		case QETH_CARD_TYPE_OSM:
+			return " Virtual NIC QDIO - OSM";
+		case QETH_CARD_TYPE_OSX:
+			return " Virtual NIC QDIO - OSX";
+		default:
+			return " unknown";
+		}
+	} else {
+		switch (card->info.type) {
+		case QETH_CARD_TYPE_OSD:
+			return " OSD Express";
+		case QETH_CARD_TYPE_IQD:
+			return " HiperSockets";
+		case QETH_CARD_TYPE_OSN:
+			return " OSN QDIO";
+		case QETH_CARD_TYPE_OSM:
+			return " OSM QDIO";
+		case QETH_CARD_TYPE_OSX:
+			return " OSX QDIO";
+		default:
+			return " unknown";
+		}
+	}
+	return " n/a";
+}
+
+/* max length to be returned: 14 */
+const char *qeth_get_cardname_short(struct qeth_card *card)
+{
+	if (card->info.guestlan) {
+		switch (card->info.type) {
+		case QETH_CARD_TYPE_OSD:
+			return "Virt.NIC QDIO";
+		case QETH_CARD_TYPE_IQD:
+			return "Virt.NIC Hiper";
+		case QETH_CARD_TYPE_OSM:
+			return "Virt.NIC OSM";
+		case QETH_CARD_TYPE_OSX:
+			return "Virt.NIC OSX";
+		default:
+			return "unknown";
+		}
+	} else {
+		switch (card->info.type) {
+		case QETH_CARD_TYPE_OSD:
+			switch (card->info.link_type) {
+			case QETH_LINK_TYPE_FAST_ETH:
+				return "OSD_100";
+			case QETH_LINK_TYPE_HSTR:
+				return "HSTR";
+			case QETH_LINK_TYPE_GBIT_ETH:
+				return "OSD_1000";
+			case QETH_LINK_TYPE_10GBIT_ETH:
+				return "OSD_10GIG";
+			case QETH_LINK_TYPE_LANE_ETH100:
+				return "OSD_FE_LANE";
+			case QETH_LINK_TYPE_LANE_TR:
+				return "OSD_TR_LANE";
+			case QETH_LINK_TYPE_LANE_ETH1000:
+				return "OSD_GbE_LANE";
+			case QETH_LINK_TYPE_LANE:
+				return "OSD_ATM_LANE";
+			default:
+				return "OSD_Express";
+			}
+		case QETH_CARD_TYPE_IQD:
+			return "HiperSockets";
+		case QETH_CARD_TYPE_OSN:
+			return "OSN";
+		case QETH_CARD_TYPE_OSM:
+			return "OSM_1000";
+		case QETH_CARD_TYPE_OSX:
+			return "OSX_10GIG";
+		default:
+			return "unknown";
+		}
+	}
+	return "n/a";
+}
+
+void qeth_set_recovery_task(struct qeth_card *card)
+{
+	card->recovery_task = current;
+}
+EXPORT_SYMBOL_GPL(qeth_set_recovery_task);
+
+void qeth_clear_recovery_task(struct qeth_card *card)
+{
+	card->recovery_task = NULL;
+}
+EXPORT_SYMBOL_GPL(qeth_clear_recovery_task);
+
+static bool qeth_is_recovery_task(const struct qeth_card *card)
+{
+	return card->recovery_task == current;
+}
+
+void qeth_set_allowed_threads(struct qeth_card *card, unsigned long threads,
+			 int clear_start_mask)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&card->thread_mask_lock, flags);
+	card->thread_allowed_mask = threads;
+	if (clear_start_mask)
+		card->thread_start_mask &= threads;
+	spin_unlock_irqrestore(&card->thread_mask_lock, flags);
+	wake_up(&card->wait_q);
+}
+EXPORT_SYMBOL_GPL(qeth_set_allowed_threads);
+
+int qeth_threads_running(struct qeth_card *card, unsigned long threads)
+{
+	unsigned long flags;
+	int rc = 0;
+
+	spin_lock_irqsave(&card->thread_mask_lock, flags);
+	rc = (card->thread_running_mask & threads);
+	spin_unlock_irqrestore(&card->thread_mask_lock, flags);
+	return rc;
+}
+EXPORT_SYMBOL_GPL(qeth_threads_running);
+
+int qeth_wait_for_threads(struct qeth_card *card, unsigned long threads)
+{
+	if (qeth_is_recovery_task(card))
+		return 0;
+	return wait_event_interruptible(card->wait_q,
+			qeth_threads_running(card, threads) == 0);
+}
+EXPORT_SYMBOL_GPL(qeth_wait_for_threads);
+
+void qeth_clear_working_pool_list(struct qeth_card *card)
+{
+	struct qeth_buffer_pool_entry *pool_entry, *tmp;
+
+	QETH_CARD_TEXT(card, 5, "clwrklst");
+	list_for_each_entry_safe(pool_entry, tmp,
+				 &card->qdio.in_buf_pool.entry_list, list) {
+		list_del(&pool_entry->list);
+	}
+}
+EXPORT_SYMBOL_GPL(qeth_clear_working_pool_list);
+
+static int qeth_alloc_buffer_pool(struct qeth_card *card)
+{
+	struct qeth_buffer_pool_entry *pool_entry;
+	void *ptr;
+	int i, j;
+
+	QETH_CARD_TEXT(card, 5, "alocpool");
+	for (i = 0; i < card->qdio.init_pool.buf_count; ++i) {
+		pool_entry = kzalloc(sizeof(*pool_entry), GFP_KERNEL);
+		if (!pool_entry) {
+			qeth_free_buffer_pool(card);
+			return -ENOMEM;
+		}
+		for (j = 0; j < QETH_MAX_BUFFER_ELEMENTS(card); ++j) {
+			ptr = (void *) __get_free_page(GFP_KERNEL);
+			if (!ptr) {
+				while (j > 0)
+					free_page((unsigned long)
+						  pool_entry->elements[--j]);
+				kfree(pool_entry);
+				qeth_free_buffer_pool(card);
+				return -ENOMEM;
+			}
+			pool_entry->elements[j] = ptr;
+		}
+		list_add(&pool_entry->init_list,
+			 &card->qdio.init_pool.entry_list);
+	}
+	return 0;
+}
+
+int qeth_realloc_buffer_pool(struct qeth_card *card, int bufcnt)
+{
+	QETH_CARD_TEXT(card, 2, "realcbp");
+
+	if ((card->state != CARD_STATE_DOWN) &&
+	    (card->state != CARD_STATE_RECOVER))
+		return -EPERM;
+
+	/* TODO: steal/add buffers from/to a running card's buffer pool (?) */
+	qeth_clear_working_pool_list(card);
+	qeth_free_buffer_pool(card);
+	card->qdio.in_buf_pool.buf_count = bufcnt;
+	card->qdio.init_pool.buf_count = bufcnt;
+	return qeth_alloc_buffer_pool(card);
+}
+EXPORT_SYMBOL_GPL(qeth_realloc_buffer_pool);
+
+static void qeth_free_qdio_queue(struct qeth_qdio_q *q)
+{
+	if (!q)
+		return;
+
+	qdio_free_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q);
+	kfree(q);
+}
+
+static struct qeth_qdio_q *qeth_alloc_qdio_queue(void)
+{
+	struct qeth_qdio_q *q = kzalloc(sizeof(*q), GFP_KERNEL);
+	int i;
+
+	if (!q)
+		return NULL;
+
+	if (qdio_alloc_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q)) {
+		kfree(q);
+		return NULL;
+	}
+
+	for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; ++i)
+		q->bufs[i].buffer = q->qdio_bufs[i];
+
+	QETH_DBF_HEX(SETUP, 2, &q, sizeof(void *));
+	return q;
+}
+
+static int qeth_cq_init(struct qeth_card *card)
+{
+	int rc;
+
+	if (card->options.cq == QETH_CQ_ENABLED) {
+		QETH_DBF_TEXT(SETUP, 2, "cqinit");
+		qdio_reset_buffers(card->qdio.c_q->qdio_bufs,
+				   QDIO_MAX_BUFFERS_PER_Q);
+		card->qdio.c_q->next_buf_to_init = 127;
+		rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT,
+			     card->qdio.no_in_queues - 1, 0,
+			     127);
+		if (rc) {
+			QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc);
+			goto out;
+		}
+	}
+	rc = 0;
+out:
+	return rc;
+}
+
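+/* When completion-queue mode is enabled, allocate the CQ as a second
+ * input queue plus the shared array of output-buffer states, and point
+ * each output queue's bufstates at its slice of that array. Returns 0 on
+ * success and -1 on allocation failure.
+ */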
+static int qeth_alloc_cq(struct qeth_card *card)
+{
+	int rc;
+
+	if (card->options.cq == QETH_CQ_ENABLED) {
+		int i;
+		struct qdio_outbuf_state *outbuf_states;
+
+		QETH_DBF_TEXT(SETUP, 2, "cqon");
+		card->qdio.c_q = qeth_alloc_qdio_queue();
+		if (!card->qdio.c_q) {
+			rc = -1;
+			goto kmsg_out;
+		}
+		card->qdio.no_in_queues = 2;
+		card->qdio.out_bufstates =
+			kcalloc(card->qdio.no_out_queues *
+					QDIO_MAX_BUFFERS_PER_Q,
+				sizeof(struct qdio_outbuf_state),
+				GFP_KERNEL);
+		outbuf_states = card->qdio.out_bufstates;
+		if (outbuf_states == NULL) {
+			rc = -1;
+			goto free_cq_out;
+		}
+		for (i = 0; i < card->qdio.no_out_queues; ++i) {
+			card->qdio.out_qs[i]->bufstates = outbuf_states;
+			outbuf_states += QDIO_MAX_BUFFERS_PER_Q;
+		}
+	} else {
+		QETH_DBF_TEXT(SETUP, 2, "nocq");
+		card->qdio.c_q = NULL;
+		card->qdio.no_in_queues = 1;
+	}
+	QETH_DBF_TEXT_(SETUP, 2, "iqc%d", card->qdio.no_in_queues);
+	rc = 0;
+out:
+	return rc;
+free_cq_out:
+	qeth_free_qdio_queue(card->qdio.c_q);
+	card->qdio.c_q = NULL;
+kmsg_out:
+	dev_err(&card->gdev->dev, "Failed to create completion queue\n");
+	goto out;
+}
+
+static void qeth_free_cq(struct qeth_card *card)
+{
+	if (card->qdio.c_q) {
+		--card->qdio.no_in_queues;
+		qeth_free_qdio_queue(card->qdio.c_q);
+		card->qdio.c_q = NULL;
+	}
+	kfree(card->qdio.out_bufstates);
+	card->qdio.out_bufstates = NULL;
+}
+
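+/* Translate a TX buffer's completion code into an af_iucv notification:
+ * 0 reports success, 4/16/17/18 an unreachable target, and anything else
+ * a general error. The delayed variants are used when the result arrives
+ * asynchronously via the completion queue.
+ */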
+static enum iucv_tx_notify qeth_compute_cq_notification(int sbalf15,
+							int delayed)
+{
+	enum iucv_tx_notify n;
+
+	switch (sbalf15) {
+	case 0:
+		n = delayed ? TX_NOTIFY_DELAYED_OK : TX_NOTIFY_OK;
+		break;
+	case 4:
+	case 16:
+	case 17:
+	case 18:
+		n = delayed ? TX_NOTIFY_DELAYED_UNREACHABLE :
+			TX_NOTIFY_UNREACHABLE;
+		break;
+	default:
+		n = delayed ? TX_NOTIFY_DELAYED_GENERALERROR :
+			TX_NOTIFY_GENERALERROR;
+		break;
+	}
+
+	return n;
+}
+
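+/* Walk the chain of pending TX buffers hanging off q->bufs[bidx] and free
+ * each one whose delayed completion has been handled (or every buffer,
+ * when a forced cleanup is requested). During forced cleanup the base
+ * buffer itself is re-initialized if its completion was already handled.
+ */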
+static void qeth_cleanup_handled_pending(struct qeth_qdio_out_q *q, int bidx,
+					 int forced_cleanup)
+{
+	if (q->card->options.cq != QETH_CQ_ENABLED)
+		return;
+
+	if (q->bufs[bidx]->next_pending != NULL) {
+		struct qeth_qdio_out_buffer *head = q->bufs[bidx];
+		struct qeth_qdio_out_buffer *c = q->bufs[bidx]->next_pending;
+
+		while (c) {
+			if (forced_cleanup ||
+			    atomic_read(&c->state) ==
+			      QETH_QDIO_BUF_HANDLED_DELAYED) {
+				struct qeth_qdio_out_buffer *f = c;
+				QETH_CARD_TEXT(f->q->card, 5, "fp");
+				QETH_CARD_TEXT_(f->q->card, 5, "%lx", (long) f);
+				/* release here to avoid interleaving between
+				   outbound tasklet and inbound tasklet
+				   regarding notifications and lifecycle */
+				qeth_release_skbs(c);
+
+				c = f->next_pending;
+				WARN_ON_ONCE(head->next_pending != f);
+				head->next_pending = c;
+				kmem_cache_free(qeth_qdio_outbuf_cache, f);
+			} else {
+				head = c;
+				c = c->next_pending;
+			}
+
+		}
+	}
+	if (forced_cleanup && (atomic_read(&(q->bufs[bidx]->state)) ==
+					QETH_QDIO_BUF_HANDLED_DELAYED)) {
+		/* for recovery situations */
+		qeth_init_qdio_out_buf(q, bidx);
+		QETH_CARD_TEXT(q->card, 2, "clprecov");
+	}
+}
+
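+/* Process the asynchronous completion block (QAOB) for a TX buffer that
+ * completed with a delay: derive the af_iucv notification from the buffer
+ * state and the AOB return code, free header-cache allocations that are
+ * still referenced by the storage-block address list, and mark the buffer
+ * as handled so that qeth_cleanup_handled_pending() can reclaim it.
+ */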
+static void qeth_qdio_handle_aob(struct qeth_card *card,
+				 unsigned long phys_aob_addr)
+{
+	struct qaob *aob;
+	struct qeth_qdio_out_buffer *buffer;
+	enum iucv_tx_notify notification;
+	unsigned int i;
+
+	aob = (struct qaob *) phys_to_virt(phys_aob_addr);
+	QETH_CARD_TEXT(card, 5, "haob");
+	QETH_CARD_TEXT_(card, 5, "%lx", phys_aob_addr);
+	buffer = (struct qeth_qdio_out_buffer *) aob->user1;
+	QETH_CARD_TEXT_(card, 5, "%lx", aob->user1);
+
+	if (atomic_cmpxchg(&buffer->state, QETH_QDIO_BUF_PRIMED,
+			   QETH_QDIO_BUF_IN_CQ) == QETH_QDIO_BUF_PRIMED) {
+		notification = TX_NOTIFY_OK;
+	} else {
+		WARN_ON_ONCE(atomic_read(&buffer->state) !=
+							QETH_QDIO_BUF_PENDING);
+		atomic_set(&buffer->state, QETH_QDIO_BUF_IN_CQ);
+		notification = TX_NOTIFY_DELAYED_OK;
+	}
+
+	if (aob->aorc != 0)  {
+		QETH_CARD_TEXT_(card, 2, "aorc%02X", aob->aorc);
+		notification = qeth_compute_cq_notification(aob->aorc, 1);
+	}
+	qeth_notify_skbs(buffer->q, buffer, notification);
+
+	/* Free dangling allocations. The attached skbs are handled by
+	 * qeth_cleanup_handled_pending().
+	 */
+	for (i = 0;
+	     i < aob->sb_count && i < QETH_MAX_BUFFER_ELEMENTS(card);
+	     i++) {
+		if (aob->sba[i] && buffer->is_header[i])
+			kmem_cache_free(qeth_core_header_cache,
+					(void *) aob->sba[i]);
+	}
+	atomic_set(&buffer->state, QETH_QDIO_BUF_HANDLED_DELAYED);
+
+	qdio_release_aob(aob);
+}
+
+static inline int qeth_is_cq(struct qeth_card *card, unsigned int queue)
+{
+	return card->options.cq == QETH_CQ_ENABLED &&
+	    card->qdio.c_q != NULL &&
+	    queue != 0 &&
+	    queue == card->qdio.no_in_queues - 1;
+}
+
+static void qeth_setup_ccw(struct ccw1 *ccw, u8 cmd_code, u32 len, void *data)
+{
+	ccw->cmd_code = cmd_code;
+	ccw->flags = CCW_FLAG_SLI;
+	ccw->count = len;
+	ccw->cda = (__u32) __pa(data);
+}
+
+static int __qeth_issue_next_read(struct qeth_card *card)
+{
+	struct qeth_channel *channel = &card->read;
+	struct qeth_cmd_buffer *iob;
+	int rc;
+
+	QETH_CARD_TEXT(card, 5, "issnxrd");
+	if (channel->state != CH_STATE_UP)
+		return -EIO;
+	iob = qeth_get_buffer(channel);
+	if (!iob) {
+		dev_warn(&card->gdev->dev, "The qeth device driver "
+			"failed to recover an error on the device\n");
+		QETH_DBF_MESSAGE(2, "%s issue_next_read failed: no iob "
+			"available\n", dev_name(&card->gdev->dev));
+		return -ENOMEM;
+	}
+	qeth_setup_ccw(channel->ccw, CCW_CMD_READ, QETH_BUFSIZE, iob->data);
+	QETH_CARD_TEXT(card, 6, "noirqpnd");
+	rc = ccw_device_start(channel->ccwdev, channel->ccw,
+			      (addr_t) iob, 0, 0);
+	if (rc) {
+		QETH_DBF_MESSAGE(2, "%s error in starting next read ccw! "
+			"rc=%i\n", dev_name(&card->gdev->dev), rc);
+		atomic_set(&channel->irq_pending, 0);
+		card->read_or_write_problem = 1;
+		qeth_schedule_recovery(card);
+		wake_up(&card->wait_q);
+	}
+	return rc;
+}
+
+static int qeth_issue_next_read(struct qeth_card *card)
+{
+	int ret;
+
+	spin_lock_irq(get_ccwdev_lock(CARD_RDEV(card)));
+	ret = __qeth_issue_next_read(card);
+	spin_unlock_irq(get_ccwdev_lock(CARD_RDEV(card)));
+
+	return ret;
+}
+
+static struct qeth_reply *qeth_alloc_reply(struct qeth_card *card)
+{
+	struct qeth_reply *reply;
+
+	reply = kzalloc(sizeof(struct qeth_reply), GFP_ATOMIC);
+	if (reply) {
+		refcount_set(&reply->refcnt, 1);
+		atomic_set(&reply->received, 0);
+		reply->card = card;
+	}
+	return reply;
+}
+
+static void qeth_get_reply(struct qeth_reply *reply)
+{
+	refcount_inc(&reply->refcnt);
+}
+
+static void qeth_put_reply(struct qeth_reply *reply)
+{
+	if (refcount_dec_and_test(&reply->refcnt))
+		kfree(reply);
+}
+
+static void qeth_issue_ipa_msg(struct qeth_ipa_cmd *cmd, int rc,
+		struct qeth_card *card)
+{
+	const char *ipa_name;
+	int com = cmd->hdr.command;
+	ipa_name = qeth_get_ipa_cmd_name(com);
+	if (rc)
+		QETH_DBF_MESSAGE(2, "IPA: %s(x%X) for %s/%s returned "
+				"x%X \"%s\"\n",
+				ipa_name, com, dev_name(&card->gdev->dev),
+				QETH_CARD_IFNAME(card), rc,
+				qeth_get_ipa_msg(rc));
+	else
+		QETH_DBF_MESSAGE(5, "IPA: %s(x%X) for %s/%s succeeded\n",
+				ipa_name, com, dev_name(&card->gdev->dev),
+				QETH_CARD_IFNAME(card));
+}
+
+static struct qeth_ipa_cmd *qeth_check_ipa_data(struct qeth_card *card,
+		struct qeth_cmd_buffer *iob)
+{
+	struct qeth_ipa_cmd *cmd = NULL;
+
+	QETH_CARD_TEXT(card, 5, "chkipad");
+	if (IS_IPA(iob->data)) {
+		cmd = (struct qeth_ipa_cmd *) PDU_ENCAPSULATION(iob->data);
+		if (IS_IPA_REPLY(cmd)) {
+			if (cmd->hdr.command != IPA_CMD_SETCCID &&
+			    cmd->hdr.command != IPA_CMD_DELCCID &&
+			    cmd->hdr.command != IPA_CMD_MODCCID &&
+			    cmd->hdr.command != IPA_CMD_SET_DIAG_ASS)
+				qeth_issue_ipa_msg(cmd,
+						cmd->hdr.return_code, card);
+			return cmd;
+		} else {
+			switch (cmd->hdr.command) {
+			case IPA_CMD_STOPLAN:
+				if (cmd->hdr.return_code ==
+						IPA_RC_VEPA_TO_VEB_TRANSITION) {
+					dev_err(&card->gdev->dev,
+					   "Interface %s is down because the "
+					   "adjacent port is no longer in "
+					   "reflective relay mode\n",
+					   QETH_CARD_IFNAME(card));
+					qeth_close_dev(card);
+				} else {
+					dev_warn(&card->gdev->dev,
+					   "The link for interface %s on CHPID"
+					   " 0x%X failed\n",
+					   QETH_CARD_IFNAME(card),
+					   card->info.chpid);
+					qeth_issue_ipa_msg(cmd,
+						cmd->hdr.return_code, card);
+				}
+				card->lan_online = 0;
+				netif_carrier_off(card->dev);
+				return NULL;
+			case IPA_CMD_STARTLAN:
+				dev_info(&card->gdev->dev,
+					   "The link for %s on CHPID 0x%X has"
+					   " been restored\n",
+					   QETH_CARD_IFNAME(card),
+					   card->info.chpid);
+				netif_carrier_on(card->dev);
+				card->lan_online = 1;
+				if (card->info.hwtrap)
+					card->info.hwtrap = 2;
+				qeth_schedule_recovery(card);
+				return NULL;
+			case IPA_CMD_SETBRIDGEPORT_IQD:
+			case IPA_CMD_SETBRIDGEPORT_OSA:
+			case IPA_CMD_ADDRESS_CHANGE_NOTIF:
+				if (card->discipline->control_event_handler
+								(card, cmd))
+					return cmd;
+				else
+					return NULL;
+			case IPA_CMD_MODCCID:
+				return cmd;
+			case IPA_CMD_REGISTER_LOCAL_ADDR:
+				QETH_CARD_TEXT(card, 3, "irla");
+				break;
+			case IPA_CMD_UNREGISTER_LOCAL_ADDR:
+				QETH_CARD_TEXT(card, 3, "urla");
+				break;
+			default:
+				QETH_DBF_MESSAGE(2, "Received data is IPA "
+					   "but not a reply!\n");
+				break;
+			}
+		}
+	}
+	return cmd;
+}
+
+void qeth_clear_ipacmd_list(struct qeth_card *card)
+{
+	struct qeth_reply *reply, *r;
+	unsigned long flags;
+
+	QETH_CARD_TEXT(card, 4, "clipalst");
+
+	spin_lock_irqsave(&card->lock, flags);
+	list_for_each_entry_safe(reply, r, &card->cmd_waiter_list, list) {
+		qeth_get_reply(reply);
+		reply->rc = -EIO;
+		atomic_inc(&reply->received);
+		list_del_init(&reply->list);
+		wake_up(&reply->wait_q);
+		qeth_put_reply(reply);
+	}
+	spin_unlock_irqrestore(&card->lock, flags);
+}
+EXPORT_SYMBOL_GPL(qeth_clear_ipacmd_list);
+
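+/* Check a control-channel response for an IDX TERMINATE indication (both
+ * top bits set in byte 2). Returns -EPERM for cause code 0xf6 (the device
+ * is not configured for the OSI layer required by z/VM), -EIO for any
+ * other termination, and 0 if the buffer does not signal termination.
+ */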
+static int qeth_check_idx_response(struct qeth_card *card,
+	unsigned char *buffer)
+{
+	if (!buffer)
+		return 0;
+
+	QETH_DBF_HEX(CTRL, 2, buffer, QETH_DBF_CTRL_LEN);
+	if ((buffer[2] & 0xc0) == 0xc0) {
+		QETH_DBF_MESSAGE(2, "received an IDX TERMINATE with cause code %#02x\n",
+				 buffer[4]);
+		QETH_CARD_TEXT(card, 2, "ckidxres");
+		QETH_CARD_TEXT(card, 2, " idxterm");
+		QETH_CARD_TEXT_(card, 2, "  rc%d", -EIO);
+		if (buffer[4] == 0xf6) {
+			dev_err(&card->gdev->dev,
+			"The qeth device is not configured "
+			"for the OSI layer required by z/VM\n");
+			return -EPERM;
+		}
+		return -EIO;
+	}
+	return 0;
+}
+
+static struct qeth_card *CARD_FROM_CDEV(struct ccw_device *cdev)
+{
+	struct qeth_card *card = dev_get_drvdata(&((struct ccwgroup_device *)
+		dev_get_drvdata(&cdev->dev))->dev);
+	return card;
+}
+
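+/* Scan the channel's command buffers, starting at io_buf_no, for a free
+ * entry; lock and zero the first one found and return it, or NULL when
+ * all buffers are in use. Must be called with channel->iob_lock held.
+ */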
+static struct qeth_cmd_buffer *__qeth_get_buffer(struct qeth_channel *channel)
+{
+	__u8 index;
+
+	QETH_CARD_TEXT(CARD_FROM_CDEV(channel->ccwdev), 6, "getbuff");
+	index = channel->io_buf_no;
+	do {
+		if (channel->iob[index].state == BUF_STATE_FREE) {
+			channel->iob[index].state = BUF_STATE_LOCKED;
+			channel->io_buf_no = (channel->io_buf_no + 1) %
+				QETH_CMD_BUFFER_NO;
+			memset(channel->iob[index].data, 0, QETH_BUFSIZE);
+			return channel->iob + index;
+		}
+		index = (index + 1) % QETH_CMD_BUFFER_NO;
+	} while (index != channel->io_buf_no);
+
+	return NULL;
+}
+
+void qeth_release_buffer(struct qeth_channel *channel,
+		struct qeth_cmd_buffer *iob)
+{
+	unsigned long flags;
+
+	QETH_CARD_TEXT(CARD_FROM_CDEV(channel->ccwdev), 6, "relbuff");
+	spin_lock_irqsave(&channel->iob_lock, flags);
+	memset(iob->data, 0, QETH_BUFSIZE);
+	iob->state = BUF_STATE_FREE;
+	iob->callback = qeth_send_control_data_cb;
+	iob->rc = 0;
+	spin_unlock_irqrestore(&channel->iob_lock, flags);
+	wake_up(&channel->wait_q);
+}
+EXPORT_SYMBOL_GPL(qeth_release_buffer);
+
+static struct qeth_cmd_buffer *qeth_get_buffer(struct qeth_channel *channel)
+{
+	struct qeth_cmd_buffer *buffer = NULL;
+	unsigned long flags;
+
+	spin_lock_irqsave(&channel->iob_lock, flags);
+	buffer = __qeth_get_buffer(channel);
+	spin_unlock_irqrestore(&channel->iob_lock, flags);
+	return buffer;
+}
+
+struct qeth_cmd_buffer *qeth_wait_for_buffer(struct qeth_channel *channel)
+{
+	struct qeth_cmd_buffer *buffer;
+	wait_event(channel->wait_q,
+		   ((buffer = qeth_get_buffer(channel)) != NULL));
+	return buffer;
+}
+EXPORT_SYMBOL_GPL(qeth_wait_for_buffer);
+
+void qeth_clear_cmd_buffers(struct qeth_channel *channel)
+{
+	int cnt;
+
+	for (cnt = 0; cnt < QETH_CMD_BUFFER_NO; cnt++)
+		qeth_release_buffer(channel, &channel->iob[cnt]);
+	channel->io_buf_no = 0;
+}
+EXPORT_SYMBOL_GPL(qeth_clear_cmd_buffers);
+
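+/* Completion callback for control I/O: validate the IDX response, let
+ * qeth_check_ipa_data() filter unsolicited events, hand OSN commands to
+ * the registered assist callback, and otherwise match the reply against
+ * the card's cmd_waiter_list by sequence number, invoking the waiter's
+ * callback and waking it up. The acknowledged PDU sequence number is
+ * recorded before the buffer is released.
+ */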
+static void qeth_send_control_data_cb(struct qeth_channel *channel,
+		  struct qeth_cmd_buffer *iob)
+{
+	struct qeth_card *card;
+	struct qeth_reply *reply, *r;
+	struct qeth_ipa_cmd *cmd;
+	unsigned long flags;
+	int keep_reply;
+	int rc = 0;
+
+	card = CARD_FROM_CDEV(channel->ccwdev);
+	QETH_CARD_TEXT(card, 4, "sndctlcb");
+	rc = qeth_check_idx_response(card, iob->data);
+	switch (rc) {
+	case 0:
+		break;
+	case -EIO:
+		qeth_clear_ipacmd_list(card);
+		qeth_schedule_recovery(card);
+		/* fall through */
+	default:
+		goto out;
+	}
+
+	cmd = qeth_check_ipa_data(card, iob);
+	if ((cmd == NULL) && (card->state != CARD_STATE_DOWN))
+		goto out;
+	/* in case of OSN: check if cmd is set */
+	if (card->info.type == QETH_CARD_TYPE_OSN &&
+	    cmd &&
+	    cmd->hdr.command != IPA_CMD_STARTLAN &&
+	    card->osn_info.assist_cb != NULL) {
+		card->osn_info.assist_cb(card->dev, cmd);
+		goto out;
+	}
+
+	spin_lock_irqsave(&card->lock, flags);
+	list_for_each_entry_safe(reply, r, &card->cmd_waiter_list, list) {
+		if ((reply->seqno == QETH_IDX_COMMAND_SEQNO) ||
+		    ((cmd) && (reply->seqno == cmd->hdr.seqno))) {
+			qeth_get_reply(reply);
+			list_del_init(&reply->list);
+			spin_unlock_irqrestore(&card->lock, flags);
+			keep_reply = 0;
+			if (reply->callback != NULL) {
+				if (cmd) {
+					reply->offset = (__u16)((char *)cmd -
+							(char *)iob->data);
+					keep_reply = reply->callback(card,
+							reply,
+							(unsigned long)cmd);
+				} else
+					keep_reply = reply->callback(card,
+							reply,
+							(unsigned long)iob);
+			}
+			if (cmd)
+				reply->rc = (u16) cmd->hdr.return_code;
+			else if (iob->rc)
+				reply->rc = iob->rc;
+			if (keep_reply) {
+				spin_lock_irqsave(&card->lock, flags);
+				list_add_tail(&reply->list,
+					      &card->cmd_waiter_list);
+				spin_unlock_irqrestore(&card->lock, flags);
+			} else {
+				atomic_inc(&reply->received);
+				wake_up(&reply->wait_q);
+			}
+			qeth_put_reply(reply);
+			goto out;
+		}
+	}
+	spin_unlock_irqrestore(&card->lock, flags);
+out:
+	memcpy(&card->seqno.pdu_hdr_ack,
+		QETH_PDU_HEADER_SEQ_NO(iob->data),
+		QETH_SEQ_NO_LENGTH);
+	qeth_release_buffer(channel, iob);
+}
+
+static int qeth_setup_channel(struct qeth_channel *channel, bool alloc_buffers)
+{
+	int cnt;
+
+	QETH_DBF_TEXT(SETUP, 2, "setupch");
+
+	channel->ccw = kmalloc(sizeof(struct ccw1), GFP_KERNEL | GFP_DMA);
+	if (!channel->ccw)
+		return -ENOMEM;
+	channel->state = CH_STATE_DOWN;
+	atomic_set(&channel->irq_pending, 0);
+	init_waitqueue_head(&channel->wait_q);
+
+	if (!alloc_buffers)
+		return 0;
+
+	for (cnt = 0; cnt < QETH_CMD_BUFFER_NO; cnt++) {
+		channel->iob[cnt].data =
+			kzalloc(QETH_BUFSIZE, GFP_DMA|GFP_KERNEL);
+		if (channel->iob[cnt].data == NULL)
+			break;
+		channel->iob[cnt].state = BUF_STATE_FREE;
+		channel->iob[cnt].channel = channel;
+		channel->iob[cnt].callback = qeth_send_control_data_cb;
+		channel->iob[cnt].rc = 0;
+	}
+	if (cnt < QETH_CMD_BUFFER_NO) {
+		kfree(channel->ccw);
+		while (cnt-- > 0)
+			kfree(channel->iob[cnt].data);
+		return -ENOMEM;
+	}
+	channel->io_buf_no = 0;
+	spin_lock_init(&channel->iob_lock);
+
+	return 0;
+}
+
+static int qeth_set_thread_start_bit(struct qeth_card *card,
+		unsigned long thread)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&card->thread_mask_lock, flags);
+	if (!(card->thread_allowed_mask & thread) ||
+	      (card->thread_start_mask & thread)) {
+		spin_unlock_irqrestore(&card->thread_mask_lock, flags);
+		return -EPERM;
+	}
+	card->thread_start_mask |= thread;
+	spin_unlock_irqrestore(&card->thread_mask_lock, flags);
+	return 0;
+}
+
+void qeth_clear_thread_start_bit(struct qeth_card *card, unsigned long thread)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&card->thread_mask_lock, flags);
+	card->thread_start_mask &= ~thread;
+	spin_unlock_irqrestore(&card->thread_mask_lock, flags);
+	wake_up(&card->wait_q);
+}
+EXPORT_SYMBOL_GPL(qeth_clear_thread_start_bit);
+
+void qeth_clear_thread_running_bit(struct qeth_card *card, unsigned long thread)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&card->thread_mask_lock, flags);
+	card->thread_running_mask &= ~thread;
+	spin_unlock_irqrestore(&card->thread_mask_lock, flags);
+	wake_up_all(&card->wait_q);
+}
+EXPORT_SYMBOL_GPL(qeth_clear_thread_running_bit);
+
+static int __qeth_do_run_thread(struct qeth_card *card, unsigned long thread)
+{
+	unsigned long flags;
+	int rc = 0;
+
+	spin_lock_irqsave(&card->thread_mask_lock, flags);
+	if (card->thread_start_mask & thread) {
+		if ((card->thread_allowed_mask & thread) &&
+		    !(card->thread_running_mask & thread)) {
+			rc = 1;
+			card->thread_start_mask &= ~thread;
+			card->thread_running_mask |= thread;
+		} else
+			rc = -EPERM;
+	}
+	spin_unlock_irqrestore(&card->thread_mask_lock, flags);
+	return rc;
+}
+
+int qeth_do_run_thread(struct qeth_card *card, unsigned long thread)
+{
+	int rc = 0;
+
+	wait_event(card->wait_q,
+		   (rc = __qeth_do_run_thread(card, thread)) >= 0);
+	return rc;
+}
+EXPORT_SYMBOL_GPL(qeth_do_run_thread);
+
+void qeth_schedule_recovery(struct qeth_card *card)
+{
+	QETH_CARD_TEXT(card, 2, "startrec");
+	if (qeth_set_thread_start_bit(card, QETH_RECOVER_THREAD) == 0)
+		schedule_work(&card->kernel_thread_starter);
+}
+EXPORT_SYMBOL_GPL(qeth_schedule_recovery);
+
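+/* Inspect the channel status, device status and sense data of an IRB and
+ * decide whether the interrupt reports a problem that requires recovery.
+ * Returns 1 if recovery is needed, 0 otherwise.
+ */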
+static int qeth_get_problem(struct ccw_device *cdev, struct irb *irb)
+{
+	int dstat, cstat;
+	char *sense;
+	struct qeth_card *card;
+
+	sense = (char *) irb->ecw;
+	cstat = irb->scsw.cmd.cstat;
+	dstat = irb->scsw.cmd.dstat;
+	card = CARD_FROM_CDEV(cdev);
+
+	if (cstat & (SCHN_STAT_CHN_CTRL_CHK | SCHN_STAT_INTF_CTRL_CHK |
+		     SCHN_STAT_CHN_DATA_CHK | SCHN_STAT_CHAIN_CHECK |
+		     SCHN_STAT_PROT_CHECK | SCHN_STAT_PROG_CHECK)) {
+		QETH_CARD_TEXT(card, 2, "CGENCHK");
+		dev_warn(&cdev->dev, "The qeth device driver "
+			"failed to recover an error on the device\n");
+		QETH_DBF_MESSAGE(2, "%s check on device dstat=x%x, cstat=x%x\n",
+			dev_name(&cdev->dev), dstat, cstat);
+		print_hex_dump(KERN_WARNING, "qeth: irb ", DUMP_PREFIX_OFFSET,
+				16, 1, irb, 64, 1);
+		return 1;
+	}
+
+	if (dstat & DEV_STAT_UNIT_CHECK) {
+		if (sense[SENSE_RESETTING_EVENT_BYTE] &
+		    SENSE_RESETTING_EVENT_FLAG) {
+			QETH_CARD_TEXT(card, 2, "REVIND");
+			return 1;
+		}
+		if (sense[SENSE_COMMAND_REJECT_BYTE] &
+		    SENSE_COMMAND_REJECT_FLAG) {
+			QETH_CARD_TEXT(card, 2, "CMDREJi");
+			return 1;
+		}
+		if ((sense[2] == 0xaf) && (sense[3] == 0xfe)) {
+			QETH_CARD_TEXT(card, 2, "AFFE");
+			return 1;
+		}
+		if ((!sense[0]) && (!sense[1]) && (!sense[2]) && (!sense[3])) {
+			QETH_CARD_TEXT(card, 2, "ZEROSEN");
+			return 0;
+		}
+		QETH_CARD_TEXT(card, 2, "DGENCHK");
+		return 1;
+	}
+	return 0;
+}
+
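+/* Check whether the IRB passed to the interrupt handler is an error
+ * pointer (-EIO, -ETIMEDOUT, ...) rather than a real IRB. Returns the
+ * error value in that case, after logging it and, for a timed-out RCD
+ * request on the data channel, marking that channel as down; returns 0
+ * for a valid IRB or when no card can be derived from the device.
+ */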
+static long __qeth_check_irb_error(struct ccw_device *cdev,
+		unsigned long intparm, struct irb *irb)
+{
+	struct qeth_card *card;
+
+	card = CARD_FROM_CDEV(cdev);
+
+	if (!card || !IS_ERR(irb))
+		return 0;
+
+	switch (PTR_ERR(irb)) {
+	case -EIO:
+		QETH_DBF_MESSAGE(2, "%s i/o-error on device\n",
+			dev_name(&cdev->dev));
+		QETH_CARD_TEXT(card, 2, "ckirberr");
+		QETH_CARD_TEXT_(card, 2, "  rc%d", -EIO);
+		break;
+	case -ETIMEDOUT:
+		dev_warn(&cdev->dev, "A hardware operation timed out"
+			" on the device\n");
+		QETH_CARD_TEXT(card, 2, "ckirberr");
+		QETH_CARD_TEXT_(card, 2, "  rc%d", -ETIMEDOUT);
+		if (intparm == QETH_RCD_PARM) {
+			if (card->data.ccwdev == cdev) {
+				card->data.state = CH_STATE_DOWN;
+				wake_up(&card->wait_q);
+			}
+		}
+		break;
+	default:
+		QETH_DBF_MESSAGE(2, "%s unknown error %ld on device\n",
+			dev_name(&cdev->dev), PTR_ERR(irb));
+		QETH_CARD_TEXT(card, 2, "ckirberr");
+		QETH_CARD_TEXT(card, 2, "  rc???");
+	}
+	return PTR_ERR(irb);
+}
+
+static void qeth_irq(struct ccw_device *cdev, unsigned long intparm,
+		struct irb *irb)
+{
+	int rc;
+	int cstat, dstat;
+	struct qeth_cmd_buffer *iob = NULL;
+	struct qeth_channel *channel;
+	struct qeth_card *card;
+
+	card = CARD_FROM_CDEV(cdev);
+	if (!card)
+		return;
+
+	QETH_CARD_TEXT(card, 5, "irq");
+
+	if (card->read.ccwdev == cdev) {
+		channel = &card->read;
+		QETH_CARD_TEXT(card, 5, "read");
+	} else if (card->write.ccwdev == cdev) {
+		channel = &card->write;
+		QETH_CARD_TEXT(card, 5, "write");
+	} else {
+		channel = &card->data;
+		QETH_CARD_TEXT(card, 5, "data");
+	}
+
+	if (qeth_intparm_is_iob(intparm))
+		iob = (struct qeth_cmd_buffer *) __va((addr_t)intparm);
+
+	if (__qeth_check_irb_error(cdev, intparm, irb)) {
+		/* IO was terminated, free its resources. */
+		if (iob)
+			qeth_release_buffer(iob->channel, iob);
+		atomic_set(&channel->irq_pending, 0);
+		wake_up(&card->wait_q);
+		return;
+	}
+
+	atomic_set(&channel->irq_pending, 0);
+
+	if (irb->scsw.cmd.fctl & (SCSW_FCTL_CLEAR_FUNC))
+		channel->state = CH_STATE_STOPPED;
+
+	if (irb->scsw.cmd.fctl & (SCSW_FCTL_HALT_FUNC))
+		channel->state = CH_STATE_HALTED;
+
+	/* let's wake up immediately on data channel */
+	if ((channel == &card->data) && (intparm != 0) &&
+	    (intparm != QETH_RCD_PARM))
+		goto out;
+
+	if (intparm == QETH_CLEAR_CHANNEL_PARM) {
+		QETH_CARD_TEXT(card, 6, "clrchpar");
+		/* we don't have to handle this further */
+		intparm = 0;
+	}
+	if (intparm == QETH_HALT_CHANNEL_PARM) {
+		QETH_CARD_TEXT(card, 6, "hltchpar");
+		/* we don't have to handle this further */
+		intparm = 0;
+	}
+
+	cstat = irb->scsw.cmd.cstat;
+	dstat = irb->scsw.cmd.dstat;
+
+	if ((dstat & DEV_STAT_UNIT_EXCEP) ||
+	    (dstat & DEV_STAT_UNIT_CHECK) ||
+	    (cstat)) {
+		if (irb->esw.esw0.erw.cons) {
+			dev_warn(&channel->ccwdev->dev,
+				"The qeth device driver failed to recover "
+				"an error on the device\n");
+			QETH_DBF_MESSAGE(2, "%s sense data available. cstat "
+				"0x%X dstat 0x%X\n",
+				dev_name(&channel->ccwdev->dev), cstat, dstat);
+			print_hex_dump(KERN_WARNING, "qeth: irb ",
+				DUMP_PREFIX_OFFSET, 16, 1, irb, 32, 1);
+			print_hex_dump(KERN_WARNING, "qeth: sense data ",
+				DUMP_PREFIX_OFFSET, 16, 1, irb->ecw, 32, 1);
+		}
+		if (intparm == QETH_RCD_PARM) {
+			channel->state = CH_STATE_DOWN;
+			goto out;
+		}
+		rc = qeth_get_problem(cdev, irb);
+		if (rc) {
+			card->read_or_write_problem = 1;
+			qeth_clear_ipacmd_list(card);
+			qeth_schedule_recovery(card);
+			goto out;
+		}
+	}
+
+	if (intparm == QETH_RCD_PARM) {
+		channel->state = CH_STATE_RCD_DONE;
+		goto out;
+	}
+	if (channel == &card->data)
+		return;
+	if (channel == &card->read &&
+	    channel->state == CH_STATE_UP)
+		__qeth_issue_next_read(card);
+
+	if (iob && iob->callback)
+		iob->callback(iob->channel, iob);
+
+out:
+	wake_up(&card->wait_q);
+	return;
+}
+
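+/* Walk the skbs attached to a TX buffer and, for af_iucv traffic, forward
+ * the given transmit notification to the owning socket.
+ */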
+static void qeth_notify_skbs(struct qeth_qdio_out_q *q,
+		struct qeth_qdio_out_buffer *buf,
+		enum iucv_tx_notify notification)
+{
+	struct sk_buff *skb;
+
+	if (skb_queue_empty(&buf->skb_list))
+		goto out;
+	skb = skb_peek(&buf->skb_list);
+	while (skb) {
+		QETH_CARD_TEXT_(q->card, 5, "skbn%d", notification);
+		QETH_CARD_TEXT_(q->card, 5, "%lx", (long) skb);
+		if (be16_to_cpu(skb->protocol) == ETH_P_AF_IUCV) {
+			if (skb->sk) {
+				struct iucv_sock *iucv = iucv_sk(skb->sk);
+				iucv->sk_txnotify(skb, notification);
+			}
+		}
+		if (skb_queue_is_last(&buf->skb_list, skb))
+			skb = NULL;
+		else
+			skb = skb_queue_next(&buf->skb_list, skb);
+	}
+out:
+	return;
+}
+
+static void qeth_release_skbs(struct qeth_qdio_out_buffer *buf)
+{
+	struct sk_buff *skb;
+	struct iucv_sock *iucv;
+	int notify_general_error = 0;
+
+	if (atomic_read(&buf->state) == QETH_QDIO_BUF_PENDING)
+		notify_general_error = 1;
+
+	/* release may never happen from within CQ tasklet scope */
+	WARN_ON_ONCE(atomic_read(&buf->state) == QETH_QDIO_BUF_IN_CQ);
+
+	skb = skb_dequeue(&buf->skb_list);
+	while (skb) {
+		QETH_CARD_TEXT(buf->q->card, 5, "skbr");
+		QETH_CARD_TEXT_(buf->q->card, 5, "%lx", (long) skb);
+		if (notify_general_error &&
+		    be16_to_cpu(skb->protocol) == ETH_P_AF_IUCV) {
+			if (skb->sk) {
+				iucv = iucv_sk(skb->sk);
+				iucv->sk_txnotify(skb, TX_NOTIFY_GENERALERROR);
+			}
+		}
+		refcount_dec(&skb->users);
+		dev_kfree_skb_any(skb);
+		skb = skb_dequeue(&buf->skb_list);
+	}
+}
+
+static void qeth_clear_output_buffer(struct qeth_qdio_out_q *queue,
+				     struct qeth_qdio_out_buffer *buf)
+{
+	int i;
+
+	/* is PCI flag set on buffer? */
+	if (buf->buffer->element[0].sflags & SBAL_SFLAGS0_PCI_REQ)
+		atomic_dec(&queue->set_pci_flags_count);
+
+	qeth_release_skbs(buf);
+
+	for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(queue->card); ++i) {
+		if (buf->buffer->element[i].addr && buf->is_header[i])
+			kmem_cache_free(qeth_core_header_cache,
+				buf->buffer->element[i].addr);
+		buf->is_header[i] = 0;
+	}
+
+	qeth_scrub_qdio_buffer(buf->buffer,
+			       QETH_MAX_BUFFER_ELEMENTS(queue->card));
+	buf->next_element_to_fill = 0;
+	atomic_set(&buf->state, QETH_QDIO_BUF_EMPTY);
+}
+
+static void qeth_clear_outq_buffers(struct qeth_qdio_out_q *q, int free)
+{
+	int j;
+
+	for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j) {
+		if (!q->bufs[j])
+			continue;
+		qeth_cleanup_handled_pending(q, j, 1);
+		qeth_clear_output_buffer(q, q->bufs[j]);
+		if (free) {
+			kmem_cache_free(qeth_qdio_outbuf_cache, q->bufs[j]);
+			q->bufs[j] = NULL;
+		}
+	}
+}
+
+void qeth_clear_qdio_buffers(struct qeth_card *card)
+{
+	int i;
+
+	QETH_CARD_TEXT(card, 2, "clearqdbf");
+	/* clear outbound buffers to free skbs */
+	for (i = 0; i < card->qdio.no_out_queues; ++i) {
+		if (card->qdio.out_qs[i]) {
+			qeth_clear_outq_buffers(card->qdio.out_qs[i], 0);
+		}
+	}
+}
+EXPORT_SYMBOL_GPL(qeth_clear_qdio_buffers);
+
+static void qeth_free_buffer_pool(struct qeth_card *card)
+{
+	struct qeth_buffer_pool_entry *pool_entry, *tmp;
+	int i = 0;
+
+	list_for_each_entry_safe(pool_entry, tmp,
+				 &card->qdio.init_pool.entry_list, init_list) {
+		for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i)
+			free_page((unsigned long)pool_entry->elements[i]);
+		list_del(&pool_entry->init_list);
+		kfree(pool_entry);
+	}
+}
+
+static void qeth_clean_channel(struct qeth_channel *channel)
+{
+	int cnt;
+
+	QETH_DBF_TEXT(SETUP, 2, "freech");
+	for (cnt = 0; cnt < QETH_CMD_BUFFER_NO; cnt++)
+		kfree(channel->iob[cnt].data);
+	kfree(channel->ccw);
+}
+
+static void qeth_set_single_write_queues(struct qeth_card *card)
+{
+	if ((atomic_read(&card->qdio.state) != QETH_QDIO_UNINITIALIZED) &&
+	    (card->qdio.no_out_queues == 4))
+		qeth_free_qdio_buffers(card);
+
+	card->qdio.no_out_queues = 1;
+	if (card->qdio.default_out_queue != 0)
+		dev_info(&card->gdev->dev, "Priority Queueing not supported\n");
+
+	card->qdio.default_out_queue = 0;
+}
+
+static void qeth_set_multiple_write_queues(struct qeth_card *card)
+{
+	if ((atomic_read(&card->qdio.state) != QETH_QDIO_UNINITIALIZED) &&
+	    (card->qdio.no_out_queues == 1)) {
+		qeth_free_qdio_buffers(card);
+		card->qdio.default_out_queue = 2;
+	}
+	card->qdio.no_out_queues = 4;
+}
+
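+/* Read the format-0 channel-path descriptor of the data device to derive
+ * the card's function level and, for non-IQD cards, whether the CHPID
+ * supports one or four outbound queues.
+ */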
+static void qeth_update_from_chp_desc(struct qeth_card *card)
+{
+	struct ccw_device *ccwdev;
+	struct channel_path_desc_fmt0 *chp_dsc;
+
+	QETH_DBF_TEXT(SETUP, 2, "chp_desc");
+
+	ccwdev = card->data.ccwdev;
+	chp_dsc = ccw_device_get_chp_desc(ccwdev, 0);
+	if (!chp_dsc)
+		goto out;
+
+	card->info.func_level = 0x4100 + chp_dsc->desc;
+	if (card->info.type == QETH_CARD_TYPE_IQD)
+		goto out;
+
+	/* CHPP field bit 6 == 1 -> single queue */
+	if ((chp_dsc->chpp & 0x02) == 0x02)
+		qeth_set_single_write_queues(card);
+	else
+		qeth_set_multiple_write_queues(card);
+out:
+	kfree(chp_dsc);
+	QETH_DBF_TEXT_(SETUP, 2, "nr:%x", card->qdio.no_out_queues);
+	QETH_DBF_TEXT_(SETUP, 2, "lvl:%02x", card->info.func_level);
+}
+
+static void qeth_init_qdio_info(struct qeth_card *card)
+{
+	QETH_DBF_TEXT(SETUP, 4, "intqdinf");
+	atomic_set(&card->qdio.state, QETH_QDIO_UNINITIALIZED);
+	card->qdio.do_prio_queueing = QETH_PRIOQ_DEFAULT;
+	card->qdio.default_out_queue = QETH_DEFAULT_QUEUE;
+	card->qdio.no_out_queues = QETH_MAX_QUEUES;
+
+	/* inbound */
+	card->qdio.no_in_queues = 1;
+	card->qdio.in_buf_size = QETH_IN_BUF_SIZE_DEFAULT;
+	if (card->info.type == QETH_CARD_TYPE_IQD)
+		card->qdio.init_pool.buf_count = QETH_IN_BUF_COUNT_HSDEFAULT;
+	else
+		card->qdio.init_pool.buf_count = QETH_IN_BUF_COUNT_DEFAULT;
+	card->qdio.in_buf_pool.buf_count = card->qdio.init_pool.buf_count;
+	INIT_LIST_HEAD(&card->qdio.in_buf_pool.entry_list);
+	INIT_LIST_HEAD(&card->qdio.init_pool.entry_list);
+}
+
+static void qeth_set_initial_options(struct qeth_card *card)
+{
+	card->options.route4.type = NO_ROUTER;
+	card->options.route6.type = NO_ROUTER;
+	card->options.rx_sg_cb = QETH_RX_SG_CB;
+	card->options.isolation = ISOLATION_MODE_NONE;
+	card->options.cq = QETH_CQ_DISABLED;
+}
+
+static int qeth_do_start_thread(struct qeth_card *card, unsigned long thread)
+{
+	unsigned long flags;
+	int rc = 0;
+
+	spin_lock_irqsave(&card->thread_mask_lock, flags);
+	QETH_CARD_TEXT_(card, 4, "  %02x%02x%02x",
+			(u8) card->thread_start_mask,
+			(u8) card->thread_allowed_mask,
+			(u8) card->thread_running_mask);
+	rc = (card->thread_start_mask & thread);
+	spin_unlock_irqrestore(&card->thread_mask_lock, flags);
+	return rc;
+}
+
+static void qeth_start_kernel_thread(struct work_struct *work)
+{
+	struct task_struct *ts;
+	struct qeth_card *card = container_of(work, struct qeth_card,
+					kernel_thread_starter);
+	QETH_CARD_TEXT(card, 2, "strthrd");
+
+	if (card->read.state != CH_STATE_UP &&
+	    card->write.state != CH_STATE_UP)
+		return;
+	if (qeth_do_start_thread(card, QETH_RECOVER_THREAD)) {
+		ts = kthread_run(card->discipline->recover, (void *)card,
+				"qeth_recover");
+		if (IS_ERR(ts)) {
+			qeth_clear_thread_start_bit(card, QETH_RECOVER_THREAD);
+			qeth_clear_thread_running_bit(card,
+				QETH_RECOVER_THREAD);
+		}
+	}
+}
+
+static void qeth_buffer_reclaim_work(struct work_struct *);
+static void qeth_setup_card(struct qeth_card *card)
+{
+	QETH_DBF_TEXT(SETUP, 2, "setupcrd");
+	QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *));
+
+	card->info.type = CARD_RDEV(card)->id.driver_info;
+	card->state = CARD_STATE_DOWN;
+	spin_lock_init(&card->mclock);
+	spin_lock_init(&card->lock);
+	spin_lock_init(&card->ip_lock);
+	spin_lock_init(&card->thread_mask_lock);
+	mutex_init(&card->conf_mutex);
+	mutex_init(&card->discipline_mutex);
+	mutex_init(&card->vid_list_mutex);
+	INIT_WORK(&card->kernel_thread_starter, qeth_start_kernel_thread);
+	INIT_LIST_HEAD(&card->cmd_waiter_list);
+	init_waitqueue_head(&card->wait_q);
+	qeth_set_initial_options(card);
+	/* IP address takeover */
+	INIT_LIST_HEAD(&card->ipato.entries);
+	qeth_init_qdio_info(card);
+	INIT_DELAYED_WORK(&card->buffer_reclaim_work, qeth_buffer_reclaim_work);
+	INIT_WORK(&card->close_dev_work, qeth_close_dev_handler);
+}
+
+static void qeth_core_sl_print(struct seq_file *m, struct service_level *slr)
+{
+	struct qeth_card *card = container_of(slr, struct qeth_card,
+					qeth_service_level);
+	if (card->info.mcl_level[0])
+		seq_printf(m, "qeth: %s firmware level %s\n",
+			CARD_BUS_ID(card), card->info.mcl_level);
+}
+
+static struct qeth_card *qeth_alloc_card(void)
+{
+	struct qeth_card *card;
+
+	QETH_DBF_TEXT(SETUP, 2, "alloccrd");
+	card = kzalloc(sizeof(*card), GFP_KERNEL);
+	if (!card)
+		goto out;
+	QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *));
+	if (qeth_setup_channel(&card->read, true))
+		goto out_ip;
+	if (qeth_setup_channel(&card->write, true))
+		goto out_channel;
+	if (qeth_setup_channel(&card->data, false))
+		goto out_data;
+	card->options.layer2 = -1;
+	card->qeth_service_level.seq_print = qeth_core_sl_print;
+	register_service_level(&card->qeth_service_level);
+	return card;
+
+out_data:
+	qeth_clean_channel(&card->write);
+out_channel:
+	qeth_clean_channel(&card->read);
+out_ip:
+	kfree(card);
+out:
+	return NULL;
+}
+
+static int qeth_clear_channel(struct qeth_channel *channel)
+{
+	unsigned long flags;
+	struct qeth_card *card;
+	int rc;
+
+	card = CARD_FROM_CDEV(channel->ccwdev);
+	QETH_CARD_TEXT(card, 3, "clearch");
+	spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
+	rc = ccw_device_clear(channel->ccwdev, QETH_CLEAR_CHANNEL_PARM);
+	spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);
+
+	if (rc)
+		return rc;
+	rc = wait_event_interruptible_timeout(card->wait_q,
+			channel->state == CH_STATE_STOPPED, QETH_TIMEOUT);
+	if (rc == -ERESTARTSYS)
+		return rc;
+	if (channel->state != CH_STATE_STOPPED)
+		return -ETIME;
+	channel->state = CH_STATE_DOWN;
+	return 0;
+}
+
+static int qeth_halt_channel(struct qeth_channel *channel)
+{
+	unsigned long flags;
+	struct qeth_card *card;
+	int rc;
+
+	card = CARD_FROM_CDEV(channel->ccwdev);
+	QETH_CARD_TEXT(card, 3, "haltch");
+	spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
+	rc = ccw_device_halt(channel->ccwdev, QETH_HALT_CHANNEL_PARM);
+	spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);
+
+	if (rc)
+		return rc;
+	rc = wait_event_interruptible_timeout(card->wait_q,
+			channel->state == CH_STATE_HALTED, QETH_TIMEOUT);
+	if (rc == -ERESTARTSYS)
+		return rc;
+	if (channel->state != CH_STATE_HALTED)
+		return -ETIME;
+	return 0;
+}
+
+static int qeth_halt_channels(struct qeth_card *card)
+{
+	int rc1 = 0, rc2 = 0, rc3 = 0;
+
+	QETH_CARD_TEXT(card, 3, "haltchs");
+	rc1 = qeth_halt_channel(&card->read);
+	rc2 = qeth_halt_channel(&card->write);
+	rc3 = qeth_halt_channel(&card->data);
+	if (rc1)
+		return rc1;
+	if (rc2)
+		return rc2;
+	return rc3;
+}
+
+static int qeth_clear_channels(struct qeth_card *card)
+{
+	int rc1 = 0, rc2 = 0, rc3 = 0;
+
+	QETH_CARD_TEXT(card, 3, "clearchs");
+	rc1 = qeth_clear_channel(&card->read);
+	rc2 = qeth_clear_channel(&card->write);
+	rc3 = qeth_clear_channel(&card->data);
+	if (rc1)
+		return rc1;
+	if (rc2)
+		return rc2;
+	return rc3;
+}
+
+static int qeth_clear_halt_card(struct qeth_card *card, int halt)
+{
+	int rc = 0;
+
+	QETH_CARD_TEXT(card, 3, "clhacrd");
+
+	if (halt)
+		rc = qeth_halt_channels(card);
+	if (rc)
+		return rc;
+	return qeth_clear_channels(card);
+}
+
+int qeth_qdio_clear_card(struct qeth_card *card, int use_halt)
+{
+	int rc = 0;
+
+	QETH_CARD_TEXT(card, 3, "qdioclr");
+	switch (atomic_cmpxchg(&card->qdio.state, QETH_QDIO_ESTABLISHED,
+		QETH_QDIO_CLEANING)) {
+	case QETH_QDIO_ESTABLISHED:
+		if (card->info.type == QETH_CARD_TYPE_IQD)
+			rc = qdio_shutdown(CARD_DDEV(card),
+				QDIO_FLAG_CLEANUP_USING_HALT);
+		else
+			rc = qdio_shutdown(CARD_DDEV(card),
+				QDIO_FLAG_CLEANUP_USING_CLEAR);
+		if (rc)
+			QETH_CARD_TEXT_(card, 3, "1err%d", rc);
+		atomic_set(&card->qdio.state, QETH_QDIO_ALLOCATED);
+		break;
+	case QETH_QDIO_CLEANING:
+		return rc;
+	default:
+		break;
+	}
+	rc = qeth_clear_halt_card(card, use_halt);
+	if (rc)
+		QETH_CARD_TEXT_(card, 3, "2err%d", rc);
+	card->state = CARD_STATE_DOWN;
+	return rc;
+}
+EXPORT_SYMBOL_GPL(qeth_qdio_clear_card);
+
+static int qeth_read_conf_data(struct qeth_card *card, void **buffer,
+			       int *length)
+{
+	struct ciw *ciw;
+	char *rcd_buf;
+	int ret;
+	struct qeth_channel *channel = &card->data;
+	unsigned long flags;
+
+	/*
+	 * scan for RCD command in extended SenseID data
+	 */
+	ciw = ccw_device_get_ciw(channel->ccwdev, CIW_TYPE_RCD);
+	if (!ciw || ciw->cmd == 0)
+		return -EOPNOTSUPP;
+	rcd_buf = kzalloc(ciw->count, GFP_KERNEL | GFP_DMA);
+	if (!rcd_buf)
+		return -ENOMEM;
+
+	qeth_setup_ccw(channel->ccw, ciw->cmd, ciw->count, rcd_buf);
+	channel->state = CH_STATE_RCD;
+	spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
+	ret = ccw_device_start_timeout(channel->ccwdev, channel->ccw,
+				       QETH_RCD_PARM, LPM_ANYPATH, 0,
+				       QETH_RCD_TIMEOUT);
+	spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);
+	if (!ret)
+		wait_event(card->wait_q,
+			   (channel->state == CH_STATE_RCD_DONE ||
+			    channel->state == CH_STATE_DOWN));
+	if (channel->state == CH_STATE_DOWN)
+		ret = -EIO;
+	else
+		channel->state = CH_STATE_DOWN;
+	if (ret) {
+		kfree(rcd_buf);
+		*buffer = NULL;
+		*length = 0;
+	} else {
+		*length = ciw->count;
+		*buffer = rcd_buf;
+	}
+	return ret;
+}
+
+static void qeth_configure_unitaddr(struct qeth_card *card, char *prcd)
+{
+	QETH_DBF_TEXT(SETUP, 2, "cfgunit");
+	card->info.chpid = prcd[30];
+	card->info.unit_addr2 = prcd[31];
+	card->info.cula = prcd[63];
+	card->info.guestlan = ((prcd[0x10] == _ascebc['V']) &&
+			       (prcd[0x11] == _ascebc['M']));
+}
+
+static enum qeth_discipline_id qeth_vm_detect_layer(struct qeth_card *card)
+{
+	enum qeth_discipline_id disc = QETH_DISCIPLINE_UNDETERMINED;
+	struct diag26c_vnic_resp *response = NULL;
+	struct diag26c_vnic_req *request = NULL;
+	struct ccw_dev_id id;
+	char userid[80];
+	int rc = 0;
+
+	QETH_DBF_TEXT(SETUP, 2, "vmlayer");
+
+	cpcmd("QUERY USERID", userid, sizeof(userid), &rc);
+	if (rc)
+		goto out;
+
+	request = kzalloc(sizeof(*request), GFP_KERNEL | GFP_DMA);
+	response = kzalloc(sizeof(*response), GFP_KERNEL | GFP_DMA);
+	if (!request || !response) {
+		rc = -ENOMEM;
+		goto out;
+	}
+
+	ccw_device_get_id(CARD_RDEV(card), &id);
+	request->resp_buf_len = sizeof(*response);
+	request->resp_version = DIAG26C_VERSION6_VM65918;
+	request->req_format = DIAG26C_VNIC_INFO;
+	ASCEBC(userid, 8);
+	memcpy(&request->sys_name, userid, 8);
+	request->devno = id.devno;
+
+	QETH_DBF_HEX(CTRL, 2, request, sizeof(*request));
+	rc = diag26c(request, response, DIAG26C_PORT_VNIC);
+	QETH_DBF_HEX(CTRL, 2, request, sizeof(*request));
+	if (rc)
+		goto out;
+	QETH_DBF_HEX(CTRL, 2, response, sizeof(*response));
+
+	if (request->resp_buf_len < sizeof(*response) ||
+	    response->version != request->resp_version) {
+		rc = -EIO;
+		goto out;
+	}
+
+	if (response->protocol == VNIC_INFO_PROT_L2)
+		disc = QETH_DISCIPLINE_LAYER2;
+	else if (response->protocol == VNIC_INFO_PROT_L3)
+		disc = QETH_DISCIPLINE_LAYER3;
+
+out:
+	kfree(response);
+	kfree(request);
+	if (rc)
+		QETH_DBF_TEXT_(SETUP, 2, "err%x", rc);
+	return disc;
+}
+
+/* Determine whether the device requires a specific layer discipline */
+static enum qeth_discipline_id qeth_enforce_discipline(struct qeth_card *card)
+{
+	enum qeth_discipline_id disc = QETH_DISCIPLINE_UNDETERMINED;
+
+	if (card->info.type == QETH_CARD_TYPE_OSM ||
+	    card->info.type == QETH_CARD_TYPE_OSN)
+		disc = QETH_DISCIPLINE_LAYER2;
+	else if (card->info.guestlan)
+		disc = (card->info.type == QETH_CARD_TYPE_IQD) ?
+				QETH_DISCIPLINE_LAYER3 :
+				qeth_vm_detect_layer(card);
+
+	switch (disc) {
+	case QETH_DISCIPLINE_LAYER2:
+		QETH_DBF_TEXT(SETUP, 3, "force l2");
+		break;
+	case QETH_DISCIPLINE_LAYER3:
+		QETH_DBF_TEXT(SETUP, 3, "force l3");
+		break;
+	default:
+		QETH_DBF_TEXT(SETUP, 3, "force no");
+	}
+
+	return disc;
+}
+
+static void qeth_configure_blkt_default(struct qeth_card *card, char *prcd)
+{
+	QETH_DBF_TEXT(SETUP, 2, "cfgblkt");
+
+	if (prcd[74] == 0xF0 && prcd[75] == 0xF0 &&
+	    prcd[76] >= 0xF1 && prcd[76] <= 0xF4) {
+		card->info.blkt.time_total = 0;
+		card->info.blkt.inter_packet = 0;
+		card->info.blkt.inter_packet_jumbo = 0;
+	} else {
+		card->info.blkt.time_total = 250;
+		card->info.blkt.inter_packet = 5;
+		card->info.blkt.inter_packet_jumbo = 15;
+	}
+}
+
+static void qeth_init_tokens(struct qeth_card *card)
+{
+	card->token.issuer_rm_w = 0x00010103UL;
+	card->token.cm_filter_w = 0x00010108UL;
+	card->token.cm_connection_w = 0x0001010aUL;
+	card->token.ulp_filter_w = 0x0001010bUL;
+	card->token.ulp_connection_w = 0x0001010dUL;
+}
+
+static void qeth_init_func_level(struct qeth_card *card)
+{
+	switch (card->info.type) {
+	case QETH_CARD_TYPE_IQD:
+		card->info.func_level = QETH_IDX_FUNC_LEVEL_IQD;
+		break;
+	case QETH_CARD_TYPE_OSD:
+	case QETH_CARD_TYPE_OSN:
+		card->info.func_level = QETH_IDX_FUNC_LEVEL_OSD;
+		break;
+	default:
+		break;
+	}
+}
+
+static int qeth_idx_activate_get_answer(struct qeth_channel *channel,
+		void (*idx_reply_cb)(struct qeth_channel *,
+			struct qeth_cmd_buffer *))
+{
+	struct qeth_cmd_buffer *iob;
+	unsigned long flags;
+	int rc;
+	struct qeth_card *card;
+
+	QETH_DBF_TEXT(SETUP, 2, "idxanswr");
+	card = CARD_FROM_CDEV(channel->ccwdev);
+	iob = qeth_get_buffer(channel);
+	if (!iob)
+		return -ENOMEM;
+	iob->callback = idx_reply_cb;
+	qeth_setup_ccw(channel->ccw, CCW_CMD_READ, QETH_BUFSIZE, iob->data);
+
+	wait_event(card->wait_q,
+		   atomic_cmpxchg(&channel->irq_pending, 0, 1) == 0);
+	QETH_DBF_TEXT(SETUP, 6, "noirqpnd");
+	spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
+	rc = ccw_device_start_timeout(channel->ccwdev, channel->ccw,
+				      (addr_t) iob, 0, 0, QETH_TIMEOUT);
+	spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);
+
+	if (rc) {
+		QETH_DBF_MESSAGE(2, "Error2 in activating channel rc=%d\n", rc);
+		QETH_DBF_TEXT_(SETUP, 2, "2err%d", rc);
+		atomic_set(&channel->irq_pending, 0);
+		wake_up(&card->wait_q);
+		return rc;
+	}
+	rc = wait_event_interruptible_timeout(card->wait_q,
+			 channel->state == CH_STATE_UP, QETH_TIMEOUT);
+	if (rc == -ERESTARTSYS)
+		return rc;
+	if (channel->state != CH_STATE_UP) {
+		rc = -ETIME;
+		QETH_DBF_TEXT_(SETUP, 2, "3err%d", rc);
+	} else
+		rc = 0;
+	return rc;
+}
+
+static int qeth_idx_activate_channel(struct qeth_channel *channel,
+		void (*idx_reply_cb)(struct qeth_channel *,
+			struct qeth_cmd_buffer *))
+{
+	struct qeth_card *card;
+	struct qeth_cmd_buffer *iob;
+	unsigned long flags;
+	__u16 temp;
+	__u8 tmp;
+	int rc;
+	struct ccw_dev_id temp_devid;
+
+	card = CARD_FROM_CDEV(channel->ccwdev);
+
+	QETH_DBF_TEXT(SETUP, 2, "idxactch");
+
+	iob = qeth_get_buffer(channel);
+	if (!iob)
+		return -ENOMEM;
+	iob->callback = idx_reply_cb;
+	qeth_setup_ccw(channel->ccw, CCW_CMD_WRITE, IDX_ACTIVATE_SIZE,
+		       iob->data);
+	if (channel == &card->write) {
+		memcpy(iob->data, IDX_ACTIVATE_WRITE, IDX_ACTIVATE_SIZE);
+		memcpy(QETH_TRANSPORT_HEADER_SEQ_NO(iob->data),
+		       &card->seqno.trans_hdr, QETH_SEQ_NO_LENGTH);
+		card->seqno.trans_hdr++;
+	} else {
+		memcpy(iob->data, IDX_ACTIVATE_READ, IDX_ACTIVATE_SIZE);
+		memcpy(QETH_TRANSPORT_HEADER_SEQ_NO(iob->data),
+		       &card->seqno.trans_hdr, QETH_SEQ_NO_LENGTH);
+	}
+	tmp = ((u8)card->dev->dev_port) | 0x80;
+	memcpy(QETH_IDX_ACT_PNO(iob->data), &tmp, 1);
+	memcpy(QETH_IDX_ACT_ISSUER_RM_TOKEN(iob->data),
+	       &card->token.issuer_rm_w, QETH_MPC_TOKEN_LENGTH);
+	memcpy(QETH_IDX_ACT_FUNC_LEVEL(iob->data),
+	       &card->info.func_level, sizeof(__u16));
+	ccw_device_get_id(CARD_DDEV(card), &temp_devid);
+	memcpy(QETH_IDX_ACT_QDIO_DEV_CUA(iob->data), &temp_devid.devno, 2);
+	temp = (card->info.cula << 8) + card->info.unit_addr2;
+	memcpy(QETH_IDX_ACT_QDIO_DEV_REALADDR(iob->data), &temp, 2);
+
+	wait_event(card->wait_q,
+		   atomic_cmpxchg(&channel->irq_pending, 0, 1) == 0);
+	QETH_DBF_TEXT(SETUP, 6, "noirqpnd");
+	spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
+	rc = ccw_device_start_timeout(channel->ccwdev, channel->ccw,
+				      (addr_t) iob, 0, 0, QETH_TIMEOUT);
+	spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);
+
+	if (rc) {
+		QETH_DBF_MESSAGE(2, "Error1 in activating channel. rc=%d\n",
+			rc);
+		QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc);
+		atomic_set(&channel->irq_pending, 0);
+		wake_up(&card->wait_q);
+		return rc;
+	}
+	rc = wait_event_interruptible_timeout(card->wait_q,
+			channel->state == CH_STATE_ACTIVATING, QETH_TIMEOUT);
+	if (rc == -ERESTARTSYS)
+		return rc;
+	if (channel->state != CH_STATE_ACTIVATING) {
+		dev_warn(&channel->ccwdev->dev, "The qeth device driver"
+			" failed to recover an error on the device\n");
+		QETH_DBF_MESSAGE(2, "%s IDX activate timed out\n",
+			dev_name(&channel->ccwdev->dev));
+		QETH_DBF_TEXT_(SETUP, 2, "2err%d", -ETIME);
+		return -ETIME;
+	}
+	return qeth_idx_activate_get_answer(channel, idx_reply_cb);
+}
+
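+/* Derive the function level we expect the peer to report from our own IDX
+ * function level: a low byte of 8 maps into the 0x4xx range, bits 8-9 == 01
+ * into the 0x2xx range; anything else is expected back unchanged.
+ */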
+static int qeth_peer_func_level(int level)
+{
+	if ((level & 0xff) == 8)
+		return (level & 0xff) + 0x400;
+	if (((level >> 8) & 3) == 1)
+		return (level & 0xff) + 0x200;
+	return level;
+}
+
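+/* IDX activation handshake, second half: these callbacks inspect the reply
+ * to the IDX_ACTIVATE command. Only a positive reply with a matching peer
+ * function level moves the channel to CH_STATE_UP.
+ */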
+static void qeth_idx_write_cb(struct qeth_channel *channel,
+		struct qeth_cmd_buffer *iob)
+{
+	struct qeth_card *card;
+	__u16 temp;
+
+	QETH_DBF_TEXT(SETUP, 2, "idxwrcb");
+
+	if (channel->state == CH_STATE_DOWN) {
+		channel->state = CH_STATE_ACTIVATING;
+		goto out;
+	}
+	card = CARD_FROM_CDEV(channel->ccwdev);
+
+	if (!(QETH_IS_IDX_ACT_POS_REPLY(iob->data))) {
+		if (QETH_IDX_ACT_CAUSE_CODE(iob->data) == QETH_IDX_ACT_ERR_EXCL)
+			dev_err(&channel->ccwdev->dev,
+				"The adapter is used exclusively by another "
+				"host\n");
+		else
+			QETH_DBF_MESSAGE(2, "%s IDX_ACTIVATE on write channel:"
+				" negative reply\n",
+				dev_name(&channel->ccwdev->dev));
+		goto out;
+	}
+	memcpy(&temp, QETH_IDX_ACT_FUNC_LEVEL(iob->data), 2);
+	if ((temp & ~0x0100) != qeth_peer_func_level(card->info.func_level)) {
+		QETH_DBF_MESSAGE(2, "%s IDX_ACTIVATE on write channel: "
+			"function level mismatch (sent: 0x%x, received: "
+			"0x%x)\n", dev_name(&channel->ccwdev->dev),
+			card->info.func_level, temp);
+		goto out;
+	}
+	channel->state = CH_STATE_UP;
+out:
+	qeth_release_buffer(channel, iob);
+}
+
+static void qeth_idx_read_cb(struct qeth_channel *channel,
+		struct qeth_cmd_buffer *iob)
+{
+	struct qeth_card *card;
+	__u16 temp;
+
+	QETH_DBF_TEXT(SETUP, 2, "idxrdcb");
+	if (channel->state == CH_STATE_DOWN) {
+		channel->state = CH_STATE_ACTIVATING;
+		goto out;
+	}
+
+	card = CARD_FROM_CDEV(channel->ccwdev);
+	if (qeth_check_idx_response(card, iob->data))
+		goto out;
+
+	if (!(QETH_IS_IDX_ACT_POS_REPLY(iob->data))) {
+		switch (QETH_IDX_ACT_CAUSE_CODE(iob->data)) {
+		case QETH_IDX_ACT_ERR_EXCL:
+			dev_err(&channel->ccwdev->dev,
+				"The adapter is used exclusively by another "
+				"host\n");
+			break;
+		case QETH_IDX_ACT_ERR_AUTH:
+		case QETH_IDX_ACT_ERR_AUTH_USER:
+			dev_err(&channel->ccwdev->dev,
+				"Setting the device online failed because of "
+				"insufficient authorization\n");
+			break;
+		default:
+			QETH_DBF_MESSAGE(2, "%s IDX_ACTIVATE on read channel:"
+				" negative reply\n",
+				dev_name(&channel->ccwdev->dev));
+		}
+		QETH_CARD_TEXT_(card, 2, "idxread%c",
+			QETH_IDX_ACT_CAUSE_CODE(iob->data));
+		goto out;
+	}
+
+	memcpy(&temp, QETH_IDX_ACT_FUNC_LEVEL(iob->data), 2);
+	if (temp != qeth_peer_func_level(card->info.func_level)) {
+		QETH_DBF_MESSAGE(2, "%s IDX_ACTIVATE on read channel: function "
+			"level mismatch (sent: 0x%x, received: 0x%x)\n",
+			dev_name(&channel->ccwdev->dev),
+			card->info.func_level, temp);
+		goto out;
+	}
+	memcpy(&card->token.issuer_rm_r,
+	       QETH_IDX_ACT_ISSUER_RM_TOKEN(iob->data),
+	       QETH_MPC_TOKEN_LENGTH);
+	memcpy(&card->info.mcl_level[0],
+	       QETH_IDX_REPLY_LEVEL(iob->data), QETH_MCL_LENGTH);
+	channel->state = CH_STATE_UP;
+out:
+	qeth_release_buffer(channel, iob);
+}
+
+void qeth_prepare_control_data(struct qeth_card *card, int len,
+		struct qeth_cmd_buffer *iob)
+{
+	qeth_setup_ccw(iob->channel->ccw, CCW_CMD_WRITE, len, iob->data);
+	iob->callback = qeth_release_buffer;
+
+	memcpy(QETH_TRANSPORT_HEADER_SEQ_NO(iob->data),
+	       &card->seqno.trans_hdr, QETH_SEQ_NO_LENGTH);
+	card->seqno.trans_hdr++;
+	memcpy(QETH_PDU_HEADER_SEQ_NO(iob->data),
+	       &card->seqno.pdu_hdr, QETH_SEQ_NO_LENGTH);
+	card->seqno.pdu_hdr++;
+	memcpy(QETH_PDU_HEADER_ACK_SEQ_NO(iob->data),
+	       &card->seqno.pdu_hdr_ack, QETH_SEQ_NO_LENGTH);
+	QETH_DBF_HEX(CTRL, 2, iob->data, QETH_DBF_CTRL_LEN);
+}
+EXPORT_SYMBOL_GPL(qeth_prepare_control_data);
+
+/**
+ * qeth_send_control_data() -	send control command to the card
+ * @card:			qeth_card structure pointer
+ * @len:			size of the command buffer
+ * @iob:			qeth_cmd_buffer pointer
+ * @reply_cb:			callback function pointer
+ * @cb_card:			pointer to the qeth_card structure
+ * @cb_reply:			pointer to the qeth_reply structure
+ * @cb_cmd:			pointer to the original iob for non-IPA
+ *				commands, or to the qeth_ipa_cmd structure
+ *				for the IPA commands.
+ * @reply_param:		private pointer passed to the callback
+ *
+ * Returns the value of the `return_code' field of the response
+ * block returned from the hardware, or other error indication.
+ * Value of zero indicates successful execution of the command.
+ *
+ * Callback function gets called one or more times, with cb_cmd
+ * pointing to the response returned by the hardware. Callback
+ * function must return non-zero if more reply blocks are expected,
+ * and zero if the last or only reply block is received. Callback
+ * function can get the value of the reply_param pointer from the
+ * field 'param' of the structure qeth_reply.
+ */
+int qeth_send_control_data(struct qeth_card *card, int len,
+		struct qeth_cmd_buffer *iob,
+		int (*reply_cb)(struct qeth_card *cb_card,
+				struct qeth_reply *cb_reply,
+				unsigned long cb_cmd),
+		void *reply_param)
+{
+	struct qeth_channel *channel = iob->channel;
+	int rc;
+	unsigned long flags;
+	struct qeth_reply *reply = NULL;
+	unsigned long timeout, event_timeout;
+	struct qeth_ipa_cmd *cmd = NULL;
+
+	QETH_CARD_TEXT(card, 2, "sendctl");
+
+	if (card->read_or_write_problem) {
+		qeth_release_buffer(channel, iob);
+		return -EIO;
+	}
+	reply = qeth_alloc_reply(card);
+	if (!reply) {
+		/* avoid leaking the command buffer on allocation failure */
+		qeth_release_buffer(channel, iob);
+		return -ENOMEM;
+	}
+	reply->callback = reply_cb;
+	reply->param = reply_param;
+
+	init_waitqueue_head(&reply->wait_q);
+
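+	/* claim the channel's single I/O slot; only one ccw request may be
+	 * in flight per channel, the slot is released again once the I/O
+	 * completes or fails */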
+	while (atomic_cmpxchg(&channel->irq_pending, 0, 1)) ;
+
+	if (IS_IPA(iob->data)) {
+		cmd = __ipa_cmd(iob);
+		cmd->hdr.seqno = card->seqno.ipa++;
+		reply->seqno = cmd->hdr.seqno;
+		event_timeout = QETH_IPA_TIMEOUT;
+	} else {
+		reply->seqno = QETH_IDX_COMMAND_SEQNO;
+		event_timeout = QETH_TIMEOUT;
+	}
+	qeth_prepare_control_data(card, len, iob);
+
+	spin_lock_irqsave(&card->lock, flags);
+	list_add_tail(&reply->list, &card->cmd_waiter_list);
+	spin_unlock_irqrestore(&card->lock, flags);
+
+	timeout = jiffies + event_timeout;
+
+	QETH_CARD_TEXT(card, 6, "noirqpnd");
+	spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
+	rc = ccw_device_start_timeout(channel->ccwdev, channel->ccw,
+				      (addr_t) iob, 0, 0, event_timeout);
+	spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);
+	if (rc) {
+		QETH_DBF_MESSAGE(2, "%s qeth_send_control_data: "
+			"ccw_device_start rc = %i\n",
+			dev_name(&channel->ccwdev->dev), rc);
+		QETH_CARD_TEXT_(card, 2, " err%d", rc);
+		spin_lock_irqsave(&card->lock, flags);
+		list_del_init(&reply->list);
+		qeth_put_reply(reply);
+		spin_unlock_irqrestore(&card->lock, flags);
+		qeth_release_buffer(channel, iob);
+		atomic_set(&channel->irq_pending, 0);
+		wake_up(&card->wait_q);
+		return rc;
+	}
+
+	/* we have only one long-running ipassist (SETIP for IPv4); since we
+	 * can ensure process context for this command, we can sleep */
+	if (cmd && cmd->hdr.command == IPA_CMD_SETIP &&
+	    cmd->hdr.prot_version == QETH_PROT_IPV4) {
+		if (!wait_event_timeout(reply->wait_q,
+		    atomic_read(&reply->received), event_timeout))
+			goto time_err;
+	} else {
+		while (!atomic_read(&reply->received)) {
+			if (time_after(jiffies, timeout))
+				goto time_err;
+			cpu_relax();
+		}
+	}
+
+	rc = reply->rc;
+	qeth_put_reply(reply);
+	return rc;
+
+time_err:
+	reply->rc = -ETIME;
+	spin_lock_irqsave(&reply->card->lock, flags);
+	list_del_init(&reply->list);
+	spin_unlock_irqrestore(&reply->card->lock, flags);
+	atomic_inc(&reply->received);
+	rc = reply->rc;
+	qeth_put_reply(reply);
+	return rc;
+}
+EXPORT_SYMBOL_GPL(qeth_send_control_data);
+
+static int qeth_cm_enable_cb(struct qeth_card *card, struct qeth_reply *reply,
+		unsigned long data)
+{
+	struct qeth_cmd_buffer *iob;
+
+	QETH_DBF_TEXT(SETUP, 2, "cmenblcb");
+
+	iob = (struct qeth_cmd_buffer *) data;
+	memcpy(&card->token.cm_filter_r,
+	       QETH_CM_ENABLE_RESP_FILTER_TOKEN(iob->data),
+	       QETH_MPC_TOKEN_LENGTH);
+	QETH_DBF_TEXT_(SETUP, 2, "  rc%d", iob->rc);
+	return 0;
+}
+
+static int qeth_cm_enable(struct qeth_card *card)
+{
+	int rc;
+	struct qeth_cmd_buffer *iob;
+
+	QETH_DBF_TEXT(SETUP, 2, "cmenable");
+
+	iob = qeth_wait_for_buffer(&card->write);
+	memcpy(iob->data, CM_ENABLE, CM_ENABLE_SIZE);
+	memcpy(QETH_CM_ENABLE_ISSUER_RM_TOKEN(iob->data),
+	       &card->token.issuer_rm_r, QETH_MPC_TOKEN_LENGTH);
+	memcpy(QETH_CM_ENABLE_FILTER_TOKEN(iob->data),
+	       &card->token.cm_filter_w, QETH_MPC_TOKEN_LENGTH);
+
+	rc = qeth_send_control_data(card, CM_ENABLE_SIZE, iob,
+				    qeth_cm_enable_cb, NULL);
+	return rc;
+}
+
+static int qeth_cm_setup_cb(struct qeth_card *card, struct qeth_reply *reply,
+		unsigned long data)
+{
+	struct qeth_cmd_buffer *iob;
+
+	QETH_DBF_TEXT(SETUP, 2, "cmsetpcb");
+
+	iob = (struct qeth_cmd_buffer *) data;
+	memcpy(&card->token.cm_connection_r,
+	       QETH_CM_SETUP_RESP_DEST_ADDR(iob->data),
+	       QETH_MPC_TOKEN_LENGTH);
+	QETH_DBF_TEXT_(SETUP, 2, "  rc%d", iob->rc);
+	return 0;
+}
+
+static int qeth_cm_setup(struct qeth_card *card)
+{
+	int rc;
+	struct qeth_cmd_buffer *iob;
+
+	QETH_DBF_TEXT(SETUP, 2, "cmsetup");
+
+	iob = qeth_wait_for_buffer(&card->write);
+	memcpy(iob->data, CM_SETUP, CM_SETUP_SIZE);
+	memcpy(QETH_CM_SETUP_DEST_ADDR(iob->data),
+	       &card->token.issuer_rm_r, QETH_MPC_TOKEN_LENGTH);
+	memcpy(QETH_CM_SETUP_CONNECTION_TOKEN(iob->data),
+	       &card->token.cm_connection_w, QETH_MPC_TOKEN_LENGTH);
+	memcpy(QETH_CM_SETUP_FILTER_TOKEN(iob->data),
+	       &card->token.cm_filter_r, QETH_MPC_TOKEN_LENGTH);
+	rc = qeth_send_control_data(card, CM_SETUP_SIZE, iob,
+				    qeth_cm_setup_cb, NULL);
+	return rc;
+}
+
+static int qeth_update_max_mtu(struct qeth_card *card, unsigned int max_mtu)
+{
+	struct net_device *dev = card->dev;
+	unsigned int new_mtu;
+
+	if (!max_mtu) {
+		/* IQD needs accurate max MTU to set up its RX buffers: */
+		if (IS_IQD(card))
+			return -EINVAL;
+		/* tolerate quirky HW: */
+		max_mtu = ETH_MAX_MTU;
+	}
+
+	rtnl_lock();
+	if (IS_IQD(card)) {
+		/* move any device with default MTU to new max MTU: */
+		new_mtu = (dev->mtu == dev->max_mtu) ? max_mtu : dev->mtu;
+
+		/* adjust RX buffer size to new max MTU: */
+		card->qdio.in_buf_size = max_mtu + 2 * PAGE_SIZE;
+		if (dev->max_mtu && dev->max_mtu != max_mtu)
+			qeth_free_qdio_buffers(card);
+	} else {
+		if (dev->mtu)
+			new_mtu = dev->mtu;
+		/* default MTUs for first setup: */
+		else if (card->options.layer2)
+			new_mtu = ETH_DATA_LEN;
+		else
+			new_mtu = ETH_DATA_LEN - 8; /* allow for LLC + SNAP */
+	}
+
+	dev->max_mtu = max_mtu;
+	dev->mtu = min(new_mtu, max_mtu);
+	rtnl_unlock();
+	return 0;
+}
+
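+/* Map the HiperSockets frame-size code reported by ULP ENABLE to the
+ * usable MTU; unknown codes yield 0, which qeth_update_max_mtu() then
+ * rejects for IQD devices.
+ */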
+static int qeth_get_mtu_outof_framesize(int framesize)
+{
+	switch (framesize) {
+	case 0x4000:
+		return 8192;
+	case 0x6000:
+		return 16384;
+	case 0xa000:
+		return 32768;
+	case 0xffff:
+		return 57344;
+	default:
+		return 0;
+	}
+}
+
+static int qeth_ulp_enable_cb(struct qeth_card *card, struct qeth_reply *reply,
+		unsigned long data)
+{
+	__u16 mtu, framesize;
+	__u16 len;
+	__u8 link_type;
+	struct qeth_cmd_buffer *iob;
+
+	QETH_DBF_TEXT(SETUP, 2, "ulpenacb");
+
+	iob = (struct qeth_cmd_buffer *) data;
+	memcpy(&card->token.ulp_filter_r,
+	       QETH_ULP_ENABLE_RESP_FILTER_TOKEN(iob->data),
+	       QETH_MPC_TOKEN_LENGTH);
+	if (card->info.type == QETH_CARD_TYPE_IQD) {
+		memcpy(&framesize, QETH_ULP_ENABLE_RESP_MAX_MTU(iob->data), 2);
+		mtu = qeth_get_mtu_outof_framesize(framesize);
+	} else {
+		mtu = *(__u16 *)QETH_ULP_ENABLE_RESP_MAX_MTU(iob->data);
+	}
+	*(u16 *)reply->param = mtu;
+
+	memcpy(&len, QETH_ULP_ENABLE_RESP_DIFINFO_LEN(iob->data), 2);
+	if (len >= QETH_MPC_DIFINFO_LEN_INDICATES_LINK_TYPE) {
+		memcpy(&link_type,
+		       QETH_ULP_ENABLE_RESP_LINK_TYPE(iob->data), 1);
+		card->info.link_type = link_type;
+	} else
+		card->info.link_type = 0;
+	QETH_DBF_TEXT_(SETUP, 2, "link%d", card->info.link_type);
+	QETH_DBF_TEXT_(SETUP, 2, "  rc%d", iob->rc);
+	return 0;
+}
+
+static u8 qeth_mpc_select_prot_type(struct qeth_card *card)
+{
+	if (IS_OSN(card))
+		return QETH_PROT_OSN2;
+	return (card->options.layer2 == 1) ? QETH_PROT_LAYER2 : QETH_PROT_TCPIP;
+}
+
+static int qeth_ulp_enable(struct qeth_card *card)
+{
+	u8 prot_type = qeth_mpc_select_prot_type(card);
+	struct qeth_cmd_buffer *iob;
+	u16 max_mtu;
+	int rc;
+
+	/* FIXME: trace view callbacks */
+	QETH_DBF_TEXT(SETUP, 2, "ulpenabl");
+
+	iob = qeth_wait_for_buffer(&card->write);
+	memcpy(iob->data, ULP_ENABLE, ULP_ENABLE_SIZE);
+
+	*(QETH_ULP_ENABLE_LINKNUM(iob->data)) = (u8) card->dev->dev_port;
+	memcpy(QETH_ULP_ENABLE_PROT_TYPE(iob->data), &prot_type, 1);
+	memcpy(QETH_ULP_ENABLE_DEST_ADDR(iob->data),
+	       &card->token.cm_connection_r, QETH_MPC_TOKEN_LENGTH);
+	memcpy(QETH_ULP_ENABLE_FILTER_TOKEN(iob->data),
+	       &card->token.ulp_filter_w, QETH_MPC_TOKEN_LENGTH);
+	rc = qeth_send_control_data(card, ULP_ENABLE_SIZE, iob,
+				    qeth_ulp_enable_cb, &max_mtu);
+	if (rc)
+		return rc;
+	return qeth_update_max_mtu(card, max_mtu);
+}
+
+static int qeth_ulp_setup_cb(struct qeth_card *card, struct qeth_reply *reply,
+		unsigned long data)
+{
+	struct qeth_cmd_buffer *iob;
+
+	QETH_DBF_TEXT(SETUP, 2, "ulpstpcb");
+
+	iob = (struct qeth_cmd_buffer *) data;
+	memcpy(&card->token.ulp_connection_r,
+	       QETH_ULP_SETUP_RESP_CONNECTION_TOKEN(iob->data),
+	       QETH_MPC_TOKEN_LENGTH);
+	if (!strncmp("00S", QETH_ULP_SETUP_RESP_CONNECTION_TOKEN(iob->data),
+		     3)) {
+		QETH_DBF_TEXT(SETUP, 2, "olmlimit");
+		dev_err(&card->gdev->dev, "A connection could not be "
+			"established because of an OLM limit\n");
+		iob->rc = -EMLINK;
+	}
+	QETH_DBF_TEXT_(SETUP, 2, "  rc%d", iob->rc);
+	return 0;
+}
+
+static int qeth_ulp_setup(struct qeth_card *card)
+{
+	int rc;
+	__u16 temp;
+	struct qeth_cmd_buffer *iob;
+	struct ccw_dev_id dev_id;
+
+	QETH_DBF_TEXT(SETUP, 2, "ulpsetup");
+
+	iob = qeth_wait_for_buffer(&card->write);
+	memcpy(iob->data, ULP_SETUP, ULP_SETUP_SIZE);
+
+	memcpy(QETH_ULP_SETUP_DEST_ADDR(iob->data),
+	       &card->token.cm_connection_r, QETH_MPC_TOKEN_LENGTH);
+	memcpy(QETH_ULP_SETUP_CONNECTION_TOKEN(iob->data),
+	       &card->token.ulp_connection_w, QETH_MPC_TOKEN_LENGTH);
+	memcpy(QETH_ULP_SETUP_FILTER_TOKEN(iob->data),
+	       &card->token.ulp_filter_r, QETH_MPC_TOKEN_LENGTH);
+
+	ccw_device_get_id(CARD_DDEV(card), &dev_id);
+	memcpy(QETH_ULP_SETUP_CUA(iob->data), &dev_id.devno, 2);
+	temp = (card->info.cula << 8) + card->info.unit_addr2;
+	memcpy(QETH_ULP_SETUP_REAL_DEVADDR(iob->data), &temp, 2);
+	rc = qeth_send_control_data(card, ULP_SETUP_SIZE, iob,
+				    qeth_ulp_setup_cb, NULL);
+	return rc;
+}
+
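+/* Attach a freshly allocated qeth_qdio_out_buffer to slot @bidx of @q.
+ * Any previous buffer in that slot (e.g. one that is still pending async
+ * completion) stays reachable through ->next_pending.
+ */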
+static int qeth_init_qdio_out_buf(struct qeth_qdio_out_q *q, int bidx)
+{
+	struct qeth_qdio_out_buffer *newbuf;
+
+	newbuf = kmem_cache_zalloc(qeth_qdio_outbuf_cache, GFP_ATOMIC);
+	if (!newbuf)
+		return -ENOMEM;
+
+	newbuf->buffer = q->qdio_bufs[bidx];
+	skb_queue_head_init(&newbuf->skb_list);
+	lockdep_set_class(&newbuf->skb_list.lock, &qdio_out_skb_queue_key);
+	newbuf->q = q;
+	newbuf->next_pending = q->bufs[bidx];
+	atomic_set(&newbuf->state, QETH_QDIO_BUF_EMPTY);
+	q->bufs[bidx] = newbuf;
+	return 0;
+}
+
+static void qeth_free_qdio_out_buf(struct qeth_qdio_out_q *q)
+{
+	if (!q)
+		return;
+
+	qdio_free_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q);
+	kfree(q);
+}
+
+static struct qeth_qdio_out_q *qeth_alloc_qdio_out_buf(void)
+{
+	struct qeth_qdio_out_q *q = kzalloc(sizeof(*q), GFP_KERNEL);
+
+	if (!q)
+		return NULL;
+
+	if (qdio_alloc_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q)) {
+		kfree(q);
+		return NULL;
+	}
+	return q;
+}
+
+static int qeth_alloc_qdio_buffers(struct qeth_card *card)
+{
+	int i, j;
+
+	QETH_DBF_TEXT(SETUP, 2, "allcqdbf");
+
+	if (atomic_cmpxchg(&card->qdio.state, QETH_QDIO_UNINITIALIZED,
+		QETH_QDIO_ALLOCATED) != QETH_QDIO_UNINITIALIZED)
+		return 0;
+
+	QETH_DBF_TEXT(SETUP, 2, "inq");
+	card->qdio.in_q = qeth_alloc_qdio_queue();
+	if (!card->qdio.in_q)
+		goto out_nomem;
+
+	/* inbound buffer pool */
+	if (qeth_alloc_buffer_pool(card))
+		goto out_freeinq;
+
+	/* outbound */
+	card->qdio.out_qs =
+		kcalloc(card->qdio.no_out_queues,
+			sizeof(struct qeth_qdio_out_q *),
+			GFP_KERNEL);
+	if (!card->qdio.out_qs)
+		goto out_freepool;
+	for (i = 0; i < card->qdio.no_out_queues; ++i) {
+		card->qdio.out_qs[i] = qeth_alloc_qdio_out_buf();
+		if (!card->qdio.out_qs[i])
+			goto out_freeoutq;
+		QETH_DBF_TEXT_(SETUP, 2, "outq %i", i);
+		QETH_DBF_HEX(SETUP, 2, &card->qdio.out_qs[i], sizeof(void *));
+		card->qdio.out_qs[i]->queue_no = i;
+		/* give outbound qeth_qdio_buffers their qdio_buffers */
+		for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j) {
+			WARN_ON(card->qdio.out_qs[i]->bufs[j] != NULL);
+			if (qeth_init_qdio_out_buf(card->qdio.out_qs[i], j))
+				goto out_freeoutqbufs;
+		}
+	}
+
+	/* completion */
+	if (qeth_alloc_cq(card))
+		goto out_freeoutq;
+
+	return 0;
+
+out_freeoutqbufs:
+	while (j > 0) {
+		--j;
+		kmem_cache_free(qeth_qdio_outbuf_cache,
+				card->qdio.out_qs[i]->bufs[j]);
+		card->qdio.out_qs[i]->bufs[j] = NULL;
+	}
+out_freeoutq:
+	while (i > 0) {
+		qeth_free_qdio_out_buf(card->qdio.out_qs[--i]);
+		qeth_clear_outq_buffers(card->qdio.out_qs[i], 1);
+	}
+	kfree(card->qdio.out_qs);
+	card->qdio.out_qs = NULL;
+out_freepool:
+	qeth_free_buffer_pool(card);
+out_freeinq:
+	qeth_free_qdio_queue(card->qdio.in_q);
+	card->qdio.in_q = NULL;
+out_nomem:
+	atomic_set(&card->qdio.state, QETH_QDIO_UNINITIALIZED);
+	return -ENOMEM;
+}
+
+static void qeth_free_qdio_buffers(struct qeth_card *card)
+{
+	int i, j;
+
+	if (atomic_xchg(&card->qdio.state, QETH_QDIO_UNINITIALIZED) ==
+		QETH_QDIO_UNINITIALIZED)
+		return;
+
+	qeth_free_cq(card);
+	cancel_delayed_work_sync(&card->buffer_reclaim_work);
+	for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j) {
+		if (card->qdio.in_q->bufs[j].rx_skb)
+			dev_kfree_skb_any(card->qdio.in_q->bufs[j].rx_skb);
+	}
+	qeth_free_qdio_queue(card->qdio.in_q);
+	card->qdio.in_q = NULL;
+	/* inbound buffer pool */
+	qeth_free_buffer_pool(card);
+	/* free outbound qdio_qs */
+	if (card->qdio.out_qs) {
+		for (i = 0; i < card->qdio.no_out_queues; ++i) {
+			qeth_clear_outq_buffers(card->qdio.out_qs[i], 1);
+			qeth_free_qdio_out_buf(card->qdio.out_qs[i]);
+		}
+		kfree(card->qdio.out_qs);
+		card->qdio.out_qs = NULL;
+	}
+}
+
+static void qeth_create_qib_param_field(struct qeth_card *card,
+		char *param_field)
+{
+	param_field[0] = _ascebc['P'];
+	param_field[1] = _ascebc['C'];
+	param_field[2] = _ascebc['I'];
+	param_field[3] = _ascebc['T'];
+	*((unsigned int *) (&param_field[4])) = QETH_PCI_THRESHOLD_A(card);
+	*((unsigned int *) (&param_field[8])) = QETH_PCI_THRESHOLD_B(card);
+	*((unsigned int *) (&param_field[12])) = QETH_PCI_TIMER_VALUE(card);
+}
+
+static void qeth_create_qib_param_field_blkt(struct qeth_card *card,
+		char *param_field)
+{
+	param_field[16] = _ascebc['B'];
+	param_field[17] = _ascebc['L'];
+	param_field[18] = _ascebc['K'];
+	param_field[19] = _ascebc['T'];
+	*((unsigned int *) (&param_field[20])) = card->info.blkt.time_total;
+	*((unsigned int *) (&param_field[24])) = card->info.blkt.inter_packet;
+	*((unsigned int *) (&param_field[28])) =
+		card->info.blkt.inter_packet_jumbo;
+}
+
+static int qeth_qdio_activate(struct qeth_card *card)
+{
+	QETH_DBF_TEXT(SETUP, 3, "qdioact");
+	return qdio_activate(CARD_DDEV(card));
+}
+
+static int qeth_dm_act(struct qeth_card *card)
+{
+	int rc;
+	struct qeth_cmd_buffer *iob;
+
+	QETH_DBF_TEXT(SETUP, 2, "dmact");
+
+	iob = qeth_wait_for_buffer(&card->write);
+	memcpy(iob->data, DM_ACT, DM_ACT_SIZE);
+
+	memcpy(QETH_DM_ACT_DEST_ADDR(iob->data),
+	       &card->token.cm_connection_r, QETH_MPC_TOKEN_LENGTH);
+	memcpy(QETH_DM_ACT_CONNECTION_TOKEN(iob->data),
+	       &card->token.ulp_connection_r, QETH_MPC_TOKEN_LENGTH);
+	rc = qeth_send_control_data(card, DM_ACT_SIZE, iob, NULL, NULL);
+	return rc;
+}
+
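+/* Bring up the MPC connection: issue the initial read on the control
+ * channel, walk through the CM ENABLE/SETUP and ULP ENABLE/SETUP exchanges,
+ * then allocate, establish and activate the qdio queues, and finally issue
+ * DM ACT.
+ */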
+static int qeth_mpc_initialize(struct qeth_card *card)
+{
+	int rc;
+
+	QETH_DBF_TEXT(SETUP, 2, "mpcinit");
+
+	rc = qeth_issue_next_read(card);
+	if (rc) {
+		QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc);
+		return rc;
+	}
+	rc = qeth_cm_enable(card);
+	if (rc) {
+		QETH_DBF_TEXT_(SETUP, 2, "2err%d", rc);
+		goto out_qdio;
+	}
+	rc = qeth_cm_setup(card);
+	if (rc) {
+		QETH_DBF_TEXT_(SETUP, 2, "3err%d", rc);
+		goto out_qdio;
+	}
+	rc = qeth_ulp_enable(card);
+	if (rc) {
+		QETH_DBF_TEXT_(SETUP, 2, "4err%d", rc);
+		goto out_qdio;
+	}
+	rc = qeth_ulp_setup(card);
+	if (rc) {
+		QETH_DBF_TEXT_(SETUP, 2, "5err%d", rc);
+		goto out_qdio;
+	}
+	rc = qeth_alloc_qdio_buffers(card);
+	if (rc) {
+		QETH_DBF_TEXT_(SETUP, 2, "5err%d", rc);
+		goto out_qdio;
+	}
+	rc = qeth_qdio_establish(card);
+	if (rc) {
+		QETH_DBF_TEXT_(SETUP, 2, "6err%d", rc);
+		qeth_free_qdio_buffers(card);
+		goto out_qdio;
+	}
+	rc = qeth_qdio_activate(card);
+	if (rc) {
+		QETH_DBF_TEXT_(SETUP, 2, "7err%d", rc);
+		goto out_qdio;
+	}
+	rc = qeth_dm_act(card);
+	if (rc) {
+		QETH_DBF_TEXT_(SETUP, 2, "8err%d", rc);
+		goto out_qdio;
+	}
+
+	return 0;
+out_qdio:
+	qeth_qdio_clear_card(card, card->info.type != QETH_CARD_TYPE_IQD);
+	qdio_free(CARD_DDEV(card));
+	return rc;
+}
+
+void qeth_print_status_message(struct qeth_card *card)
+{
+	switch (card->info.type) {
+	case QETH_CARD_TYPE_OSD:
+	case QETH_CARD_TYPE_OSM:
+	case QETH_CARD_TYPE_OSX:
+		/* VM will use a non-zero first character to indicate a
+		 * HiperSockets-like reporting of the level; OSA sets the
+		 * first character to zero.
+		 */
+		if (!card->info.mcl_level[0]) {
+			sprintf(card->info.mcl_level, "%02x%02x",
+				card->info.mcl_level[2],
+				card->info.mcl_level[3]);
+			break;
+		}
+		/* fallthrough */
+	case QETH_CARD_TYPE_IQD:
+		if ((card->info.guestlan) ||
+		    (card->info.mcl_level[0] & 0x80)) {
+			card->info.mcl_level[0] = (char) _ebcasc[(__u8)
+				card->info.mcl_level[0]];
+			card->info.mcl_level[1] = (char) _ebcasc[(__u8)
+				card->info.mcl_level[1]];
+			card->info.mcl_level[2] = (char) _ebcasc[(__u8)
+				card->info.mcl_level[2]];
+			card->info.mcl_level[3] = (char) _ebcasc[(__u8)
+				card->info.mcl_level[3]];
+			card->info.mcl_level[QETH_MCL_LENGTH] = 0;
+		}
+		break;
+	default:
+		memset(&card->info.mcl_level[0], 0, QETH_MCL_LENGTH + 1);
+	}
+	dev_info(&card->gdev->dev,
+		 "Device is a%s card%s%s%s\nwith link type %s.\n",
+		 qeth_get_cardname(card),
+		 (card->info.mcl_level[0]) ? " (level: " : "",
+		 (card->info.mcl_level[0]) ? card->info.mcl_level : "",
+		 (card->info.mcl_level[0]) ? ")" : "",
+		 qeth_get_cardname_short(card));
+}
+EXPORT_SYMBOL_GPL(qeth_print_status_message);
+
+static void qeth_initialize_working_pool_list(struct qeth_card *card)
+{
+	struct qeth_buffer_pool_entry *entry;
+
+	QETH_CARD_TEXT(card, 5, "inwrklst");
+
+	list_for_each_entry(entry,
+			    &card->qdio.init_pool.entry_list, init_list) {
+		qeth_put_buffer_pool_entry(card, entry);
+	}
+}
+
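+/* Pick a pool entry whose pages are no longer referenced elsewhere
+ * (page_count == 1). If no such entry exists, reuse the first entry and
+ * swap its still-referenced pages for freshly allocated ones.
+ */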
+static struct qeth_buffer_pool_entry *qeth_find_free_buffer_pool_entry(
+					struct qeth_card *card)
+{
+	struct list_head *plh;
+	struct qeth_buffer_pool_entry *entry;
+	int i, free;
+	struct page *page;
+
+	if (list_empty(&card->qdio.in_buf_pool.entry_list))
+		return NULL;
+
+	list_for_each(plh, &card->qdio.in_buf_pool.entry_list) {
+		entry = list_entry(plh, struct qeth_buffer_pool_entry, list);
+		free = 1;
+		for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i) {
+			if (page_count(virt_to_page(entry->elements[i])) > 1) {
+				free = 0;
+				break;
+			}
+		}
+		if (free) {
+			list_del_init(&entry->list);
+			return entry;
+		}
+	}
+
+	/* no free buffer in pool so take first one and swap pages */
+	entry = list_entry(card->qdio.in_buf_pool.entry_list.next,
+			struct qeth_buffer_pool_entry, list);
+	for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i) {
+		if (page_count(virt_to_page(entry->elements[i])) > 1) {
+			page = alloc_page(GFP_ATOMIC);
+			if (!page) {
+				return NULL;
+			} else {
+				free_page((unsigned long)entry->elements[i]);
+				entry->elements[i] = page_address(page);
+				if (card->options.performance_stats)
+					card->perf_stats.sg_alloc_page_rx++;
+			}
+		}
+	}
+	list_del_init(&entry->list);
+	return entry;
+}
+
+static int qeth_init_input_buffer(struct qeth_card *card,
+		struct qeth_qdio_buffer *buf)
+{
+	struct qeth_buffer_pool_entry *pool_entry;
+	int i;
+
+	if ((card->options.cq == QETH_CQ_ENABLED) && (!buf->rx_skb)) {
+		buf->rx_skb = netdev_alloc_skb(card->dev,
+					       QETH_RX_PULL_LEN + ETH_HLEN);
+		if (!buf->rx_skb)
+			return 1;
+	}
+
+	pool_entry = qeth_find_free_buffer_pool_entry(card);
+	if (!pool_entry)
+		return 1;
+
+	/*
+	 * since the buffer is accessed only from the input_tasklet,
+	 * there shouldn't be a need to synchronize; also, since we use
+	 * the QETH_IN_BUF_REQUEUE_THRESHOLD we should never run out of
+	 * buffers
+	 */
+
+	buf->pool_entry = pool_entry;
+	for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i) {
+		buf->buffer->element[i].length = PAGE_SIZE;
+		buf->buffer->element[i].addr =  pool_entry->elements[i];
+		if (i == QETH_MAX_BUFFER_ELEMENTS(card) - 1)
+			buf->buffer->element[i].eflags = SBAL_EFLAGS_LAST_ENTRY;
+		else
+			buf->buffer->element[i].eflags = 0;
+		buf->buffer->element[i].sflags = 0;
+	}
+	return 0;
+}
+
+int qeth_init_qdio_queues(struct qeth_card *card)
+{
+	int i, j;
+	int rc;
+
+	QETH_DBF_TEXT(SETUP, 2, "initqdqs");
+
+	/* inbound queue */
+	qdio_reset_buffers(card->qdio.in_q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q);
+	memset(&card->rx, 0, sizeof(struct qeth_rx));
+	qeth_initialize_working_pool_list(card);
+	/* give only as many buffers to hardware as we have buffer pool entries */
+	for (i = 0; i < card->qdio.in_buf_pool.buf_count - 1; ++i)
+		qeth_init_input_buffer(card, &card->qdio.in_q->bufs[i]);
+	card->qdio.in_q->next_buf_to_init =
+		card->qdio.in_buf_pool.buf_count - 1;
+	rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, 0, 0,
+		     card->qdio.in_buf_pool.buf_count - 1);
+	if (rc) {
+		QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc);
+		return rc;
+	}
+
+	/* completion */
+	rc = qeth_cq_init(card);
+	if (rc)
+		return rc;
+
+	/* outbound queue */
+	for (i = 0; i < card->qdio.no_out_queues; ++i) {
+		qdio_reset_buffers(card->qdio.out_qs[i]->qdio_bufs,
+				   QDIO_MAX_BUFFERS_PER_Q);
+		for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j) {
+			qeth_clear_output_buffer(card->qdio.out_qs[i],
+						 card->qdio.out_qs[i]->bufs[j]);
+		}
+		card->qdio.out_qs[i]->card = card;
+		card->qdio.out_qs[i]->next_buf_to_fill = 0;
+		card->qdio.out_qs[i]->do_pack = 0;
+		atomic_set(&card->qdio.out_qs[i]->used_buffers, 0);
+		atomic_set(&card->qdio.out_qs[i]->set_pci_flags_count, 0);
+		atomic_set(&card->qdio.out_qs[i]->state,
+			   QETH_OUT_Q_UNLOCKED);
+	}
+	return 0;
+}
+EXPORT_SYMBOL_GPL(qeth_init_qdio_queues);
+
+static __u8 qeth_get_ipa_adp_type(enum qeth_link_types link_type)
+{
+	switch (link_type) {
+	case QETH_LINK_TYPE_HSTR:
+		return 2;
+	default:
+		return 1;
+	}
+}
+
+static void qeth_fill_ipacmd_header(struct qeth_card *card,
+		struct qeth_ipa_cmd *cmd, __u8 command,
+		enum qeth_prot_versions prot)
+{
+	memset(cmd, 0, sizeof(struct qeth_ipa_cmd));
+	cmd->hdr.command = command;
+	cmd->hdr.initiator = IPA_CMD_INITIATOR_HOST;
+	/* cmd->hdr.seqno is set by qeth_send_control_data() */
+	cmd->hdr.adapter_type = qeth_get_ipa_adp_type(card->info.link_type);
+	cmd->hdr.rel_adapter_no = (u8) card->dev->dev_port;
+	if (card->options.layer2)
+		cmd->hdr.prim_version_no = 2;
+	else
+		cmd->hdr.prim_version_no = 1;
+	cmd->hdr.param_count = 1;
+	cmd->hdr.prot_version = prot;
+	cmd->hdr.ipa_supported = 0;
+	cmd->hdr.ipa_enabled = 0;
+}
+
+struct qeth_cmd_buffer *qeth_get_ipacmd_buffer(struct qeth_card *card,
+		enum qeth_ipa_cmds ipacmd, enum qeth_prot_versions prot)
+{
+	struct qeth_cmd_buffer *iob;
+
+	iob = qeth_get_buffer(&card->write);
+	if (iob) {
+		qeth_fill_ipacmd_header(card, __ipa_cmd(iob), ipacmd, prot);
+	} else {
+		dev_warn(&card->gdev->dev,
+			 "The qeth driver ran out of channel command buffers\n");
+		QETH_DBF_MESSAGE(1, "%s The qeth driver ran out of channel command buffers",
+				 dev_name(&card->gdev->dev));
+	}
+
+	return iob;
+}
+EXPORT_SYMBOL_GPL(qeth_get_ipacmd_buffer);
+
+void qeth_prepare_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob)
+{
+	u8 prot_type = qeth_mpc_select_prot_type(card);
+
+	memcpy(iob->data, IPA_PDU_HEADER, IPA_PDU_HEADER_SIZE);
+	memcpy(QETH_IPA_CMD_PROT_TYPE(iob->data), &prot_type, 1);
+	memcpy(QETH_IPA_CMD_DEST_ADDR(iob->data),
+	       &card->token.ulp_connection_r, QETH_MPC_TOKEN_LENGTH);
+}
+EXPORT_SYMBOL_GPL(qeth_prepare_ipa_cmd);
+
+/**
+ * qeth_send_ipa_cmd() - send an IPA command
+ *
+ * See qeth_send_control_data() for explanation of the arguments.
+ */
+int qeth_send_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob,
+		int (*reply_cb)(struct qeth_card *, struct qeth_reply*,
+			unsigned long),
+		void *reply_param)
+{
+	int rc;
+
+	QETH_CARD_TEXT(card, 4, "sendipa");
+	qeth_prepare_ipa_cmd(card, iob);
+	rc = qeth_send_control_data(card, IPA_CMD_LENGTH,
+						iob, reply_cb, reply_param);
+	if (rc == -ETIME) {
+		qeth_clear_ipacmd_list(card);
+		qeth_schedule_recovery(card);
+	}
+	return rc;
+}
+EXPORT_SYMBOL_GPL(qeth_send_ipa_cmd);
+
+static int qeth_send_startlan(struct qeth_card *card)
+{
+	int rc;
+	struct qeth_cmd_buffer *iob;
+
+	QETH_DBF_TEXT(SETUP, 2, "strtlan");
+
+	iob = qeth_get_ipacmd_buffer(card, IPA_CMD_STARTLAN, 0);
+	if (!iob)
+		return -ENOMEM;
+	rc = qeth_send_ipa_cmd(card, iob, NULL, NULL);
+	return rc;
+}
+
+static int qeth_setadpparms_inspect_rc(struct qeth_ipa_cmd *cmd)
+{
+	if (!cmd->hdr.return_code)
+		cmd->hdr.return_code =
+			cmd->data.setadapterparms.hdr.return_code;
+	return cmd->hdr.return_code;
+}
+
+static int qeth_query_setadapterparms_cb(struct qeth_card *card,
+		struct qeth_reply *reply, unsigned long data)
+{
+	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
+
+	QETH_CARD_TEXT(card, 3, "quyadpcb");
+	if (qeth_setadpparms_inspect_rc(cmd))
+		return 0;
+
+	if (cmd->data.setadapterparms.data.query_cmds_supp.lan_type & 0x7f) {
+		card->info.link_type =
+		      cmd->data.setadapterparms.data.query_cmds_supp.lan_type;
+		QETH_DBF_TEXT_(SETUP, 2, "lnk %d", card->info.link_type);
+	}
+	card->options.adp.supported_funcs =
+		cmd->data.setadapterparms.data.query_cmds_supp.supported_cmds;
+	return 0;
+}
+
+static struct qeth_cmd_buffer *qeth_get_adapter_cmd(struct qeth_card *card,
+		__u32 command, __u32 cmdlen)
+{
+	struct qeth_cmd_buffer *iob;
+	struct qeth_ipa_cmd *cmd;
+
+	iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SETADAPTERPARMS,
+				     QETH_PROT_IPV4);
+	if (iob) {
+		cmd = __ipa_cmd(iob);
+		cmd->data.setadapterparms.hdr.cmdlength = cmdlen;
+		cmd->data.setadapterparms.hdr.command_code = command;
+		cmd->data.setadapterparms.hdr.used_total = 1;
+		cmd->data.setadapterparms.hdr.seq_no = 1;
+	}
+
+	return iob;
+}
+
+static int qeth_query_setadapterparms(struct qeth_card *card)
+{
+	int rc;
+	struct qeth_cmd_buffer *iob;
+
+	QETH_CARD_TEXT(card, 3, "queryadp");
+	iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_COMMANDS_SUPPORTED,
+				   sizeof(struct qeth_ipacmd_setadpparms));
+	if (!iob)
+		return -ENOMEM;
+	rc = qeth_send_ipa_cmd(card, iob, qeth_query_setadapterparms_cb, NULL);
+	return rc;
+}
+
+static int qeth_query_ipassists_cb(struct qeth_card *card,
+		struct qeth_reply *reply, unsigned long data)
+{
+	struct qeth_ipa_cmd *cmd;
+
+	QETH_DBF_TEXT(SETUP, 2, "qipasscb");
+
+	cmd = (struct qeth_ipa_cmd *) data;
+
+	switch (cmd->hdr.return_code) {
+	case IPA_RC_NOTSUPP:
+	case IPA_RC_L2_UNSUPPORTED_CMD:
+		QETH_DBF_TEXT(SETUP, 2, "ipaunsup");
+		card->options.ipa4.supported_funcs |= IPA_SETADAPTERPARMS;
+		card->options.ipa6.supported_funcs |= IPA_SETADAPTERPARMS;
+		return 0;
+	default:
+		if (cmd->hdr.return_code) {
+			QETH_DBF_MESSAGE(1, "%s IPA_CMD_QIPASSIST: Unhandled "
+						"rc=%d\n",
+						dev_name(&card->gdev->dev),
+						cmd->hdr.return_code);
+			return 0;
+		}
+	}
+
+	if (cmd->hdr.prot_version == QETH_PROT_IPV4) {
+		card->options.ipa4.supported_funcs = cmd->hdr.ipa_supported;
+		card->options.ipa4.enabled_funcs = cmd->hdr.ipa_enabled;
+	} else if (cmd->hdr.prot_version == QETH_PROT_IPV6) {
+		card->options.ipa6.supported_funcs = cmd->hdr.ipa_supported;
+		card->options.ipa6.enabled_funcs = cmd->hdr.ipa_enabled;
+	} else
+		QETH_DBF_MESSAGE(1, "%s IPA_CMD_QIPASSIST: Flawed LIC detected\n",
+				 dev_name(&card->gdev->dev));
+	return 0;
+}
+
+static int qeth_query_ipassists(struct qeth_card *card,
+				enum qeth_prot_versions prot)
+{
+	int rc;
+	struct qeth_cmd_buffer *iob;
+
+	QETH_DBF_TEXT_(SETUP, 2, "qipassi%i", prot);
+	iob = qeth_get_ipacmd_buffer(card, IPA_CMD_QIPASSIST, prot);
+	if (!iob)
+		return -ENOMEM;
+	rc = qeth_send_ipa_cmd(card, iob, qeth_query_ipassists_cb, NULL);
+	return rc;
+}
+
+static int qeth_query_switch_attributes_cb(struct qeth_card *card,
+				struct qeth_reply *reply, unsigned long data)
+{
+	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
+	struct qeth_query_switch_attributes *attrs;
+	struct qeth_switch_info *sw_info;
+
+	QETH_CARD_TEXT(card, 2, "qswiatcb");
+	if (qeth_setadpparms_inspect_rc(cmd))
+		return 0;
+
+	sw_info = (struct qeth_switch_info *)reply->param;
+	attrs = &cmd->data.setadapterparms.data.query_switch_attributes;
+	sw_info->capabilities = attrs->capabilities;
+	sw_info->settings = attrs->settings;
+	QETH_CARD_TEXT_(card, 2, "%04x%04x", sw_info->capabilities,
+			sw_info->settings);
+	return 0;
+}
+
+int qeth_query_switch_attributes(struct qeth_card *card,
+				 struct qeth_switch_info *sw_info)
+{
+	struct qeth_cmd_buffer *iob;
+
+	QETH_CARD_TEXT(card, 2, "qswiattr");
+	if (!qeth_adp_supported(card, IPA_SETADP_QUERY_SWITCH_ATTRIBUTES))
+		return -EOPNOTSUPP;
+	if (!netif_carrier_ok(card->dev))
+		return -ENOMEDIUM;
+	iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_SWITCH_ATTRIBUTES,
+				sizeof(struct qeth_ipacmd_setadpparms_hdr));
+	if (!iob)
+		return -ENOMEM;
+	return qeth_send_ipa_cmd(card, iob,
+				qeth_query_switch_attributes_cb, sw_info);
+}
+
+static int qeth_query_setdiagass_cb(struct qeth_card *card,
+		struct qeth_reply *reply, unsigned long data)
+{
+	struct qeth_ipa_cmd *cmd;
+	__u16 rc;
+
+	cmd = (struct qeth_ipa_cmd *)data;
+	rc = cmd->hdr.return_code;
+	if (rc)
+		QETH_CARD_TEXT_(card, 2, "diagq:%x", rc);
+	else
+		card->info.diagass_support = cmd->data.diagass.ext;
+	return 0;
+}
+
+static int qeth_query_setdiagass(struct qeth_card *card)
+{
+	struct qeth_cmd_buffer *iob;
+	struct qeth_ipa_cmd    *cmd;
+
+	QETH_DBF_TEXT(SETUP, 2, "qdiagass");
+	iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SET_DIAG_ASS, 0);
+	if (!iob)
+		return -ENOMEM;
+	cmd = __ipa_cmd(iob);
+	cmd->data.diagass.subcmd_len = 16;
+	cmd->data.diagass.subcmd = QETH_DIAGS_CMD_QUERY;
+	return qeth_send_ipa_cmd(card, iob, qeth_query_setdiagass_cb, NULL);
+}
+
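+/* Fill the trap id with identifiers of this card's environment: chpid,
+ * ssid and devno, plus the LPAR number and VM guest name as reported by
+ * stsi (if available).
+ */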
+static void qeth_get_trap_id(struct qeth_card *card, struct qeth_trap_id *tid)
+{
+	unsigned long info = get_zeroed_page(GFP_KERNEL);
+	struct sysinfo_2_2_2 *info222 = (struct sysinfo_2_2_2 *)info;
+	struct sysinfo_3_2_2 *info322 = (struct sysinfo_3_2_2 *)info;
+	struct ccw_dev_id ccwid;
+	int level;
+
+	tid->chpid = card->info.chpid;
+	ccw_device_get_id(CARD_RDEV(card), &ccwid);
+	tid->ssid = ccwid.ssid;
+	tid->devno = ccwid.devno;
+	if (!info)
+		return;
+	level = stsi(NULL, 0, 0, 0);
+	if ((level >= 2) && (stsi(info222, 2, 2, 2) == 0))
+		tid->lparnr = info222->lpar_number;
+	if ((level >= 3) && (stsi(info322, 3, 2, 2) == 0)) {
+		EBCASC(info322->vm[0].name, sizeof(info322->vm[0].name));
+		memcpy(tid->vmname, info322->vm[0].name, sizeof(tid->vmname));
+	}
+	free_page(info);
+	return;
+}
+
+static int qeth_hw_trap_cb(struct qeth_card *card,
+		struct qeth_reply *reply, unsigned long data)
+{
+	struct qeth_ipa_cmd *cmd;
+	__u16 rc;
+
+	cmd = (struct qeth_ipa_cmd *)data;
+	rc = cmd->hdr.return_code;
+	if (rc)
+		QETH_CARD_TEXT_(card, 2, "trapc:%x", rc);
+	return 0;
+}
+
+int qeth_hw_trap(struct qeth_card *card, enum qeth_diags_trap_action action)
+{
+	struct qeth_cmd_buffer *iob;
+	struct qeth_ipa_cmd *cmd;
+
+	QETH_DBF_TEXT(SETUP, 2, "diagtrap");
+	iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SET_DIAG_ASS, 0);
+	if (!iob)
+		return -ENOMEM;
+	cmd = __ipa_cmd(iob);
+	cmd->data.diagass.subcmd_len = 80;
+	cmd->data.diagass.subcmd = QETH_DIAGS_CMD_TRAP;
+	cmd->data.diagass.type = 1;
+	cmd->data.diagass.action = action;
+	switch (action) {
+	case QETH_DIAGS_TRAP_ARM:
+		cmd->data.diagass.options = 0x0003;
+		cmd->data.diagass.ext = 0x00010000 +
+			sizeof(struct qeth_trap_id);
+		qeth_get_trap_id(card,
+			(struct qeth_trap_id *)cmd->data.diagass.cdata);
+		break;
+	case QETH_DIAGS_TRAP_DISARM:
+		cmd->data.diagass.options = 0x0001;
+		break;
+	case QETH_DIAGS_TRAP_CAPTURE:
+		break;
+	}
+	return qeth_send_ipa_cmd(card, iob, qeth_hw_trap_cb, NULL);
+}
+EXPORT_SYMBOL_GPL(qeth_hw_trap);
+
+static int qeth_check_qdio_errors(struct qeth_card *card,
+				  struct qdio_buffer *buf,
+				  unsigned int qdio_error,
+				  const char *dbftext)
+{
+	if (qdio_error) {
+		QETH_CARD_TEXT(card, 2, dbftext);
+		QETH_CARD_TEXT_(card, 2, " F15=%02X",
+			       buf->element[15].sflags);
+		QETH_CARD_TEXT_(card, 2, " F14=%02X",
+			       buf->element[14].sflags);
+		QETH_CARD_TEXT_(card, 2, " qerr=%X", qdio_error);
+		if ((buf->element[15].sflags) == 0x12) {
+			card->stats.rx_dropped++;
+			return 0;
+		} else
+			return 1;
+	}
+	return 0;
+}
+
+static void qeth_queue_input_buffer(struct qeth_card *card, int index)
+{
+	struct qeth_qdio_q *queue = card->qdio.in_q;
+	struct list_head *lh;
+	int count;
+	int i;
+	int rc;
+	int newcount = 0;
+
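+	/* the buffers between 'index' and next_buf_to_init are still owned
+	 * by the hardware; the remainder of the pool can be re-filled
+	 * (ring arithmetic modulo QDIO_MAX_BUFFERS_PER_Q) */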
+	count = (index < queue->next_buf_to_init) ?
+		card->qdio.in_buf_pool.buf_count -
+		(queue->next_buf_to_init - index) :
+		card->qdio.in_buf_pool.buf_count -
+		(queue->next_buf_to_init + QDIO_MAX_BUFFERS_PER_Q - index);
+	/* only requeue at a certain threshold to avoid SIGAs */
+	if (count >= QETH_IN_BUF_REQUEUE_THRESHOLD(card)) {
+		for (i = queue->next_buf_to_init;
+		     i < queue->next_buf_to_init + count; ++i) {
+			if (qeth_init_input_buffer(card,
+				&queue->bufs[i % QDIO_MAX_BUFFERS_PER_Q])) {
+				break;
+			} else {
+				newcount++;
+			}
+		}
+
+		if (newcount < count) {
+			/* we are short on memory, so we switch back to
+			 * traditional skb allocation and drop packets */
+			atomic_set(&card->force_alloc_skb, 3);
+			count = newcount;
+		} else {
+			atomic_add_unless(&card->force_alloc_skb, -1, 0);
+		}
+
+		if (!count) {
+			i = 0;
+			list_for_each(lh, &card->qdio.in_buf_pool.entry_list)
+				i++;
+			if (i == card->qdio.in_buf_pool.buf_count) {
+				QETH_CARD_TEXT(card, 2, "qsarbw");
+				card->reclaim_index = index;
+				schedule_delayed_work(
+					&card->buffer_reclaim_work,
+					QETH_RECLAIM_WORK_TIME);
+			}
+			return;
+		}
+
+		/*
+		 * According to the old code, requeueing all 128 buffers at
+		 * once should be avoided in order to benefit from PCI
+		 * avoidance. This function keeps at least one buffer (the
+		 * buffer at 'index') un-requeued -> this buffer is the first
+		 * buffer that will be requeued the next time.
+		 */
+		if (card->options.performance_stats) {
+			card->perf_stats.inbound_do_qdio_cnt++;
+			card->perf_stats.inbound_do_qdio_start_time =
+				qeth_get_micros();
+		}
+		rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, 0,
+			     queue->next_buf_to_init, count);
+		if (card->options.performance_stats)
+			card->perf_stats.inbound_do_qdio_time +=
+				qeth_get_micros() -
+				card->perf_stats.inbound_do_qdio_start_time;
+		if (rc) {
+			QETH_CARD_TEXT(card, 2, "qinberr");
+		}
+		queue->next_buf_to_init = (queue->next_buf_to_init + count) %
+					  QDIO_MAX_BUFFERS_PER_Q;
+	}
+}
+
+static void qeth_buffer_reclaim_work(struct work_struct *work)
+{
+	struct qeth_card *card = container_of(work, struct qeth_card,
+		buffer_reclaim_work.work);
+
+	QETH_CARD_TEXT_(card, 2, "brw:%x", card->reclaim_index);
+	qeth_queue_input_buffer(card, card->reclaim_index);
+}
+
+static void qeth_handle_send_error(struct qeth_card *card,
+		struct qeth_qdio_out_buffer *buffer, unsigned int qdio_err)
+{
+	int sbalf15 = buffer->buffer->element[15].sflags;
+
+	QETH_CARD_TEXT(card, 6, "hdsnderr");
+	if (card->info.type == QETH_CARD_TYPE_IQD) {
+		if (sbalf15 == 0) {
+			qdio_err = 0;
+		} else {
+			qdio_err = 1;
+		}
+	}
+	qeth_check_qdio_errors(card, buffer->buffer, qdio_err, "qouterr");
+
+	if (!qdio_err)
+		return;
+
+	if ((sbalf15 >= 15) && (sbalf15 <= 31))
+		return;
+
+	QETH_CARD_TEXT(card, 1, "lnkfail");
+	QETH_CARD_TEXT_(card, 1, "%04x %02x",
+		       (u16)qdio_err, (u8)sbalf15);
+}
+
+/**
+ * qeth_prep_flush_pack_buffer - Prepares flushing of a packing buffer.
+ * @queue: queue to check for packing buffer
+ *
+ * Returns number of buffers that were prepared for flush.
+ */
+static int qeth_prep_flush_pack_buffer(struct qeth_qdio_out_q *queue)
+{
+	struct qeth_qdio_out_buffer *buffer;
+
+	buffer = queue->bufs[queue->next_buf_to_fill];
+	if ((atomic_read(&buffer->state) == QETH_QDIO_BUF_EMPTY) &&
+	    (buffer->next_element_to_fill > 0)) {
+		/* it's a packing buffer */
+		atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED);
+		queue->next_buf_to_fill =
+			(queue->next_buf_to_fill + 1) % QDIO_MAX_BUFFERS_PER_Q;
+		return 1;
+	}
+	return 0;
+}
+
+/*
+ * Switches to packing state if the number of used buffers on a queue
+ * reaches a certain limit.
+ */
+static void qeth_switch_to_packing_if_needed(struct qeth_qdio_out_q *queue)
+{
+	if (!queue->do_pack) {
+		if (atomic_read(&queue->used_buffers)
+		    >= QETH_HIGH_WATERMARK_PACK){
+			/* switch non-PACKING -> PACKING */
+			QETH_CARD_TEXT(queue->card, 6, "np->pack");
+			if (queue->card->options.performance_stats)
+				queue->card->perf_stats.sc_dp_p++;
+			queue->do_pack = 1;
+		}
+	}
+}
+
+/*
+ * Switches from packing to non-packing mode. If there is a packing
+ * buffer on the queue this buffer will be prepared to be flushed.
+ * In that case 1 is returned to inform the caller. If no buffer
+ * has to be flushed, zero is returned.
+ */
+static int qeth_switch_to_nonpacking_if_needed(struct qeth_qdio_out_q *queue)
+{
+	if (queue->do_pack) {
+		if (atomic_read(&queue->used_buffers)
+		    <= QETH_LOW_WATERMARK_PACK) {
+			/* switch PACKING -> non-PACKING */
+			QETH_CARD_TEXT(queue->card, 6, "pack->np");
+			if (queue->card->options.performance_stats)
+				queue->card->perf_stats.sc_p_dp++;
+			queue->do_pack = 0;
+			return qeth_prep_flush_pack_buffer(queue);
+		}
+	}
+	return 0;
+}
+
+static void qeth_flush_buffers(struct qeth_qdio_out_q *queue, int index,
+			       int count)
+{
+	struct qeth_qdio_out_buffer *buf;
+	int rc;
+	int i;
+	unsigned int qdio_flags;
+
+	for (i = index; i < index + count; ++i) {
+		int bidx = i % QDIO_MAX_BUFFERS_PER_Q;
+		buf = queue->bufs[bidx];
+		buf->buffer->element[buf->next_element_to_fill - 1].eflags |=
+				SBAL_EFLAGS_LAST_ENTRY;
+
+		if (queue->bufstates)
+			queue->bufstates[bidx].user = buf;
+
+		if (queue->card->info.type == QETH_CARD_TYPE_IQD)
+			continue;
+
+		if (!queue->do_pack) {
+			if ((atomic_read(&queue->used_buffers) >=
+				(QETH_HIGH_WATERMARK_PACK -
+				 QETH_WATERMARK_PACK_FUZZ)) &&
+			    !atomic_read(&queue->set_pci_flags_count)) {
+				/* it's likely that we'll go to packing
+				 * mode soon */
+				atomic_inc(&queue->set_pci_flags_count);
+				buf->buffer->element[0].sflags |= SBAL_SFLAGS0_PCI_REQ;
+			}
+		} else {
+			if (!atomic_read(&queue->set_pci_flags_count)) {
+				/*
+				 * there's no outstanding PCI any more, so we
+				 * have to request a PCI to be sure that the
+				 * PCI will wake at some time in the future;
+				 * then we can flush packed buffers that might
+				 * still be hanging around, which can happen
+				 * if no further send was requested by the
+				 * stack
+				 */
+				atomic_inc(&queue->set_pci_flags_count);
+				buf->buffer->element[0].sflags |= SBAL_SFLAGS0_PCI_REQ;
+			}
+		}
+	}
+
+	netif_trans_update(queue->card->dev);
+	if (queue->card->options.performance_stats) {
+		queue->card->perf_stats.outbound_do_qdio_cnt++;
+		queue->card->perf_stats.outbound_do_qdio_start_time =
+			qeth_get_micros();
+	}
+	qdio_flags = QDIO_FLAG_SYNC_OUTPUT;
+	if (atomic_read(&queue->set_pci_flags_count))
+		qdio_flags |= QDIO_FLAG_PCI_OUT;
+	atomic_add(count, &queue->used_buffers);
+
+	rc = do_QDIO(CARD_DDEV(queue->card), qdio_flags,
+		     queue->queue_no, index, count);
+	if (queue->card->options.performance_stats)
+		queue->card->perf_stats.outbound_do_qdio_time +=
+			qeth_get_micros() -
+			queue->card->perf_stats.outbound_do_qdio_start_time;
+	if (rc) {
+		queue->card->stats.tx_errors += count;
+		/* ignore temporary SIGA errors without busy condition */
+		if (rc == -ENOBUFS)
+			return;
+		QETH_CARD_TEXT(queue->card, 2, "flushbuf");
+		QETH_CARD_TEXT_(queue->card, 2, " q%d", queue->queue_no);
+		QETH_CARD_TEXT_(queue->card, 2, " idx%d", index);
+		QETH_CARD_TEXT_(queue->card, 2, " c%d", count);
+		QETH_CARD_TEXT_(queue->card, 2, " err%d", rc);
+
+		/* this must not happen under normal circumstances. if it
+		 * happens something is really wrong -> recover */
+		qeth_schedule_recovery(queue->card);
+		return;
+	}
+	if (queue->card->options.performance_stats)
+		queue->card->perf_stats.bufs_sent += count;
+}
+
+static void qeth_check_outbound_queue(struct qeth_qdio_out_q *queue)
+{
+	int index;
+	int flush_cnt = 0;
+	int q_was_packing = 0;
+
+	/*
+	 * check if we have to switch to non-packing mode or if
+	 * we have to get a PCI flag out on the queue
+	 */
+	if ((atomic_read(&queue->used_buffers) <= QETH_LOW_WATERMARK_PACK) ||
+	    !atomic_read(&queue->set_pci_flags_count)) {
+		if (atomic_xchg(&queue->state, QETH_OUT_Q_LOCKED_FLUSH) ==
+				QETH_OUT_Q_UNLOCKED) {
+			/*
+			 * If we get in here, there was no action in
+			 * do_send_packet. So, we check if there is a
+			 * packing buffer to be flushed here.
+			 */
+			netif_stop_queue(queue->card->dev);
+			index = queue->next_buf_to_fill;
+			q_was_packing = queue->do_pack;
+			/* queue->do_pack may change */
+			barrier();
+			flush_cnt += qeth_switch_to_nonpacking_if_needed(queue);
+			if (!flush_cnt &&
+			    !atomic_read(&queue->set_pci_flags_count))
+				flush_cnt += qeth_prep_flush_pack_buffer(queue);
+			if (queue->card->options.performance_stats &&
+			    q_was_packing)
+				queue->card->perf_stats.bufs_sent_pack +=
+					flush_cnt;
+			if (flush_cnt)
+				qeth_flush_buffers(queue, index, flush_cnt);
+			atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED);
+		}
+	}
+}
+
+static void qeth_qdio_start_poll(struct ccw_device *ccwdev, int queue,
+				 unsigned long card_ptr)
+{
+	struct qeth_card *card = (struct qeth_card *)card_ptr;
+
+	if (card->dev->flags & IFF_UP)
+		napi_schedule(&card->napi);
+}
+
+int qeth_configure_cq(struct qeth_card *card, enum qeth_cq cq)
+{
+	int rc;
+
+	if (card->options.cq == QETH_CQ_NOTAVAILABLE) {
+		rc = -1;
+		goto out;
+	} else {
+		if (card->options.cq == cq) {
+			rc = 0;
+			goto out;
+		}
+
+		if (card->state != CARD_STATE_DOWN &&
+		    card->state != CARD_STATE_RECOVER) {
+			rc = -1;
+			goto out;
+		}
+
+		qeth_free_qdio_buffers(card);
+		card->options.cq = cq;
+		rc = 0;
+	}
+out:
+	return rc;
+}
+EXPORT_SYMBOL_GPL(qeth_configure_cq);
+
+static void qeth_qdio_cq_handler(struct qeth_card *card, unsigned int qdio_err,
+				 unsigned int queue, int first_element,
+				 int count)
+{
+	struct qeth_qdio_q *cq = card->qdio.c_q;
+	int i;
+	int rc;
+
+	if (!qeth_is_cq(card, queue))
+		goto out;
+
+	QETH_CARD_TEXT_(card, 5, "qcqhe%d", first_element);
+	QETH_CARD_TEXT_(card, 5, "qcqhc%d", count);
+	QETH_CARD_TEXT_(card, 5, "qcqherr%d", qdio_err);
+
+	if (qdio_err) {
+		netif_stop_queue(card->dev);
+		qeth_schedule_recovery(card);
+		goto out;
+	}
+
+	if (card->options.performance_stats) {
+		card->perf_stats.cq_cnt++;
+		card->perf_stats.cq_start_time = qeth_get_micros();
+	}
+
+	for (i = first_element; i < first_element + count; ++i) {
+		int bidx = i % QDIO_MAX_BUFFERS_PER_Q;
+		struct qdio_buffer *buffer = cq->qdio_bufs[bidx];
+		int e = 0;
+
+		while ((e < QDIO_MAX_ELEMENTS_PER_BUFFER) &&
+		       buffer->element[e].addr) {
+			unsigned long phys_aob_addr;
+
+			phys_aob_addr = (unsigned long) buffer->element[e].addr;
+			qeth_qdio_handle_aob(card, phys_aob_addr);
+			++e;
+		}
+		qeth_scrub_qdio_buffer(buffer, QDIO_MAX_ELEMENTS_PER_BUFFER);
+	}
+	rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, queue,
+		    card->qdio.c_q->next_buf_to_init,
+		    count);
+	if (rc) {
+		dev_warn(&card->gdev->dev,
+			"QDIO reported an error, rc=%i\n", rc);
+		QETH_CARD_TEXT(card, 2, "qcqherr");
+	}
+	card->qdio.c_q->next_buf_to_init = (card->qdio.c_q->next_buf_to_init
+				   + count) % QDIO_MAX_BUFFERS_PER_Q;
+
+	netif_wake_queue(card->dev);
+
+	if (card->options.performance_stats) {
+		int delta_t = qeth_get_micros();
+		delta_t -= card->perf_stats.cq_start_time;
+		card->perf_stats.cq_time += delta_t;
+	}
+out:
+	return;
+}
+
+static void qeth_qdio_input_handler(struct ccw_device *ccwdev,
+				    unsigned int qdio_err, int queue,
+				    int first_elem, int count,
+				    unsigned long card_ptr)
+{
+	struct qeth_card *card = (struct qeth_card *)card_ptr;
+
+	QETH_CARD_TEXT_(card, 2, "qihq%d", queue);
+	QETH_CARD_TEXT_(card, 2, "qiec%d", qdio_err);
+
+	if (qeth_is_cq(card, queue))
+		qeth_qdio_cq_handler(card, qdio_err, queue, first_elem, count);
+	else if (qdio_err)
+		qeth_schedule_recovery(card);
+}
+
+static void qeth_qdio_output_handler(struct ccw_device *ccwdev,
+				     unsigned int qdio_error, int __queue,
+				     int first_element, int count,
+				     unsigned long card_ptr)
+{
+	struct qeth_card *card        = (struct qeth_card *) card_ptr;
+	struct qeth_qdio_out_q *queue = card->qdio.out_qs[__queue];
+	struct qeth_qdio_out_buffer *buffer;
+	int i;
+
+	QETH_CARD_TEXT(card, 6, "qdouhdl");
+	if (qdio_error & QDIO_ERROR_FATAL) {
+		QETH_CARD_TEXT(card, 2, "achkcond");
+		netif_stop_queue(card->dev);
+		qeth_schedule_recovery(card);
+		return;
+	}
+	if (card->options.performance_stats) {
+		card->perf_stats.outbound_handler_cnt++;
+		card->perf_stats.outbound_handler_start_time =
+			qeth_get_micros();
+	}
+	for (i = first_element; i < (first_element + count); ++i) {
+		int bidx = i % QDIO_MAX_BUFFERS_PER_Q;
+		buffer = queue->bufs[bidx];
+		qeth_handle_send_error(card, buffer, qdio_error);
+
+		if (queue->bufstates &&
+		    (queue->bufstates[bidx].flags &
+		     QDIO_OUTBUF_STATE_FLAG_PENDING) != 0) {
+			WARN_ON_ONCE(card->options.cq != QETH_CQ_ENABLED);
+
+			if (atomic_cmpxchg(&buffer->state,
+					   QETH_QDIO_BUF_PRIMED,
+					   QETH_QDIO_BUF_PENDING) ==
+				QETH_QDIO_BUF_PRIMED) {
+				qeth_notify_skbs(queue, buffer,
+						 TX_NOTIFY_PENDING);
+			}
+			QETH_CARD_TEXT_(queue->card, 5, "pel%d", bidx);
+
+			/* prepare the queue slot for re-use: */
+			qeth_scrub_qdio_buffer(buffer->buffer,
+					       QETH_MAX_BUFFER_ELEMENTS(card));
+			if (qeth_init_qdio_out_buf(queue, bidx)) {
+				QETH_CARD_TEXT(card, 2, "outofbuf");
+				qeth_schedule_recovery(card);
+			}
+		} else {
+			if (card->options.cq == QETH_CQ_ENABLED) {
+				enum iucv_tx_notify n;
+
+				n = qeth_compute_cq_notification(
+					buffer->buffer->element[15].sflags, 0);
+				qeth_notify_skbs(queue, buffer, n);
+			}
+
+			qeth_clear_output_buffer(queue, buffer);
+		}
+		qeth_cleanup_handled_pending(queue, bidx, 0);
+	}
+	atomic_sub(count, &queue->used_buffers);
+	/* check if we need to do something on this outbound queue */
+	if (card->info.type != QETH_CARD_TYPE_IQD)
+		qeth_check_outbound_queue(queue);
+
+	netif_wake_queue(queue->card->dev);
+	if (card->options.performance_stats)
+		card->perf_stats.outbound_handler_time += qeth_get_micros() -
+			card->perf_stats.outbound_handler_start_time;
+}
+
+/* We cannot use outbound queue 3 for unicast packets on HiperSockets */
+static inline int qeth_cut_iqd_prio(struct qeth_card *card, int queue_num)
+{
+	if ((card->info.type == QETH_CARD_TYPE_IQD) && (queue_num == 3))
+		return 2;
+	return queue_num;
+}
+
+/* Note: Function assumes that we have 4 outbound queues. */
+int qeth_get_priority_queue(struct qeth_card *card, struct sk_buff *skb,
+			    int ipv)
+{
+	__be16 *tci;
+	u8 tos;
+
+	switch (card->qdio.do_prio_queueing) {
+	case QETH_PRIO_Q_ING_TOS:
+	case QETH_PRIO_Q_ING_PREC:
+		switch (ipv) {
+		case 4:
+			tos = ipv4_get_dsfield(ip_hdr(skb));
+			break;
+		case 6:
+			tos = ipv6_get_dsfield(ipv6_hdr(skb));
+			break;
+		default:
+			return card->qdio.default_out_queue;
+		}
+		if (card->qdio.do_prio_queueing == QETH_PRIO_Q_ING_PREC)
+			return qeth_cut_iqd_prio(card, ~tos >> 6 & 3);
+		if (tos & IPTOS_MINCOST)
+			return qeth_cut_iqd_prio(card, 3);
+		if (tos & IPTOS_RELIABILITY)
+			return 2;
+		if (tos & IPTOS_THROUGHPUT)
+			return 1;
+		if (tos & IPTOS_LOWDELAY)
+			return 0;
+		break;
+	case QETH_PRIO_Q_ING_SKB:
+		if (skb->priority > 5)
+			return 0;
+		return qeth_cut_iqd_prio(card, ~skb->priority >> 1 & 3);
+	case QETH_PRIO_Q_ING_VLAN:
+		tci = &((struct ethhdr *)skb->data)->h_proto;
+		if (be16_to_cpu(*tci) == ETH_P_8021Q)
+			return qeth_cut_iqd_prio(card,
+			~be16_to_cpu(*(tci + 1)) >> (VLAN_PRIO_SHIFT + 1) & 3);
+		break;
+	default:
+		break;
+	}
+	return card->qdio.default_out_queue;
+}
+EXPORT_SYMBOL_GPL(qeth_get_priority_queue);
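+
+/*
+ * A minimal sanity-check sketch (illustrative only, not part of the driver)
+ * for the QETH_PRIO_Q_ING_PREC mapping above: the inverted IP precedence
+ * bits (tos >> 6) select the queue, so a higher precedence maps to a lower
+ * -- i.e. more preferred -- queue number.
+ */
+#if 0
+static void qeth_prio_queue_example(void)
+{
+	u8 tos = 0xE0;	/* IP precedence 7, "network control" */
+
+	WARN_ON((~tos >> 6 & 3) != 0);	/* highest precedence -> queue 0 */
+	tos = 0x00;	/* IP precedence 0, "routine" */
+	WARN_ON((~tos >> 6 & 3) != 3);	/* lowest precedence -> queue 3 */
+}
+#endif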
+
+/**
+ * qeth_get_elements_for_frags() -	find number of SBALEs for skb frags.
+ * @skb:				SKB address
+ *
+ * Returns the number of pages, and thus QDIO buffer elements, needed to cover
+ * fragmented part of the SKB. Returns zero for linear SKB.
+ */
+int qeth_get_elements_for_frags(struct sk_buff *skb)
+{
+	int cnt, elements = 0;
+
+	for (cnt = 0; cnt < skb_shinfo(skb)->nr_frags; cnt++) {
+		struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[cnt];
+
+		elements += qeth_get_elements_for_range(
+			(addr_t)skb_frag_address(frag),
+			(addr_t)skb_frag_address(frag) + skb_frag_size(frag));
+	}
+	return elements;
+}
+EXPORT_SYMBOL_GPL(qeth_get_elements_for_frags);
+
+static unsigned int qeth_count_elements(struct sk_buff *skb, int data_offset)
+{
+	unsigned int elements = qeth_get_elements_for_frags(skb);
+	addr_t end = (addr_t)skb->data + skb_headlen(skb);
+	addr_t start = (addr_t)skb->data + data_offset;
+
+	if (start != end)
+		elements += qeth_get_elements_for_range(start, end);
+	return elements;
+}
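+
+/*
+ * Illustrative sketch (assumes 4K pages; not part of the driver): a buffer
+ * element can address at most one page, so the element count for a range is
+ * simply the number of pages the range touches.
+ */
+#if 0
+static void qeth_element_count_example(void)
+{
+	addr_t start = PAGE_SIZE - 1000;	/* 1000 bytes left in page 0 */
+	addr_t end = start + 3000;		/* spills over into page 1 */
+
+	/* The range touches two pages, hence two buffer elements. */
+	WARN_ON(qeth_get_elements_for_range(start, end) != 2);
+}
+#endif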
+
+/**
+ * qeth_get_elements_no() -	find number of SBALEs for skb data, inc. frags.
+ * @card:			qeth card structure, to check max. elems.
+ * @skb:			SKB address
+ * @extra_elems:		extra elems needed, to check against max.
+ * @data_offset:		range starts at skb->data + data_offset
+ *
+ * Returns the number of pages, and thus QDIO buffer elements, needed to cover
+ * skb data, including linear part and fragments. Checks if the result plus
+ * extra_elems fits under the limit for the card. Returns 0 if it does not.
+ * Note: extra_elems is not included in the returned result.
+ */
+int qeth_get_elements_no(struct qeth_card *card,
+		     struct sk_buff *skb, int extra_elems, int data_offset)
+{
+	int elements = qeth_count_elements(skb, data_offset);
+
+	if ((elements + extra_elems) > QETH_MAX_BUFFER_ELEMENTS(card)) {
+		QETH_DBF_MESSAGE(2, "Invalid size of IP packet "
+			"(Number=%d / Length=%d). Discarded.\n",
+			elements + extra_elems, skb->len);
+		return 0;
+	}
+	return elements;
+}
+EXPORT_SYMBOL_GPL(qeth_get_elements_no);
+
+int qeth_hdr_chk_and_bounce(struct sk_buff *skb, struct qeth_hdr **hdr, int len)
+{
+	int hroom, inpage, rest;
+
+	if (((unsigned long)skb->data & PAGE_MASK) !=
+	    (((unsigned long)skb->data + len - 1) & PAGE_MASK)) {
+		hroom = skb_headroom(skb);
+		inpage = PAGE_SIZE - ((unsigned long) skb->data % PAGE_SIZE);
+		rest = len - inpage;
+		if (rest > hroom)
+			return 1;
+		memmove(skb->data - rest, skb->data, skb_headlen(skb));
+		skb->data -= rest;
+		skb->tail -= rest;
+		*hdr = (struct qeth_hdr *)skb->data;
+		QETH_DBF_MESSAGE(2, "skb bounce len: %d rest: %d\n", len, rest);
+	}
+	return 0;
+}
+EXPORT_SYMBOL_GPL(qeth_hdr_chk_and_bounce);
+
+/**
+ * qeth_add_hw_header() - add a HW header to an skb.
+ * @skb: skb that the HW header should be added to.
+ * @hdr: double pointer to a qeth_hdr. When returning with >= 0,
+ *	 it contains a valid pointer to a qeth_hdr.
+ * @hdr_len: length of the HW header.
+ * @proto_len: length of protocol headers that need to be in same page as the
+ *	       HW header.
+ *
+ * Returns the pushed length. If the header can't be pushed on
+ * (e.g. because it would cross a page boundary), it is allocated from
+ * the cache instead and 0 is returned.
+ * The number of needed buffer elements is returned in @elements.
+ * An error while creating the hdr is indicated by a negative return value.
+ */
+int qeth_add_hw_header(struct qeth_card *card, struct sk_buff *skb,
+		       struct qeth_hdr **hdr, unsigned int hdr_len,
+		       unsigned int proto_len, unsigned int *elements)
+{
+	const unsigned int max_elements = QETH_MAX_BUFFER_ELEMENTS(card);
+	const unsigned int contiguous = proto_len ? proto_len : 1;
+	unsigned int __elements;
+	addr_t start, end;
+	bool push_ok;
+	int rc;
+
+check_layout:
+	start = (addr_t)skb->data - hdr_len;
+	end = (addr_t)skb->data;
+
+	if (qeth_get_elements_for_range(start, end + contiguous) == 1) {
+		/* Push HW header into same page as first protocol header. */
+		push_ok = true;
+		__elements = qeth_count_elements(skb, 0);
+	} else if (!proto_len && qeth_get_elements_for_range(start, end) == 1) {
+		/* Push HW header into a new page. */
+		push_ok = true;
+		__elements = 1 + qeth_count_elements(skb, 0);
+	} else {
+		/* Use header cache, copy protocol headers up. */
+		push_ok = false;
+		__elements = 1 + qeth_count_elements(skb, proto_len);
+	}
+
+	/* Compress skb to fit into one IO buffer: */
+	if (__elements > max_elements) {
+		if (!skb_is_nonlinear(skb)) {
+			/* Drop it, no easy way of shrinking it further. */
+			QETH_DBF_MESSAGE(2, "Dropped an oversized skb (Max Elements=%u / Actual=%u / Length=%u).\n",
+					 max_elements, __elements, skb->len);
+			return -E2BIG;
+		}
+
+		rc = skb_linearize(skb);
+		if (card->options.performance_stats) {
+			if (rc)
+				card->perf_stats.tx_linfail++;
+			else
+				card->perf_stats.tx_lin++;
+		}
+		if (rc)
+			return rc;
+
+		/* Linearization changed the layout, re-evaluate: */
+		goto check_layout;
+	}
+
+	*elements = __elements;
+	/* Add the header: */
+	if (push_ok) {
+		*hdr = skb_push(skb, hdr_len);
+		return hdr_len;
+	}
+	/* fall back */
+	*hdr = kmem_cache_alloc(qeth_core_header_cache, GFP_ATOMIC);
+	if (!*hdr)
+		return -ENOMEM;
+	/* Copy protocol headers behind HW header: */
+	skb_copy_from_linear_data(skb, ((char *)*hdr) + hdr_len, proto_len);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(qeth_add_hw_header);
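+
+/*
+ * Hedged usage sketch for qeth_add_hw_header() (illustrative, not an actual
+ * caller): a return value > 0 means the header was pushed into the skb
+ * itself, while 0 means it was taken from qeth_core_header_cache and the
+ * caller owns (and must eventually free) it.
+ */
+#if 0
+static int qeth_add_hw_header_example(struct qeth_card *card,
+				      struct sk_buff *skb)
+{
+	unsigned int elements;
+	struct qeth_hdr *hdr;
+	int push_len;
+
+	push_len = qeth_add_hw_header(card, skb, &hdr, sizeof(*hdr), 0,
+				      &elements);
+	if (push_len < 0)
+		return push_len;	/* -E2BIG, -ENOMEM, ... */
+
+	/* ... fill *hdr, then hand skb + hdr to the fill/flush path ... */
+
+	if (!push_len)	/* cache-allocated header, free it when done */
+		kmem_cache_free(qeth_core_header_cache, hdr);
+	return 0;
+}
+#endif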
+
+static void __qeth_fill_buffer(struct sk_buff *skb,
+			       struct qeth_qdio_out_buffer *buf,
+			       bool is_first_elem, unsigned int offset)
+{
+	struct qdio_buffer *buffer = buf->buffer;
+	int element = buf->next_element_to_fill;
+	int length = skb_headlen(skb) - offset;
+	char *data = skb->data + offset;
+	int length_here, cnt;
+
+	/* map linear part into buffer element(s) */
+	while (length > 0) {
+		/* length_here is the remaining amount of data in this page */
+		length_here = PAGE_SIZE - ((unsigned long) data % PAGE_SIZE);
+		if (length < length_here)
+			length_here = length;
+
+		buffer->element[element].addr = data;
+		buffer->element[element].length = length_here;
+		length -= length_here;
+		if (is_first_elem) {
+			is_first_elem = false;
+			if (length || skb_is_nonlinear(skb))
+				/* skb needs additional elements */
+				buffer->element[element].eflags =
+					SBAL_EFLAGS_FIRST_FRAG;
+			else
+				buffer->element[element].eflags = 0;
+		} else {
+			buffer->element[element].eflags =
+				SBAL_EFLAGS_MIDDLE_FRAG;
+		}
+		data += length_here;
+		element++;
+	}
+
+	/* map page frags into buffer element(s) */
+	for (cnt = 0; cnt < skb_shinfo(skb)->nr_frags; cnt++) {
+		skb_frag_t *frag = &skb_shinfo(skb)->frags[cnt];
+
+		data = skb_frag_address(frag);
+		length = skb_frag_size(frag);
+		while (length > 0) {
+			length_here = PAGE_SIZE -
+				((unsigned long) data % PAGE_SIZE);
+			if (length < length_here)
+				length_here = length;
+
+			buffer->element[element].addr = data;
+			buffer->element[element].length = length_here;
+			buffer->element[element].eflags =
+				SBAL_EFLAGS_MIDDLE_FRAG;
+			length -= length_here;
+			data += length_here;
+			element++;
+		}
+	}
+
+	if (buffer->element[element - 1].eflags)
+		buffer->element[element - 1].eflags = SBAL_EFLAGS_LAST_FRAG;
+	buf->next_element_to_fill = element;
+}
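+
+/*
+ * Illustrative element layout (not normative) produced above for a linear
+ * skb that spans two pages, with no dedicated header element:
+ *
+ *	element[0].eflags = SBAL_EFLAGS_FIRST_FRAG
+ *	element[1].eflags = SBAL_EFLAGS_LAST_FRAG	(rewritten from MIDDLE)
+ *
+ * A single-element skb keeps eflags == 0, which the final fix-up after the
+ * frag loop deliberately leaves untouched.
+ */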
+
+/**
+ * qeth_fill_buffer() - map skb into an output buffer
+ * @queue:	QDIO queue to submit the buffer on
+ * @buf:	buffer to transport the skb
+ * @skb:	skb to map into the buffer
+ * @hdr:	qeth_hdr for this skb. Either at skb->data, or allocated
+ *		from qeth_core_header_cache.
+ * @offset:	when mapping the skb, start at skb->data + offset
+ * @hd_len:	if > 0, build a dedicated header element of this size
+ */
+static int qeth_fill_buffer(struct qeth_qdio_out_q *queue,
+			    struct qeth_qdio_out_buffer *buf,
+			    struct sk_buff *skb, struct qeth_hdr *hdr,
+			    unsigned int offset, unsigned int hd_len)
+{
+	struct qdio_buffer *buffer = buf->buffer;
+	bool is_first_elem = true;
+	int flush_cnt = 0;
+
+	refcount_inc(&skb->users);
+	skb_queue_tail(&buf->skb_list, skb);
+
+	/* build dedicated header element */
+	if (hd_len) {
+		int element = buf->next_element_to_fill;
+		is_first_elem = false;
+
+		buffer->element[element].addr = hdr;
+		buffer->element[element].length = hd_len;
+		buffer->element[element].eflags = SBAL_EFLAGS_FIRST_FRAG;
+		/* remember to free cache-allocated qeth_hdr: */
+		buf->is_header[element] = ((void *)hdr != skb->data);
+		buf->next_element_to_fill++;
+	}
+
+	__qeth_fill_buffer(skb, buf, is_first_elem, offset);
+
+	if (!queue->do_pack) {
+		QETH_CARD_TEXT(queue->card, 6, "fillbfnp");
+		/* set state to PRIMED -> will be flushed */
+		atomic_set(&buf->state, QETH_QDIO_BUF_PRIMED);
+		flush_cnt = 1;
+	} else {
+		QETH_CARD_TEXT(queue->card, 6, "fillbfpa");
+		if (queue->card->options.performance_stats)
+			queue->card->perf_stats.skbs_sent_pack++;
+		if (buf->next_element_to_fill >=
+				QETH_MAX_BUFFER_ELEMENTS(queue->card)) {
+			/*
+			 * packed buffer is full -> set state PRIMED
+			 * -> will be flushed
+			 */
+			atomic_set(&buf->state, QETH_QDIO_BUF_PRIMED);
+			flush_cnt = 1;
+		}
+	}
+	return flush_cnt;
+}
+
+int qeth_do_send_packet_fast(struct qeth_qdio_out_q *queue, struct sk_buff *skb,
+			     struct qeth_hdr *hdr, unsigned int offset,
+			     unsigned int hd_len)
+{
+	int index = queue->next_buf_to_fill;
+	struct qeth_qdio_out_buffer *buffer = queue->bufs[index];
+
+	/*
+	 * check if buffer is empty to make sure that we do not 'overtake'
+	 * ourselves and try to fill a buffer that is already primed
+	 */
+	if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY)
+		return -EBUSY;
+	queue->next_buf_to_fill = (index + 1) % QDIO_MAX_BUFFERS_PER_Q;
+	qeth_fill_buffer(queue, buffer, skb, hdr, offset, hd_len);
+	qeth_flush_buffers(queue, index, 1);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(qeth_do_send_packet_fast);
+
+int qeth_do_send_packet(struct qeth_card *card, struct qeth_qdio_out_q *queue,
+			struct sk_buff *skb, struct qeth_hdr *hdr,
+			unsigned int offset, unsigned int hd_len,
+			int elements_needed)
+{
+	struct qeth_qdio_out_buffer *buffer;
+	int start_index;
+	int flush_count = 0;
+	int do_pack = 0;
+	int tmp;
+	int rc = 0;
+
+	/* spin until we get the queue ... */
+	while (atomic_cmpxchg(&queue->state, QETH_OUT_Q_UNLOCKED,
+			      QETH_OUT_Q_LOCKED) != QETH_OUT_Q_UNLOCKED);
+	start_index = queue->next_buf_to_fill;
+	buffer = queue->bufs[queue->next_buf_to_fill];
+	/*
+	 * check if buffer is empty to make sure that we do not 'overtake'
+	 * ourselves and try to fill a buffer that is already primed
+	 */
+	if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY) {
+		atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED);
+		return -EBUSY;
+	}
+	/* check if we need to switch packing state of this queue */
+	qeth_switch_to_packing_if_needed(queue);
+	if (queue->do_pack) {
+		do_pack = 1;
+		/* does packet fit in current buffer? */
+		if ((QETH_MAX_BUFFER_ELEMENTS(card) -
+		    buffer->next_element_to_fill) < elements_needed) {
+			/* ... no -> set state PRIMED */
+			atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED);
+			flush_count++;
+			queue->next_buf_to_fill =
+				(queue->next_buf_to_fill + 1) %
+				QDIO_MAX_BUFFERS_PER_Q;
+			buffer = queue->bufs[queue->next_buf_to_fill];
+			/* we did a step forward, so check buffer state
+			 * again */
+			if (atomic_read(&buffer->state) !=
+			    QETH_QDIO_BUF_EMPTY) {
+				qeth_flush_buffers(queue, start_index,
+							   flush_count);
+				atomic_set(&queue->state,
+						QETH_OUT_Q_UNLOCKED);
+				rc = -EBUSY;
+				goto out;
+			}
+		}
+	}
+	tmp = qeth_fill_buffer(queue, buffer, skb, hdr, offset, hd_len);
+	queue->next_buf_to_fill = (queue->next_buf_to_fill + tmp) %
+				  QDIO_MAX_BUFFERS_PER_Q;
+	flush_count += tmp;
+	if (flush_count)
+		qeth_flush_buffers(queue, start_index, flush_count);
+	else if (!atomic_read(&queue->set_pci_flags_count))
+		atomic_xchg(&queue->state, QETH_OUT_Q_LOCKED_FLUSH);
+	/*
+	 * queue->state will go from LOCKED -> UNLOCKED or from
+	 * LOCKED_FLUSH -> LOCKED if output_handler wanted to 'notify' us
+	 * (switch packing state or flush buffer to get another pci flag out).
+	 * In that case we will enter this loop
+	 */
+	while (atomic_dec_return(&queue->state)) {
+		start_index = queue->next_buf_to_fill;
+		/* check if we can go back to non-packing state */
+		tmp = qeth_switch_to_nonpacking_if_needed(queue);
+		/*
+		 * check if we need to flush a packing buffer to get a pci
+		 * flag out on the queue
+		 */
+		if (!tmp && !atomic_read(&queue->set_pci_flags_count))
+			tmp = qeth_prep_flush_pack_buffer(queue);
+		if (tmp) {
+			qeth_flush_buffers(queue, start_index, tmp);
+			flush_count += tmp;
+		}
+	}
+out:
+	/* at this point the queue is UNLOCKED again */
+	if (queue->card->options.performance_stats && do_pack)
+		queue->card->perf_stats.bufs_sent_pack += flush_count;
+
+	return rc;
+}
+EXPORT_SYMBOL_GPL(qeth_do_send_packet);
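+
+/*
+ * Illustrative walk-through of the queue->state handshake above, assuming
+ * the encoding QETH_OUT_Q_UNLOCKED=0 < QETH_OUT_Q_LOCKED=1 <
+ * QETH_OUT_Q_LOCKED_FLUSH=2 (the atomic_dec_return() loop relies on
+ * UNLOCKED being zero):
+ *
+ *	sender:      UNLOCKED -> LOCKED        (cmpxchg spin at the top)
+ *	IRQ context: LOCKED   -> LOCKED_FLUSH  ("please flush once more")
+ *	sender:      dec -> LOCKED   : non-zero, flush again, loop
+ *	             dec -> UNLOCKED : zero, leave the loop, queue released
+ */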
+
+static int qeth_setadp_promisc_mode_cb(struct qeth_card *card,
+		struct qeth_reply *reply, unsigned long data)
+{
+	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
+	struct qeth_ipacmd_setadpparms *setparms;
+
+	QETH_CARD_TEXT(card, 4, "prmadpcb");
+
+	setparms = &(cmd->data.setadapterparms);
+	if (qeth_setadpparms_inspect_rc(cmd)) {
+		QETH_CARD_TEXT_(card, 4, "prmrc%x", cmd->hdr.return_code);
+		setparms->data.mode = SET_PROMISC_MODE_OFF;
+	}
+	card->info.promisc_mode = setparms->data.mode;
+	return 0;
+}
+
+void qeth_setadp_promisc_mode(struct qeth_card *card)
+{
+	enum qeth_ipa_promisc_modes mode;
+	struct net_device *dev = card->dev;
+	struct qeth_cmd_buffer *iob;
+	struct qeth_ipa_cmd *cmd;
+
+	QETH_CARD_TEXT(card, 4, "setprom");
+
+	if (((dev->flags & IFF_PROMISC) &&
+	     (card->info.promisc_mode == SET_PROMISC_MODE_ON)) ||
+	    (!(dev->flags & IFF_PROMISC) &&
+	     (card->info.promisc_mode == SET_PROMISC_MODE_OFF)))
+		return;
+	mode = SET_PROMISC_MODE_OFF;
+	if (dev->flags & IFF_PROMISC)
+		mode = SET_PROMISC_MODE_ON;
+	QETH_CARD_TEXT_(card, 4, "mode:%x", mode);
+
+	iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_PROMISC_MODE,
+			sizeof(struct qeth_ipacmd_setadpparms_hdr) + 8);
+	if (!iob)
+		return;
+	cmd = __ipa_cmd(iob);
+	cmd->data.setadapterparms.data.mode = mode;
+	qeth_send_ipa_cmd(card, iob, qeth_setadp_promisc_mode_cb, NULL);
+}
+EXPORT_SYMBOL_GPL(qeth_setadp_promisc_mode);
+
+struct net_device_stats *qeth_get_stats(struct net_device *dev)
+{
+	struct qeth_card *card;
+
+	card = dev->ml_priv;
+
+	QETH_CARD_TEXT(card, 5, "getstat");
+
+	return &card->stats;
+}
+EXPORT_SYMBOL_GPL(qeth_get_stats);
+
+static int qeth_setadpparms_change_macaddr_cb(struct qeth_card *card,
+		struct qeth_reply *reply, unsigned long data)
+{
+	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
+
+	QETH_CARD_TEXT(card, 4, "chgmaccb");
+	if (qeth_setadpparms_inspect_rc(cmd))
+		return 0;
+
+	if (!card->options.layer2 ||
+	    !(card->info.mac_bits & QETH_LAYER2_MAC_READ)) {
+		ether_addr_copy(card->dev->dev_addr,
+				cmd->data.setadapterparms.data.change_addr.addr);
+		card->info.mac_bits |= QETH_LAYER2_MAC_READ;
+	}
+	return 0;
+}
+
+int qeth_setadpparms_change_macaddr(struct qeth_card *card)
+{
+	int rc;
+	struct qeth_cmd_buffer *iob;
+	struct qeth_ipa_cmd *cmd;
+
+	QETH_CARD_TEXT(card, 4, "chgmac");
+
+	iob = qeth_get_adapter_cmd(card, IPA_SETADP_ALTER_MAC_ADDRESS,
+				   sizeof(struct qeth_ipacmd_setadpparms_hdr) +
+				   sizeof(struct qeth_change_addr));
+	if (!iob)
+		return -ENOMEM;
+	cmd = __ipa_cmd(iob);
+	cmd->data.setadapterparms.data.change_addr.cmd = CHANGE_ADDR_READ_MAC;
+	cmd->data.setadapterparms.data.change_addr.addr_size = ETH_ALEN;
+	ether_addr_copy(cmd->data.setadapterparms.data.change_addr.addr,
+			card->dev->dev_addr);
+	rc = qeth_send_ipa_cmd(card, iob, qeth_setadpparms_change_macaddr_cb,
+			       NULL);
+	return rc;
+}
+EXPORT_SYMBOL_GPL(qeth_setadpparms_change_macaddr);
+
+static int qeth_setadpparms_set_access_ctrl_cb(struct qeth_card *card,
+		struct qeth_reply *reply, unsigned long data)
+{
+	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
+	struct qeth_set_access_ctrl *access_ctrl_req;
+	int fallback = *(int *)reply->param;
+
+	QETH_CARD_TEXT(card, 4, "setaccb");
+	if (cmd->hdr.return_code)
+		return 0;
+	qeth_setadpparms_inspect_rc(cmd);
+
+	access_ctrl_req = &cmd->data.setadapterparms.data.set_access_ctrl;
+	QETH_DBF_TEXT_(SETUP, 2, "setaccb");
+	QETH_DBF_TEXT_(SETUP, 2, "%s", card->gdev->dev.kobj.name);
+	QETH_DBF_TEXT_(SETUP, 2, "rc=%d",
+		cmd->data.setadapterparms.hdr.return_code);
+	if (cmd->data.setadapterparms.hdr.return_code !=
+						SET_ACCESS_CTRL_RC_SUCCESS)
+		QETH_DBF_MESSAGE(3, "ERR:SET_ACCESS_CTRL(%s,%d)==%d\n",
+				card->gdev->dev.kobj.name,
+				access_ctrl_req->subcmd_code,
+				cmd->data.setadapterparms.hdr.return_code);
+	switch (cmd->data.setadapterparms.hdr.return_code) {
+	case SET_ACCESS_CTRL_RC_SUCCESS:
+		if (card->options.isolation == ISOLATION_MODE_NONE) {
+			dev_info(&card->gdev->dev,
+			    "QDIO data connection isolation is deactivated\n");
+		} else {
+			dev_info(&card->gdev->dev,
+			    "QDIO data connection isolation is activated\n");
+		}
+		break;
+	case SET_ACCESS_CTRL_RC_ALREADY_NOT_ISOLATED:
+		QETH_DBF_MESSAGE(2, "%s QDIO data connection isolation already "
+				"deactivated\n", dev_name(&card->gdev->dev));
+		if (fallback)
+			card->options.isolation = card->options.prev_isolation;
+		break;
+	case SET_ACCESS_CTRL_RC_ALREADY_ISOLATED:
+		QETH_DBF_MESSAGE(2, "%s QDIO data connection isolation already"
+				" activated\n", dev_name(&card->gdev->dev));
+		if (fallback)
+			card->options.isolation = card->options.prev_isolation;
+		break;
+	case SET_ACCESS_CTRL_RC_NOT_SUPPORTED:
+		dev_err(&card->gdev->dev, "Adapter does not "
+			"support QDIO data connection isolation\n");
+		break;
+	case SET_ACCESS_CTRL_RC_NONE_SHARED_ADAPTER:
+		dev_err(&card->gdev->dev,
+			"Adapter is dedicated. "
+			"QDIO data connection isolation not supported\n");
+		if (fallback)
+			card->options.isolation = card->options.prev_isolation;
+		break;
+	case SET_ACCESS_CTRL_RC_ACTIVE_CHECKSUM_OFF:
+		dev_err(&card->gdev->dev,
+			"TSO does not permit QDIO data connection isolation\n");
+		if (fallback)
+			card->options.isolation = card->options.prev_isolation;
+		break;
+	case SET_ACCESS_CTRL_RC_REFLREL_UNSUPPORTED:
+		dev_err(&card->gdev->dev, "The adjacent switch port does not "
+			"support reflective relay mode\n");
+		if (fallback)
+			card->options.isolation = card->options.prev_isolation;
+		break;
+	case SET_ACCESS_CTRL_RC_REFLREL_FAILED:
+		dev_err(&card->gdev->dev, "The reflective relay mode cannot be "
+					"enabled at the adjacent switch port\n");
+		if (fallback)
+			card->options.isolation = card->options.prev_isolation;
+		break;
+	case SET_ACCESS_CTRL_RC_REFLREL_DEACT_FAILED:
+		dev_warn(&card->gdev->dev, "Turning off reflective relay mode "
+					"at the adjacent switch failed\n");
+		break;
+	default:
+		/* this should never happen */
+		if (fallback)
+			card->options.isolation = card->options.prev_isolation;
+		break;
+	}
+	return 0;
+}
+
+static int qeth_setadpparms_set_access_ctrl(struct qeth_card *card,
+		enum qeth_ipa_isolation_modes isolation, int fallback)
+{
+	int rc;
+	struct qeth_cmd_buffer *iob;
+	struct qeth_ipa_cmd *cmd;
+	struct qeth_set_access_ctrl *access_ctrl_req;
+
+	QETH_CARD_TEXT(card, 4, "setacctl");
+
+	QETH_DBF_TEXT_(SETUP, 2, "setacctl");
+	QETH_DBF_TEXT_(SETUP, 2, "%s", card->gdev->dev.kobj.name);
+
+	iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_ACCESS_CONTROL,
+				   sizeof(struct qeth_ipacmd_setadpparms_hdr) +
+				   sizeof(struct qeth_set_access_ctrl));
+	if (!iob)
+		return -ENOMEM;
+	cmd = __ipa_cmd(iob);
+	access_ctrl_req = &cmd->data.setadapterparms.data.set_access_ctrl;
+	access_ctrl_req->subcmd_code = isolation;
+
+	rc = qeth_send_ipa_cmd(card, iob, qeth_setadpparms_set_access_ctrl_cb,
+			       &fallback);
+	QETH_DBF_TEXT_(SETUP, 2, "rc=%d", rc);
+	return rc;
+}
+
+int qeth_set_access_ctrl_online(struct qeth_card *card, int fallback)
+{
+	int rc = 0;
+
+	QETH_CARD_TEXT(card, 4, "setactlo");
+
+	if ((card->info.type == QETH_CARD_TYPE_OSD ||
+	     card->info.type == QETH_CARD_TYPE_OSX) &&
+	     qeth_adp_supported(card, IPA_SETADP_SET_ACCESS_CONTROL)) {
+		rc = qeth_setadpparms_set_access_ctrl(card,
+			card->options.isolation, fallback);
+		if (rc) {
+			QETH_DBF_MESSAGE(3,
+				"IPA(SET_ACCESS_CTRL,%s,%d) send failed\n",
+				card->gdev->dev.kobj.name, rc);
+			rc = -EOPNOTSUPP;
+		}
+	} else if (card->options.isolation != ISOLATION_MODE_NONE) {
+		card->options.isolation = ISOLATION_MODE_NONE;
+
+		dev_err(&card->gdev->dev, "Adapter does not "
+			"support QDIO data connection isolation\n");
+		rc = -EOPNOTSUPP;
+	}
+	return rc;
+}
+EXPORT_SYMBOL_GPL(qeth_set_access_ctrl_online);
+
+void qeth_tx_timeout(struct net_device *dev)
+{
+	struct qeth_card *card;
+
+	card = dev->ml_priv;
+	QETH_CARD_TEXT(card, 4, "txtimeo");
+	card->stats.tx_errors++;
+	qeth_schedule_recovery(card);
+}
+EXPORT_SYMBOL_GPL(qeth_tx_timeout);
+
+static int qeth_mdio_read(struct net_device *dev, int phy_id, int regnum)
+{
+	struct qeth_card *card = dev->ml_priv;
+	int rc = 0;
+
+	switch (regnum) {
+	case MII_BMCR: /* Basic mode control register */
+		rc = BMCR_FULLDPLX;
+		if ((card->info.link_type != QETH_LINK_TYPE_GBIT_ETH) &&
+		    (card->info.link_type != QETH_LINK_TYPE_OSN) &&
+		    (card->info.link_type != QETH_LINK_TYPE_10GBIT_ETH))
+			rc |= BMCR_SPEED100;
+		break;
+	case MII_BMSR: /* Basic mode status register */
+		rc = BMSR_ERCAP | BMSR_ANEGCOMPLETE | BMSR_LSTATUS |
+		     BMSR_10HALF | BMSR_10FULL | BMSR_100HALF | BMSR_100FULL |
+		     BMSR_100BASE4;
+		break;
+	case MII_PHYSID1: /* PHYS ID 1 */
+		rc = (dev->dev_addr[0] << 16) | (dev->dev_addr[1] << 8) |
+		     dev->dev_addr[2];
+		rc = (rc >> 5) & 0xFFFF;
+		break;
+	case MII_PHYSID2: /* PHYS ID 2 */
+		rc = (dev->dev_addr[2] << 10) & 0xFFFF;
+		break;
+	case MII_ADVERTISE: /* Advertisement control reg */
+		rc = ADVERTISE_ALL;
+		break;
+	case MII_LPA: /* Link partner ability reg */
+		rc = LPA_10HALF | LPA_10FULL | LPA_100HALF | LPA_100FULL |
+		     LPA_100BASE4 | LPA_LPACK;
+		break;
+	case MII_EXPANSION: /* Expansion register */
+		break;
+	case MII_DCOUNTER: /* disconnect counter */
+		break;
+	case MII_FCSCOUNTER: /* false carrier counter */
+		break;
+	case MII_NWAYTEST: /* N-way auto-neg test register */
+		break;
+	case MII_RERRCOUNTER: /* rx error counter */
+		rc = card->stats.rx_errors;
+		break;
+	case MII_SREVISION: /* silicon revision */
+		break;
+	case MII_RESV1: /* reserved 1 */
+		break;
+	case MII_LBRERROR: /* loopback, rx, bypass error */
+		break;
+	case MII_PHYADDR: /* physical address */
+		break;
+	case MII_RESV2: /* reserved 2 */
+		break;
+	case MII_TPISTATUS: /* TPI status for 10mbps */
+		break;
+	case MII_NCONFIG: /* network interface config */
+		break;
+	default:
+		break;
+	}
+	return rc;
+}
+
+static int qeth_send_ipa_snmp_cmd(struct qeth_card *card,
+		struct qeth_cmd_buffer *iob, int len,
+		int (*reply_cb)(struct qeth_card *, struct qeth_reply *,
+			unsigned long),
+		void *reply_param)
+{
+	u16 s1, s2;
+
+	QETH_CARD_TEXT(card, 4, "sendsnmp");
+
+	memcpy(iob->data, IPA_PDU_HEADER, IPA_PDU_HEADER_SIZE);
+	memcpy(QETH_IPA_CMD_DEST_ADDR(iob->data),
+	       &card->token.ulp_connection_r, QETH_MPC_TOKEN_LENGTH);
+	/* adjust PDU length fields in IPA_PDU_HEADER */
+	s1 = (u32) IPA_PDU_HEADER_SIZE + len;
+	s2 = (u32) len;
+	memcpy(QETH_IPA_PDU_LEN_TOTAL(iob->data), &s1, 2);
+	memcpy(QETH_IPA_PDU_LEN_PDU1(iob->data), &s2, 2);
+	memcpy(QETH_IPA_PDU_LEN_PDU2(iob->data), &s2, 2);
+	memcpy(QETH_IPA_PDU_LEN_PDU3(iob->data), &s2, 2);
+	return qeth_send_control_data(card, IPA_PDU_HEADER_SIZE + len, iob,
+				      reply_cb, reply_param);
+}
+
+static int qeth_snmp_command_cb(struct qeth_card *card,
+		struct qeth_reply *reply, unsigned long sdata)
+{
+	struct qeth_ipa_cmd *cmd;
+	struct qeth_arp_query_info *qinfo;
+	unsigned char *data;
+	void *snmp_data;
+	__u16 data_len;
+
+	QETH_CARD_TEXT(card, 3, "snpcmdcb");
+
+	cmd = (struct qeth_ipa_cmd *) sdata;
+	data = (unsigned char *)((char *)cmd - reply->offset);
+	qinfo = (struct qeth_arp_query_info *) reply->param;
+
+	if (cmd->hdr.return_code) {
+		QETH_CARD_TEXT_(card, 4, "scer1%x", cmd->hdr.return_code);
+		return 0;
+	}
+	if (cmd->data.setadapterparms.hdr.return_code) {
+		cmd->hdr.return_code =
+			cmd->data.setadapterparms.hdr.return_code;
+		QETH_CARD_TEXT_(card, 4, "scer2%x", cmd->hdr.return_code);
+		return 0;
+	}
+	data_len = *((__u16 *)QETH_IPA_PDU_LEN_PDU1(data));
+	if (cmd->data.setadapterparms.hdr.seq_no == 1) {
+		snmp_data = &cmd->data.setadapterparms.data.snmp;
+		data_len -= offsetof(struct qeth_ipa_cmd,
+				     data.setadapterparms.data.snmp);
+	} else {
+		snmp_data = &cmd->data.setadapterparms.data.snmp.request;
+		data_len -= offsetof(struct qeth_ipa_cmd,
+				     data.setadapterparms.data.snmp.request);
+	}
+
+	/* check if there is enough room in userspace */
+	if ((qinfo->udata_len - qinfo->udata_offset) < data_len) {
+		QETH_CARD_TEXT_(card, 4, "scer3%i", -ENOMEM);
+		cmd->hdr.return_code = IPA_RC_ENOMEM;
+		return 0;
+	}
+	QETH_CARD_TEXT_(card, 4, "snore%i",
+		       cmd->data.setadapterparms.hdr.used_total);
+	QETH_CARD_TEXT_(card, 4, "sseqn%i",
+		cmd->data.setadapterparms.hdr.seq_no);
+	/* copy entries to user buffer */
+	memcpy(qinfo->udata + qinfo->udata_offset, snmp_data, data_len);
+	qinfo->udata_offset += data_len;
+
+	/* check if all replies received ... */
+	QETH_CARD_TEXT_(card, 4, "srtot%i",
+			cmd->data.setadapterparms.hdr.used_total);
+	QETH_CARD_TEXT_(card, 4, "srseq%i",
+			cmd->data.setadapterparms.hdr.seq_no);
+	if (cmd->data.setadapterparms.hdr.seq_no <
+	    cmd->data.setadapterparms.hdr.used_total)
+		return 1;
+	return 0;
+}
+
+static int qeth_snmp_command(struct qeth_card *card, char __user *udata)
+{
+	struct qeth_cmd_buffer *iob;
+	struct qeth_ipa_cmd *cmd;
+	struct qeth_snmp_ureq *ureq;
+	unsigned int req_len;
+	struct qeth_arp_query_info qinfo = {0, };
+	int rc = 0;
+
+	QETH_CARD_TEXT(card, 3, "snmpcmd");
+
+	if (card->info.guestlan)
+		return -EOPNOTSUPP;
+
+	if ((!qeth_adp_supported(card, IPA_SETADP_SET_SNMP_CONTROL)) &&
+	    (!card->options.layer2)) {
+		return -EOPNOTSUPP;
+	}
+	/* skip 4 bytes (data_len struct member) to get req_len */
+	if (copy_from_user(&req_len, udata + sizeof(int), sizeof(int)))
+		return -EFAULT;
+	if (req_len > (QETH_BUFSIZE - IPA_PDU_HEADER_SIZE -
+		       sizeof(struct qeth_ipacmd_hdr) -
+		       sizeof(struct qeth_ipacmd_setadpparms_hdr)))
+		return -EINVAL;
+	ureq = memdup_user(udata, req_len + sizeof(struct qeth_snmp_ureq_hdr));
+	if (IS_ERR(ureq)) {
+		QETH_CARD_TEXT(card, 2, "snmpnome");
+		return PTR_ERR(ureq);
+	}
+	qinfo.udata_len = ureq->hdr.data_len;
+	qinfo.udata = kzalloc(qinfo.udata_len, GFP_KERNEL);
+	if (!qinfo.udata) {
+		kfree(ureq);
+		return -ENOMEM;
+	}
+	qinfo.udata_offset = sizeof(struct qeth_snmp_ureq_hdr);
+
+	iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_SNMP_CONTROL,
+				   QETH_SNMP_SETADP_CMDLENGTH + req_len);
+	if (!iob) {
+		rc = -ENOMEM;
+		goto out;
+	}
+	cmd = __ipa_cmd(iob);
+	memcpy(&cmd->data.setadapterparms.data.snmp, &ureq->cmd, req_len);
+	rc = qeth_send_ipa_snmp_cmd(card, iob, QETH_SETADP_BASE_LEN + req_len,
+				    qeth_snmp_command_cb, (void *)&qinfo);
+	if (rc)
+		QETH_DBF_MESSAGE(2, "SNMP command failed on %s: (0x%x)\n",
+				 QETH_CARD_IFNAME(card), rc);
+	else if (copy_to_user(udata, qinfo.udata, qinfo.udata_len))
+		rc = -EFAULT;
+out:
+	kfree(ureq);
+	kfree(qinfo.udata);
+	return rc;
+}
+
+static int qeth_setadpparms_query_oat_cb(struct qeth_card *card,
+		struct qeth_reply *reply, unsigned long data)
+{
+	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *)data;
+	struct qeth_qoat_priv *priv;
+	char *resdata;
+	int resdatalen;
+
+	QETH_CARD_TEXT(card, 3, "qoatcb");
+	if (qeth_setadpparms_inspect_rc(cmd))
+		return 0;
+
+	priv = (struct qeth_qoat_priv *)reply->param;
+	resdatalen = cmd->data.setadapterparms.hdr.cmdlength;
+	resdata = (char *)data + 28;
+
+	if (resdatalen > (priv->buffer_len - priv->response_len)) {
+		cmd->hdr.return_code = IPA_RC_FFFF;
+		return 0;
+	}
+
+	memcpy((priv->buffer + priv->response_len), resdata,
+		resdatalen);
+	priv->response_len += resdatalen;
+
+	if (cmd->data.setadapterparms.hdr.seq_no <
+	    cmd->data.setadapterparms.hdr.used_total)
+		return 1;
+	return 0;
+}
+
+static int qeth_query_oat_command(struct qeth_card *card, char __user *udata)
+{
+	int rc = 0;
+	struct qeth_cmd_buffer *iob;
+	struct qeth_ipa_cmd *cmd;
+	struct qeth_query_oat *oat_req;
+	struct qeth_query_oat_data oat_data;
+	struct qeth_qoat_priv priv;
+	void __user *tmp;
+
+	QETH_CARD_TEXT(card, 3, "qoatcmd");
+
+	if (!qeth_adp_supported(card, IPA_SETADP_QUERY_OAT)) {
+		rc = -EOPNOTSUPP;
+		goto out;
+	}
+
+	if (copy_from_user(&oat_data, udata,
+			   sizeof(struct qeth_query_oat_data))) {
+		rc = -EFAULT;
+		goto out;
+	}
+
+	priv.buffer_len = oat_data.buffer_len;
+	priv.response_len = 0;
+	priv.buffer = vzalloc(oat_data.buffer_len);
+	if (!priv.buffer) {
+		rc = -ENOMEM;
+		goto out;
+	}
+
+	iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_OAT,
+				   sizeof(struct qeth_ipacmd_setadpparms_hdr) +
+				   sizeof(struct qeth_query_oat));
+	if (!iob) {
+		rc = -ENOMEM;
+		goto out_free;
+	}
+	cmd = __ipa_cmd(iob);
+	oat_req = &cmd->data.setadapterparms.data.query_oat;
+	oat_req->subcmd_code = oat_data.command;
+
+	rc = qeth_send_ipa_cmd(card, iob, qeth_setadpparms_query_oat_cb,
+			       &priv);
+	if (!rc) {
+		if (is_compat_task())
+			tmp = compat_ptr(oat_data.ptr);
+		else
+			tmp = (void __user *)(unsigned long)oat_data.ptr;
+
+		if (copy_to_user(tmp, priv.buffer,
+		    priv.response_len)) {
+			rc = -EFAULT;
+			goto out_free;
+		}
+
+		oat_data.response_len = priv.response_len;
+
+		if (copy_to_user(udata, &oat_data,
+		    sizeof(struct qeth_query_oat_data)))
+			rc = -EFAULT;
+	} else if (rc == IPA_RC_FFFF) {
+		rc = -EFAULT;
+	}
+
+out_free:
+	vfree(priv.buffer);
+out:
+	return rc;
+}
+
+static int qeth_query_card_info_cb(struct qeth_card *card,
+				   struct qeth_reply *reply, unsigned long data)
+{
+	struct carrier_info *carrier_info = (struct carrier_info *)reply->param;
+	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *)data;
+	struct qeth_query_card_info *card_info;
+
+	QETH_CARD_TEXT(card, 2, "qcrdincb");
+	if (qeth_setadpparms_inspect_rc(cmd))
+		return 0;
+
+	card_info = &cmd->data.setadapterparms.data.card_info;
+	carrier_info->card_type = card_info->card_type;
+	carrier_info->port_mode = card_info->port_mode;
+	carrier_info->port_speed = card_info->port_speed;
+	return 0;
+}
+
+static int qeth_query_card_info(struct qeth_card *card,
+				struct carrier_info *carrier_info)
+{
+	struct qeth_cmd_buffer *iob;
+
+	QETH_CARD_TEXT(card, 2, "qcrdinfo");
+	if (!qeth_adp_supported(card, IPA_SETADP_QUERY_CARD_INFO))
+		return -EOPNOTSUPP;
+	iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_CARD_INFO,
+		sizeof(struct qeth_ipacmd_setadpparms_hdr));
+	if (!iob)
+		return -ENOMEM;
+	return qeth_send_ipa_cmd(card, iob, qeth_query_card_info_cb,
+					(void *)carrier_info);
+}
+
+/**
+ * qeth_vm_request_mac() - Request a hypervisor-managed MAC address
+ * @card: pointer to a qeth_card
+ *
+ * Returns
+ *	0, if a MAC address has been set for the card's netdevice
+ *	an error code, for various error conditions
+ */
+int qeth_vm_request_mac(struct qeth_card *card)
+{
+	struct diag26c_mac_resp *response;
+	struct diag26c_mac_req *request;
+	struct ccw_dev_id id;
+	int rc;
+
+	QETH_DBF_TEXT(SETUP, 2, "vmreqmac");
+
+	request = kzalloc(sizeof(*request), GFP_KERNEL | GFP_DMA);
+	response = kzalloc(sizeof(*response), GFP_KERNEL | GFP_DMA);
+	if (!request || !response) {
+		rc = -ENOMEM;
+		goto out;
+	}
+
+	ccw_device_get_id(CARD_DDEV(card), &id);
+	request->resp_buf_len = sizeof(*response);
+	request->resp_version = DIAG26C_VERSION2;
+	request->op_code = DIAG26C_GET_MAC;
+	request->devno = id.devno;
+
+	QETH_DBF_HEX(CTRL, 2, request, sizeof(*request));
+	rc = diag26c(request, response, DIAG26C_MAC_SERVICES);
+	QETH_DBF_HEX(CTRL, 2, request, sizeof(*request));
+	if (rc)
+		goto out;
+	QETH_DBF_HEX(CTRL, 2, response, sizeof(*response));
+
+	if (request->resp_buf_len < sizeof(*response) ||
+	    response->version != request->resp_version) {
+		rc = -EIO;
+		QETH_DBF_TEXT(SETUP, 2, "badresp");
+		QETH_DBF_HEX(SETUP, 2, &request->resp_buf_len,
+			     sizeof(request->resp_buf_len));
+	} else if (!is_valid_ether_addr(response->mac)) {
+		rc = -EINVAL;
+		QETH_DBF_TEXT(SETUP, 2, "badmac");
+		QETH_DBF_HEX(SETUP, 2, response->mac, ETH_ALEN);
+	} else {
+		ether_addr_copy(card->dev->dev_addr, response->mac);
+	}
+
+out:
+	kfree(response);
+	kfree(request);
+	return rc;
+}
+EXPORT_SYMBOL_GPL(qeth_vm_request_mac);
+
+static int qeth_get_qdio_q_format(struct qeth_card *card)
+{
+	if (card->info.type == QETH_CARD_TYPE_IQD)
+		return QDIO_IQDIO_QFMT;
+	else
+		return QDIO_QETH_QFMT;
+}
+
+static void qeth_determine_capabilities(struct qeth_card *card)
+{
+	int rc;
+	int length;
+	char *prcd;
+	struct ccw_device *ddev;
+	int ddev_offline = 0;
+
+	QETH_DBF_TEXT(SETUP, 2, "detcapab");
+	ddev = CARD_DDEV(card);
+	if (!ddev->online) {
+		ddev_offline = 1;
+		rc = ccw_device_set_online(ddev);
+		if (rc) {
+			QETH_DBF_TEXT_(SETUP, 2, "3err%d", rc);
+			goto out;
+		}
+	}
+
+	rc = qeth_read_conf_data(card, (void **) &prcd, &length);
+	if (rc) {
+		QETH_DBF_MESSAGE(2, "%s qeth_read_conf_data returned %i\n",
+			dev_name(&card->gdev->dev), rc);
+		QETH_DBF_TEXT_(SETUP, 2, "5err%d", rc);
+		goto out_offline;
+	}
+	qeth_configure_unitaddr(card, prcd);
+	if (ddev_offline)
+		qeth_configure_blkt_default(card, prcd);
+	kfree(prcd);
+
+	rc = qdio_get_ssqd_desc(ddev, &card->ssqd);
+	if (rc)
+		QETH_DBF_TEXT_(SETUP, 2, "6err%d", rc);
+
+	QETH_DBF_TEXT_(SETUP, 2, "qfmt%d", card->ssqd.qfmt);
+	QETH_DBF_TEXT_(SETUP, 2, "ac1:%02x", card->ssqd.qdioac1);
+	QETH_DBF_TEXT_(SETUP, 2, "ac2:%04x", card->ssqd.qdioac2);
+	QETH_DBF_TEXT_(SETUP, 2, "ac3:%04x", card->ssqd.qdioac3);
+	QETH_DBF_TEXT_(SETUP, 2, "icnt%d", card->ssqd.icnt);
+	if (card->ssqd.qfmt == QDIO_IQDIO_QFMT &&
+	    (card->ssqd.qdioac1 & CHSC_AC1_INITIATE_INPUTQ) &&
+	    (card->ssqd.qdioac3 & CHSC_AC3_FORMAT2_CQ_AVAILABLE)) {
+		dev_info(&card->gdev->dev,
+			 "Completion Queueing supported\n");
+	} else {
+		card->options.cq = QETH_CQ_NOTAVAILABLE;
+	}
+
+out_offline:
+	if (ddev_offline == 1)
+		ccw_device_set_offline(ddev);
+out:
+	return;
+}
+
+static void qeth_qdio_establish_cq(struct qeth_card *card,
+				   struct qdio_buffer **in_sbal_ptrs,
+				   void (**queue_start_poll)
+					(struct ccw_device *, int,
+					 unsigned long))
+{
+	int i;
+
+	if (card->options.cq == QETH_CQ_ENABLED) {
+		int offset = QDIO_MAX_BUFFERS_PER_Q *
+			     (card->qdio.no_in_queues - 1);
+		for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; ++i) {
+			in_sbal_ptrs[offset + i] = (struct qdio_buffer *)
+				virt_to_phys(card->qdio.c_q->bufs[i].buffer);
+		}
+
+		queue_start_poll[card->qdio.no_in_queues - 1] = NULL;
+	}
+}
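+
+/*
+ * Illustrative layout (not normative): with one regular input queue plus an
+ * enabled CQ, no_in_queues == 2, so in_sbal_ptrs[] holds
+ * 2 * QDIO_MAX_BUFFERS_PER_Q entries -- the first 128 for the data queue,
+ * the next 128 for the CQ. That is why the offset above is
+ * QDIO_MAX_BUFFERS_PER_Q * (no_in_queues - 1): the CQ is always the last
+ * input queue.
+ */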
+
+static int qeth_qdio_establish(struct qeth_card *card)
+{
+	struct qdio_initialize init_data;
+	char *qib_param_field;
+	struct qdio_buffer **in_sbal_ptrs;
+	void (**queue_start_poll) (struct ccw_device *, int, unsigned long);
+	struct qdio_buffer **out_sbal_ptrs;
+	int i, j, k;
+	int rc = 0;
+
+	QETH_DBF_TEXT(SETUP, 2, "qdioest");
+
+	qib_param_field = kzalloc(QDIO_MAX_BUFFERS_PER_Q, GFP_KERNEL);
+	if (!qib_param_field) {
+		rc = -ENOMEM;
+		goto out_free_nothing;
+	}
+
+	qeth_create_qib_param_field(card, qib_param_field);
+	qeth_create_qib_param_field_blkt(card, qib_param_field);
+
+	in_sbal_ptrs = kcalloc(card->qdio.no_in_queues * QDIO_MAX_BUFFERS_PER_Q,
+			       sizeof(void *),
+			       GFP_KERNEL);
+	if (!in_sbal_ptrs) {
+		rc = -ENOMEM;
+		goto out_free_qib_param;
+	}
+	for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; ++i) {
+		in_sbal_ptrs[i] = (struct qdio_buffer *)
+			virt_to_phys(card->qdio.in_q->bufs[i].buffer);
+	}
+
+	queue_start_poll = kcalloc(card->qdio.no_in_queues, sizeof(void *),
+				   GFP_KERNEL);
+	if (!queue_start_poll) {
+		rc = -ENOMEM;
+		goto out_free_in_sbals;
+	}
+	for (i = 0; i < card->qdio.no_in_queues; ++i)
+		queue_start_poll[i] = qeth_qdio_start_poll;
+
+	qeth_qdio_establish_cq(card, in_sbal_ptrs, queue_start_poll);
+
+	out_sbal_ptrs =
+		kcalloc(card->qdio.no_out_queues * QDIO_MAX_BUFFERS_PER_Q,
+			sizeof(void *),
+			GFP_KERNEL);
+	if (!out_sbal_ptrs) {
+		rc = -ENOMEM;
+		goto out_free_queue_start_poll;
+	}
+	for (i = 0, k = 0; i < card->qdio.no_out_queues; ++i)
+		for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j, ++k) {
+			out_sbal_ptrs[k] = (struct qdio_buffer *)virt_to_phys(
+				card->qdio.out_qs[i]->bufs[j]->buffer);
+		}
+
+	memset(&init_data, 0, sizeof(struct qdio_initialize));
+	init_data.cdev                   = CARD_DDEV(card);
+	init_data.q_format               = qeth_get_qdio_q_format(card);
+	init_data.qib_param_field_format = 0;
+	init_data.qib_param_field        = qib_param_field;
+	init_data.no_input_qs            = card->qdio.no_in_queues;
+	init_data.no_output_qs           = card->qdio.no_out_queues;
+	init_data.input_handler		 = qeth_qdio_input_handler;
+	init_data.output_handler	 = qeth_qdio_output_handler;
+	init_data.queue_start_poll_array = queue_start_poll;
+	init_data.int_parm               = (unsigned long) card;
+	init_data.input_sbal_addr_array  = (void **) in_sbal_ptrs;
+	init_data.output_sbal_addr_array = (void **) out_sbal_ptrs;
+	init_data.output_sbal_state_array = card->qdio.out_bufstates;
+	init_data.scan_threshold =
+		(card->info.type == QETH_CARD_TYPE_IQD) ? 1 : 32;
+
+	if (atomic_cmpxchg(&card->qdio.state, QETH_QDIO_ALLOCATED,
+		QETH_QDIO_ESTABLISHED) == QETH_QDIO_ALLOCATED) {
+		rc = qdio_allocate(&init_data);
+		if (rc) {
+			atomic_set(&card->qdio.state, QETH_QDIO_ALLOCATED);
+			goto out;
+		}
+		rc = qdio_establish(&init_data);
+		if (rc) {
+			atomic_set(&card->qdio.state, QETH_QDIO_ALLOCATED);
+			qdio_free(CARD_DDEV(card));
+		}
+	}
+
+	switch (card->options.cq) {
+	case QETH_CQ_ENABLED:
+		dev_info(&card->gdev->dev, "Completion Queue support enabled\n");
+		break;
+	case QETH_CQ_DISABLED:
+		dev_info(&card->gdev->dev, "Completion Queue support disabled\n");
+		break;
+	default:
+		break;
+	}
+out:
+	kfree(out_sbal_ptrs);
+out_free_queue_start_poll:
+	kfree(queue_start_poll);
+out_free_in_sbals:
+	kfree(in_sbal_ptrs);
+out_free_qib_param:
+	kfree(qib_param_field);
+out_free_nothing:
+	return rc;
+}
+
+static void qeth_core_free_card(struct qeth_card *card)
+{
+	QETH_DBF_TEXT(SETUP, 2, "freecrd");
+	QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *));
+	qeth_clean_channel(&card->read);
+	qeth_clean_channel(&card->write);
+	qeth_clean_channel(&card->data);
+	qeth_free_qdio_buffers(card);
+	unregister_service_level(&card->qeth_service_level);
+	kfree(card);
+}
+
+void qeth_trace_features(struct qeth_card *card)
+{
+	QETH_CARD_TEXT(card, 2, "features");
+	QETH_CARD_HEX(card, 2, &card->options.ipa4, sizeof(card->options.ipa4));
+	QETH_CARD_HEX(card, 2, &card->options.ipa6, sizeof(card->options.ipa6));
+	QETH_CARD_HEX(card, 2, &card->options.adp, sizeof(card->options.adp));
+	QETH_CARD_HEX(card, 2, &card->info.diagass_support,
+		      sizeof(card->info.diagass_support));
+}
+EXPORT_SYMBOL_GPL(qeth_trace_features);
+
+static struct ccw_device_id qeth_ids[] = {
+	{CCW_DEVICE_DEVTYPE(0x1731, 0x01, 0x1732, 0x01),
+					.driver_info = QETH_CARD_TYPE_OSD},
+	{CCW_DEVICE_DEVTYPE(0x1731, 0x05, 0x1732, 0x05),
+					.driver_info = QETH_CARD_TYPE_IQD},
+	{CCW_DEVICE_DEVTYPE(0x1731, 0x06, 0x1732, 0x06),
+					.driver_info = QETH_CARD_TYPE_OSN},
+	{CCW_DEVICE_DEVTYPE(0x1731, 0x02, 0x1732, 0x03),
+					.driver_info = QETH_CARD_TYPE_OSM},
+	{CCW_DEVICE_DEVTYPE(0x1731, 0x02, 0x1732, 0x02),
+					.driver_info = QETH_CARD_TYPE_OSX},
+	{},
+};
+MODULE_DEVICE_TABLE(ccw, qeth_ids);
+
+static struct ccw_driver qeth_ccw_driver = {
+	.driver = {
+		.owner = THIS_MODULE,
+		.name = "qeth",
+	},
+	.ids = qeth_ids,
+	.probe = ccwgroup_probe_ccwdev,
+	.remove = ccwgroup_remove_ccwdev,
+};
+
+int qeth_core_hardsetup_card(struct qeth_card *card)
+{
+	int retries = 3;
+	int rc;
+
+	QETH_DBF_TEXT(SETUP, 2, "hrdsetup");
+	atomic_set(&card->force_alloc_skb, 0);
+	qeth_update_from_chp_desc(card);
+retry:
+	if (retries < 3)
+		QETH_DBF_MESSAGE(2, "%s Retrying to do IDX activates.\n",
+			dev_name(&card->gdev->dev));
+	rc = qeth_qdio_clear_card(card, card->info.type != QETH_CARD_TYPE_IQD);
+	ccw_device_set_offline(CARD_DDEV(card));
+	ccw_device_set_offline(CARD_WDEV(card));
+	ccw_device_set_offline(CARD_RDEV(card));
+	qdio_free(CARD_DDEV(card));
+	rc = ccw_device_set_online(CARD_RDEV(card));
+	if (rc)
+		goto retriable;
+	rc = ccw_device_set_online(CARD_WDEV(card));
+	if (rc)
+		goto retriable;
+	rc = ccw_device_set_online(CARD_DDEV(card));
+	if (rc)
+		goto retriable;
+retriable:
+	if (rc == -ERESTARTSYS) {
+		QETH_DBF_TEXT(SETUP, 2, "break1");
+		return rc;
+	} else if (rc) {
+		QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc);
+		if (--retries < 0)
+			goto out;
+		else
+			goto retry;
+	}
+	qeth_determine_capabilities(card);
+	qeth_init_tokens(card);
+	qeth_init_func_level(card);
+	rc = qeth_idx_activate_channel(&card->read, qeth_idx_read_cb);
+	if (rc == -ERESTARTSYS) {
+		QETH_DBF_TEXT(SETUP, 2, "break2");
+		return rc;
+	} else if (rc) {
+		QETH_DBF_TEXT_(SETUP, 2, "3err%d", rc);
+		if (--retries < 0)
+			goto out;
+		else
+			goto retry;
+	}
+	rc = qeth_idx_activate_channel(&card->write, qeth_idx_write_cb);
+	if (rc == -ERESTARTSYS) {
+		QETH_DBF_TEXT(SETUP, 2, "break3");
+		return rc;
+	} else if (rc) {
+		QETH_DBF_TEXT_(SETUP, 2, "4err%d", rc);
+		if (--retries < 0)
+			goto out;
+		else
+			goto retry;
+	}
+	card->read_or_write_problem = 0;
+	rc = qeth_mpc_initialize(card);
+	if (rc) {
+		QETH_DBF_TEXT_(SETUP, 2, "5err%d", rc);
+		goto out;
+	}
+
+	rc = qeth_send_startlan(card);
+	if (rc) {
+		QETH_DBF_TEXT_(SETUP, 2, "6err%d", rc);
+		if (rc == IPA_RC_LAN_OFFLINE) {
+			dev_warn(&card->gdev->dev,
+				"The LAN is offline\n");
+			card->lan_online = 0;
+		} else {
+			rc = -ENODEV;
+			goto out;
+		}
+	} else
+		card->lan_online = 1;
+
+	card->options.ipa4.supported_funcs = 0;
+	card->options.ipa6.supported_funcs = 0;
+	card->options.adp.supported_funcs = 0;
+	card->options.sbp.supported_funcs = 0;
+	card->info.diagass_support = 0;
+	rc = qeth_query_ipassists(card, QETH_PROT_IPV4);
+	if (rc == -ENOMEM)
+		goto out;
+	if (qeth_is_supported(card, IPA_IPV6)) {
+		rc = qeth_query_ipassists(card, QETH_PROT_IPV6);
+		if (rc == -ENOMEM)
+			goto out;
+	}
+	if (qeth_is_supported(card, IPA_SETADAPTERPARMS)) {
+		rc = qeth_query_setadapterparms(card);
+		if (rc < 0) {
+			QETH_DBF_TEXT_(SETUP, 2, "7err%d", rc);
+			goto out;
+		}
+	}
+	if (qeth_adp_supported(card, IPA_SETADP_SET_DIAG_ASSIST)) {
+		rc = qeth_query_setdiagass(card);
+		if (rc < 0) {
+			QETH_DBF_TEXT_(SETUP, 2, "8err%d", rc);
+			goto out;
+		}
+	}
+	return 0;
+out:
+	dev_warn(&card->gdev->dev, "The qeth device driver failed to recover "
+		"from an error on the device\n");
+	QETH_DBF_MESSAGE(2, "%s Initialization in hardsetup failed! rc=%d\n",
+		dev_name(&card->gdev->dev), rc);
+	return rc;
+}
+EXPORT_SYMBOL_GPL(qeth_core_hardsetup_card);
+
+static void qeth_create_skb_frag(struct qdio_buffer_element *element,
+				 struct sk_buff *skb, int offset, int data_len)
+{
+	struct page *page = virt_to_page(element->addr);
+	unsigned int next_frag;
+
+	/* first fill the linear space */
+	if (!skb->len) {
+		unsigned int linear = min(data_len, skb_tailroom(skb));
+
+		skb_put_data(skb, element->addr + offset, linear);
+		data_len -= linear;
+		if (!data_len)
+			return;
+		offset += linear;
+		/* fall through to add page frag for remaining data */
+	}
+
+	next_frag = skb_shinfo(skb)->nr_frags;
+	get_page(page);
+	skb_add_rx_frag(skb, next_frag, page, offset, data_len, data_len);
+}
+
+static inline int qeth_is_last_sbale(struct qdio_buffer_element *sbale)
+{
+	return (sbale->eflags & SBAL_EFLAGS_LAST_ENTRY);
+}
+
+struct sk_buff *qeth_core_get_next_skb(struct qeth_card *card,
+		struct qeth_qdio_buffer *qethbuffer,
+		struct qdio_buffer_element **__element, int *__offset,
+		struct qeth_hdr **hdr)
+{
+	struct qdio_buffer_element *element = *__element;
+	struct qdio_buffer *buffer = qethbuffer->buffer;
+	int offset = *__offset;
+	struct sk_buff *skb;
+	int skb_len = 0;
+	void *data_ptr;
+	int data_len;
+	int headroom = 0;
+	int use_rx_sg = 0;
+
+	/* qeth_hdr must not cross element boundaries */
+	while (element->length < offset + sizeof(struct qeth_hdr)) {
+		if (qeth_is_last_sbale(element))
+			return NULL;
+		element++;
+		offset = 0;
+	}
+	*hdr = element->addr + offset;
+
+	offset += sizeof(struct qeth_hdr);
+	switch ((*hdr)->hdr.l2.id) {
+	case QETH_HEADER_TYPE_LAYER2:
+		skb_len = (*hdr)->hdr.l2.pkt_length;
+		break;
+	case QETH_HEADER_TYPE_LAYER3:
+		skb_len = (*hdr)->hdr.l3.length;
+		headroom = ETH_HLEN;
+		break;
+	case QETH_HEADER_TYPE_OSN:
+		skb_len = (*hdr)->hdr.osn.pdu_length;
+		headroom = sizeof(struct qeth_hdr);
+		break;
+	default:
+		break;
+	}
+
+	if (!skb_len)
+		return NULL;
+
+	if (((skb_len >= card->options.rx_sg_cb) &&
+	     (!(card->info.type == QETH_CARD_TYPE_OSN)) &&
+	     (!atomic_read(&card->force_alloc_skb))) ||
+	    (card->options.cq == QETH_CQ_ENABLED))
+		use_rx_sg = 1;
+
+	if (use_rx_sg && qethbuffer->rx_skb) {
+		/* QETH_CQ_ENABLED only: */
+		skb = qethbuffer->rx_skb;
+		qethbuffer->rx_skb = NULL;
+	} else {
+		unsigned int linear = (use_rx_sg) ? QETH_RX_PULL_LEN : skb_len;
+
+		skb = napi_alloc_skb(&card->napi, linear + headroom);
+	}
+	if (!skb)
+		goto no_mem;
+	if (headroom)
+		skb_reserve(skb, headroom);
+
+	data_ptr = element->addr + offset;
+	while (skb_len) {
+		data_len = min(skb_len, (int)(element->length - offset));
+		if (data_len) {
+			if (use_rx_sg)
+				qeth_create_skb_frag(element, skb, offset,
+						     data_len);
+			else
+				skb_put_data(skb, data_ptr, data_len);
+		}
+		skb_len -= data_len;
+		if (skb_len) {
+			if (qeth_is_last_sbale(element)) {
+				QETH_CARD_TEXT(card, 4, "unexeob");
+				QETH_CARD_HEX(card, 2, buffer, sizeof(void *));
+				dev_kfree_skb_any(skb);
+				card->stats.rx_errors++;
+				return NULL;
+			}
+			element++;
+			offset = 0;
+			data_ptr = element->addr;
+		} else {
+			offset += data_len;
+		}
+	}
+	*__element = element;
+	*__offset = offset;
+	if (use_rx_sg && card->options.performance_stats) {
+		card->perf_stats.sg_skbs_rx++;
+		card->perf_stats.sg_frags_rx += skb_shinfo(skb)->nr_frags;
+	}
+	return skb;
+no_mem:
+	if (net_ratelimit())
+		QETH_CARD_TEXT(card, 2, "noskbmem");
+	card->stats.rx_dropped++;
+	return NULL;
+}
+EXPORT_SYMBOL_GPL(qeth_core_get_next_skb);
+
+int qeth_poll(struct napi_struct *napi, int budget)
+{
+	struct qeth_card *card = container_of(napi, struct qeth_card, napi);
+	int work_done = 0;
+	struct qeth_qdio_buffer *buffer;
+	int done;
+	int new_budget = budget;
+
+	if (card->options.performance_stats) {
+		card->perf_stats.inbound_cnt++;
+		card->perf_stats.inbound_start_time = qeth_get_micros();
+	}
+
+	while (1) {
+		if (!card->rx.b_count) {
+			card->rx.qdio_err = 0;
+			card->rx.b_count = qdio_get_next_buffers(
+				card->data.ccwdev, 0, &card->rx.b_index,
+				&card->rx.qdio_err);
+			if (card->rx.b_count <= 0) {
+				card->rx.b_count = 0;
+				break;
+			}
+			card->rx.b_element =
+				&card->qdio.in_q->bufs[card->rx.b_index]
+				.buffer->element[0];
+			card->rx.e_offset = 0;
+		}
+
+		while (card->rx.b_count) {
+			buffer = &card->qdio.in_q->bufs[card->rx.b_index];
+			if (!(card->rx.qdio_err &&
+			    qeth_check_qdio_errors(card, buffer->buffer,
+			    card->rx.qdio_err, "qinerr")))
+				work_done +=
+					card->discipline->process_rx_buffer(
+						card, new_budget, &done);
+			else
+				done = 1;
+
+			if (done) {
+				if (card->options.performance_stats)
+					card->perf_stats.bufs_rec++;
+				qeth_put_buffer_pool_entry(card,
+					buffer->pool_entry);
+				qeth_queue_input_buffer(card, card->rx.b_index);
+				card->rx.b_count--;
+				if (card->rx.b_count) {
+					card->rx.b_index =
+						(card->rx.b_index + 1) %
+						QDIO_MAX_BUFFERS_PER_Q;
+					card->rx.b_element =
+						&card->qdio.in_q
+						->bufs[card->rx.b_index]
+						.buffer->element[0];
+					card->rx.e_offset = 0;
+				}
+			}
+
+			if (work_done >= budget)
+				goto out;
+			else
+				new_budget = budget - work_done;
+		}
+	}
+
+	napi_complete_done(napi, work_done);
+	if (qdio_start_irq(card->data.ccwdev, 0))
+		napi_schedule(&card->napi);
+out:
+	if (card->options.performance_stats)
+		card->perf_stats.inbound_time += qeth_get_micros() -
+			card->perf_stats.inbound_start_time;
+	return work_done;
+}
+EXPORT_SYMBOL_GPL(qeth_poll);
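+
+/*
+ * Note on the re-poll pattern above (illustrative): when the budget is not
+ * exhausted, napi_complete_done() ends the polling round and
+ * qdio_start_irq() re-arms the device interrupt; a non-zero return from it
+ * indicates that new data arrived in the race window, so the NAPI instance
+ * is scheduled again right away instead of waiting for the next interrupt.
+ */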
+
+static int qeth_setassparms_inspect_rc(struct qeth_ipa_cmd *cmd)
+{
+	if (!cmd->hdr.return_code)
+		cmd->hdr.return_code = cmd->data.setassparms.hdr.return_code;
+	return cmd->hdr.return_code;
+}
+
+int qeth_setassparms_cb(struct qeth_card *card,
+			struct qeth_reply *reply, unsigned long data)
+{
+	struct qeth_ipa_cmd *cmd;
+
+	QETH_CARD_TEXT(card, 4, "defadpcb");
+
+	cmd = (struct qeth_ipa_cmd *) data;
+	if (cmd->hdr.return_code == 0) {
+		cmd->hdr.return_code = cmd->data.setassparms.hdr.return_code;
+		if (cmd->hdr.prot_version == QETH_PROT_IPV4)
+			card->options.ipa4.enabled_funcs = cmd->hdr.ipa_enabled;
+		if (cmd->hdr.prot_version == QETH_PROT_IPV6)
+			card->options.ipa6.enabled_funcs = cmd->hdr.ipa_enabled;
+	}
+	return 0;
+}
+EXPORT_SYMBOL_GPL(qeth_setassparms_cb);
+
+struct qeth_cmd_buffer *qeth_get_setassparms_cmd(struct qeth_card *card,
+						 enum qeth_ipa_funcs ipa_func,
+						 __u16 cmd_code, __u16 len,
+						 enum qeth_prot_versions prot)
+{
+	struct qeth_cmd_buffer *iob;
+	struct qeth_ipa_cmd *cmd;
+
+	QETH_CARD_TEXT(card, 4, "getasscm");
+	iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SETASSPARMS, prot);
+
+	if (iob) {
+		cmd = __ipa_cmd(iob);
+		cmd->data.setassparms.hdr.assist_no = ipa_func;
+		cmd->data.setassparms.hdr.length = 8 + len;
+		cmd->data.setassparms.hdr.command_code = cmd_code;
+		cmd->data.setassparms.hdr.return_code = 0;
+		cmd->data.setassparms.hdr.seq_no = 0;
+	}
+
+	return iob;
+}
+EXPORT_SYMBOL_GPL(qeth_get_setassparms_cmd);
+
+int qeth_send_setassparms(struct qeth_card *card,
+			  struct qeth_cmd_buffer *iob, __u16 len, long data,
+			  int (*reply_cb)(struct qeth_card *,
+					  struct qeth_reply *, unsigned long),
+			  void *reply_param)
+{
+	int rc;
+	struct qeth_ipa_cmd *cmd;
+
+	QETH_CARD_TEXT(card, 4, "sendassp");
+
+	cmd = __ipa_cmd(iob);
+	if (len <= sizeof(__u32))
+		cmd->data.setassparms.data.flags_32bit = (__u32) data;
+	else   /* (len > sizeof(__u32)) */
+		memcpy(&cmd->data.setassparms.data, (void *) data, len);
+
+	rc = qeth_send_ipa_cmd(card, iob, reply_cb, reply_param);
+	return rc;
+}
+EXPORT_SYMBOL_GPL(qeth_send_setassparms);
+
+int qeth_send_simple_setassparms_prot(struct qeth_card *card,
+				      enum qeth_ipa_funcs ipa_func,
+				      u16 cmd_code, long data,
+				      enum qeth_prot_versions prot)
+{
+	int rc;
+	int length = 0;
+	struct qeth_cmd_buffer *iob;
+
+	QETH_CARD_TEXT_(card, 4, "simassp%i", prot);
+	if (data)
+		length = sizeof(__u32);
+	iob = qeth_get_setassparms_cmd(card, ipa_func, cmd_code, length, prot);
+	if (!iob)
+		return -ENOMEM;
+	rc = qeth_send_setassparms(card, iob, length, data,
+				   qeth_setassparms_cb, NULL);
+	return rc;
+}
+EXPORT_SYMBOL_GPL(qeth_send_simple_setassparms_prot);
+
+static void qeth_unregister_dbf_views(void)
+{
+	int x;
+
+	for (x = 0; x < QETH_DBF_INFOS; x++) {
+		debug_unregister(qeth_dbf[x].id);
+		qeth_dbf[x].id = NULL;
+	}
+}
+
+void qeth_dbf_longtext(debug_info_t *id, int level, char *fmt, ...)
+{
+	char dbf_txt_buf[32];
+	va_list args;
+
+	if (!debug_level_enabled(id, level))
+		return;
+	va_start(args, fmt);
+	vsnprintf(dbf_txt_buf, sizeof(dbf_txt_buf), fmt, args);
+	va_end(args);
+	debug_text_event(id, level, dbf_txt_buf);
+}
+EXPORT_SYMBOL_GPL(qeth_dbf_longtext);
+
+static int qeth_register_dbf_views(void)
+{
+	int ret;
+	int x;
+
+	for (x = 0; x < QETH_DBF_INFOS; x++) {
+		/* register the areas */
+		qeth_dbf[x].id = debug_register(qeth_dbf[x].name,
+						qeth_dbf[x].pages,
+						qeth_dbf[x].areas,
+						qeth_dbf[x].len);
+		if (qeth_dbf[x].id == NULL) {
+			qeth_unregister_dbf_views();
+			return -ENOMEM;
+		}
+
+		/* register a view */
+		ret = debug_register_view(qeth_dbf[x].id, qeth_dbf[x].view);
+		if (ret) {
+			qeth_unregister_dbf_views();
+			return ret;
+		}
+
+		/* set a passing level */
+		debug_set_level(qeth_dbf[x].id, qeth_dbf[x].level);
+	}
+
+	return 0;
+}
+
+int qeth_core_load_discipline(struct qeth_card *card,
+		enum qeth_discipline_id discipline)
+{
+	int rc = 0;
+
+	mutex_lock(&qeth_mod_mutex);
+	switch (discipline) {
+	case QETH_DISCIPLINE_LAYER3:
+		card->discipline = try_then_request_module(
+			symbol_get(qeth_l3_discipline), "qeth_l3");
+		break;
+	case QETH_DISCIPLINE_LAYER2:
+		card->discipline = try_then_request_module(
+			symbol_get(qeth_l2_discipline), "qeth_l2");
+		break;
+	default:
+		break;
+	}
+
+	if (!card->discipline) {
+		dev_err(&card->gdev->dev,
+			"There is no kernel module to support discipline %d\n",
+			discipline);
+		rc = -EINVAL;
+	}
+	mutex_unlock(&qeth_mod_mutex);
+	return rc;
+}
+
+void qeth_core_free_discipline(struct qeth_card *card)
+{
+	if (card->options.layer2)
+		symbol_put(qeth_l2_discipline);
+	else
+		symbol_put(qeth_l3_discipline);
+	card->discipline = NULL;
+}
+
+const struct device_type qeth_generic_devtype = {
+	.name = "qeth_generic",
+	.groups = qeth_generic_attr_groups,
+};
+EXPORT_SYMBOL_GPL(qeth_generic_devtype);
+
+static const struct device_type qeth_osn_devtype = {
+	.name = "qeth_osn",
+	.groups = qeth_osn_attr_groups,
+};
+
+#define DBF_NAME_LEN	20
+
+struct qeth_dbf_entry {
+	char dbf_name[DBF_NAME_LEN];
+	debug_info_t *dbf_info;
+	struct list_head dbf_list;
+};
+
+static LIST_HEAD(qeth_dbf_list);
+static DEFINE_MUTEX(qeth_dbf_list_mutex);
+
+static debug_info_t *qeth_get_dbf_entry(char *name)
+{
+	struct qeth_dbf_entry *entry;
+	debug_info_t *rc = NULL;
+
+	mutex_lock(&qeth_dbf_list_mutex);
+	list_for_each_entry(entry, &qeth_dbf_list, dbf_list) {
+		if (strcmp(entry->dbf_name, name) == 0) {
+			rc = entry->dbf_info;
+			break;
+		}
+	}
+	mutex_unlock(&qeth_dbf_list_mutex);
+	return rc;
+}
+
+static int qeth_add_dbf_entry(struct qeth_card *card, char *name)
+{
+	struct qeth_dbf_entry *new_entry;
+
+	card->debug = debug_register(name, 2, 1, 8);
+	if (!card->debug) {
+		QETH_DBF_TEXT_(SETUP, 2, "%s", "qcdbf");
+		goto err;
+	}
+	if (debug_register_view(card->debug, &debug_hex_ascii_view))
+		goto err_dbg;
+	new_entry = kzalloc(sizeof(struct qeth_dbf_entry), GFP_KERNEL);
+	if (!new_entry)
+		goto err_dbg;
+	strncpy(new_entry->dbf_name, name, DBF_NAME_LEN);
+	new_entry->dbf_info = card->debug;
+	mutex_lock(&qeth_dbf_list_mutex);
+	list_add(&new_entry->dbf_list, &qeth_dbf_list);
+	mutex_unlock(&qeth_dbf_list_mutex);
+
+	return 0;
+
+err_dbg:
+	debug_unregister(card->debug);
+err:
+	return -ENOMEM;
+}
+
+static void qeth_clear_dbf_list(void)
+{
+	struct qeth_dbf_entry *entry, *tmp;
+
+	mutex_lock(&qeth_dbf_list_mutex);
+	list_for_each_entry_safe(entry, tmp, &qeth_dbf_list, dbf_list) {
+		list_del(&entry->dbf_list);
+		debug_unregister(entry->dbf_info);
+		kfree(entry);
+	}
+	mutex_unlock(&qeth_dbf_list_mutex);
+}
+
+static struct net_device *qeth_alloc_netdev(struct qeth_card *card)
+{
+	struct net_device *dev;
+
+	switch (card->info.type) {
+	case QETH_CARD_TYPE_IQD:
+		dev = alloc_netdev(0, "hsi%d", NET_NAME_UNKNOWN, ether_setup);
+		break;
+	case QETH_CARD_TYPE_OSN:
+		dev = alloc_netdev(0, "osn%d", NET_NAME_UNKNOWN, ether_setup);
+		break;
+	default:
+		dev = alloc_etherdev(0);
+	}
+
+	if (!dev)
+		return NULL;
+
+	dev->ml_priv = card;
+	dev->watchdog_timeo = QETH_TX_TIMEOUT;
+	dev->min_mtu = IS_OSN(card) ? 64 : 576;
+	/* initialized when device first goes online: */
+	dev->max_mtu = 0;
+	dev->mtu = 0;
+	SET_NETDEV_DEV(dev, &card->gdev->dev);
+	netif_carrier_off(dev);
+
+	if (!IS_OSN(card)) {
+		dev->priv_flags &= ~IFF_TX_SKB_SHARING;
+		dev->hw_features |= NETIF_F_SG;
+		dev->vlan_features |= NETIF_F_SG;
+		if (IS_IQD(card))
+			dev->features |= NETIF_F_SG;
+	}
+
+	return dev;
+}
+
+struct net_device *qeth_clone_netdev(struct net_device *orig)
+{
+	struct net_device *clone = qeth_alloc_netdev(orig->ml_priv);
+
+	if (!clone)
+		return NULL;
+
+	clone->dev_port = orig->dev_port;
+	return clone;
+}
+
+static int qeth_core_probe_device(struct ccwgroup_device *gdev)
+{
+	struct qeth_card *card;
+	struct device *dev;
+	int rc;
+	enum qeth_discipline_id enforced_disc;
+	unsigned long flags;
+	char dbf_name[DBF_NAME_LEN];
+
+	QETH_DBF_TEXT(SETUP, 2, "probedev");
+
+	dev = &gdev->dev;
+	if (!get_device(dev))
+		return -ENODEV;
+
+	QETH_DBF_TEXT_(SETUP, 2, "%s", dev_name(&gdev->dev));
+
+	card = qeth_alloc_card();
+	if (!card) {
+		QETH_DBF_TEXT_(SETUP, 2, "1err%d", -ENOMEM);
+		rc = -ENOMEM;
+		goto err_dev;
+	}
+
+	snprintf(dbf_name, sizeof(dbf_name), "qeth_card_%s",
+		dev_name(&gdev->dev));
+	card->debug = qeth_get_dbf_entry(dbf_name);
+	if (!card->debug) {
+		rc = qeth_add_dbf_entry(card, dbf_name);
+		if (rc)
+			goto err_card;
+	}
+
+	card->read.ccwdev  = gdev->cdev[0];
+	card->write.ccwdev = gdev->cdev[1];
+	card->data.ccwdev  = gdev->cdev[2];
+	dev_set_drvdata(&gdev->dev, card);
+	card->gdev = gdev;
+	gdev->cdev[0]->handler = qeth_irq;
+	gdev->cdev[1]->handler = qeth_irq;
+	gdev->cdev[2]->handler = qeth_irq;
+
+	qeth_setup_card(card);
+	qeth_update_from_chp_desc(card);
+
+	card->dev = qeth_alloc_netdev(card);
+	if (!card->dev) {
+		rc = -ENOMEM;
+		goto err_card;
+	}
+
+	qeth_determine_capabilities(card);
+	enforced_disc = qeth_enforce_discipline(card);
+	switch (enforced_disc) {
+	case QETH_DISCIPLINE_UNDETERMINED:
+		gdev->dev.type = &qeth_generic_devtype;
+		break;
+	default:
+		card->info.layer_enforced = true;
+		rc = qeth_core_load_discipline(card, enforced_disc);
+		if (rc)
+			goto err_load;
+
+		gdev->dev.type = (card->info.type != QETH_CARD_TYPE_OSN)
+					? card->discipline->devtype
+					: &qeth_osn_devtype;
+		rc = card->discipline->setup(card->gdev);
+		if (rc)
+			goto err_disc;
+		break;
+	}
+
+	write_lock_irqsave(&qeth_core_card_list.rwlock, flags);
+	list_add_tail(&card->list, &qeth_core_card_list.list);
+	write_unlock_irqrestore(&qeth_core_card_list.rwlock, flags);
+	return 0;
+
+err_disc:
+	qeth_core_free_discipline(card);
+err_load:
+	free_netdev(card->dev);
+err_card:
+	qeth_core_free_card(card);
+err_dev:
+	put_device(dev);
+	return rc;
+}
+
+static void qeth_core_remove_device(struct ccwgroup_device *gdev)
+{
+	unsigned long flags;
+	struct qeth_card *card = dev_get_drvdata(&gdev->dev);
+
+	QETH_DBF_TEXT(SETUP, 2, "removedv");
+
+	if (card->discipline) {
+		card->discipline->remove(gdev);
+		qeth_core_free_discipline(card);
+	}
+
+	write_lock_irqsave(&qeth_core_card_list.rwlock, flags);
+	list_del(&card->list);
+	write_unlock_irqrestore(&qeth_core_card_list.rwlock, flags);
+	free_netdev(card->dev);
+	qeth_core_free_card(card);
+	dev_set_drvdata(&gdev->dev, NULL);
+	put_device(&gdev->dev);
+}
+
+static int qeth_core_set_online(struct ccwgroup_device *gdev)
+{
+	struct qeth_card *card = dev_get_drvdata(&gdev->dev);
+	int rc = 0;
+	enum qeth_discipline_id def_discipline;
+
+	if (!card->discipline) {
+		if (card->info.type == QETH_CARD_TYPE_IQD)
+			def_discipline = QETH_DISCIPLINE_LAYER3;
+		else
+			def_discipline = QETH_DISCIPLINE_LAYER2;
+		rc = qeth_core_load_discipline(card, def_discipline);
+		if (rc)
+			goto err;
+		rc = card->discipline->setup(card->gdev);
+		if (rc) {
+			qeth_core_free_discipline(card);
+			goto err;
+		}
+	}
+	rc = card->discipline->set_online(gdev);
+err:
+	return rc;
+}
+
+static int qeth_core_set_offline(struct ccwgroup_device *gdev)
+{
+	struct qeth_card *card = dev_get_drvdata(&gdev->dev);
+
+	return card->discipline->set_offline(gdev);
+}
+
+static void qeth_core_shutdown(struct ccwgroup_device *gdev)
+{
+	struct qeth_card *card = dev_get_drvdata(&gdev->dev);
+
+	qeth_set_allowed_threads(card, 0, 1);
+	if ((gdev->state == CCWGROUP_ONLINE) && card->info.hwtrap)
+		qeth_hw_trap(card, QETH_DIAGS_TRAP_DISARM);
+	qeth_qdio_clear_card(card, 0);
+	qeth_clear_qdio_buffers(card);
+	qdio_free(CARD_DDEV(card));
+}
+
+static int qeth_core_freeze(struct ccwgroup_device *gdev)
+{
+	struct qeth_card *card = dev_get_drvdata(&gdev->dev);
+
+	if (card->discipline && card->discipline->freeze)
+		return card->discipline->freeze(gdev);
+	return 0;
+}
+
+static int qeth_core_thaw(struct ccwgroup_device *gdev)
+{
+	struct qeth_card *card = dev_get_drvdata(&gdev->dev);
+
+	if (card->discipline && card->discipline->thaw)
+		return card->discipline->thaw(gdev);
+	return 0;
+}
+
+static int qeth_core_restore(struct ccwgroup_device *gdev)
+{
+	struct qeth_card *card = dev_get_drvdata(&gdev->dev);
+
+	if (card->discipline && card->discipline->restore)
+		return card->discipline->restore(gdev);
+	return 0;
+}
+
+static ssize_t group_store(struct device_driver *ddrv, const char *buf,
+			   size_t count)
+{
+	int err;
+
+	err = ccwgroup_create_dev(qeth_core_root_dev, to_ccwgroupdrv(ddrv), 3,
+				  buf);
+
+	return err ? err : count;
+}
+static DRIVER_ATTR_WO(group);
+
+static struct attribute *qeth_drv_attrs[] = {
+	&driver_attr_group.attr,
+	NULL,
+};
+static struct attribute_group qeth_drv_attr_group = {
+	.attrs = qeth_drv_attrs,
+};
+static const struct attribute_group *qeth_drv_attr_groups[] = {
+	&qeth_drv_attr_group,
+	NULL,
+};
+
+static struct ccwgroup_driver qeth_core_ccwgroup_driver = {
+	.driver = {
+		.groups = qeth_drv_attr_groups,
+		.owner = THIS_MODULE,
+		.name = "qeth",
+	},
+	.ccw_driver = &qeth_ccw_driver,
+	.setup = qeth_core_probe_device,
+	.remove = qeth_core_remove_device,
+	.set_online = qeth_core_set_online,
+	.set_offline = qeth_core_set_offline,
+	.shutdown = qeth_core_shutdown,
+	.prepare = NULL,
+	.complete = NULL,
+	.freeze = qeth_core_freeze,
+	.thaw = qeth_core_thaw,
+	.restore = qeth_core_restore,
+};
+
+int qeth_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+	struct qeth_card *card = dev->ml_priv;
+	struct mii_ioctl_data *mii_data;
+	int rc = 0;
+
+	if (!card)
+		return -ENODEV;
+
+	if (!qeth_card_hw_is_reachable(card))
+		return -ENODEV;
+
+	if (card->info.type == QETH_CARD_TYPE_OSN)
+		return -EPERM;
+
+	switch (cmd) {
+	case SIOC_QETH_ADP_SET_SNMP_CONTROL:
+		rc = qeth_snmp_command(card, rq->ifr_ifru.ifru_data);
+		break;
+	case SIOC_QETH_GET_CARD_TYPE:
+		if ((card->info.type == QETH_CARD_TYPE_OSD ||
+		     card->info.type == QETH_CARD_TYPE_OSM ||
+		     card->info.type == QETH_CARD_TYPE_OSX) &&
+		    !card->info.guestlan)
+			return 1;
+		else
+			return 0;
+	case SIOCGMIIPHY:
+		mii_data = if_mii(rq);
+		mii_data->phy_id = 0;
+		break;
+	case SIOCGMIIREG:
+		mii_data = if_mii(rq);
+		if (mii_data->phy_id != 0)
+			rc = -EINVAL;
+		else
+			mii_data->val_out = qeth_mdio_read(dev,
+				mii_data->phy_id, mii_data->reg_num);
+		break;
+	case SIOC_QETH_QUERY_OAT:
+		rc = qeth_query_oat_command(card, rq->ifr_ifru.ifru_data);
+		break;
+	default:
+		if (card->discipline->do_ioctl)
+			rc = card->discipline->do_ioctl(dev, rq, cmd);
+		else
+			rc = -EOPNOTSUPP;
+	}
+	if (rc)
+		QETH_CARD_TEXT_(card, 2, "ioce%x", rc);
+	return rc;
+}
+EXPORT_SYMBOL_GPL(qeth_do_ioctl);
+
+static struct {
+	const char str[ETH_GSTRING_LEN];
+} qeth_ethtool_stats_keys[] = {
+/*  0 */{"rx skbs"},
+	{"rx buffers"},
+	{"tx skbs"},
+	{"tx buffers"},
+	{"tx skbs no packing"},
+	{"tx buffers no packing"},
+	{"tx skbs packing"},
+	{"tx buffers packing"},
+	{"tx sg skbs"},
+	{"tx buffer elements"},
+/* 10 */{"rx sg skbs"},
+	{"rx sg frags"},
+	{"rx sg page allocs"},
+	{"tx large kbytes"},
+	{"tx large count"},
+	{"tx pk state ch n->p"},
+	{"tx pk state ch p->n"},
+	{"tx pk watermark low"},
+	{"tx pk watermark high"},
+	{"queue 0 buffer usage"},
+/* 20 */{"queue 1 buffer usage"},
+	{"queue 2 buffer usage"},
+	{"queue 3 buffer usage"},
+	{"rx poll time"},
+	{"rx poll count"},
+	{"rx do_QDIO time"},
+	{"rx do_QDIO count"},
+	{"tx handler time"},
+	{"tx handler count"},
+	{"tx time"},
+/* 30 */{"tx count"},
+	{"tx do_QDIO time"},
+	{"tx do_QDIO count"},
+	{"tx csum"},
+	{"tx lin"},
+	{"tx linfail"},
+	{"cq handler count"},
+	{"cq handler time"},
+	{"rx csum"}
+};
+
+int qeth_core_get_sset_count(struct net_device *dev, int stringset)
+{
+	switch (stringset) {
+	case ETH_SS_STATS:
+		return (sizeof(qeth_ethtool_stats_keys) / ETH_GSTRING_LEN);
+	default:
+		return -EINVAL;
+	}
+}
+EXPORT_SYMBOL_GPL(qeth_core_get_sset_count);
+
+void qeth_core_get_ethtool_stats(struct net_device *dev,
+		struct ethtool_stats *stats, u64 *data)
+{
+	struct qeth_card *card = dev->ml_priv;
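+
+	/* index order must match qeth_ethtool_stats_keys above: */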
+	data[0] = card->stats.rx_packets -
+				card->perf_stats.initial_rx_packets;
+	data[1] = card->perf_stats.bufs_rec;
+	data[2] = card->stats.tx_packets -
+				card->perf_stats.initial_tx_packets;
+	data[3] = card->perf_stats.bufs_sent;
+	data[4] = card->stats.tx_packets - card->perf_stats.initial_tx_packets
+			- card->perf_stats.skbs_sent_pack;
+	data[5] = card->perf_stats.bufs_sent - card->perf_stats.bufs_sent_pack;
+	data[6] = card->perf_stats.skbs_sent_pack;
+	data[7] = card->perf_stats.bufs_sent_pack;
+	data[8] = card->perf_stats.sg_skbs_sent;
+	data[9] = card->perf_stats.buf_elements_sent;
+	data[10] = card->perf_stats.sg_skbs_rx;
+	data[11] = card->perf_stats.sg_frags_rx;
+	data[12] = card->perf_stats.sg_alloc_page_rx;
+	data[13] = (card->perf_stats.large_send_bytes >> 10);
+	data[14] = card->perf_stats.large_send_cnt;
+	data[15] = card->perf_stats.sc_dp_p;
+	data[16] = card->perf_stats.sc_p_dp;
+	data[17] = QETH_LOW_WATERMARK_PACK;
+	data[18] = QETH_HIGH_WATERMARK_PACK;
+	data[19] = atomic_read(&card->qdio.out_qs[0]->used_buffers);
+	data[20] = (card->qdio.no_out_queues > 1) ?
+			atomic_read(&card->qdio.out_qs[1]->used_buffers) : 0;
+	data[21] = (card->qdio.no_out_queues > 2) ?
+			atomic_read(&card->qdio.out_qs[2]->used_buffers) : 0;
+	data[22] = (card->qdio.no_out_queues > 3) ?
+			atomic_read(&card->qdio.out_qs[3]->used_buffers) : 0;
+	data[23] = card->perf_stats.inbound_time;
+	data[24] = card->perf_stats.inbound_cnt;
+	data[25] = card->perf_stats.inbound_do_qdio_time;
+	data[26] = card->perf_stats.inbound_do_qdio_cnt;
+	data[27] = card->perf_stats.outbound_handler_time;
+	data[28] = card->perf_stats.outbound_handler_cnt;
+	data[29] = card->perf_stats.outbound_time;
+	data[30] = card->perf_stats.outbound_cnt;
+	data[31] = card->perf_stats.outbound_do_qdio_time;
+	data[32] = card->perf_stats.outbound_do_qdio_cnt;
+	data[33] = card->perf_stats.tx_csum;
+	data[34] = card->perf_stats.tx_lin;
+	data[35] = card->perf_stats.tx_linfail;
+	data[36] = card->perf_stats.cq_cnt;
+	data[37] = card->perf_stats.cq_time;
+	data[38] = card->perf_stats.rx_csum;
+}
+EXPORT_SYMBOL_GPL(qeth_core_get_ethtool_stats);
+
+void qeth_core_get_strings(struct net_device *dev, u32 stringset, u8 *data)
+{
+	switch (stringset) {
+	case ETH_SS_STATS:
+		memcpy(data, &qeth_ethtool_stats_keys,
+			sizeof(qeth_ethtool_stats_keys));
+		break;
+	default:
+		WARN_ON(1);
+		break;
+	}
+}
+EXPORT_SYMBOL_GPL(qeth_core_get_strings);
+
+void qeth_core_get_drvinfo(struct net_device *dev,
+		struct ethtool_drvinfo *info)
+{
+	struct qeth_card *card = dev->ml_priv;
+
+	strlcpy(info->driver, card->options.layer2 ? "qeth_l2" : "qeth_l3",
+		sizeof(info->driver));
+	strlcpy(info->version, "1.0", sizeof(info->version));
+	strlcpy(info->fw_version, card->info.mcl_level,
+		sizeof(info->fw_version));
+	snprintf(info->bus_info, sizeof(info->bus_info), "%s/%s/%s",
+		 CARD_RDEV_ID(card), CARD_WDEV_ID(card), CARD_DDEV_ID(card));
+}
+EXPORT_SYMBOL_GPL(qeth_core_get_drvinfo);
+
+/* Helper function to fill 'advertising' and 'supported' which are the same.
+ * Autoneg and full-duplex are supported and advertised unconditionally.
+ * Always advertise and support all speeds up to the specified maximum, and
+ * only the one specified port type.
+ */
+static void qeth_set_cmd_adv_sup(struct ethtool_link_ksettings *cmd,
+				int maxspeed, int porttype)
+{
+	ethtool_link_ksettings_zero_link_mode(cmd, supported);
+	ethtool_link_ksettings_zero_link_mode(cmd, advertising);
+	ethtool_link_ksettings_zero_link_mode(cmd, lp_advertising);
+
+	ethtool_link_ksettings_add_link_mode(cmd, supported, Autoneg);
+	ethtool_link_ksettings_add_link_mode(cmd, advertising, Autoneg);
+
+	switch (porttype) {
+	case PORT_TP:
+		ethtool_link_ksettings_add_link_mode(cmd, supported, TP);
+		ethtool_link_ksettings_add_link_mode(cmd, advertising, TP);
+		break;
+	case PORT_FIBRE:
+		ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE);
+		ethtool_link_ksettings_add_link_mode(cmd, advertising, FIBRE);
+		break;
+	default:
+		ethtool_link_ksettings_add_link_mode(cmd, supported, TP);
+		ethtool_link_ksettings_add_link_mode(cmd, advertising, TP);
+		WARN_ON_ONCE(1);
+	}
+
+	/* fallthrough from high to low, to select all legal speeds: */
+	switch (maxspeed) {
+	case SPEED_10000:
+		ethtool_link_ksettings_add_link_mode(cmd, supported,
+						     10000baseT_Full);
+		ethtool_link_ksettings_add_link_mode(cmd, advertising,
+						     10000baseT_Full);
+	case SPEED_1000:
+		ethtool_link_ksettings_add_link_mode(cmd, supported,
+						     1000baseT_Full);
+		ethtool_link_ksettings_add_link_mode(cmd, advertising,
+						     1000baseT_Full);
+		ethtool_link_ksettings_add_link_mode(cmd, supported,
+						     1000baseT_Half);
+		ethtool_link_ksettings_add_link_mode(cmd, advertising,
+						     1000baseT_Half);
+	case SPEED_100:
+		ethtool_link_ksettings_add_link_mode(cmd, supported,
+						     100baseT_Full);
+		ethtool_link_ksettings_add_link_mode(cmd, advertising,
+						     100baseT_Full);
+		ethtool_link_ksettings_add_link_mode(cmd, supported,
+						     100baseT_Half);
+		ethtool_link_ksettings_add_link_mode(cmd, advertising,
+						     100baseT_Half);
+	case SPEED_10:
+		ethtool_link_ksettings_add_link_mode(cmd, supported,
+						     10baseT_Full);
+		ethtool_link_ksettings_add_link_mode(cmd, advertising,
+						     10baseT_Full);
+		ethtool_link_ksettings_add_link_mode(cmd, supported,
+						     10baseT_Half);
+		ethtool_link_ksettings_add_link_mode(cmd, advertising,
+						     10baseT_Half);
+		/* end fallthrough */
+		break;
+	default:
+		ethtool_link_ksettings_add_link_mode(cmd, supported,
+						     10baseT_Full);
+		ethtool_link_ksettings_add_link_mode(cmd, advertising,
+						     10baseT_Full);
+		ethtool_link_ksettings_add_link_mode(cmd, supported,
+						     10baseT_Half);
+		ethtool_link_ksettings_add_link_mode(cmd, advertising,
+						     10baseT_Half);
+		WARN_ON_ONCE(1);
+	}
+}
+
+int qeth_core_ethtool_get_link_ksettings(struct net_device *netdev,
+		struct ethtool_link_ksettings *cmd)
+{
+	struct qeth_card *card = netdev->ml_priv;
+	enum qeth_link_types link_type;
+	struct carrier_info carrier_info;
+	int rc;
+
+	if ((card->info.type == QETH_CARD_TYPE_IQD) || (card->info.guestlan))
+		link_type = QETH_LINK_TYPE_10GBIT_ETH;
+	else
+		link_type = card->info.link_type;
+
+	cmd->base.duplex = DUPLEX_FULL;
+	cmd->base.autoneg = AUTONEG_ENABLE;
+	cmd->base.phy_address = 0;
+	cmd->base.mdio_support = 0;
+	cmd->base.eth_tp_mdix = ETH_TP_MDI_INVALID;
+	cmd->base.eth_tp_mdix_ctrl = ETH_TP_MDI_INVALID;
+
+	switch (link_type) {
+	case QETH_LINK_TYPE_FAST_ETH:
+	case QETH_LINK_TYPE_LANE_ETH100:
+		cmd->base.speed = SPEED_100;
+		cmd->base.port = PORT_TP;
+		break;
+	case QETH_LINK_TYPE_GBIT_ETH:
+	case QETH_LINK_TYPE_LANE_ETH1000:
+		cmd->base.speed = SPEED_1000;
+		cmd->base.port = PORT_FIBRE;
+		break;
+	case QETH_LINK_TYPE_10GBIT_ETH:
+		cmd->base.speed = SPEED_10000;
+		cmd->base.port = PORT_FIBRE;
+		break;
+	default:
+		cmd->base.speed = SPEED_10;
+		cmd->base.port = PORT_TP;
+	}
+	qeth_set_cmd_adv_sup(cmd, cmd->base.speed, cmd->base.port);
+
+	/* Check if we can obtain more accurate information.
+	 * If the QUERY_CARD_INFO command is not supported or fails,
+	 * just return the heuristics that were filled in above.
+	 */
+	if (!qeth_card_hw_is_reachable(card))
+		return -ENODEV;
+	rc = qeth_query_card_info(card, &carrier_info);
+	if (rc == -EOPNOTSUPP) /* for old hardware, return heuristic */
+		return 0;
+	if (rc) /* report error from the hardware operation */
+		return rc;
+	/* on success, fill in the information got from the hardware */
+
+	netdev_dbg(netdev,
+		   "card info: card_type=0x%02x, port_mode=0x%04x, port_speed=0x%08x\n",
+		   carrier_info.card_type, carrier_info.port_mode,
+		   carrier_info.port_speed);
+
+	/* Update attributes for which we've obtained more authoritative
+	 * information, and leave the rest the way they were filled in above.
+	 */
+	switch (carrier_info.card_type) {
+	case CARD_INFO_TYPE_1G_COPPER_A:
+	case CARD_INFO_TYPE_1G_COPPER_B:
+		cmd->base.port = PORT_TP;
+		qeth_set_cmd_adv_sup(cmd, SPEED_1000, cmd->base.port);
+		break;
+	case CARD_INFO_TYPE_1G_FIBRE_A:
+	case CARD_INFO_TYPE_1G_FIBRE_B:
+		cmd->base.port = PORT_FIBRE;
+		qeth_set_cmd_adv_sup(cmd, SPEED_1000, cmd->base.port);
+		break;
+	case CARD_INFO_TYPE_10G_FIBRE_A:
+	case CARD_INFO_TYPE_10G_FIBRE_B:
+		cmd->base.port = PORT_FIBRE;
+		qeth_set_cmd_adv_sup(cmd, SPEED_10000, cmd->base.port);
+		break;
+	}
+
+	switch (carrier_info.port_mode) {
+	case CARD_INFO_PORTM_FULLDUPLEX:
+		cmd->base.duplex = DUPLEX_FULL;
+		break;
+	case CARD_INFO_PORTM_HALFDUPLEX:
+		cmd->base.duplex = DUPLEX_HALF;
+		break;
+	}
+
+	switch (carrier_info.port_speed) {
+	case CARD_INFO_PORTS_10M:
+		cmd->base.speed = SPEED_10;
+		break;
+	case CARD_INFO_PORTS_100M:
+		cmd->base.speed = SPEED_100;
+		break;
+	case CARD_INFO_PORTS_1G:
+		cmd->base.speed = SPEED_1000;
+		break;
+	case CARD_INFO_PORTS_10G:
+		cmd->base.speed = SPEED_10000;
+		break;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(qeth_core_ethtool_get_link_ksettings);
+
+/* Callback to handle checksum offload command reply from OSA card.
+ * Verify that required features have been enabled on the card.
+ * Return error in hdr->return_code as this value is checked by caller.
+ *
+ * Always returns zero to indicate no further messages from the OSA card.
+ */
+static int qeth_ipa_checksum_run_cmd_cb(struct qeth_card *card,
+					struct qeth_reply *reply,
+					unsigned long data)
+{
+	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
+	struct qeth_checksum_cmd *chksum_cb =
+				(struct qeth_checksum_cmd *)reply->param;
+
+	QETH_CARD_TEXT(card, 4, "chkdoccb");
+	if (qeth_setassparms_inspect_rc(cmd))
+		return 0;
+
+	memset(chksum_cb, 0, sizeof(*chksum_cb));
+	if (cmd->data.setassparms.hdr.command_code == IPA_CMD_ASS_START) {
+		chksum_cb->supported =
+				cmd->data.setassparms.data.chksum.supported;
+		QETH_CARD_TEXT_(card, 3, "strt:%x", chksum_cb->supported);
+	}
+	if (cmd->data.setassparms.hdr.command_code == IPA_CMD_ASS_ENABLE) {
+		chksum_cb->supported =
+				cmd->data.setassparms.data.chksum.supported;
+		chksum_cb->enabled =
+				cmd->data.setassparms.data.chksum.enabled;
+		QETH_CARD_TEXT_(card, 3, "supp:%x", chksum_cb->supported);
+		QETH_CARD_TEXT_(card, 3, "enab:%x", chksum_cb->enabled);
+	}
+	return 0;
+}
+
+/* Send command to OSA card and check results. */
+static int qeth_ipa_checksum_run_cmd(struct qeth_card *card,
+				     enum qeth_ipa_funcs ipa_func,
+				     __u16 cmd_code, long data,
+				     struct qeth_checksum_cmd *chksum_cb,
+				     enum qeth_prot_versions prot)
+{
+	struct qeth_cmd_buffer *iob;
+	int rc = -ENOMEM;
+
+	QETH_CARD_TEXT(card, 4, "chkdocmd");
+	iob = qeth_get_setassparms_cmd(card, ipa_func, cmd_code,
+				       sizeof(__u32), prot);
+	if (iob)
+		rc = qeth_send_setassparms(card, iob, sizeof(__u32), data,
+					   qeth_ipa_checksum_run_cmd_cb,
+					   chksum_cb);
+	return rc;
+}
+
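+/* Enable HW checksumming in two steps: START the assist to learn which
+ * checksum types the card supports, then ENABLE those types and verify that
+ * all required ones are active. On any failure, STOP the assist again and
+ * fall back to SW checksumming.
+ */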
+static int qeth_send_checksum_on(struct qeth_card *card, int cstype,
+				 enum qeth_prot_versions prot)
+{
+	u32 required_features = QETH_IPA_CHECKSUM_UDP | QETH_IPA_CHECKSUM_TCP;
+	struct qeth_checksum_cmd chksum_cb;
+	int rc;
+
+	if (prot == QETH_PROT_IPV4)
+		required_features |= QETH_IPA_CHECKSUM_IP_HDR;
+	rc = qeth_ipa_checksum_run_cmd(card, cstype, IPA_CMD_ASS_START, 0,
+				       &chksum_cb, prot);
+	if (!rc) {
+		if ((required_features & chksum_cb.supported) !=
+		    required_features)
+			rc = -EIO;
+		else if (!(QETH_IPA_CHECKSUM_LP2LP & chksum_cb.supported) &&
+			 cstype == IPA_INBOUND_CHECKSUM)
+			dev_warn(&card->gdev->dev,
+				 "Hardware checksumming is performed only if %s and its peer use different OSA Express 3 ports\n",
+				 QETH_CARD_IFNAME(card));
+	}
+	if (rc) {
+		qeth_send_simple_setassparms_prot(card, cstype,
+						  IPA_CMD_ASS_STOP, 0, prot);
+		dev_warn(&card->gdev->dev,
+			 "Starting HW IPv%d checksumming for %s failed, using SW checksumming\n",
+			 prot, QETH_CARD_IFNAME(card));
+		return rc;
+	}
+	rc = qeth_ipa_checksum_run_cmd(card, cstype, IPA_CMD_ASS_ENABLE,
+				       chksum_cb.supported, &chksum_cb,
+				       prot);
+	if (!rc) {
+		if ((required_features & chksum_cb.enabled) !=
+		    required_features)
+			rc = -EIO;
+	}
+	if (rc) {
+		qeth_send_simple_setassparms_prot(card, cstype,
+						  IPA_CMD_ASS_STOP, 0, prot);
+		dev_warn(&card->gdev->dev,
+			 "Enabling HW IPv%d checksumming for %s failed, using SW checksumming\n",
+			 prot, QETH_CARD_IFNAME(card));
+		return rc;
+	}
+
+	dev_info(&card->gdev->dev, "HW Checksumming (%sbound IPv%d) enabled\n",
+		 cstype == IPA_INBOUND_CHECKSUM ? "in" : "out", prot);
+	return 0;
+}
+
+static int qeth_set_ipa_csum(struct qeth_card *card, bool on, int cstype,
+			     enum qeth_prot_versions prot)
+{
+	int rc = (on) ? qeth_send_checksum_on(card, cstype, prot)
+		      : qeth_send_simple_setassparms_prot(card, cstype,
+							  IPA_CMD_ASS_STOP, 0,
+							  prot);
+	return rc ? -EIO : 0;
+}
+
+static int qeth_set_ipa_tso(struct qeth_card *card, int on)
+{
+	int rc;
+
+	QETH_CARD_TEXT(card, 3, "sttso");
+
+	if (on) {
+		rc = qeth_send_simple_setassparms(card, IPA_OUTBOUND_TSO,
+						  IPA_CMD_ASS_START, 0);
+		if (rc) {
+			dev_warn(&card->gdev->dev,
+				 "Starting outbound TCP segmentation offload for %s failed\n",
+				 QETH_CARD_IFNAME(card));
+			return -EIO;
+		}
+		dev_info(&card->gdev->dev, "Outbound TSO enabled\n");
+	} else {
+		rc = qeth_send_simple_setassparms(card, IPA_OUTBOUND_TSO,
+						  IPA_CMD_ASS_STOP, 0);
+	}
+	return rc;
+}
+
+static int qeth_set_ipa_rx_csum(struct qeth_card *card, bool on)
+{
+	int rc_ipv4 = (on) ? -EOPNOTSUPP : 0;
+	int rc_ipv6;
+
+	if (qeth_is_supported(card, IPA_INBOUND_CHECKSUM))
+		rc_ipv4 = qeth_set_ipa_csum(card, on, IPA_INBOUND_CHECKSUM,
+					    QETH_PROT_IPV4);
+	if (!qeth_is_supported6(card, IPA_INBOUND_CHECKSUM_V6))
+		/* no or only one Offload Assist available, so the rc is trivial */
+		return rc_ipv4;
+
+	rc_ipv6 = qeth_set_ipa_csum(card, on, IPA_INBOUND_CHECKSUM,
+				    QETH_PROT_IPV6);
+
+	if (on)
+		/* enable: success if any Assist is active */
+		return (rc_ipv6) ? rc_ipv4 : 0;
+
+	/* disable: failure if any Assist is still active */
+	return (rc_ipv6) ? rc_ipv6 : rc_ipv4;
+}
+
+#define QETH_HW_FEATURES (NETIF_F_RXCSUM | NETIF_F_IP_CSUM | NETIF_F_TSO | \
+			  NETIF_F_IPV6_CSUM)
+/**
+ * qeth_enable_hw_features() - (Re-)Enable HW functions for device features
+ * @dev:	a net_device
+ */
+void qeth_enable_hw_features(struct net_device *dev)
+{
+	struct qeth_card *card = dev->ml_priv;
+	netdev_features_t features;
+
+	rtnl_lock();
+	features = dev->features;
+	/* force-off any feature that needs an IPA sequence.
+	 * netdev_update_features() will restart them.
+	 */
+	dev->features &= ~QETH_HW_FEATURES;
+	netdev_update_features(dev);
+	if (features != dev->features)
+		dev_warn(&card->gdev->dev,
+			 "Device recovery failed to restore all offload features\n");
+	rtnl_unlock();
+}
+EXPORT_SYMBOL_GPL(qeth_enable_hw_features);
+
+int qeth_set_features(struct net_device *dev, netdev_features_t features)
+{
+	struct qeth_card *card = dev->ml_priv;
+	netdev_features_t changed = dev->features ^ features;
+	int rc = 0;
+
+	QETH_DBF_TEXT(SETUP, 2, "setfeat");
+	QETH_DBF_HEX(SETUP, 2, &features, sizeof(features));
+
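+	/* Toggle each requested feature via IPA; on failure, clear the
+	 * feature's bit in 'changed' so that the bookkeeping below records
+	 * that it did not actually change.
+	 */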
+	if (changed & NETIF_F_IP_CSUM) {
+		rc = qeth_set_ipa_csum(card, features & NETIF_F_IP_CSUM,
+				       IPA_OUTBOUND_CHECKSUM, QETH_PROT_IPV4);
+		if (rc)
+			changed ^= NETIF_F_IP_CSUM;
+	}
+	if (changed & NETIF_F_IPV6_CSUM) {
+		rc = qeth_set_ipa_csum(card, features & NETIF_F_IPV6_CSUM,
+				       IPA_OUTBOUND_CHECKSUM, QETH_PROT_IPV6);
+		if (rc)
+			changed ^= NETIF_F_IPV6_CSUM;
+	}
+	if (changed & NETIF_F_RXCSUM) {
+		rc = qeth_set_ipa_rx_csum(card, features & NETIF_F_RXCSUM);
+		if (rc)
+			changed ^= NETIF_F_RXCSUM;
+	}
+	if (changed & NETIF_F_TSO) {
+		rc = qeth_set_ipa_tso(card, features & NETIF_F_TSO ? 1 : 0);
+		if (rc)
+			changed ^= NETIF_F_TSO;
+	}
+
+	/* everything changed successfully? */
+	if ((dev->features ^ features) == changed)
+		return 0;
+	/* something went wrong. save changed features and return error */
+	dev->features ^= changed;
+	return -EIO;
+}
+EXPORT_SYMBOL_GPL(qeth_set_features);
+
+netdev_features_t qeth_fix_features(struct net_device *dev,
+				    netdev_features_t features)
+{
+	struct qeth_card *card = dev->ml_priv;
+
+	QETH_DBF_TEXT(SETUP, 2, "fixfeat");
+	if (!qeth_is_supported(card, IPA_OUTBOUND_CHECKSUM))
+		features &= ~NETIF_F_IP_CSUM;
+	if (!qeth_is_supported6(card, IPA_OUTBOUND_CHECKSUM_V6))
+		features &= ~NETIF_F_IPV6_CSUM;
+	if (!qeth_is_supported(card, IPA_INBOUND_CHECKSUM) &&
+	    !qeth_is_supported6(card, IPA_INBOUND_CHECKSUM_V6))
+		features &= ~NETIF_F_RXCSUM;
+	if (!qeth_is_supported(card, IPA_OUTBOUND_TSO))
+		features &= ~NETIF_F_TSO;
+	/* if the card isn't up, remove features that require hw changes */
+	if (card->state == CARD_STATE_DOWN ||
+	    card->state == CARD_STATE_RECOVER)
+		features &= ~QETH_HW_FEATURES;
+	QETH_DBF_HEX(SETUP, 2, &features, sizeof(features));
+	return features;
+}
+EXPORT_SYMBOL_GPL(qeth_fix_features);
+
+netdev_features_t qeth_features_check(struct sk_buff *skb,
+				      struct net_device *dev,
+				      netdev_features_t features)
+{
+	/* GSO segmentation builds skbs with
+	 *	a (small) linear part for the headers, and
+	 *	page frags for the data.
+	 * Compared to a linear skb, the header-only part consumes an
+	 * additional buffer element. This reduces buffer utilization, and
+	 * hurts throughput. So compress small segments into one element.
+	 */
+	if (netif_needs_gso(skb, features)) {
+		/* match skb_segment(): */
+		unsigned int doffset = skb->data - skb_mac_header(skb);
+		unsigned int hsize = skb_shinfo(skb)->gso_size;
+		unsigned int hroom = skb_headroom(skb);
+
+		/* linearize only if resulting skb allocations are order-0: */
+		if (SKB_DATA_ALIGN(hroom + doffset + hsize) <= SKB_MAX_HEAD(0))
+			features &= ~NETIF_F_SG;
+	}
+
+	return vlan_features_check(skb, features);
+}
+EXPORT_SYMBOL_GPL(qeth_features_check);
+
+static int __init qeth_core_init(void)
+{
+	int rc;
+
+	pr_info("loading core functions\n");
+	INIT_LIST_HEAD(&qeth_core_card_list.list);
+	INIT_LIST_HEAD(&qeth_dbf_list);
+	rwlock_init(&qeth_core_card_list.rwlock);
+	mutex_init(&qeth_mod_mutex);
+
+	qeth_wq = create_singlethread_workqueue("qeth_wq");
+	if (!qeth_wq) {
+		rc = -ENOMEM;
+		goto out_err;
+	}
+
+	rc = qeth_register_dbf_views();
+	if (rc)
+		goto dbf_err;
+	qeth_core_root_dev = root_device_register("qeth");
+	rc = PTR_ERR_OR_ZERO(qeth_core_root_dev);
+	if (rc)
+		goto register_err;
+	qeth_core_header_cache = kmem_cache_create("qeth_hdr",
+			sizeof(struct qeth_hdr) + ETH_HLEN, 64, 0, NULL);
+	if (!qeth_core_header_cache) {
+		rc = -ENOMEM;
+		goto slab_err;
+	}
+	qeth_qdio_outbuf_cache = kmem_cache_create("qeth_buf",
+			sizeof(struct qeth_qdio_out_buffer), 0, 0, NULL);
+	if (!qeth_qdio_outbuf_cache) {
+		rc = -ENOMEM;
+		goto cqslab_err;
+	}
+	rc = ccw_driver_register(&qeth_ccw_driver);
+	if (rc)
+		goto ccw_err;
+	rc = ccwgroup_driver_register(&qeth_core_ccwgroup_driver);
+	if (rc)
+		goto ccwgroup_err;
+
+	return 0;
+
+ccwgroup_err:
+	ccw_driver_unregister(&qeth_ccw_driver);
+ccw_err:
+	kmem_cache_destroy(qeth_qdio_outbuf_cache);
+cqslab_err:
+	kmem_cache_destroy(qeth_core_header_cache);
+slab_err:
+	root_device_unregister(qeth_core_root_dev);
+register_err:
+	qeth_unregister_dbf_views();
+dbf_err:
+	destroy_workqueue(qeth_wq);
+out_err:
+	pr_err("Initializing the qeth device driver failed\n");
+	return rc;
+}
+
+static void __exit qeth_core_exit(void)
+{
+	qeth_clear_dbf_list();
+	destroy_workqueue(qeth_wq);
+	ccwgroup_driver_unregister(&qeth_core_ccwgroup_driver);
+	ccw_driver_unregister(&qeth_ccw_driver);
+	kmem_cache_destroy(qeth_qdio_outbuf_cache);
+	kmem_cache_destroy(qeth_core_header_cache);
+	root_device_unregister(qeth_core_root_dev);
+	qeth_unregister_dbf_views();
+	pr_info("core functions removed\n");
+}
+
+module_init(qeth_core_init);
+module_exit(qeth_core_exit);
+MODULE_AUTHOR("Frank Blaschka <frank.blaschka@de.ibm.com>");
+MODULE_DESCRIPTION("qeth core functions");
+MODULE_LICENSE("GPL");
diff --git a/drivers/s390/net/qeth_core_mpc.c b/drivers/s390/net/qeth_core_mpc.c
new file mode 100644
index 0000000..e891c0b
--- /dev/null
+++ b/drivers/s390/net/qeth_core_mpc.c
@@ -0,0 +1,278 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ *    Copyright IBM Corp. 2007
+ *    Author(s): Frank Pavlic <fpavlic@de.ibm.com>,
+ *		 Thomas Spatzier <tspat@de.ibm.com>,
+ *		 Frank Blaschka <frank.blaschka@de.ibm.com>
+ */
+
+#include <linux/module.h>
+#include <asm/cio.h>
+#include "qeth_core_mpc.h"
+
+unsigned char IDX_ACTIVATE_READ[] = {
+	0x00, 0x00, 0x80, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x19, 0x01, 0x01, 0x80,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0xc8, 0xc1,
+	0xd3, 0xd3, 0xd6, 0xd3,  0xc5, 0x40, 0x00, 0x00,
+	0x00, 0x00
+};
+
+unsigned char IDX_ACTIVATE_WRITE[] = {
+	0x00, 0x00, 0x80, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x15, 0x01, 0x01, 0x80,  0x00, 0x00, 0x00, 0x00,
+	0xff, 0xff, 0x00, 0x00,  0x00, 0x00, 0xc8, 0xc1,
+	0xd3, 0xd3, 0xd6, 0xd3,  0xc5, 0x40, 0x00, 0x00,
+	0x00, 0x00
+};
+
+unsigned char CM_ENABLE[] = {
+	0x00, 0xe0, 0x00, 0x00,  0x00, 0x00, 0x00, 0x01,
+	0x00, 0x00, 0x00, 0x14,  0x00, 0x00, 0x00, 0x63,
+	0x10, 0x00, 0x00, 0x01,
+	0x00, 0x00, 0x00, 0x00,
+	0x81, 0x7e, 0x00, 0x01,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x24, 0x00, 0x23,
+	0x00, 0x00, 0x23, 0x05,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x01, 0x00, 0x00, 0x23,  0x00, 0x00, 0x00, 0x40,
+	0x00, 0x0c, 0x41, 0x02,  0x00, 0x17, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x0b, 0x04, 0x01,
+	0x7e, 0x04, 0x05, 0x00,  0x01, 0x01, 0x0f,
+	0x00,
+	0x0c, 0x04, 0x02, 0xff,  0xff, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0xff
+};
+
+unsigned char CM_SETUP[] = {
+	0x00, 0xe0, 0x00, 0x00,  0x00, 0x00, 0x00, 0x02,
+	0x00, 0x00, 0x00, 0x14,  0x00, 0x00, 0x00, 0x64,
+	0x10, 0x00, 0x00, 0x01,
+	0x00, 0x00, 0x00, 0x00,
+	0x81, 0x7e, 0x00, 0x01,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x24, 0x00, 0x24,
+	0x00, 0x00, 0x24, 0x05,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x01, 0x00, 0x00, 0x24,  0x00, 0x00, 0x00, 0x40,
+	0x00, 0x0c, 0x41, 0x04,  0x00, 0x18, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x09, 0x04, 0x04,
+	0x05, 0x00, 0x01, 0x01,  0x11,
+	0x00, 0x09, 0x04,
+	0x05, 0x05, 0x00, 0x00,  0x00, 0x00,
+	0x00, 0x06,
+	0x04, 0x06, 0xc8, 0x00
+};
+
+unsigned char ULP_ENABLE[] = {
+	0x00, 0xe0, 0x00, 0x00,  0x00, 0x00, 0x00, 0x03,
+	0x00, 0x00, 0x00, 0x14,  0x00, 0x00, 0x00, 0x6b,
+	0x10, 0x00, 0x00, 0x01,
+	0x00, 0x00, 0x00, 0x00,
+	0x41, 0x7e, 0x00, 0x01,  0x00, 0x00, 0x00, 0x01,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x24, 0x00, 0x2b,
+	0x00, 0x00, 0x2b, 0x05,  0x20, 0x01, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x01, 0x00, 0x00, 0x2b,  0x00, 0x00, 0x00, 0x40,
+	0x00, 0x0c, 0x41, 0x02,  0x00, 0x1f, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x0b, 0x04, 0x01,
+	0x03, 0x04, 0x05, 0x00,  0x01, 0x01, 0x12,
+	0x00,
+	0x14, 0x04, 0x0a, 0x00,  0x20, 0x00, 0x00, 0xff,
+	0xff, 0x00, 0x08, 0xc8,  0xe8, 0xc4, 0xf1, 0xc7,
+	0xf1, 0x00, 0x00
+};
+
+unsigned char ULP_SETUP[] = {
+	0x00, 0xe0, 0x00, 0x00,  0x00, 0x00, 0x00, 0x04,
+	0x00, 0x00, 0x00, 0x14,  0x00, 0x00, 0x00, 0x6c,
+	0x10, 0x00, 0x00, 0x01,
+	0x00, 0x00, 0x00, 0x00,
+	0x41, 0x7e, 0x00, 0x01,  0x00, 0x00, 0x00, 0x02,
+	0x00, 0x00, 0x00, 0x01,  0x00, 0x24, 0x00, 0x2c,
+	0x00, 0x00, 0x2c, 0x05,  0x20, 0x01, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x01, 0x00, 0x00, 0x2c,  0x00, 0x00, 0x00, 0x40,
+	0x00, 0x0c, 0x41, 0x04,  0x00, 0x20, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x09, 0x04, 0x04,
+	0x05, 0x00, 0x01, 0x01,  0x14,
+	0x00, 0x09, 0x04,
+	0x05, 0x05, 0x30, 0x01,  0x00, 0x00,
+	0x00, 0x06,
+	0x04, 0x06, 0x40, 0x00,
+	0x00, 0x08, 0x04, 0x0b,
+	0x00, 0x00, 0x00, 0x00
+};
+
+unsigned char DM_ACT[] = {
+	0x00, 0xe0, 0x00, 0x00,  0x00, 0x00, 0x00, 0x05,
+	0x00, 0x00, 0x00, 0x14,  0x00, 0x00, 0x00, 0x55,
+	0x10, 0x00, 0x00, 0x01,
+	0x00, 0x00, 0x00, 0x00,
+	0x41, 0x7e, 0x00, 0x01,  0x00, 0x00, 0x00, 0x03,
+	0x00, 0x00, 0x00, 0x02,  0x00, 0x24, 0x00, 0x15,
+	0x00, 0x00, 0x2c, 0x05,  0x20, 0x01, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x01, 0x00, 0x00, 0x15,  0x00, 0x00, 0x00, 0x40,
+	0x00, 0x0c, 0x43, 0x60,  0x00, 0x09, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x09, 0x04, 0x04,
+	0x05, 0x40, 0x01, 0x01,  0x00
+};
+
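+/* Skeleton of an IPA PDU. The length fields are computed at compile time
+ * from sizeof(struct qeth_ipa_cmd); the 0x77 bytes are placeholders that the
+ * driver fills in at runtime (e.g. the command's destination address at
+ * offset 0x2c, see QETH_IPA_CMD_DEST_ADDR).
+ */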
+unsigned char IPA_PDU_HEADER[] = {
+	0x00, 0xe0, 0x00, 0x00,  0x77, 0x77, 0x77, 0x77,
+	0x00, 0x00, 0x00, 0x14,  0x00, 0x00,
+		(IPA_PDU_HEADER_SIZE + sizeof(struct qeth_ipa_cmd)) / 256,
+		(IPA_PDU_HEADER_SIZE + sizeof(struct qeth_ipa_cmd)) % 256,
+	0x10, 0x00, 0x00, 0x01,  0x00, 0x00, 0x00, 0x00,
+	0xc1, 0x03, 0x00, 0x01,  0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x24,
+		sizeof(struct qeth_ipa_cmd) / 256,
+		sizeof(struct qeth_ipa_cmd) % 256,
+	0x00,
+		sizeof(struct qeth_ipa_cmd) / 256,
+		sizeof(struct qeth_ipa_cmd) % 256,
+	0x05,
+	0x77, 0x77, 0x77, 0x77,
+	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0x00, 0x00,
+	0x01, 0x00,
+		sizeof(struct qeth_ipa_cmd) / 256,
+		sizeof(struct qeth_ipa_cmd) % 256,
+	0x00, 0x00, 0x00, 0x40,
+};
+EXPORT_SYMBOL_GPL(IPA_PDU_HEADER);
+
+struct ipa_rc_msg {
+	enum qeth_ipa_return_codes rc;
+	const char *msg;
+};
+
+static const struct ipa_rc_msg qeth_ipa_rc_msg[] = {
+	{IPA_RC_SUCCESS,		"success"},
+	{IPA_RC_NOTSUPP,		"Command not supported"},
+	{IPA_RC_IP_TABLE_FULL,		"Add Addr IP Table Full - ipv6"},
+	{IPA_RC_UNKNOWN_ERROR,		"IPA command failed - reason unknown"},
+	{IPA_RC_UNSUPPORTED_COMMAND,	"Command not supported"},
+	{IPA_RC_VNICC_OOSEQ,		"Command issued out of sequence"},
+	{IPA_RC_INVALID_FORMAT,		"invalid format or length"},
+	{IPA_RC_DUP_IPV6_REMOTE, "ipv6 address already registered remote"},
+	{IPA_RC_SBP_IQD_NOT_CONFIGURED,	"Not configured for bridgeport"},
+	{IPA_RC_DUP_IPV6_HOME,		"ipv6 address already registered"},
+	{IPA_RC_UNREGISTERED_ADDR,	"Address not registered"},
+	{IPA_RC_NO_ID_AVAILABLE,	"No identifiers available"},
+	{IPA_RC_ID_NOT_FOUND,		"Identifier not found"},
+	{IPA_RC_SBP_IQD_ANO_DEV_PRIMARY, "Primary bridgeport exists already"},
+	{IPA_RC_SBP_IQD_CURRENT_SECOND,	"Bridgeport is currently secondary"},
+	{IPA_RC_SBP_IQD_LIMIT_SECOND, "Limit of secondary bridgeports reached"},
+	{IPA_RC_INVALID_IP_VERSION,	"IP version incorrect"},
+	{IPA_RC_SBP_IQD_CURRENT_PRIMARY, "Bridgeport is currently primary"},
+	{IPA_RC_LAN_FRAME_MISMATCH,	"LAN and frame mismatch"},
+	{IPA_RC_SBP_IQD_NO_QDIO_QUEUES,	"QDIO queues not established"},
+	{IPA_RC_L2_UNSUPPORTED_CMD,	"Unsupported layer 2 command"},
+	{IPA_RC_L2_DUP_MAC,		"Duplicate MAC address"},
+	{IPA_RC_L2_ADDR_TABLE_FULL,	"Layer2 address table full"},
+	{IPA_RC_L2_DUP_LAYER3_MAC,	"Duplicate with layer 3 MAC"},
+	{IPA_RC_L2_GMAC_NOT_FOUND,	"GMAC not found"},
+	{IPA_RC_L2_MAC_NOT_AUTH_BY_HYP,	"L2 mac not authorized by hypervisor"},
+	{IPA_RC_L2_MAC_NOT_AUTH_BY_ADP,	"L2 mac not authorized by adapter"},
+	{IPA_RC_L2_MAC_NOT_FOUND,	"L2 mac address not found"},
+	{IPA_RC_L2_INVALID_VLAN_ID,	"L2 invalid vlan id"},
+	{IPA_RC_L2_DUP_VLAN_ID,		"L2 duplicate vlan id"},
+	{IPA_RC_L2_VLAN_ID_NOT_FOUND,	"L2 vlan id not found"},
+	{IPA_RC_VNICC_VNICBP,		"VNIC is BridgePort"},
+	{IPA_RC_SBP_OSA_NOT_CONFIGURED,	"Not configured for bridgeport"},
+	{IPA_RC_SBP_OSA_OS_MISMATCH,	"OS mismatch"},
+	{IPA_RC_SBP_OSA_ANO_DEV_PRIMARY, "Primary bridgeport exists already"},
+	{IPA_RC_SBP_OSA_CURRENT_SECOND,	"Bridgeport is currently secondary"},
+	{IPA_RC_SBP_OSA_LIMIT_SECOND, "Limit of secondary bridgeports reached"},
+	{IPA_RC_SBP_OSA_NOT_AUTHD_BY_ZMAN, "Not authorized by zManager"},
+	{IPA_RC_SBP_OSA_CURRENT_PRIMARY, "Bridgeport is currently primary"},
+	{IPA_RC_SBP_OSA_NO_QDIO_QUEUES,	"QDIO queues not established"},
+	{IPA_RC_DATA_MISMATCH,		"Data field mismatch (v4/v6 mixed)"},
+	{IPA_RC_INVALID_MTU_SIZE,	"Invalid MTU size"},
+	{IPA_RC_INVALID_LANTYPE,	"Invalid LAN type"},
+	{IPA_RC_INVALID_LANNUM,		"Invalid LAN num"},
+	{IPA_RC_DUPLICATE_IP_ADDRESS,	"Address already registered"},
+	{IPA_RC_IP_ADDR_TABLE_FULL,	"IP address table full"},
+	{IPA_RC_LAN_PORT_STATE_ERROR,	"LAN port state error"},
+	{IPA_RC_SETIP_NO_STARTLAN,	"Setip no startlan received"},
+	{IPA_RC_SETIP_ALREADY_RECEIVED,	"Setip already received"},
+	{IPA_RC_IP_ADDR_ALREADY_USED,	"IP address already in use on LAN"},
+	{IPA_RC_MC_ADDR_NOT_FOUND,	"Multicast address not found"},
+	{IPA_RC_SETIP_INVALID_VERSION,	"SETIP invalid IP version"},
+	{IPA_RC_UNSUPPORTED_SUBCMD,	"Unsupported assist subcommand"},
+	{IPA_RC_ARP_ASSIST_NO_ENABLE,	"Only partial success, no enable"},
+	{IPA_RC_PRIMARY_ALREADY_DEFINED, "Primary already defined"},
+	{IPA_RC_SECOND_ALREADY_DEFINED,	"Secondary already defined"},
+	{IPA_RC_INVALID_SETRTG_INDICATOR, "Invalid SETRTG indicator"},
+	{IPA_RC_MC_ADDR_ALREADY_DEFINED, "Multicast address already defined"},
+	{IPA_RC_LAN_OFFLINE,		"STRTLAN_LAN_DISABLED - LAN offline"},
+	{IPA_RC_VEPA_TO_VEB_TRANSITION,	"Adj. switch disabled port mode RR"},
+	{IPA_RC_INVALID_IP_VERSION2,	"Invalid IP version"},
+	{IPA_RC_ENOMEM,			"Memory problem"},
+	{IPA_RC_FFFF,			"Unknown Error"}
+};
+
+
+const char *qeth_get_ipa_msg(enum qeth_ipa_return_codes rc)
+{
+	int x;
+
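+	/* the table's final entry (IPA_RC_FFFF) doubles as the fallback
+	 * message for unknown return codes:
+	 */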
+	for (x = 0; x < ARRAY_SIZE(qeth_ipa_rc_msg) - 1; x++)
+		if (qeth_ipa_rc_msg[x].rc == rc)
+			return qeth_ipa_rc_msg[x].msg;
+	return qeth_ipa_rc_msg[x].msg;
+}
+
+struct ipa_cmd_names {
+	enum qeth_ipa_cmds cmd;
+	const char *name;
+};
+
+static const struct ipa_cmd_names qeth_ipa_cmd_names[] = {
+	{IPA_CMD_STARTLAN,	"startlan"},
+	{IPA_CMD_STOPLAN,	"stoplan"},
+	{IPA_CMD_SETVMAC,	"setvmac"},
+	{IPA_CMD_DELVMAC,	"delvmac"},
+	{IPA_CMD_SETGMAC,	"setgmac"},
+	{IPA_CMD_DELGMAC,	"delgmac"},
+	{IPA_CMD_SETVLAN,	"setvlan"},
+	{IPA_CMD_DELVLAN,	"delvlan"},
+	{IPA_CMD_VNICC,		"vnic_characteristics"},
+	{IPA_CMD_SETBRIDGEPORT_OSA,	"set_bridge_port(osa)"},
+	{IPA_CMD_SETCCID,	"setccid"},
+	{IPA_CMD_DELCCID,	"delccid"},
+	{IPA_CMD_MODCCID,	"modccid"},
+	{IPA_CMD_SETIP,		"setip"},
+	{IPA_CMD_QIPASSIST,	"qipassist"},
+	{IPA_CMD_SETASSPARMS,	"setassparms"},
+	{IPA_CMD_SETIPM,	"setipm"},
+	{IPA_CMD_DELIPM,	"delipm"},
+	{IPA_CMD_SETRTG,	"setrtg"},
+	{IPA_CMD_DELIP,		"delip"},
+	{IPA_CMD_SETADAPTERPARMS, "setadapterparms"},
+	{IPA_CMD_SET_DIAG_ASS,	"set_diag_ass"},
+	{IPA_CMD_SETBRIDGEPORT_IQD,	"set_bridge_port(hs)"},
+	{IPA_CMD_CREATE_ADDR,	"create_addr"},
+	{IPA_CMD_DESTROY_ADDR,	"destroy_addr"},
+	{IPA_CMD_REGISTER_LOCAL_ADDR,	"register_local_addr"},
+	{IPA_CMD_UNREGISTER_LOCAL_ADDR,	"unregister_local_addr"},
+	{IPA_CMD_ADDRESS_CHANGE_NOTIF, "address_change_notification"},
+	{IPA_CMD_UNKNOWN,	"unknown"},
+};
+
+const char *qeth_get_ipa_cmd_name(enum qeth_ipa_cmds cmd)
+{
+	int x;
+
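+	/* the final entry (IPA_CMD_UNKNOWN) doubles as the fallback name: */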
+	for (x = 0; x < ARRAY_SIZE(qeth_ipa_cmd_names) - 1; x++)
+		if (qeth_ipa_cmd_names[x].cmd == cmd)
+			return qeth_ipa_cmd_names[x].name;
+	return qeth_ipa_cmd_names[x].name;
+}
diff --git a/drivers/s390/net/qeth_core_mpc.h b/drivers/s390/net/qeth_core_mpc.h
new file mode 100644
index 0000000..aa5de1f
--- /dev/null
+++ b/drivers/s390/net/qeth_core_mpc.h
@@ -0,0 +1,910 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ *    Copyright IBM Corp. 2007
+ *    Author(s): Frank Pavlic <fpavlic@de.ibm.com>,
+ *		 Thomas Spatzier <tspat@de.ibm.com>,
+ *		 Frank Blaschka <frank.blaschka@de.ibm.com>
+ */
+
+#ifndef __QETH_CORE_MPC_H__
+#define __QETH_CORE_MPC_H__
+
+#include <asm/qeth.h>
+#include <uapi/linux/if_ether.h>
+
+#define IPA_PDU_HEADER_SIZE	0x40
+#define QETH_IPA_PDU_LEN_TOTAL(buffer) (buffer + 0x0e)
+#define QETH_IPA_PDU_LEN_PDU1(buffer) (buffer + 0x26)
+#define QETH_IPA_PDU_LEN_PDU2(buffer) (buffer + 0x29)
+#define QETH_IPA_PDU_LEN_PDU3(buffer) (buffer + 0x3a)
+
+extern unsigned char IPA_PDU_HEADER[];
+#define QETH_IPA_CMD_DEST_ADDR(buffer) (buffer + 0x2c)
+
+#define IPA_CMD_LENGTH	(IPA_PDU_HEADER_SIZE + sizeof(struct qeth_ipa_cmd))
+
+#define QETH_SEQ_NO_LENGTH	4
+#define QETH_MPC_TOKEN_LENGTH	4
+#define QETH_MCL_LENGTH		4
+
+#define QETH_TIMEOUT		(10 * HZ)
+#define QETH_IPA_TIMEOUT	(45 * HZ)
+#define QETH_IDX_COMMAND_SEQNO	0xffff0000
+
+#define QETH_CLEAR_CHANNEL_PARM	-10
+#define QETH_HALT_CHANNEL_PARM	-11
+#define QETH_RCD_PARM -12
+
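+/* These magic values (and 0) mark internal channel operations; any other
+ * interruption parameter is treated as a pointer to a command buffer (iob):
+ */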
+static inline bool qeth_intparm_is_iob(unsigned long intparm)
+{
+	switch (intparm) {
+	case QETH_CLEAR_CHANNEL_PARM:
+	case QETH_HALT_CHANNEL_PARM:
+	case QETH_RCD_PARM:
+	case 0:
+		return false;
+	}
+	return true;
+}
+
+/*****************************************************************************/
+/* IP Assist related definitions                                             */
+/*****************************************************************************/
+#define IPA_CMD_INITIATOR_HOST  0x00
+#define IPA_CMD_INITIATOR_OSA   0x01
+#define IPA_CMD_INITIATOR_HOST_REPLY  0x80
+#define IPA_CMD_INITIATOR_OSA_REPLY   0x81
+#define IPA_CMD_PRIM_VERSION_NO 0x01
+
+enum qeth_card_types {
+	QETH_CARD_TYPE_OSD     = 1,
+	QETH_CARD_TYPE_IQD     = 5,
+	QETH_CARD_TYPE_OSN     = 6,
+	QETH_CARD_TYPE_OSM     = 3,
+	QETH_CARD_TYPE_OSX     = 2,
+};
+
+#define IS_IQD(card)	((card)->info.type == QETH_CARD_TYPE_IQD)
+#define IS_OSN(card)	((card)->info.type == QETH_CARD_TYPE_OSN)
+
+#define QETH_MPC_DIFINFO_LEN_INDICATES_LINK_TYPE 0x18
+/* only the first two bytes are looked at in qeth_get_cardname_short */
+enum qeth_link_types {
+	QETH_LINK_TYPE_FAST_ETH     = 0x01,
+	QETH_LINK_TYPE_HSTR         = 0x02,
+	QETH_LINK_TYPE_GBIT_ETH     = 0x03,
+	QETH_LINK_TYPE_OSN          = 0x04,
+	QETH_LINK_TYPE_10GBIT_ETH   = 0x10,
+	QETH_LINK_TYPE_LANE_ETH100  = 0x81,
+	QETH_LINK_TYPE_LANE_TR      = 0x82,
+	QETH_LINK_TYPE_LANE_ETH1000 = 0x83,
+	QETH_LINK_TYPE_LANE         = 0x88,
+};
+
+/*
+ * Routing stuff
+ */
+#define RESET_ROUTING_FLAG 0x10 /* indicate that routing type shall be set */
+enum qeth_routing_types {
+	/* TODO: set to bit flag used in IPA Command */
+	NO_ROUTER		= 0,
+	PRIMARY_ROUTER		= 1,
+	SECONDARY_ROUTER	= 2,
+	MULTICAST_ROUTER	= 3,
+	PRIMARY_CONNECTOR	= 4,
+	SECONDARY_CONNECTOR	= 5,
+};
+
+/* IPA Commands */
+enum qeth_ipa_cmds {
+	IPA_CMD_STARTLAN		= 0x01,
+	IPA_CMD_STOPLAN			= 0x02,
+	IPA_CMD_SETVMAC			= 0x21,
+	IPA_CMD_DELVMAC			= 0x22,
+	IPA_CMD_SETGMAC			= 0x23,
+	IPA_CMD_DELGMAC			= 0x24,
+	IPA_CMD_SETVLAN			= 0x25,
+	IPA_CMD_DELVLAN			= 0x26,
+	IPA_CMD_VNICC			= 0x2a,
+	IPA_CMD_SETBRIDGEPORT_OSA	= 0x2b,
+	IPA_CMD_SETCCID			= 0x41,
+	IPA_CMD_DELCCID			= 0x42,
+	IPA_CMD_MODCCID			= 0x43,
+	IPA_CMD_SETIP			= 0xb1,
+	IPA_CMD_QIPASSIST		= 0xb2,
+	IPA_CMD_SETASSPARMS		= 0xb3,
+	IPA_CMD_SETIPM			= 0xb4,
+	IPA_CMD_DELIPM			= 0xb5,
+	IPA_CMD_SETRTG			= 0xb6,
+	IPA_CMD_DELIP			= 0xb7,
+	IPA_CMD_SETADAPTERPARMS		= 0xb8,
+	IPA_CMD_SET_DIAG_ASS		= 0xb9,
+	IPA_CMD_SETBRIDGEPORT_IQD	= 0xbe,
+	IPA_CMD_CREATE_ADDR		= 0xc3,
+	IPA_CMD_DESTROY_ADDR		= 0xc4,
+	IPA_CMD_REGISTER_LOCAL_ADDR	= 0xd1,
+	IPA_CMD_UNREGISTER_LOCAL_ADDR	= 0xd2,
+	IPA_CMD_ADDRESS_CHANGE_NOTIF	= 0xd3,
+	IPA_CMD_UNKNOWN			= 0x00
+};
+
+enum qeth_ip_ass_cmds {
+	IPA_CMD_ASS_START	= 0x0001,
+	IPA_CMD_ASS_STOP	= 0x0002,
+	IPA_CMD_ASS_CONFIGURE	= 0x0003,
+	IPA_CMD_ASS_ENABLE	= 0x0004,
+};
+
+enum qeth_arp_process_subcmds {
+	IPA_CMD_ASS_ARP_SET_NO_ENTRIES	= 0x0003,
+	IPA_CMD_ASS_ARP_QUERY_CACHE	= 0x0004,
+	IPA_CMD_ASS_ARP_ADD_ENTRY	= 0x0005,
+	IPA_CMD_ASS_ARP_REMOVE_ENTRY	= 0x0006,
+	IPA_CMD_ASS_ARP_FLUSH_CACHE	= 0x0007,
+	IPA_CMD_ASS_ARP_QUERY_INFO	= 0x0104,
+	IPA_CMD_ASS_ARP_QUERY_STATS	= 0x0204,
+};
+
+
+/* Return codes for IPA commands, according to the OSA card specs */
+
+enum qeth_ipa_return_codes {
+	IPA_RC_SUCCESS			= 0x0000,
+	IPA_RC_NOTSUPP			= 0x0001,
+	IPA_RC_IP_TABLE_FULL		= 0x0002,
+	IPA_RC_UNKNOWN_ERROR		= 0x0003,
+	IPA_RC_UNSUPPORTED_COMMAND	= 0x0004,
+	IPA_RC_TRACE_ALREADY_ACTIVE	= 0x0005,
+	IPA_RC_INVALID_FORMAT		= 0x0006,
+	IPA_RC_DUP_IPV6_REMOTE		= 0x0008,
+	IPA_RC_SBP_IQD_NOT_CONFIGURED	= 0x000C,
+	IPA_RC_DUP_IPV6_HOME		= 0x0010,
+	IPA_RC_UNREGISTERED_ADDR	= 0x0011,
+	IPA_RC_NO_ID_AVAILABLE		= 0x0012,
+	IPA_RC_ID_NOT_FOUND		= 0x0013,
+	IPA_RC_SBP_IQD_ANO_DEV_PRIMARY	= 0x0014,
+	IPA_RC_SBP_IQD_CURRENT_SECOND	= 0x0018,
+	IPA_RC_SBP_IQD_LIMIT_SECOND	= 0x001C,
+	IPA_RC_INVALID_IP_VERSION	= 0x0020,
+	IPA_RC_SBP_IQD_CURRENT_PRIMARY	= 0x0024,
+	IPA_RC_LAN_FRAME_MISMATCH	= 0x0040,
+	IPA_RC_SBP_IQD_NO_QDIO_QUEUES	= 0x00EB,
+	IPA_RC_L2_UNSUPPORTED_CMD	= 0x2003,
+	IPA_RC_L2_DUP_MAC		= 0x2005,
+	IPA_RC_L2_ADDR_TABLE_FULL	= 0x2006,
+	IPA_RC_L2_DUP_LAYER3_MAC	= 0x200a,
+	IPA_RC_L2_GMAC_NOT_FOUND	= 0x200b,
+	IPA_RC_L2_MAC_NOT_AUTH_BY_HYP	= 0x200c,
+	IPA_RC_L2_MAC_NOT_AUTH_BY_ADP	= 0x200d,
+	IPA_RC_L2_MAC_NOT_FOUND		= 0x2010,
+	IPA_RC_L2_INVALID_VLAN_ID	= 0x2015,
+	IPA_RC_L2_DUP_VLAN_ID		= 0x2016,
+	IPA_RC_L2_VLAN_ID_NOT_FOUND	= 0x2017,
+	IPA_RC_L2_VLAN_ID_NOT_ALLOWED	= 0x2050,
+	IPA_RC_VNICC_VNICBP		= 0x20B0,
+	IPA_RC_SBP_OSA_NOT_CONFIGURED	= 0x2B0C,
+	IPA_RC_SBP_OSA_OS_MISMATCH	= 0x2B10,
+	IPA_RC_SBP_OSA_ANO_DEV_PRIMARY	= 0x2B14,
+	IPA_RC_SBP_OSA_CURRENT_SECOND	= 0x2B18,
+	IPA_RC_SBP_OSA_LIMIT_SECOND	= 0x2B1C,
+	IPA_RC_SBP_OSA_NOT_AUTHD_BY_ZMAN = 0x2B20,
+	IPA_RC_SBP_OSA_CURRENT_PRIMARY	= 0x2B24,
+	IPA_RC_SBP_OSA_NO_QDIO_QUEUES	= 0x2BEB,
+	IPA_RC_DATA_MISMATCH		= 0xe001,
+	IPA_RC_INVALID_MTU_SIZE		= 0xe002,
+	IPA_RC_INVALID_LANTYPE		= 0xe003,
+	IPA_RC_INVALID_LANNUM		= 0xe004,
+	IPA_RC_DUPLICATE_IP_ADDRESS	= 0xe005,
+	IPA_RC_IP_ADDR_TABLE_FULL	= 0xe006,
+	IPA_RC_LAN_PORT_STATE_ERROR	= 0xe007,
+	IPA_RC_SETIP_NO_STARTLAN	= 0xe008,
+	IPA_RC_SETIP_ALREADY_RECEIVED	= 0xe009,
+	IPA_RC_IP_ADDR_ALREADY_USED	= 0xe00a,
+	IPA_RC_MC_ADDR_NOT_FOUND	= 0xe00b,
+	IPA_RC_SETIP_INVALID_VERSION	= 0xe00d,
+	IPA_RC_UNSUPPORTED_SUBCMD	= 0xe00e,
+	IPA_RC_ARP_ASSIST_NO_ENABLE	= 0xe00f,
+	IPA_RC_PRIMARY_ALREADY_DEFINED	= 0xe010,
+	IPA_RC_SECOND_ALREADY_DEFINED	= 0xe011,
+	IPA_RC_INVALID_SETRTG_INDICATOR	= 0xe012,
+	IPA_RC_MC_ADDR_ALREADY_DEFINED	= 0xe013,
+	IPA_RC_LAN_OFFLINE		= 0xe080,
+	IPA_RC_VEPA_TO_VEB_TRANSITION	= 0xe090,
+	IPA_RC_INVALID_IP_VERSION2	= 0xf001,
+	IPA_RC_ENOMEM			= 0xfffe,
+	IPA_RC_FFFF			= 0xffff
+};
+/* for VNIC Characteristics */
+#define IPA_RC_VNICC_OOSEQ 0x0005
+
+/* for SET_DIAGNOSTIC_ASSIST */
+#define IPA_RC_INVALID_SUBCMD		IPA_RC_IP_TABLE_FULL
+#define IPA_RC_HARDWARE_AUTH_ERROR	IPA_RC_UNKNOWN_ERROR
+
+/* for SETBRIDGEPORT (double occupancies) */
+#define IPA_RC_SBP_IQD_OS_MISMATCH	 IPA_RC_DUP_IPV6_HOME
+#define IPA_RC_SBP_IQD_NOT_AUTHD_BY_ZMAN IPA_RC_INVALID_IP_VERSION
+
+/* IPA function flags; each flag marks availability of respective function */
+enum qeth_ipa_funcs {
+	IPA_ARP_PROCESSING      = 0x00000001L,
+	IPA_INBOUND_CHECKSUM    = 0x00000002L,
+	IPA_OUTBOUND_CHECKSUM   = 0x00000004L,
+	/* RESERVED		= 0x00000008L,*/
+	IPA_FILTERING           = 0x00000010L,
+	IPA_IPV6                = 0x00000020L,
+	IPA_MULTICASTING        = 0x00000040L,
+	IPA_IP_REASSEMBLY       = 0x00000080L,
+	IPA_QUERY_ARP_COUNTERS  = 0x00000100L,
+	IPA_QUERY_ARP_ADDR_INFO = 0x00000200L,
+	IPA_SETADAPTERPARMS     = 0x00000400L,
+	IPA_VLAN_PRIO           = 0x00000800L,
+	IPA_PASSTHRU            = 0x00001000L,
+	IPA_FLUSH_ARP_SUPPORT   = 0x00002000L,
+	IPA_FULL_VLAN           = 0x00004000L,
+	IPA_INBOUND_PASSTHRU    = 0x00008000L,
+	IPA_SOURCE_MAC          = 0x00010000L,
+	IPA_OSA_MC_ROUTER       = 0x00020000L,
+	IPA_QUERY_ARP_ASSIST	= 0x00040000L,
+	IPA_INBOUND_TSO         = 0x00080000L,
+	IPA_OUTBOUND_TSO        = 0x00100000L,
+	IPA_INBOUND_CHECKSUM_V6 = 0x00400000L,
+	IPA_OUTBOUND_CHECKSUM_V6 = 0x00800000L,
+};
+
+/* SETIP/DELIP IPA Command: ***************************************************/
+enum qeth_ipa_setdelip_flags {
+	QETH_IPA_SETDELIP_DEFAULT          = 0x00L, /* default */
+	QETH_IPA_SETIP_VIPA_FLAG           = 0x01L, /* no grat. ARP */
+	QETH_IPA_SETIP_TAKEOVER_FLAG       = 0x02L, /* nofail on grat. ARP */
+	QETH_IPA_DELIP_ADDR_2_B_TAKEN_OVER = 0x20L,
+	QETH_IPA_DELIP_VIPA_FLAG           = 0x40L,
+	QETH_IPA_DELIP_ADDR_NEEDS_SETIP    = 0x80L,
+};
+
+/* SETADAPTER IPA Command: ****************************************************/
+enum qeth_ipa_setadp_cmd {
+	IPA_SETADP_QUERY_COMMANDS_SUPPORTED	= 0x00000001L,
+	IPA_SETADP_ALTER_MAC_ADDRESS		= 0x00000002L,
+	IPA_SETADP_ADD_DELETE_GROUP_ADDRESS	= 0x00000004L,
+	IPA_SETADP_ADD_DELETE_FUNCTIONAL_ADDR	= 0x00000008L,
+	IPA_SETADP_SET_ADDRESSING_MODE		= 0x00000010L,
+	IPA_SETADP_SET_CONFIG_PARMS		= 0x00000020L,
+	IPA_SETADP_SET_CONFIG_PARMS_EXTENDED	= 0x00000040L,
+	IPA_SETADP_SET_BROADCAST_MODE		= 0x00000080L,
+	IPA_SETADP_SEND_OSA_MESSAGE		= 0x00000100L,
+	IPA_SETADP_SET_SNMP_CONTROL		= 0x00000200L,
+	IPA_SETADP_QUERY_CARD_INFO		= 0x00000400L,
+	IPA_SETADP_SET_PROMISC_MODE		= 0x00000800L,
+	IPA_SETADP_SET_DIAG_ASSIST		= 0x00002000L,
+	IPA_SETADP_SET_ACCESS_CONTROL		= 0x00010000L,
+	IPA_SETADP_QUERY_OAT			= 0x00080000L,
+	IPA_SETADP_QUERY_SWITCH_ATTRIBUTES	= 0x00100000L,
+};
+enum qeth_ipa_mac_ops {
+	CHANGE_ADDR_READ_MAC		= 0,
+	CHANGE_ADDR_REPLACE_MAC		= 1,
+	CHANGE_ADDR_ADD_MAC		= 2,
+	CHANGE_ADDR_DEL_MAC		= 4,
+	CHANGE_ADDR_RESET_MAC		= 8,
+};
+enum qeth_ipa_addr_ops {
+	CHANGE_ADDR_READ_ADDR		= 0,
+	CHANGE_ADDR_ADD_ADDR		= 1,
+	CHANGE_ADDR_DEL_ADDR		= 2,
+	CHANGE_ADDR_FLUSH_ADDR_TABLE	= 4,
+};
+enum qeth_ipa_promisc_modes {
+	SET_PROMISC_MODE_OFF		= 0,
+	SET_PROMISC_MODE_ON		= 1,
+};
+enum qeth_ipa_isolation_modes {
+	ISOLATION_MODE_NONE		= 0x00000000L,
+	ISOLATION_MODE_FWD		= 0x00000001L,
+	ISOLATION_MODE_DROP		= 0x00000002L,
+};
+enum qeth_ipa_set_access_mode_rc {
+	SET_ACCESS_CTRL_RC_SUCCESS		= 0x0000,
+	SET_ACCESS_CTRL_RC_NOT_SUPPORTED	= 0x0004,
+	SET_ACCESS_CTRL_RC_ALREADY_NOT_ISOLATED	= 0x0008,
+	SET_ACCESS_CTRL_RC_ALREADY_ISOLATED	= 0x0010,
+	SET_ACCESS_CTRL_RC_NONE_SHARED_ADAPTER	= 0x0014,
+	SET_ACCESS_CTRL_RC_ACTIVE_CHECKSUM_OFF	= 0x0018,
+	SET_ACCESS_CTRL_RC_REFLREL_UNSUPPORTED	= 0x0022,
+	SET_ACCESS_CTRL_RC_REFLREL_FAILED	= 0x0024,
+	SET_ACCESS_CTRL_RC_REFLREL_DEACT_FAILED	= 0x0028,
+};
+enum qeth_card_info_card_type {
+	CARD_INFO_TYPE_1G_COPPER_A	= 0x61,
+	CARD_INFO_TYPE_1G_FIBRE_A	= 0x71,
+	CARD_INFO_TYPE_10G_FIBRE_A	= 0x91,
+	CARD_INFO_TYPE_1G_COPPER_B	= 0xb1,
+	CARD_INFO_TYPE_1G_FIBRE_B	= 0xa1,
+	CARD_INFO_TYPE_10G_FIBRE_B	= 0xc1,
+};
+enum qeth_card_info_port_mode {
+	CARD_INFO_PORTM_HALFDUPLEX	= 0x0002,
+	CARD_INFO_PORTM_FULLDUPLEX	= 0x0003,
+};
+enum qeth_card_info_port_speed {
+	CARD_INFO_PORTS_10M		= 0x00000005,
+	CARD_INFO_PORTS_100M		= 0x00000006,
+	CARD_INFO_PORTS_1G		= 0x00000007,
+	CARD_INFO_PORTS_10G		= 0x00000008,
+};
+
+/* (SET)DELIP(M) IPA stuff ***************************************************/
+struct qeth_ipacmd_setdelip4 {
+	__u8   ip_addr[4];
+	__u8   mask[4];
+	__u32  flags;
+} __attribute__ ((packed));
+
+struct qeth_ipacmd_setdelip6 {
+	__u8   ip_addr[16];
+	__u8   mask[16];
+	__u32  flags;
+} __attribute__ ((packed));
+
+struct qeth_ipacmd_setdelipm {
+	__u8 mac[6];
+	__u8 padding[2];
+	__u8 ip6[12];
+	__u8 ip4[4];
+} __attribute__ ((packed));
+
+struct qeth_ipacmd_layer2setdelmac {
+	__u32 mac_length;
+	__u8 mac[6];
+} __attribute__ ((packed));
+
+struct qeth_ipacmd_layer2setdelvlan {
+	__u16 vlan_id;
+} __attribute__ ((packed));
+
+
+struct qeth_ipacmd_setassparms_hdr {
+	__u32 assist_no;
+	__u16 length;
+	__u16 command_code;
+	__u16 return_code;
+	__u8 number_of_replies;
+	__u8 seq_no;
+} __attribute__((packed));
+
+struct qeth_arp_query_data {
+	__u16 request_bits;
+	__u16 reply_bits;
+	__u32 no_entries;
+	char data; /* only for replies */
+} __attribute__((packed));
+
+/* used as parameter for arp_query reply */
+struct qeth_arp_query_info {
+	__u32 udata_len;
+	__u16 mask_bits;
+	__u32 udata_offset;
+	__u32 no_entries;
+	char *udata;
+};
+
+/* IPA set assist segmentation bit definitions for receive and
+ * transmit checksum offloading.
+ */
+enum qeth_ipa_checksum_bits {
+	QETH_IPA_CHECKSUM_IP_HDR	= 0x0002,
+	QETH_IPA_CHECKSUM_UDP		= 0x0008,
+	QETH_IPA_CHECKSUM_TCP		= 0x0010,
+	QETH_IPA_CHECKSUM_LP2LP		= 0x0020
+};
+
+/* IPA Assist checksum offload reply layout. */
+struct qeth_checksum_cmd {
+	__u32 supported;
+	__u32 enabled;
+} __packed;
+
+/* SETASSPARMS IPA Command: */
+struct qeth_ipacmd_setassparms {
+	struct qeth_ipacmd_setassparms_hdr hdr;
+	union {
+		__u32 flags_32bit;
+		struct qeth_checksum_cmd chksum;
+		struct qeth_arp_cache_entry add_arp_entry;
+		struct qeth_arp_query_data query_arp;
+		__u8 ip[16];
+	} data;
+} __attribute__ ((packed));
+
+
+/* SETRTG IPA Command:    ****************************************************/
+struct qeth_set_routing {
+	__u8 type;
+};
+
+/* SETADAPTERPARMS IPA Command:    *******************************************/
+struct qeth_query_cmds_supp {
+	__u32 no_lantypes_supp;
+	__u8 lan_type;
+	__u8 reserved1[3];
+	__u32 supported_cmds;
+	__u8 reserved2[8];
+} __attribute__ ((packed));
+
+struct qeth_change_addr {
+	u32 cmd;
+	u32 addr_size;
+	u32 no_macs;
+	u8 addr[ETH_ALEN];
+};
+
+struct qeth_snmp_cmd {
+	__u8  token[16];
+	__u32 request;
+	__u32 interface;
+	__u32 returncode;
+	__u32 firmwarelevel;
+	__u32 seqno;
+	__u8  data;
+} __attribute__ ((packed));
+
+struct qeth_snmp_ureq_hdr {
+	__u32   data_len;
+	__u32   req_len;
+	__u32   reserved1;
+	__u32   reserved2;
+} __attribute__ ((packed));
+
+struct qeth_snmp_ureq {
+	struct qeth_snmp_ureq_hdr hdr;
+	struct qeth_snmp_cmd cmd;
+} __attribute__((packed));
+
+/* SET_ACCESS_CONTROL: same format for request and reply */
+struct qeth_set_access_ctrl {
+	__u32 subcmd_code;
+	__u8 reserved[8];
+} __attribute__((packed));
+
+struct qeth_query_oat {
+	__u32 subcmd_code;
+	__u8 reserved[12];
+} __packed;
+
+struct qeth_qoat_priv {
+	__u32 buffer_len;
+	__u32 response_len;
+	char *buffer;
+};
+
+struct qeth_query_card_info {
+	__u8	card_type;
+	__u8	reserved1;
+	__u16	port_mode;
+	__u32	port_speed;
+	__u32	reserved2;
+};
+
+#define QETH_SWITCH_FORW_802_1		0x00000001
+#define QETH_SWITCH_FORW_REFL_RELAY	0x00000002
+#define QETH_SWITCH_CAP_RTE		0x00000004
+#define QETH_SWITCH_CAP_ECP		0x00000008
+#define QETH_SWITCH_CAP_VDP		0x00000010
+
+struct qeth_query_switch_attributes {
+	__u8  version;
+	__u8  reserved1;
+	__u16 reserved2;
+	__u32 capabilities;
+	__u32 settings;
+	__u8  reserved3[8];
+};
+
+struct qeth_ipacmd_setadpparms_hdr {
+	__u32 supp_hw_cmds;
+	__u32 reserved1;
+	__u16 cmdlength;
+	__u16 reserved2;
+	__u32 command_code;
+	__u16 return_code;
+	__u8  used_total;
+	__u8  seq_no;
+	__u32 reserved3;
+} __attribute__ ((packed));
+
+struct qeth_ipacmd_setadpparms {
+	struct qeth_ipacmd_setadpparms_hdr hdr;
+	union {
+		struct qeth_query_cmds_supp query_cmds_supp;
+		struct qeth_change_addr change_addr;
+		struct qeth_snmp_cmd snmp;
+		struct qeth_set_access_ctrl set_access_ctrl;
+		struct qeth_query_oat query_oat;
+		struct qeth_query_card_info card_info;
+		struct qeth_query_switch_attributes query_switch_attributes;
+		__u32 mode;
+	} data;
+} __attribute__ ((packed));
+
+/* CREATE_ADDR IPA Command:    ***********************************************/
+struct qeth_create_destroy_address {
+	__u8 unique_id[8];
+} __attribute__ ((packed));
+
+/* SET DIAGNOSTIC ASSIST IPA Command:	 *************************************/
+
+enum qeth_diags_cmds {
+	QETH_DIAGS_CMD_QUERY	= 0x0001,
+	QETH_DIAGS_CMD_TRAP	= 0x0002,
+	QETH_DIAGS_CMD_TRACE	= 0x0004,
+	QETH_DIAGS_CMD_NOLOG	= 0x0008,
+	QETH_DIAGS_CMD_DUMP	= 0x0010,
+};
+
+enum qeth_diags_trace_types {
+	QETH_DIAGS_TYPE_HIPERSOCKET	= 0x02,
+};
+
+enum qeth_diags_trace_cmds {
+	QETH_DIAGS_CMD_TRACE_ENABLE	= 0x0001,
+	QETH_DIAGS_CMD_TRACE_DISABLE	= 0x0002,
+	QETH_DIAGS_CMD_TRACE_MODIFY	= 0x0004,
+	QETH_DIAGS_CMD_TRACE_REPLACE	= 0x0008,
+	QETH_DIAGS_CMD_TRACE_QUERY	= 0x0010,
+};
+
+enum qeth_diags_trap_action {
+	QETH_DIAGS_TRAP_ARM	= 0x01,
+	QETH_DIAGS_TRAP_DISARM	= 0x02,
+	QETH_DIAGS_TRAP_CAPTURE = 0x04,
+};
+
+struct qeth_ipacmd_diagass {
+	__u32  host_tod2;
+	__u32:32;
+	__u16  subcmd_len;
+	__u16:16;
+	__u32  subcmd;
+	__u8   type;
+	__u8   action;
+	__u16  options;
+	__u32  ext;
+	__u8   cdata[64];
+} __attribute__ ((packed));
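+
+/* Note: the unnamed bit-fields above (__u32:32, __u16:16) are pure padding;
+ * C does not allow unnamed bit-field members to be referenced, so they only
+ * reserve space in the on-wire layout.
+ */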
+
+/* VNIC Characteristics IPA Command: *****************************************/
+/* IPA commands/sub commands for VNICC */
+#define IPA_VNICC_QUERY_CHARS		0x00000000L
+#define IPA_VNICC_QUERY_CMDS		0x00000001L
+#define IPA_VNICC_ENABLE		0x00000002L
+#define IPA_VNICC_DISABLE		0x00000004L
+#define IPA_VNICC_SET_TIMEOUT		0x00000008L
+#define IPA_VNICC_GET_TIMEOUT		0x00000010L
+
+/* VNICC flags */
+#define QETH_VNICC_FLOODING		0x80000000
+#define QETH_VNICC_MCAST_FLOODING	0x40000000
+#define QETH_VNICC_LEARNING		0x20000000
+#define QETH_VNICC_TAKEOVER_SETVMAC	0x10000000
+#define QETH_VNICC_TAKEOVER_LEARNING	0x08000000
+#define QETH_VNICC_BRIDGE_INVISIBLE	0x04000000
+#define QETH_VNICC_RX_BCAST		0x02000000
+
+/* VNICC default values */
+#define QETH_VNICC_ALL			0xff000000
+#define QETH_VNICC_DEFAULT		QETH_VNICC_RX_BCAST
+/* default VNICC timeout in seconds */
+#define QETH_VNICC_DEFAULT_TIMEOUT	600
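+
+/* Note: all seven characteristic flags above live in the top byte of the
+ * word, which is why QETH_VNICC_ALL (0xff000000) covers every defined
+ * characteristic at once.
+ */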
+
+/* VNICC header */
+struct qeth_ipacmd_vnicc_hdr {
+	u32 sup;
+	u32 cur;
+};
+
+/* VNICC sub command header */
+struct qeth_vnicc_sub_hdr {
+	u16 data_length;
+	u16 reserved;
+	u32 sub_command;
+};
+
+/* query supported commands for VNIC characteristic */
+struct qeth_vnicc_query_cmds {
+	u32 vnic_char;
+	u32 sup_cmds;
+};
+
+/* enable/disable VNIC characteristic */
+struct qeth_vnicc_set_char {
+	u32 vnic_char;
+};
+
+/* get/set timeout for VNIC characteristic */
+struct qeth_vnicc_getset_timeout {
+	u32 vnic_char;
+	u32 timeout;
+};
+
+/* complete VNICC IPA command message */
+struct qeth_ipacmd_vnicc {
+	struct qeth_ipacmd_vnicc_hdr hdr;
+	struct qeth_vnicc_sub_hdr sub_hdr;
+	union {
+		struct qeth_vnicc_query_cmds query_cmds;
+		struct qeth_vnicc_set_char set_char;
+		struct qeth_vnicc_getset_timeout getset_timeout;
+	};
+};
+
+/* SETBRIDGEPORT IPA Command:	 *********************************************/
+enum qeth_ipa_sbp_cmd {
+	IPA_SBP_QUERY_COMMANDS_SUPPORTED	= 0x00000000L,
+	IPA_SBP_RESET_BRIDGE_PORT_ROLE		= 0x00000001L,
+	IPA_SBP_SET_PRIMARY_BRIDGE_PORT		= 0x00000002L,
+	IPA_SBP_SET_SECONDARY_BRIDGE_PORT	= 0x00000004L,
+	IPA_SBP_QUERY_BRIDGE_PORTS		= 0x00000008L,
+	IPA_SBP_BRIDGE_PORT_STATE_CHANGE	= 0x00000010L,
+};
+
+struct net_if_token {
+	__u16 devnum;
+	__u8 cssid;
+	__u8 iid;
+	__u8 ssid;
+	__u8 chpid;
+	__u16 chid;
+} __packed;
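+
+/* Hedged reading of the token layout: devnum/cssid/ssid (plus iid) name the
+ * device's subchannel coordinates and chpid/chid its channel path, so a
+ * token uniquely identifies one network interface in later notifications.
+ */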
+
+struct mac_addr_lnid {
+	__u8 mac[6];
+	__u16 lnid;
+} __packed;
+
+struct qeth_ipacmd_sbp_hdr {
+	__u32 supported_sbp_cmds;
+	__u32 enabled_sbp_cmds;
+	__u16 cmdlength;
+	__u16 reserved1;
+	__u32 command_code;
+	__u16 return_code;
+	__u8  used_total;
+	__u8  seq_no;
+	__u32 reserved2;
+} __packed;
+
+struct qeth_sbp_query_cmds_supp {
+	__u32 supported_cmds;
+	__u32 reserved;
+} __packed;
+
+struct qeth_sbp_reset_role {
+} __packed;
+
+struct qeth_sbp_set_primary {
+	struct net_if_token token;
+} __packed;
+
+struct qeth_sbp_set_secondary {
+} __packed;
+
+struct qeth_sbp_port_entry {
+		__u8 role;
+		__u8 state;
+		__u8 reserved1;
+		__u8 reserved2;
+		struct net_if_token token;
+} __packed;
+
+struct qeth_sbp_query_ports {
+	__u8 primary_bp_supported;
+	__u8 secondary_bp_supported;
+	__u8 num_entries;
+	__u8 entry_length;
+	struct qeth_sbp_port_entry entry[];
+} __packed;
+
+struct qeth_sbp_state_change {
+	__u8 primary_bp_supported;
+	__u8 secondary_bp_supported;
+	__u8 num_entries;
+	__u8 entry_length;
+	struct qeth_sbp_port_entry entry[];
+} __packed;
+
+struct qeth_ipacmd_setbridgeport {
+	struct qeth_ipacmd_sbp_hdr hdr;
+	union {
+		struct qeth_sbp_query_cmds_supp query_cmds_supp;
+		struct qeth_sbp_reset_role reset_role;
+		struct qeth_sbp_set_primary set_primary;
+		struct qeth_sbp_set_secondary set_secondary;
+		struct qeth_sbp_query_ports query_ports;
+		struct qeth_sbp_state_change state_change;
+	} data;
+} __packed;
+
+/* ADDRESS_CHANGE_NOTIFICATION adapter-initiated "command" *******************/
+/* Bitmask for entry->change_code; the VLANID and MACADDR bits may both
+ * be set in one entry.
+ */
+enum qeth_ipa_addr_change_code {
+	IPA_ADDR_CHANGE_CODE_VLANID		= 0x01,
+	IPA_ADDR_CHANGE_CODE_MACADDR		= 0x02,
+	IPA_ADDR_CHANGE_CODE_REMOVAL		= 0x80,	/* else addition */
+};
+
+struct qeth_ipacmd_addr_change_entry {
+	struct net_if_token token;
+	struct mac_addr_lnid addr_lnid;
+	__u8 change_code;
+	__u8 reserved1;
+	__u16 reserved2;
+} __packed;
+
+struct qeth_ipacmd_addr_change {
+	__u8 lost_event_mask;
+	__u8 reserved;
+	__u16 num_entries;
+	struct qeth_ipacmd_addr_change_entry entry[];
+} __packed;
+
+/* Header for each IPA command */
+struct qeth_ipacmd_hdr {
+	__u8   command;
+	__u8   initiator;
+	__u16  seqno;
+	__u16  return_code;
+	__u8   adapter_type;
+	__u8   rel_adapter_no;
+	__u8   prim_version_no;
+	__u8   param_count;
+	__u16  prot_version;
+	__u32  ipa_supported;
+	__u32  ipa_enabled;
+} __attribute__ ((packed));
+
+/* The IPA command itself */
+struct qeth_ipa_cmd {
+	struct qeth_ipacmd_hdr hdr;
+	union {
+		struct qeth_ipacmd_setdelip4		setdelip4;
+		struct qeth_ipacmd_setdelip6		setdelip6;
+		struct qeth_ipacmd_setdelipm		setdelipm;
+		struct qeth_ipacmd_setassparms		setassparms;
+		struct qeth_ipacmd_layer2setdelmac	setdelmac;
+		struct qeth_ipacmd_layer2setdelvlan	setdelvlan;
+		struct qeth_create_destroy_address	create_destroy_addr;
+		struct qeth_ipacmd_setadpparms		setadapterparms;
+		struct qeth_set_routing			setrtg;
+		struct qeth_ipacmd_diagass		diagass;
+		struct qeth_ipacmd_setbridgeport	sbp;
+		struct qeth_ipacmd_addr_change		addrchange;
+		struct qeth_ipacmd_vnicc		vnicc;
+	} data;
+} __attribute__ ((packed));
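+
+/* Illustrative sketch (not part of the driver): callers map a command
+ * buffer onto struct qeth_ipa_cmd and fill the union member matching the
+ * command code -- the same pattern qeth_l2_send_setdelmac() uses further
+ * down in this patch:
+ *
+ *	struct qeth_cmd_buffer *iob;
+ *	struct qeth_ipa_cmd *cmd;
+ *
+ *	iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SETVMAC, QETH_PROT_IPV4);
+ *	if (!iob)
+ *		return -ENOMEM;
+ *	cmd = __ipa_cmd(iob);
+ *	cmd->data.setdelmac.mac_length = ETH_ALEN;
+ *	ether_addr_copy(cmd->data.setdelmac.mac, mac);
+ *	return qeth_send_ipa_cmd(card, iob, NULL, NULL);
+ */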
+
+/*
+ * special command for ARP processing.
+ * it is not included in the setassparms command above, because that
+ * would otherwise inflate the size of struct qeth_ipacmd_setassparms.
+ */
+enum qeth_ipa_arp_return_codes {
+	QETH_IPA_ARP_RC_SUCCESS      = 0x0000,
+	QETH_IPA_ARP_RC_FAILED       = 0x0001,
+	QETH_IPA_ARP_RC_NOTSUPP      = 0x0002,
+	QETH_IPA_ARP_RC_OUT_OF_RANGE = 0x0003,
+	QETH_IPA_ARP_RC_Q_NOTSUPP    = 0x0004,
+	QETH_IPA_ARP_RC_Q_NO_DATA    = 0x0008,
+};
+
+extern const char *qeth_get_ipa_msg(enum qeth_ipa_return_codes rc);
+extern const char *qeth_get_ipa_cmd_name(enum qeth_ipa_cmds cmd);
+
+#define QETH_SETASS_BASE_LEN (sizeof(struct qeth_ipacmd_hdr) + \
+			       sizeof(struct qeth_ipacmd_setassparms_hdr))
+#define QETH_IPA_ARP_DATA_POS(buffer) (buffer + IPA_PDU_HEADER_SIZE + \
+				       QETH_SETASS_BASE_LEN)
+#define QETH_SETADP_BASE_LEN (sizeof(struct qeth_ipacmd_hdr) + \
+			      sizeof(struct qeth_ipacmd_setadpparms_hdr))
+#define QETH_SNMP_SETADP_CMDLENGTH 16
+
+#define QETH_ARP_DATA_SIZE 3968
+#define QETH_ARP_CMD_LEN (QETH_ARP_DATA_SIZE + 8)
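+/* Note: the extra 8 bytes in QETH_ARP_CMD_LEN appear to match the fixed
+ * request_bits, reply_bits and no_entries members of
+ * struct qeth_arp_query_data that precede the variable-length reply data.
+ */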
+/* Helper functions */
+#define IS_IPA_REPLY(cmd) ((cmd->hdr.initiator == IPA_CMD_INITIATOR_HOST) || \
+			   (cmd->hdr.initiator == IPA_CMD_INITIATOR_OSA_REPLY))
+
+/*****************************************************************************/
+/* END OF   IP Assist related definitions                                    */
+/*****************************************************************************/
+
+extern unsigned char CM_ENABLE[];
+#define CM_ENABLE_SIZE 0x63
+#define QETH_CM_ENABLE_ISSUER_RM_TOKEN(buffer) (buffer + 0x2c)
+#define QETH_CM_ENABLE_FILTER_TOKEN(buffer) (buffer + 0x53)
+#define QETH_CM_ENABLE_USER_DATA(buffer) (buffer + 0x5b)
+
+#define QETH_CM_ENABLE_RESP_FILTER_TOKEN(buffer) \
+		(PDU_ENCAPSULATION(buffer) + 0x13)
+
+
+extern unsigned char CM_SETUP[];
+#define CM_SETUP_SIZE 0x64
+#define QETH_CM_SETUP_DEST_ADDR(buffer) (buffer + 0x2c)
+#define QETH_CM_SETUP_CONNECTION_TOKEN(buffer) (buffer + 0x51)
+#define QETH_CM_SETUP_FILTER_TOKEN(buffer) (buffer + 0x5a)
+
+#define QETH_CM_SETUP_RESP_DEST_ADDR(buffer) \
+		(PDU_ENCAPSULATION(buffer) + 0x1a)
+
+extern unsigned char ULP_ENABLE[];
+#define ULP_ENABLE_SIZE 0x6b
+#define QETH_ULP_ENABLE_LINKNUM(buffer) (buffer + 0x61)
+#define QETH_ULP_ENABLE_DEST_ADDR(buffer) (buffer + 0x2c)
+#define QETH_ULP_ENABLE_FILTER_TOKEN(buffer) (buffer + 0x53)
+#define QETH_ULP_ENABLE_PORTNAME_AND_LL(buffer) (buffer + 0x62)
+#define QETH_ULP_ENABLE_RESP_FILTER_TOKEN(buffer) \
+		(PDU_ENCAPSULATION(buffer) + 0x13)
+#define QETH_ULP_ENABLE_RESP_MAX_MTU(buffer) \
+		(PDU_ENCAPSULATION(buffer) + 0x1f)
+#define QETH_ULP_ENABLE_RESP_DIFINFO_LEN(buffer) \
+		(PDU_ENCAPSULATION(buffer) + 0x17)
+#define QETH_ULP_ENABLE_RESP_LINK_TYPE(buffer) \
+		(PDU_ENCAPSULATION(buffer) + 0x2b)
+/* Layer 2 definitions */
+#define QETH_PROT_LAYER2 0x08
+#define QETH_PROT_TCPIP  0x03
+#define QETH_PROT_OSN2   0x0a
+#define QETH_ULP_ENABLE_PROT_TYPE(buffer) (buffer + 0x50)
+#define QETH_IPA_CMD_PROT_TYPE(buffer) (buffer + 0x19)
+
+extern unsigned char ULP_SETUP[];
+#define ULP_SETUP_SIZE 0x6c
+#define QETH_ULP_SETUP_DEST_ADDR(buffer) (buffer + 0x2c)
+#define QETH_ULP_SETUP_CONNECTION_TOKEN(buffer) (buffer + 0x51)
+#define QETH_ULP_SETUP_FILTER_TOKEN(buffer) (buffer + 0x5a)
+#define QETH_ULP_SETUP_CUA(buffer) (buffer + 0x68)
+#define QETH_ULP_SETUP_REAL_DEVADDR(buffer) (buffer + 0x6a)
+
+#define QETH_ULP_SETUP_RESP_CONNECTION_TOKEN(buffer) \
+		(PDU_ENCAPSULATION(buffer) + 0x1a)
+
+
+extern unsigned char DM_ACT[];
+#define DM_ACT_SIZE 0x55
+#define QETH_DM_ACT_DEST_ADDR(buffer) (buffer + 0x2c)
+#define QETH_DM_ACT_CONNECTION_TOKEN(buffer) (buffer + 0x51)
+
+#define QETH_TRANSPORT_HEADER_SEQ_NO(buffer) (buffer + 4)
+#define QETH_PDU_HEADER_SEQ_NO(buffer) (buffer + 0x1c)
+#define QETH_PDU_HEADER_ACK_SEQ_NO(buffer) (buffer + 0x20)
+
+extern unsigned char IDX_ACTIVATE_READ[];
+extern unsigned char IDX_ACTIVATE_WRITE[];
+
+#define IDX_ACTIVATE_SIZE	0x22
+#define QETH_IDX_ACT_PNO(buffer) (buffer+0x0b)
+#define QETH_IDX_ACT_ISSUER_RM_TOKEN(buffer) (buffer + 0x0c)
+#define QETH_IDX_NO_PORTNAME_REQUIRED(buffer) ((buffer)[0x0b] & 0x80)
+#define QETH_IDX_ACT_FUNC_LEVEL(buffer) (buffer + 0x10)
+#define QETH_IDX_ACT_DATASET_NAME(buffer) (buffer + 0x16)
+#define QETH_IDX_ACT_QDIO_DEV_CUA(buffer) (buffer + 0x1e)
+#define QETH_IDX_ACT_QDIO_DEV_REALADDR(buffer) (buffer + 0x20)
+#define QETH_IS_IDX_ACT_POS_REPLY(buffer) (((buffer)[0x08] & 3) == 2)
+#define QETH_IDX_REPLY_LEVEL(buffer) (buffer + 0x12)
+#define QETH_IDX_ACT_CAUSE_CODE(buffer) (buffer)[0x09]
+#define QETH_IDX_ACT_ERR_EXCL		0x19
+#define QETH_IDX_ACT_ERR_AUTH		0x1E
+#define QETH_IDX_ACT_ERR_AUTH_USER	0x20
+
+#define PDU_ENCAPSULATION(buffer) \
+	(buffer + *(buffer + (*(buffer + 0x0b)) + \
+	 *(buffer + *(buffer + 0x0b) + 0x11) + 0x07))
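+
+/* Hedged reading of the macro above, inferred purely from the arithmetic:
+ * buffer[0x0b] and buffer[len1 + 0x11] look like two sub-header lengths,
+ * and the byte they point at holds the offset of the encapsulated PDU:
+ *
+ *	static inline unsigned char *pdu_encapsulation(unsigned char *b)
+ *	{
+ *		unsigned char len1 = b[0x0b];
+ *		unsigned char len2 = b[len1 + 0x11];
+ *
+ *		return b + b[len1 + len2 + 0x07];
+ *	}
+ */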
+
+#define IS_IPA(buffer) \
+	((buffer) && \
+	 (*(buffer + ((*(buffer + 0x0b)) + 4)) == 0xc1))
+
+#endif
diff --git a/drivers/s390/net/qeth_core_sys.c b/drivers/s390/net/qeth_core_sys.c
new file mode 100644
index 0000000..25d0be2
--- /dev/null
+++ b/drivers/s390/net/qeth_core_sys.c
@@ -0,0 +1,773 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ *    Copyright IBM Corp. 2007
+ *    Author(s): Utz Bacher <utz.bacher@de.ibm.com>,
+ *		 Frank Pavlic <fpavlic@de.ibm.com>,
+ *		 Thomas Spatzier <tspat@de.ibm.com>,
+ *		 Frank Blaschka <frank.blaschka@de.ibm.com>
+ */
+
+#define KMSG_COMPONENT "qeth"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <linux/list.h>
+#include <linux/rwsem.h>
+#include <asm/ebcdic.h>
+
+#include "qeth_core.h"
+
+static ssize_t qeth_dev_state_show(struct device *dev,
+				struct device_attribute *attr, char *buf)
+{
+	struct qeth_card *card = dev_get_drvdata(dev);
+	if (!card)
+		return -EINVAL;
+
+	switch (card->state) {
+	case CARD_STATE_DOWN:
+		return sprintf(buf, "DOWN\n");
+	case CARD_STATE_HARDSETUP:
+		return sprintf(buf, "HARDSETUP\n");
+	case CARD_STATE_SOFTSETUP:
+		return sprintf(buf, "SOFTSETUP\n");
+	case CARD_STATE_UP:
+		if (card->lan_online)
+		return sprintf(buf, "UP (LAN ONLINE)\n");
+		else
+			return sprintf(buf, "UP (LAN OFFLINE)\n");
+	case CARD_STATE_RECOVER:
+		return sprintf(buf, "RECOVER\n");
+	default:
+		return sprintf(buf, "UNKNOWN\n");
+	}
+}
+
+static DEVICE_ATTR(state, 0444, qeth_dev_state_show, NULL);
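+
+/* Hedged usage sketch: qeth attributes like this one surface on the
+ * ccwgroup device in sysfs, so assuming a device bus-id of 0.0.f500:
+ *
+ *	$ cat /sys/bus/ccwgroup/devices/0.0.f500/state
+ *	UP (LAN ONLINE)
+ */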
+
+static ssize_t qeth_dev_chpid_show(struct device *dev,
+				struct device_attribute *attr, char *buf)
+{
+	struct qeth_card *card = dev_get_drvdata(dev);
+	if (!card)
+		return -EINVAL;
+
+	return sprintf(buf, "%02X\n", card->info.chpid);
+}
+
+static DEVICE_ATTR(chpid, 0444, qeth_dev_chpid_show, NULL);
+
+static ssize_t qeth_dev_if_name_show(struct device *dev,
+				struct device_attribute *attr, char *buf)
+{
+	struct qeth_card *card = dev_get_drvdata(dev);
+	if (!card)
+		return -EINVAL;
+	return sprintf(buf, "%s\n", QETH_CARD_IFNAME(card));
+}
+
+static DEVICE_ATTR(if_name, 0444, qeth_dev_if_name_show, NULL);
+
+static ssize_t qeth_dev_card_type_show(struct device *dev,
+				struct device_attribute *attr, char *buf)
+{
+	struct qeth_card *card = dev_get_drvdata(dev);
+	if (!card)
+		return -EINVAL;
+
+	return sprintf(buf, "%s\n", qeth_get_cardname_short(card));
+}
+
+static DEVICE_ATTR(card_type, 0444, qeth_dev_card_type_show, NULL);
+
+static const char *qeth_get_bufsize_str(struct qeth_card *card)
+{
+	if (card->qdio.in_buf_size == 16384)
+		return "16k";
+	else if (card->qdio.in_buf_size == 24576)
+		return "24k";
+	else if (card->qdio.in_buf_size == 32768)
+		return "32k";
+	else if (card->qdio.in_buf_size == 40960)
+		return "40k";
+	else
+		return "64k";
+}
+
+static ssize_t qeth_dev_inbuf_size_show(struct device *dev,
+				struct device_attribute *attr, char *buf)
+{
+	struct qeth_card *card = dev_get_drvdata(dev);
+	if (!card)
+		return -EINVAL;
+
+	return sprintf(buf, "%s\n", qeth_get_bufsize_str(card));
+}
+
+static DEVICE_ATTR(inbuf_size, 0444, qeth_dev_inbuf_size_show, NULL);
+
+static ssize_t qeth_dev_portno_show(struct device *dev,
+			struct device_attribute *attr, char *buf)
+{
+	struct qeth_card *card = dev_get_drvdata(dev);
+	if (!card)
+		return -EINVAL;
+
+	return sprintf(buf, "%i\n", card->dev->dev_port);
+}
+
+static ssize_t qeth_dev_portno_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct qeth_card *card = dev_get_drvdata(dev);
+	char *tmp;
+	unsigned int portno, limit;
+	int rc = 0;
+
+	if (!card)
+		return -EINVAL;
+
+	mutex_lock(&card->conf_mutex);
+	if ((card->state != CARD_STATE_DOWN) &&
+	    (card->state != CARD_STATE_RECOVER)) {
+		rc = -EPERM;
+		goto out;
+	}
+
+	portno = simple_strtoul(buf, &tmp, 16);
+	if (portno > QETH_MAX_PORTNO) {
+		rc = -EINVAL;
+		goto out;
+	}
+	limit = (card->ssqd.pcnt ? card->ssqd.pcnt - 1 : card->ssqd.pcnt);
+	if (portno > limit) {
+		rc = -EINVAL;
+		goto out;
+	}
+	card->dev->dev_port = portno;
+out:
+	mutex_unlock(&card->conf_mutex);
+	return rc ? rc : count;
+}
+
+static DEVICE_ATTR(portno, 0644, qeth_dev_portno_show, qeth_dev_portno_store);
+
+static ssize_t qeth_dev_portname_show(struct device *dev,
+				struct device_attribute *attr, char *buf)
+{
+	return sprintf(buf, "no portname required\n");
+}
+
+static ssize_t qeth_dev_portname_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct qeth_card *card = dev_get_drvdata(dev);
+
+	dev_warn_once(&card->gdev->dev,
+		      "portname is deprecated and is ignored\n");
+	return count;
+}
+
+static DEVICE_ATTR(portname, 0644, qeth_dev_portname_show,
+		qeth_dev_portname_store);
+
+static ssize_t qeth_dev_prioqing_show(struct device *dev,
+				struct device_attribute *attr, char *buf)
+{
+	struct qeth_card *card = dev_get_drvdata(dev);
+
+	if (!card)
+		return -EINVAL;
+
+	switch (card->qdio.do_prio_queueing) {
+	case QETH_PRIO_Q_ING_PREC:
+		return sprintf(buf, "%s\n", "by precedence");
+	case QETH_PRIO_Q_ING_TOS:
+		return sprintf(buf, "%s\n", "by type of service");
+	case QETH_PRIO_Q_ING_SKB:
+		return sprintf(buf, "%s\n", "by skb-priority");
+	case QETH_PRIO_Q_ING_VLAN:
+		return sprintf(buf, "%s\n", "by VLAN headers");
+	default:
+		return sprintf(buf, "always queue %i\n",
+			       card->qdio.default_out_queue);
+	}
+}
+
+static ssize_t qeth_dev_prioqing_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct qeth_card *card = dev_get_drvdata(dev);
+	int rc = 0;
+
+	if (!card)
+		return -EINVAL;
+
+	mutex_lock(&card->conf_mutex);
+	if ((card->state != CARD_STATE_DOWN) &&
+	    (card->state != CARD_STATE_RECOVER)) {
+		rc = -EPERM;
+		goto out;
+	}
+
+	/* a device that provides only one outbound queue (e.g. a 1920
+	 * device) cannot do priority queueing, so refuse to enable it
+	 */
+	if (card->qdio.no_out_queues == 1) {
+		card->qdio.do_prio_queueing = QETH_PRIOQ_DEFAULT;
+		rc = -EPERM;
+		goto out;
+	}
+
+	if (sysfs_streq(buf, "prio_queueing_prec")) {
+		card->qdio.do_prio_queueing = QETH_PRIO_Q_ING_PREC;
+		card->qdio.default_out_queue = QETH_DEFAULT_QUEUE;
+	} else if (sysfs_streq(buf, "prio_queueing_skb")) {
+		card->qdio.do_prio_queueing = QETH_PRIO_Q_ING_SKB;
+		card->qdio.default_out_queue = QETH_DEFAULT_QUEUE;
+	} else if (sysfs_streq(buf, "prio_queueing_tos")) {
+		card->qdio.do_prio_queueing = QETH_PRIO_Q_ING_TOS;
+		card->qdio.default_out_queue = QETH_DEFAULT_QUEUE;
+	} else if (sysfs_streq(buf, "prio_queueing_vlan")) {
+		if (!card->options.layer2) {
+			rc = -ENOTSUPP;
+			goto out;
+		}
+		card->qdio.do_prio_queueing = QETH_PRIO_Q_ING_VLAN;
+		card->qdio.default_out_queue = QETH_DEFAULT_QUEUE;
+	} else if (sysfs_streq(buf, "no_prio_queueing:0")) {
+		card->qdio.do_prio_queueing = QETH_NO_PRIO_QUEUEING;
+		card->qdio.default_out_queue = 0;
+	} else if (sysfs_streq(buf, "no_prio_queueing:1")) {
+		card->qdio.do_prio_queueing = QETH_NO_PRIO_QUEUEING;
+		card->qdio.default_out_queue = 1;
+	} else if (sysfs_streq(buf, "no_prio_queueing:2")) {
+		card->qdio.do_prio_queueing = QETH_NO_PRIO_QUEUEING;
+		card->qdio.default_out_queue = 2;
+	} else if (sysfs_streq(buf, "no_prio_queueing:3")) {
+		if (card->info.type == QETH_CARD_TYPE_IQD) {
+			rc = -EPERM;
+			goto out;
+		}
+		card->qdio.do_prio_queueing = QETH_NO_PRIO_QUEUEING;
+		card->qdio.default_out_queue = 3;
+	} else if (sysfs_streq(buf, "no_prio_queueing")) {
+		card->qdio.do_prio_queueing = QETH_NO_PRIO_QUEUEING;
+		card->qdio.default_out_queue = QETH_DEFAULT_QUEUE;
+	} else
+		rc = -EINVAL;
+out:
+	mutex_unlock(&card->conf_mutex);
+	return rc ? rc : count;
+}
+
+static DEVICE_ATTR(priority_queueing, 0644, qeth_dev_prioqing_show,
+		qeth_dev_prioqing_store);
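+
+/* Hedged usage sketch (the card must be DOWN or RECOVER, see above); the
+ * accepted tokens are exactly those parsed by the store function:
+ *
+ *	$ echo prio_queueing_tos > /sys/bus/ccwgroup/devices/0.0.f500/priority_queueing
+ *	$ cat /sys/bus/ccwgroup/devices/0.0.f500/priority_queueing
+ *	by type of service
+ */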
+
+static ssize_t qeth_dev_bufcnt_show(struct device *dev,
+				struct device_attribute *attr, char *buf)
+{
+	struct qeth_card *card = dev_get_drvdata(dev);
+
+	if (!card)
+		return -EINVAL;
+
+	return sprintf(buf, "%i\n", card->qdio.in_buf_pool.buf_count);
+}
+
+static ssize_t qeth_dev_bufcnt_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct qeth_card *card = dev_get_drvdata(dev);
+	char *tmp;
+	int cnt, old_cnt;
+	int rc = 0;
+
+	if (!card)
+		return -EINVAL;
+
+	mutex_lock(&card->conf_mutex);
+	if ((card->state != CARD_STATE_DOWN) &&
+	    (card->state != CARD_STATE_RECOVER)) {
+		rc = -EPERM;
+		goto out;
+	}
+
+	old_cnt = card->qdio.in_buf_pool.buf_count;
+	cnt = simple_strtoul(buf, &tmp, 10);
+	cnt = (cnt < QETH_IN_BUF_COUNT_MIN) ? QETH_IN_BUF_COUNT_MIN :
+		((cnt > QETH_IN_BUF_COUNT_MAX) ? QETH_IN_BUF_COUNT_MAX : cnt);
+	if (old_cnt != cnt)
+		rc = qeth_realloc_buffer_pool(card, cnt);
+out:
+	mutex_unlock(&card->conf_mutex);
+	return rc ? rc : count;
+}
+
+static DEVICE_ATTR(buffer_count, 0644, qeth_dev_bufcnt_show,
+		qeth_dev_bufcnt_store);
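+
+/* Note: writes are silently clamped rather than rejected -- a value below
+ * QETH_IN_BUF_COUNT_MIN or above QETH_IN_BUF_COUNT_MAX reallocates the
+ * pool at the respective limit.
+ */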
+
+static ssize_t qeth_dev_recover_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct qeth_card *card = dev_get_drvdata(dev);
+	char *tmp;
+	int i;
+
+	if (!card)
+		return -EINVAL;
+
+	if (card->state != CARD_STATE_UP)
+		return -EPERM;
+
+	i = simple_strtoul(buf, &tmp, 16);
+	if (i == 1)
+		qeth_schedule_recovery(card);
+
+	return count;
+}
+
+static DEVICE_ATTR(recover, 0200, NULL, qeth_dev_recover_store);
+
+static ssize_t qeth_dev_performance_stats_show(struct device *dev,
+				struct device_attribute *attr, char *buf)
+{
+	struct qeth_card *card = dev_get_drvdata(dev);
+
+	if (!card)
+		return -EINVAL;
+
+	return sprintf(buf, "%i\n", card->options.performance_stats ? 1:0);
+}
+
+static ssize_t qeth_dev_performance_stats_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct qeth_card *card = dev_get_drvdata(dev);
+	char *tmp;
+	int i, rc = 0;
+
+	if (!card)
+		return -EINVAL;
+
+	mutex_lock(&card->conf_mutex);
+	i = simple_strtoul(buf, &tmp, 16);
+	if ((i == 0) || (i == 1)) {
+		if (i == card->options.performance_stats)
+			goto out;
+		card->options.performance_stats = i;
+		if (i == 0)
+			memset(&card->perf_stats, 0,
+				sizeof(struct qeth_perf_stats));
+		card->perf_stats.initial_rx_packets = card->stats.rx_packets;
+		card->perf_stats.initial_tx_packets = card->stats.tx_packets;
+	} else
+		rc = -EINVAL;
+out:
+	mutex_unlock(&card->conf_mutex);
+	return rc ? rc : count;
+}
+
+static DEVICE_ATTR(performance_stats, 0644, qeth_dev_performance_stats_show,
+		   qeth_dev_performance_stats_store);
+
+static ssize_t qeth_dev_layer2_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct qeth_card *card = dev_get_drvdata(dev);
+
+	if (!card)
+		return -EINVAL;
+
+	return sprintf(buf, "%i\n", card->options.layer2);
+}
+
+static ssize_t qeth_dev_layer2_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct qeth_card *card = dev_get_drvdata(dev);
+	struct net_device *ndev;
+	char *tmp;
+	int i, rc = 0;
+	enum qeth_discipline_id newdis;
+
+	if (!card)
+		return -EINVAL;
+
+	mutex_lock(&card->discipline_mutex);
+	if (card->state != CARD_STATE_DOWN) {
+		rc = -EPERM;
+		goto out;
+	}
+
+	i = simple_strtoul(buf, &tmp, 16);
+	switch (i) {
+	case 0:
+		newdis = QETH_DISCIPLINE_LAYER3;
+		break;
+	case 1:
+		newdis = QETH_DISCIPLINE_LAYER2;
+		break;
+	default:
+		rc = -EINVAL;
+		goto out;
+	}
+
+	if (card->options.layer2 == newdis)
+		goto out;
+	if (card->info.layer_enforced) {
+		/* fixed layer, can't switch */
+		rc = -EOPNOTSUPP;
+		goto out;
+	}
+
+	card->info.mac_bits = 0;
+	if (card->discipline) {
+		/* start with a new, pristine netdevice: */
+		ndev = qeth_clone_netdev(card->dev);
+		if (!ndev) {
+			rc = -ENOMEM;
+			goto out;
+		}
+
+		card->discipline->remove(card->gdev);
+		qeth_core_free_discipline(card);
+		card->options.layer2 = -1;
+
+		free_netdev(card->dev);
+		card->dev = ndev;
+	}
+
+	rc = qeth_core_load_discipline(card, newdis);
+	if (rc)
+		goto out;
+
+	rc = card->discipline->setup(card->gdev);
+	if (rc)
+		qeth_core_free_discipline(card);
+out:
+	mutex_unlock(&card->discipline_mutex);
+	return rc ? rc : count;
+}
+
+static DEVICE_ATTR(layer2, 0644, qeth_dev_layer2_show,
+		   qeth_dev_layer2_store);
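+
+/* Hedged usage sketch: the discipline can only be switched while the
+ * device is offline (CARD_STATE_DOWN) and its layer is not enforced:
+ *
+ *	$ echo 1 > /sys/bus/ccwgroup/devices/0.0.f500/layer2   # layer-2
+ *	$ echo 0 > /sys/bus/ccwgroup/devices/0.0.f500/layer2   # layer-3
+ */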
+
+#define ATTR_QETH_ISOLATION_NONE	("none")
+#define ATTR_QETH_ISOLATION_FWD		("forward")
+#define ATTR_QETH_ISOLATION_DROP	("drop")
+
+static ssize_t qeth_dev_isolation_show(struct device *dev,
+				struct device_attribute *attr, char *buf)
+{
+	struct qeth_card *card = dev_get_drvdata(dev);
+
+	if (!card)
+		return -EINVAL;
+
+	switch (card->options.isolation) {
+	case ISOLATION_MODE_NONE:
+		return snprintf(buf, 6, "%s\n", ATTR_QETH_ISOLATION_NONE);
+	case ISOLATION_MODE_FWD:
+		return snprintf(buf, 9, "%s\n", ATTR_QETH_ISOLATION_FWD);
+	case ISOLATION_MODE_DROP:
+		return snprintf(buf, 6, "%s\n", ATTR_QETH_ISOLATION_DROP);
+	default:
+		return snprintf(buf, 5, "%s\n", "N/A");
+	}
+}
+
+static ssize_t qeth_dev_isolation_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct qeth_card *card = dev_get_drvdata(dev);
+	enum qeth_ipa_isolation_modes isolation;
+	int rc = 0;
+
+	if (!card)
+		return -EINVAL;
+
+	mutex_lock(&card->conf_mutex);
+	if (card->info.type != QETH_CARD_TYPE_OSD &&
+	    card->info.type != QETH_CARD_TYPE_OSX) {
+		rc = -EOPNOTSUPP;
+		dev_err(&card->gdev->dev, "Adapter does not "
+			"support QDIO data connection isolation\n");
+		goto out;
+	}
+
+	/* parse input into isolation mode */
+	if (sysfs_streq(buf, ATTR_QETH_ISOLATION_NONE)) {
+		isolation = ISOLATION_MODE_NONE;
+	} else if (sysfs_streq(buf, ATTR_QETH_ISOLATION_FWD)) {
+		isolation = ISOLATION_MODE_FWD;
+	} else if (sysfs_streq(buf, ATTR_QETH_ISOLATION_DROP)) {
+		isolation = ISOLATION_MODE_DROP;
+	} else {
+		rc = -EINVAL;
+		goto out;
+	}
+	rc = count;
+
+	/* defer IP assist if device is offline (until discipline->set_online) */
+	card->options.prev_isolation = card->options.isolation;
+	card->options.isolation = isolation;
+	if (qeth_card_hw_is_reachable(card)) {
+		int ipa_rc = qeth_set_access_ctrl_online(card, 1);
+		if (ipa_rc != 0)
+			rc = ipa_rc;
+	}
+out:
+	mutex_unlock(&card->conf_mutex);
+	return rc;
+}
+
+static DEVICE_ATTR(isolation, 0644, qeth_dev_isolation_show,
+			qeth_dev_isolation_store);
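+
+/* Hedged usage sketch; only "none", "forward" and "drop" are accepted,
+ * and only on OSD/OSX adapters:
+ *
+ *	$ echo drop > /sys/bus/ccwgroup/devices/0.0.f500/isolation
+ *	$ cat /sys/bus/ccwgroup/devices/0.0.f500/isolation
+ *	drop
+ */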
+
+static ssize_t qeth_dev_switch_attrs_show(struct device *dev,
+				struct device_attribute *attr, char *buf)
+{
+	struct qeth_card *card = dev_get_drvdata(dev);
+	struct qeth_switch_info sw_info;
+	int	rc = 0;
+
+	if (!card)
+		return -EINVAL;
+
+	if (!qeth_card_hw_is_reachable(card))
+		return sprintf(buf, "n/a\n");
+
+	rc = qeth_query_switch_attributes(card, &sw_info);
+	if (rc)
+		return rc;
+
+	if (!sw_info.capabilities)
+		rc = sprintf(buf, "unknown");
+
+	if (sw_info.capabilities & QETH_SWITCH_FORW_802_1)
+		rc = sprintf(buf, (sw_info.settings & QETH_SWITCH_FORW_802_1 ?
+							"[802.1]" : "802.1"));
+	if (sw_info.capabilities & QETH_SWITCH_FORW_REFL_RELAY)
+		rc += sprintf(buf + rc,
+			(sw_info.settings & QETH_SWITCH_FORW_REFL_RELAY ?
+							" [rr]" : " rr"));
+	rc += sprintf(buf + rc, "\n");
+
+	return rc;
+}
+
+static DEVICE_ATTR(switch_attrs, 0444,
+		   qeth_dev_switch_attrs_show, NULL);
+
+static ssize_t qeth_hw_trap_show(struct device *dev,
+				struct device_attribute *attr, char *buf)
+{
+	struct qeth_card *card = dev_get_drvdata(dev);
+
+	if (!card)
+		return -EINVAL;
+	if (card->info.hwtrap)
+		return snprintf(buf, 5, "arm\n");
+	else
+		return snprintf(buf, 8, "disarm\n");
+}
+
+static ssize_t qeth_hw_trap_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct qeth_card *card = dev_get_drvdata(dev);
+	int rc = 0;
+	int state = 0;
+
+	if (!card)
+		return -EINVAL;
+
+	mutex_lock(&card->conf_mutex);
+	if (qeth_card_hw_is_reachable(card))
+		state = 1;
+
+	if (sysfs_streq(buf, "arm") && !card->info.hwtrap) {
+		if (state) {
+			if (qeth_is_diagass_supported(card,
+			    QETH_DIAGS_CMD_TRAP)) {
+				rc = qeth_hw_trap(card, QETH_DIAGS_TRAP_ARM);
+				if (!rc)
+					card->info.hwtrap = 1;
+			} else
+				rc = -EINVAL;
+		} else
+			card->info.hwtrap = 1;
+	} else if (sysfs_streq(buf, "disarm") && card->info.hwtrap) {
+		if (state) {
+			rc = qeth_hw_trap(card, QETH_DIAGS_TRAP_DISARM);
+			if (!rc)
+				card->info.hwtrap = 0;
+		} else
+			card->info.hwtrap = 0;
+	} else if (sysfs_streq(buf, "trap") && state && card->info.hwtrap)
+		rc = qeth_hw_trap(card, QETH_DIAGS_TRAP_CAPTURE);
+	else
+		rc = -EINVAL;
+
+	mutex_unlock(&card->conf_mutex);
+	return rc ? rc : count;
+}
+
+static DEVICE_ATTR(hw_trap, 0644, qeth_hw_trap_show,
+		   qeth_hw_trap_store);
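+
+/* Hedged usage sketch; "arm" and "disarm" toggle the trap, and "trap"
+ * fires a capture on an armed, reachable device:
+ *
+ *	$ echo arm > /sys/bus/ccwgroup/devices/0.0.f500/hw_trap
+ *	$ cat /sys/bus/ccwgroup/devices/0.0.f500/hw_trap
+ *	arm
+ */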
+
+static ssize_t qeth_dev_blkt_show(char *buf, struct qeth_card *card, int value)
+{
+	if (!card)
+		return -EINVAL;
+
+	return sprintf(buf, "%i\n", value);
+}
+
+static ssize_t qeth_dev_blkt_store(struct qeth_card *card,
+		const char *buf, size_t count, int *value, int max_value)
+{
+	char *tmp;
+	int i, rc = 0;
+
+	if (!card)
+		return -EINVAL;
+
+	mutex_lock(&card->conf_mutex);
+	if ((card->state != CARD_STATE_DOWN) &&
+	    (card->state != CARD_STATE_RECOVER)) {
+		rc = -EPERM;
+		goto out;
+	}
+	i = simple_strtoul(buf, &tmp, 10);
+	if (i <= max_value)
+		*value = i;
+	else
+		rc = -EINVAL;
+out:
+	mutex_unlock(&card->conf_mutex);
+	return rc ? rc : count;
+}
+
+static ssize_t qeth_dev_blkt_total_show(struct device *dev,
+				struct device_attribute *attr, char *buf)
+{
+	struct qeth_card *card = dev_get_drvdata(dev);
+
+	return qeth_dev_blkt_show(buf, card, card->info.blkt.time_total);
+}
+
+static ssize_t qeth_dev_blkt_total_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct qeth_card *card = dev_get_drvdata(dev);
+
+	return qeth_dev_blkt_store(card, buf, count,
+				   &card->info.blkt.time_total, 5000);
+}
+
+
+static DEVICE_ATTR(total, 0644, qeth_dev_blkt_total_show,
+		   qeth_dev_blkt_total_store);
+
+static ssize_t qeth_dev_blkt_inter_show(struct device *dev,
+				struct device_attribute *attr, char *buf)
+{
+	struct qeth_card *card = dev_get_drvdata(dev);
+
+	return qeth_dev_blkt_show(buf, card, card->info.blkt.inter_packet);
+}
+
+static ssize_t qeth_dev_blkt_inter_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct qeth_card *card = dev_get_drvdata(dev);
+
+	return qeth_dev_blkt_store(card, buf, count,
+				   &card->info.blkt.inter_packet, 1000);
+}
+
+static DEVICE_ATTR(inter, 0644, qeth_dev_blkt_inter_show,
+		   qeth_dev_blkt_inter_store);
+
+static ssize_t qeth_dev_blkt_inter_jumbo_show(struct device *dev,
+				struct device_attribute *attr, char *buf)
+{
+	struct qeth_card *card = dev_get_drvdata(dev);
+
+	return qeth_dev_blkt_show(buf, card,
+				  card->info.blkt.inter_packet_jumbo);
+}
+
+static ssize_t qeth_dev_blkt_inter_jumbo_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct qeth_card *card = dev_get_drvdata(dev);
+
+	return qeth_dev_blkt_store(card, buf, count,
+				   &card->info.blkt.inter_packet_jumbo, 1000);
+}
+
+static DEVICE_ATTR(inter_jumbo, 0644, qeth_dev_blkt_inter_jumbo_show,
+		   qeth_dev_blkt_inter_jumbo_store);
+
+static struct attribute *qeth_blkt_device_attrs[] = {
+	&dev_attr_total.attr,
+	&dev_attr_inter.attr,
+	&dev_attr_inter_jumbo.attr,
+	NULL,
+};
+const struct attribute_group qeth_device_blkt_group = {
+	.name = "blkt",
+	.attrs = qeth_blkt_device_attrs,
+};
+EXPORT_SYMBOL_GPL(qeth_device_blkt_group);
+
+static struct attribute *qeth_device_attrs[] = {
+	&dev_attr_state.attr,
+	&dev_attr_chpid.attr,
+	&dev_attr_if_name.attr,
+	&dev_attr_card_type.attr,
+	&dev_attr_inbuf_size.attr,
+	&dev_attr_portno.attr,
+	&dev_attr_portname.attr,
+	&dev_attr_priority_queueing.attr,
+	&dev_attr_buffer_count.attr,
+	&dev_attr_recover.attr,
+	&dev_attr_performance_stats.attr,
+	&dev_attr_layer2.attr,
+	&dev_attr_isolation.attr,
+	&dev_attr_hw_trap.attr,
+	&dev_attr_switch_attrs.attr,
+	NULL,
+};
+const struct attribute_group qeth_device_attr_group = {
+	.attrs = qeth_device_attrs,
+};
+EXPORT_SYMBOL_GPL(qeth_device_attr_group);
+
+const struct attribute_group *qeth_generic_attr_groups[] = {
+	&qeth_device_attr_group,
+	&qeth_device_blkt_group,
+	NULL,
+};
+
+static struct attribute *qeth_osn_device_attrs[] = {
+	&dev_attr_state.attr,
+	&dev_attr_chpid.attr,
+	&dev_attr_if_name.attr,
+	&dev_attr_card_type.attr,
+	&dev_attr_buffer_count.attr,
+	&dev_attr_recover.attr,
+	NULL,
+};
+static struct attribute_group qeth_osn_device_attr_group = {
+	.attrs = qeth_osn_device_attrs,
+};
+const struct attribute_group *qeth_osn_attr_groups[] = {
+	&qeth_osn_device_attr_group,
+	NULL,
+};
diff --git a/drivers/s390/net/qeth_l2.h b/drivers/s390/net/qeth_l2.h
new file mode 100644
index 0000000..ddc615b
--- /dev/null
+++ b/drivers/s390/net/qeth_l2.h
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ *    Copyright IBM Corp. 2013
+ *    Author(s): Eugene Crosser <eugene.crosser@ru.ibm.com>
+ */
+
+#ifndef __QETH_L2_H__
+#define __QETH_L2_H__
+
+#include "qeth_core.h"
+
+extern const struct attribute_group *qeth_l2_attr_groups[];
+
+int qeth_l2_create_device_attributes(struct device *);
+void qeth_l2_remove_device_attributes(struct device *);
+void qeth_l2_setup_bridgeport_attrs(struct qeth_card *card);
+int qeth_bridgeport_query_ports(struct qeth_card *card,
+				enum qeth_sbp_roles *role,
+				enum qeth_sbp_states *state);
+int qeth_bridgeport_setrole(struct qeth_card *card, enum qeth_sbp_roles role);
+int qeth_bridgeport_an_set(struct qeth_card *card, int enable);
+
+int qeth_l2_vnicc_set_state(struct qeth_card *card, u32 vnicc, bool state);
+int qeth_l2_vnicc_get_state(struct qeth_card *card, u32 vnicc, bool *state);
+int qeth_l2_vnicc_set_timeout(struct qeth_card *card, u32 timeout);
+int qeth_l2_vnicc_get_timeout(struct qeth_card *card, u32 *timeout);
+bool qeth_l2_vnicc_is_in_use(struct qeth_card *card);
+
+struct qeth_mac {
+	u8 mac_addr[ETH_ALEN];
+	u8 disp_flag:2;
+	struct hlist_node hnode;
+};
+
+#endif /* __QETH_L2_H__ */
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
new file mode 100644
index 0000000..76b2fba
--- /dev/null
+++ b/drivers/s390/net/qeth_l2_main.c
@@ -0,0 +1,2394 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ *    Copyright IBM Corp. 2007, 2009
+ *    Author(s): Utz Bacher <utz.bacher@de.ibm.com>,
+ *		 Frank Pavlic <fpavlic@de.ibm.com>,
+ *		 Thomas Spatzier <tspat@de.ibm.com>,
+ *		 Frank Blaschka <frank.blaschka@de.ibm.com>
+ */
+
+#define KMSG_COMPONENT "qeth"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/etherdevice.h>
+#include <linux/list.h>
+#include <linux/hash.h>
+#include <linux/hashtable.h>
+#include <asm/setup.h>
+#include "qeth_core.h"
+#include "qeth_l2.h"
+
+static int qeth_l2_set_offline(struct ccwgroup_device *);
+static int qeth_l2_stop(struct net_device *);
+static void qeth_bridgeport_query_support(struct qeth_card *card);
+static void qeth_bridge_state_change(struct qeth_card *card,
+					struct qeth_ipa_cmd *cmd);
+static void qeth_bridge_host_event(struct qeth_card *card,
+					struct qeth_ipa_cmd *cmd);
+static void qeth_l2_vnicc_set_defaults(struct qeth_card *card);
+static void qeth_l2_vnicc_init(struct qeth_card *card);
+static bool qeth_l2_vnicc_recover_timeout(struct qeth_card *card, u32 vnicc,
+					  u32 *timeout);
+
+static struct net_device *qeth_l2_netdev_by_devno(unsigned char *read_dev_no)
+{
+	struct qeth_card *card;
+	struct net_device *ndev;
+	__u16 temp_dev_no;
+	unsigned long flags;
+	struct ccw_dev_id read_devid;
+
+	ndev = NULL;
+	memcpy(&temp_dev_no, read_dev_no, 2);
+	read_lock_irqsave(&qeth_core_card_list.rwlock, flags);
+	list_for_each_entry(card, &qeth_core_card_list.list, list) {
+		ccw_device_get_id(CARD_RDEV(card), &read_devid);
+		if (read_devid.devno == temp_dev_no) {
+			ndev = card->dev;
+			break;
+		}
+	}
+	read_unlock_irqrestore(&qeth_core_card_list.rwlock, flags);
+	return ndev;
+}
+
+static int qeth_setdelmac_makerc(struct qeth_card *card, int retcode)
+{
+	int rc;
+
+	if (retcode)
+		QETH_CARD_TEXT_(card, 2, "err%04x", retcode);
+	switch (retcode) {
+	case IPA_RC_SUCCESS:
+		rc = 0;
+		break;
+	case IPA_RC_L2_UNSUPPORTED_CMD:
+		rc = -EOPNOTSUPP;
+		break;
+	case IPA_RC_L2_ADDR_TABLE_FULL:
+		rc = -ENOSPC;
+		break;
+	case IPA_RC_L2_DUP_MAC:
+	case IPA_RC_L2_DUP_LAYER3_MAC:
+		rc = -EEXIST;
+		break;
+	case IPA_RC_L2_MAC_NOT_AUTH_BY_HYP:
+	case IPA_RC_L2_MAC_NOT_AUTH_BY_ADP:
+		rc = -EPERM;
+		break;
+	case IPA_RC_L2_MAC_NOT_FOUND:
+		rc = -ENOENT;
+		break;
+	case -ENOMEM:
+		rc = -ENOMEM;
+		break;
+	default:
+		rc = -EIO;
+		break;
+	}
+	return rc;
+}
+
+static int qeth_l2_send_setdelmac(struct qeth_card *card, __u8 *mac,
+			   enum qeth_ipa_cmds ipacmd)
+{
+	struct qeth_ipa_cmd *cmd;
+	struct qeth_cmd_buffer *iob;
+
+	QETH_CARD_TEXT(card, 2, "L2sdmac");
+	iob = qeth_get_ipacmd_buffer(card, ipacmd, QETH_PROT_IPV4);
+	if (!iob)
+		return -ENOMEM;
+	cmd = __ipa_cmd(iob);
+	cmd->data.setdelmac.mac_length = ETH_ALEN;
+	ether_addr_copy(cmd->data.setdelmac.mac, mac);
+	return qeth_setdelmac_makerc(card, qeth_send_ipa_cmd(card, iob,
+					   NULL, NULL));
+}
+
+static int qeth_l2_send_setmac(struct qeth_card *card, __u8 *mac)
+{
+	int rc;
+
+	QETH_CARD_TEXT(card, 2, "L2Setmac");
+	rc = qeth_l2_send_setdelmac(card, mac, IPA_CMD_SETVMAC);
+	if (rc == 0) {
+		dev_info(&card->gdev->dev,
+			 "MAC address %pM successfully registered on device %s\n",
+			 mac, card->dev->name);
+	} else {
+		switch (rc) {
+		case -EEXIST:
+			dev_warn(&card->gdev->dev,
+				"MAC address %pM already exists\n", mac);
+			break;
+		case -EPERM:
+			dev_warn(&card->gdev->dev,
+				"MAC address %pM is not authorized\n", mac);
+			break;
+		}
+	}
+	return rc;
+}
+
+static int qeth_l2_write_mac(struct qeth_card *card, u8 *mac)
+{
+	enum qeth_ipa_cmds cmd = is_multicast_ether_addr(mac) ?
+					IPA_CMD_SETGMAC : IPA_CMD_SETVMAC;
+	int rc;
+
+	QETH_CARD_TEXT(card, 2, "L2Wmac");
+	rc = qeth_l2_send_setdelmac(card, mac, cmd);
+	if (rc == -EEXIST)
+		QETH_DBF_MESSAGE(2, "MAC %pM already registered on %s\n",
+				 mac, QETH_CARD_IFNAME(card));
+	else if (rc)
+		QETH_DBF_MESSAGE(2, "Failed to register MAC %pM on %s: %d\n",
+				 mac, QETH_CARD_IFNAME(card), rc);
+	return rc;
+}
+
+static int qeth_l2_remove_mac(struct qeth_card *card, u8 *mac)
+{
+	enum qeth_ipa_cmds cmd = is_multicast_ether_addr(mac) ?
+					IPA_CMD_DELGMAC : IPA_CMD_DELVMAC;
+	int rc;
+
+	QETH_CARD_TEXT(card, 2, "L2Rmac");
+	rc = qeth_l2_send_setdelmac(card, mac, cmd);
+	if (rc)
+		QETH_DBF_MESSAGE(2, "Failed to delete MAC %pM on %s: %d\n",
+				 mac, QETH_CARD_IFNAME(card), rc);
+	return rc;
+}
+
+static void qeth_l2_del_all_macs(struct qeth_card *card)
+{
+	struct qeth_mac *mac;
+	struct hlist_node *tmp;
+	int i;
+
+	spin_lock_bh(&card->mclock);
+	hash_for_each_safe(card->mac_htable, i, tmp, mac, hnode) {
+		hash_del(&mac->hnode);
+		kfree(mac);
+	}
+	spin_unlock_bh(&card->mclock);
+}
+
+static int qeth_l2_get_cast_type(struct qeth_card *card, struct sk_buff *skb)
+{
+	if (card->info.type == QETH_CARD_TYPE_OSN)
+		return RTN_UNICAST;
+	if (is_broadcast_ether_addr(skb->data))
+		return RTN_BROADCAST;
+	if (is_multicast_ether_addr(skb->data))
+		return RTN_MULTICAST;
+	return RTN_UNICAST;
+}
+
+static void qeth_l2_fill_header(struct qeth_hdr *hdr, struct sk_buff *skb,
+				int cast_type, unsigned int data_len)
+{
+	struct vlan_ethhdr *veth = (struct vlan_ethhdr *)skb_mac_header(skb);
+
+	memset(hdr, 0, sizeof(struct qeth_hdr));
+	hdr->hdr.l2.id = QETH_HEADER_TYPE_LAYER2;
+	hdr->hdr.l2.pkt_length = data_len;
+
+	/* set byte 3 (flags[2]) to the casting flags */
+	if (cast_type == RTN_MULTICAST)
+		hdr->hdr.l2.flags[2] |= QETH_LAYER2_FLAG_MULTICAST;
+	else if (cast_type == RTN_BROADCAST)
+		hdr->hdr.l2.flags[2] |= QETH_LAYER2_FLAG_BROADCAST;
+	else
+		hdr->hdr.l2.flags[2] |= QETH_LAYER2_FLAG_UNICAST;
+
+	/* VSWITCH relies on the VLAN information being present in
+	 * the QDIO header.
+	 */
+	if (veth->h_vlan_proto == htons(ETH_P_8021Q)) {
+		hdr->hdr.l2.flags[2] |= QETH_LAYER2_FLAG_VLAN;
+		hdr->hdr.l2.vlan_id = ntohs(veth->h_vlan_TCI);
+	}
+}
+
+static int qeth_setdelvlan_makerc(struct qeth_card *card, int retcode)
+{
+	if (retcode)
+		QETH_CARD_TEXT_(card, 2, "err%04x", retcode);
+
+	switch (retcode) {
+	case IPA_RC_SUCCESS:
+		return 0;
+	case IPA_RC_L2_INVALID_VLAN_ID:
+		return -EINVAL;
+	case IPA_RC_L2_DUP_VLAN_ID:
+		return -EEXIST;
+	case IPA_RC_L2_VLAN_ID_NOT_FOUND:
+		return -ENOENT;
+	case IPA_RC_L2_VLAN_ID_NOT_ALLOWED:
+		return -EPERM;
+	case -ENOMEM:
+		return -ENOMEM;
+	default:
+		return -EIO;
+	}
+}
+
+static int qeth_l2_send_setdelvlan_cb(struct qeth_card *card,
+				      struct qeth_reply *reply,
+				      unsigned long data)
+{
+	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
+
+	QETH_CARD_TEXT(card, 2, "L2sdvcb");
+	if (cmd->hdr.return_code) {
+		QETH_DBF_MESSAGE(2, "Error in processing VLAN %i on %s: 0x%x.\n",
+				 cmd->data.setdelvlan.vlan_id,
+				 QETH_CARD_IFNAME(card), cmd->hdr.return_code);
+		QETH_CARD_TEXT_(card, 2, "L2VL%4x", cmd->hdr.command);
+		QETH_CARD_TEXT_(card, 2, "err%d", cmd->hdr.return_code);
+	}
+	return 0;
+}
+
+static int qeth_l2_send_setdelvlan(struct qeth_card *card, __u16 i,
+				   enum qeth_ipa_cmds ipacmd)
+{
+	struct qeth_ipa_cmd *cmd;
+	struct qeth_cmd_buffer *iob;
+
+	QETH_CARD_TEXT_(card, 4, "L2sdv%x", ipacmd);
+	iob = qeth_get_ipacmd_buffer(card, ipacmd, QETH_PROT_IPV4);
+	if (!iob)
+		return -ENOMEM;
+	cmd = __ipa_cmd(iob);
+	cmd->data.setdelvlan.vlan_id = i;
+	return qeth_setdelvlan_makerc(card, qeth_send_ipa_cmd(card, iob,
+					    qeth_l2_send_setdelvlan_cb, NULL));
+}
+
+static void qeth_l2_process_vlans(struct qeth_card *card)
+{
+	struct qeth_vlan_vid *id;
+
+	QETH_CARD_TEXT(card, 3, "L2prcvln");
+	mutex_lock(&card->vid_list_mutex);
+	list_for_each_entry(id, &card->vid_list, list) {
+		qeth_l2_send_setdelvlan(card, id->vid, IPA_CMD_SETVLAN);
+	}
+	mutex_unlock(&card->vid_list_mutex);
+}
+
+static int qeth_l2_vlan_rx_add_vid(struct net_device *dev,
+				   __be16 proto, u16 vid)
+{
+	struct qeth_card *card = dev->ml_priv;
+	struct qeth_vlan_vid *id;
+	int rc;
+
+	QETH_CARD_TEXT_(card, 4, "aid:%d", vid);
+	if (!vid)
+		return 0;
+	if (qeth_wait_for_threads(card, QETH_RECOVER_THREAD)) {
+		QETH_CARD_TEXT(card, 3, "aidREC");
+		return 0;
+	}
+	id = kmalloc(sizeof(*id), GFP_KERNEL);
+	if (id) {
+		id->vid = vid;
+		rc = qeth_l2_send_setdelvlan(card, vid, IPA_CMD_SETVLAN);
+		if (rc) {
+			kfree(id);
+			return rc;
+		}
+		mutex_lock(&card->vid_list_mutex);
+		list_add_tail(&id->list, &card->vid_list);
+		mutex_unlock(&card->vid_list_mutex);
+	} else {
+		return -ENOMEM;
+	}
+	return 0;
+}
+
+static int qeth_l2_vlan_rx_kill_vid(struct net_device *dev,
+				    __be16 proto, u16 vid)
+{
+	struct qeth_vlan_vid *id, *tmpid = NULL;
+	struct qeth_card *card = dev->ml_priv;
+	int rc = 0;
+
+	QETH_CARD_TEXT_(card, 4, "kid:%d", vid);
+	if (qeth_wait_for_threads(card, QETH_RECOVER_THREAD)) {
+		QETH_CARD_TEXT(card, 3, "kidREC");
+		return 0;
+	}
+	mutex_lock(&card->vid_list_mutex);
+	list_for_each_entry(id, &card->vid_list, list) {
+		if (id->vid == vid) {
+			list_del(&id->list);
+			tmpid = id;
+			break;
+		}
+	}
+	mutex_unlock(&card->vid_list_mutex);
+	if (tmpid) {
+		rc = qeth_l2_send_setdelvlan(card, vid, IPA_CMD_DELVLAN);
+		kfree(tmpid);
+	}
+	return rc;
+}
+
+static void qeth_l2_stop_card(struct qeth_card *card, int recovery_mode)
+{
+	QETH_DBF_TEXT(SETUP, 2, "stopcard");
+	QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *));
+
+	qeth_set_allowed_threads(card, 0, 1);
+	if (card->read.state == CH_STATE_UP &&
+	    card->write.state == CH_STATE_UP &&
+	    (card->state == CARD_STATE_UP)) {
+		if (recovery_mode &&
+		    card->info.type != QETH_CARD_TYPE_OSN) {
+			qeth_l2_stop(card->dev);
+		} else {
+			rtnl_lock();
+			dev_close(card->dev);
+			rtnl_unlock();
+		}
+		card->info.mac_bits &= ~QETH_LAYER2_MAC_REGISTERED;
+		card->state = CARD_STATE_SOFTSETUP;
+	}
+	if (card->state == CARD_STATE_SOFTSETUP) {
+		qeth_l2_del_all_macs(card);
+		qeth_clear_ipacmd_list(card);
+		card->state = CARD_STATE_HARDSETUP;
+	}
+	if (card->state == CARD_STATE_HARDSETUP) {
+		qeth_qdio_clear_card(card, 0);
+		qeth_clear_qdio_buffers(card);
+		qeth_clear_working_pool_list(card);
+		card->state = CARD_STATE_DOWN;
+	}
+	if (card->state == CARD_STATE_DOWN) {
+		qeth_clear_cmd_buffers(&card->read);
+		qeth_clear_cmd_buffers(&card->write);
+	}
+}
+
+static int qeth_l2_process_inbound_buffer(struct qeth_card *card,
+				int budget, int *done)
+{
+	int work_done = 0;
+	struct sk_buff *skb;
+	struct qeth_hdr *hdr;
+	unsigned int len;
+
+	*done = 0;
+	WARN_ON_ONCE(!budget);
+	while (budget) {
+		skb = qeth_core_get_next_skb(card,
+			&card->qdio.in_q->bufs[card->rx.b_index],
+			&card->rx.b_element, &card->rx.e_offset, &hdr);
+		if (!skb) {
+			*done = 1;
+			break;
+		}
+		switch (hdr->hdr.l2.id) {
+		case QETH_HEADER_TYPE_LAYER2:
+			skb->protocol = eth_type_trans(skb, skb->dev);
+			qeth_rx_csum(card, skb, hdr->hdr.l2.flags[1]);
+			if (skb->protocol == htons(ETH_P_802_2))
+				*((__u32 *)skb->cb) = ++card->seqno.pkt_seqno;
+			len = skb->len;
+			napi_gro_receive(&card->napi, skb);
+			break;
+		case QETH_HEADER_TYPE_OSN:
+			if (card->info.type == QETH_CARD_TYPE_OSN) {
+				skb_push(skb, sizeof(struct qeth_hdr));
+				skb_copy_to_linear_data(skb, hdr,
+						sizeof(struct qeth_hdr));
+				len = skb->len;
+				card->osn_info.data_cb(skb);
+				break;
+			}
+			/* else unknown */
+		default:
+			dev_kfree_skb_any(skb);
+			QETH_CARD_TEXT(card, 3, "inbunkno");
+			QETH_DBF_HEX(CTRL, 3, hdr, sizeof(*hdr));
+			continue;
+		}
+		work_done++;
+		budget--;
+		card->stats.rx_packets++;
+		card->stats.rx_bytes += len;
+	}
+	return work_done;
+}
+
+static int qeth_l2_request_initial_mac(struct qeth_card *card)
+{
+	int rc = 0;
+
+	QETH_DBF_TEXT(SETUP, 2, "l2reqmac");
+	QETH_DBF_TEXT_(SETUP, 2, "doL2%s", CARD_BUS_ID(card));
+
+	if (MACHINE_IS_VM) {
+		rc = qeth_vm_request_mac(card);
+		if (!rc)
+			goto out;
+		QETH_DBF_MESSAGE(2, "z/VM MAC Service failed on device %s: x%x\n",
+				 CARD_BUS_ID(card), rc);
+		QETH_DBF_TEXT_(SETUP, 2, "err%04x", rc);
+		/* fall back to alternative mechanism: */
+	}
+
+	if (card->info.type == QETH_CARD_TYPE_IQD ||
+	    card->info.type == QETH_CARD_TYPE_OSM ||
+	    card->info.type == QETH_CARD_TYPE_OSX ||
+	    card->info.guestlan) {
+		rc = qeth_setadpparms_change_macaddr(card);
+		if (!rc)
+			goto out;
+		QETH_DBF_MESSAGE(2, "READ_MAC Assist failed on device %s: x%x\n",
+				 CARD_BUS_ID(card), rc);
+		QETH_DBF_TEXT_(SETUP, 2, "1err%04x", rc);
+		/* fall back once more: */
+	}
+
+	/* some devices don't support a custom MAC address: */
+	if (card->info.type == QETH_CARD_TYPE_OSM ||
+	    card->info.type == QETH_CARD_TYPE_OSX)
+		return (rc) ? rc : -EADDRNOTAVAIL;
+	eth_hw_addr_random(card->dev);
+
+out:
+	QETH_DBF_HEX(SETUP, 2, card->dev->dev_addr, card->dev->addr_len);
+	return 0;
+}
+
+static int qeth_l2_set_mac_address(struct net_device *dev, void *p)
+{
+	struct sockaddr *addr = p;
+	struct qeth_card *card = dev->ml_priv;
+	u8 old_addr[ETH_ALEN];
+	int rc = 0;
+
+	QETH_CARD_TEXT(card, 3, "setmac");
+
+	if (card->info.type == QETH_CARD_TYPE_OSN ||
+	    card->info.type == QETH_CARD_TYPE_OSM ||
+	    card->info.type == QETH_CARD_TYPE_OSX) {
+		QETH_CARD_TEXT(card, 3, "setmcTYP");
+		return -EOPNOTSUPP;
+	}
+	QETH_CARD_HEX(card, 3, addr->sa_data, ETH_ALEN);
+	if (!is_valid_ether_addr(addr->sa_data))
+		return -EADDRNOTAVAIL;
+
+	if (qeth_wait_for_threads(card, QETH_RECOVER_THREAD)) {
+		QETH_CARD_TEXT(card, 3, "setmcREC");
+		return -ERESTARTSYS;
+	}
+
+	/* avoid racing against concurrent state change: */
+	if (!mutex_trylock(&card->conf_mutex))
+		return -EAGAIN;
+
+	if (!qeth_card_hw_is_reachable(card)) {
+		ether_addr_copy(dev->dev_addr, addr->sa_data);
+		goto out_unlock;
+	}
+
+	/* don't register the same address twice */
+	if (ether_addr_equal_64bits(dev->dev_addr, addr->sa_data) &&
+	    (card->info.mac_bits & QETH_LAYER2_MAC_REGISTERED))
+		goto out_unlock;
+
+	/* add the new address, switch over, drop the old */
+	rc = qeth_l2_send_setmac(card, addr->sa_data);
+	if (rc)
+		goto out_unlock;
+	ether_addr_copy(old_addr, dev->dev_addr);
+	ether_addr_copy(dev->dev_addr, addr->sa_data);
+
+	if (card->info.mac_bits & QETH_LAYER2_MAC_REGISTERED)
+		qeth_l2_remove_mac(card, old_addr);
+	card->info.mac_bits |= QETH_LAYER2_MAC_REGISTERED;
+
+out_unlock:
+	mutex_unlock(&card->conf_mutex);
+	return rc;
+}
+
+static void qeth_promisc_to_bridge(struct qeth_card *card)
+{
+	struct net_device *dev = card->dev;
+	enum qeth_ipa_promisc_modes promisc_mode;
+	int role;
+	int rc;
+
+	QETH_CARD_TEXT(card, 3, "pmisc2br");
+
+	if (!card->options.sbp.reflect_promisc)
+		return;
+	promisc_mode = (dev->flags & IFF_PROMISC) ? SET_PROMISC_MODE_ON
+						: SET_PROMISC_MODE_OFF;
+	if (promisc_mode == card->info.promisc_mode)
+		return;
+
+	if (promisc_mode == SET_PROMISC_MODE_ON) {
+		if (card->options.sbp.reflect_promisc_primary)
+			role = QETH_SBP_ROLE_PRIMARY;
+		else
+			role = QETH_SBP_ROLE_SECONDARY;
+	} else
+		role = QETH_SBP_ROLE_NONE;
+
+	rc = qeth_bridgeport_setrole(card, role);
+	QETH_DBF_TEXT_(SETUP, 2, "bpm%c%04x",
+			(promisc_mode == SET_PROMISC_MODE_ON) ? '+' : '-', rc);
+	if (!rc) {
+		card->options.sbp.role = role;
+		card->info.promisc_mode = promisc_mode;
+	}
+
+}
+/* A new MAC address is added to the hash table and marked to be written to
+ * the card, but only if it is not in the hash table already.
+ */
+static void qeth_l2_add_mac(struct qeth_card *card, struct netdev_hw_addr *ha)
+{
+	u32 mac_hash = get_unaligned((u32 *)(&ha->addr[2]));
+	struct qeth_mac *mac;
+
+	hash_for_each_possible(card->mac_htable, mac, hnode, mac_hash) {
+		if (ether_addr_equal_64bits(ha->addr, mac->mac_addr)) {
+			mac->disp_flag = QETH_DISP_ADDR_DO_NOTHING;
+			return;
+		}
+	}
+
+	mac = kzalloc(sizeof(struct qeth_mac), GFP_ATOMIC);
+	if (!mac)
+		return;
+
+	ether_addr_copy(mac->mac_addr, ha->addr);
+	mac->disp_flag = QETH_DISP_ADDR_ADD;
+
+	hash_add(card->mac_htable, &mac->hnode, mac_hash);
+}
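+
+/* Note: the hash key above is the last four bytes of the MAC address
+ * (&ha->addr[2] read as a u32), skipping the first two vendor-prefix
+ * bytes, which presumably vary least between hosts on the same LAN.
+ */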
+
+static void qeth_l2_set_rx_mode(struct net_device *dev)
+{
+	struct qeth_card *card = dev->ml_priv;
+	struct netdev_hw_addr *ha;
+	struct qeth_mac *mac;
+	struct hlist_node *tmp;
+	int i;
+	int rc;
+
+	if (card->info.type == QETH_CARD_TYPE_OSN)
+		return;
+
+	QETH_CARD_TEXT(card, 3, "setmulti");
+	if (qeth_threads_running(card, QETH_RECOVER_THREAD) &&
+	    (card->state != CARD_STATE_UP))
+		return;
+
+	spin_lock_bh(&card->mclock);
+
+	netdev_for_each_mc_addr(ha, dev)
+		qeth_l2_add_mac(card, ha);
+	netdev_for_each_uc_addr(ha, dev)
+		qeth_l2_add_mac(card, ha);
+
+	hash_for_each_safe(card->mac_htable, i, tmp, mac, hnode) {
+		switch (mac->disp_flag) {
+		case QETH_DISP_ADDR_DELETE:
+			qeth_l2_remove_mac(card, mac->mac_addr);
+			hash_del(&mac->hnode);
+			kfree(mac);
+			break;
+		case QETH_DISP_ADDR_ADD:
+			rc = qeth_l2_write_mac(card, mac->mac_addr);
+			if (rc) {
+				hash_del(&mac->hnode);
+				kfree(mac);
+				break;
+			}
+			/* fall through */
+		default:
+			/* for next call to set_rx_mode(): */
+			mac->disp_flag = QETH_DISP_ADDR_DELETE;
+		}
+	}
+
+	spin_unlock_bh(&card->mclock);
+
+	if (qeth_adp_supported(card, IPA_SETADP_SET_PROMISC_MODE))
+		qeth_setadp_promisc_mode(card);
+	else
+		qeth_promisc_to_bridge(card);
+}
+
+static int qeth_l2_xmit(struct qeth_card *card, struct sk_buff *skb,
+			struct qeth_qdio_out_q *queue, int cast_type, int ipv)
+{
+	const unsigned int proto_len = IS_IQD(card) ? ETH_HLEN : 0;
+	const unsigned int hw_hdr_len = sizeof(struct qeth_hdr);
+	unsigned int frame_len = skb->len;
+	unsigned int data_offset = 0;
+	struct qeth_hdr *hdr = NULL;
+	unsigned int hd_len = 0;
+	unsigned int elements;
+	int push_len, rc;
+	bool is_sg;
+
+	rc = skb_cow_head(skb, hw_hdr_len);
+	if (rc)
+		return rc;
+
+	push_len = qeth_add_hw_header(card, skb, &hdr, hw_hdr_len, proto_len,
+				      &elements);
+	if (push_len < 0)
+		return push_len;
+	if (!push_len) {
+		/* HW header needs its own buffer element. */
+		hd_len = hw_hdr_len + proto_len;
+		data_offset = proto_len;
+	}
+	qeth_l2_fill_header(hdr, skb, cast_type, frame_len);
+	if (skb->ip_summed == CHECKSUM_PARTIAL) {
+		qeth_tx_csum(skb, &hdr->hdr.l2.flags[1], ipv);
+		if (card->options.performance_stats)
+			card->perf_stats.tx_csum++;
+	}
+
+	is_sg = skb_is_nonlinear(skb);
+	if (IS_IQD(card)) {
+		rc = qeth_do_send_packet_fast(queue, skb, hdr, data_offset,
+					      hd_len);
+	} else {
+		/* TODO: drop skb_orphan() once TX completion is fast enough */
+		skb_orphan(skb);
+		rc = qeth_do_send_packet(card, queue, skb, hdr, data_offset,
+					 hd_len, elements);
+	}
+
+	if (!rc) {
+		if (card->options.performance_stats) {
+			card->perf_stats.buf_elements_sent += elements;
+			if (is_sg)
+				card->perf_stats.sg_skbs_sent++;
+		}
+	} else {
+		if (!push_len)
+			kmem_cache_free(qeth_core_header_cache, hdr);
+		if (rc == -EBUSY)
+			/* roll back to ETH header */
+			skb_pull(skb, push_len);
+	}
+	return rc;
+}
+
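+/* OSN frames arrive from the peer driver with a ready-made qeth_hdr at the
+ * start of skb->data; IPv6 traffic is not supported on this path.
+ */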
+static int qeth_l2_xmit_osn(struct qeth_card *card, struct sk_buff *skb,
+			    struct qeth_qdio_out_q *queue)
+{
+	unsigned int elements;
+	struct qeth_hdr *hdr;
+
+	if (skb->protocol == htons(ETH_P_IPV6))
+		return -EPROTONOSUPPORT;
+
+	hdr = (struct qeth_hdr *)skb->data;
+	elements = qeth_get_elements_no(card, skb, 0, 0);
+	if (!elements)
+		return -E2BIG;
+	if (qeth_hdr_chk_and_bounce(skb, &hdr, sizeof(*hdr)))
+		return -EINVAL;
+	return qeth_do_send_packet(card, queue, skb, hdr, 0, 0, elements);
+}
+
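+/* The queue is stopped for each transmit and re-woken on completion; -EBUSY
+ * propagates as NETDEV_TX_BUSY so the stack requeues the unconsumed skb.
+ */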
+static netdev_tx_t qeth_l2_hard_start_xmit(struct sk_buff *skb,
+					   struct net_device *dev)
+{
+	struct qeth_card *card = dev->ml_priv;
+	int cast_type = qeth_l2_get_cast_type(card, skb);
+	int ipv = qeth_get_ip_version(skb);
+	struct qeth_qdio_out_q *queue;
+	int tx_bytes = skb->len;
+	int rc;
+
+	if ((card->state != CARD_STATE_UP) || !card->lan_online) {
+		card->stats.tx_carrier_errors++;
+		goto tx_drop;
+	}
+
+	queue = qeth_get_tx_queue(card, skb, ipv, cast_type);
+
+	if (card->options.performance_stats) {
+		card->perf_stats.outbound_cnt++;
+		card->perf_stats.outbound_start_time = qeth_get_micros();
+	}
+	netif_stop_queue(dev);
+
+	if (IS_OSN(card))
+		rc = qeth_l2_xmit_osn(card, skb, queue);
+	else
+		rc = qeth_l2_xmit(card, skb, queue, cast_type, ipv);
+
+	if (!rc) {
+		card->stats.tx_packets++;
+		card->stats.tx_bytes += tx_bytes;
+		if (card->options.performance_stats)
+			card->perf_stats.outbound_time += qeth_get_micros() -
+				card->perf_stats.outbound_start_time;
+		netif_wake_queue(dev);
+		return NETDEV_TX_OK;
+	} else if (rc == -EBUSY) {
+		return NETDEV_TX_BUSY;
+	} /* else fall through */
+
+tx_drop:
+	card->stats.tx_dropped++;
+	card->stats.tx_errors++;
+	dev_kfree_skb_any(skb);
+	netif_wake_queue(dev);
+	return NETDEV_TX_OK;
+}
+
+static int __qeth_l2_open(struct net_device *dev)
+{
+	struct qeth_card *card = dev->ml_priv;
+	int rc = 0;
+
+	QETH_CARD_TEXT(card, 4, "qethopen");
+	if (card->state == CARD_STATE_UP)
+		return rc;
+	if (card->state != CARD_STATE_SOFTSETUP)
+		return -ENODEV;
+
+	if ((card->info.type != QETH_CARD_TYPE_OSN) &&
+	     (!(card->info.mac_bits & QETH_LAYER2_MAC_REGISTERED))) {
+		QETH_CARD_TEXT(card, 4, "nomacadr");
+		return -EPERM;
+	}
+	card->data.state = CH_STATE_UP;
+	card->state = CARD_STATE_UP;
+	netif_start_queue(dev);
+
+	if (qdio_stop_irq(card->data.ccwdev, 0) >= 0) {
+		napi_enable(&card->napi);
+		napi_schedule(&card->napi);
+	} else
+		rc = -EIO;
+	return rc;
+}
+
+static int qeth_l2_open(struct net_device *dev)
+{
+	struct qeth_card *card = dev->ml_priv;
+
+	QETH_CARD_TEXT(card, 5, "qethope_");
+	if (qeth_wait_for_threads(card, QETH_RECOVER_THREAD)) {
+		QETH_CARD_TEXT(card, 3, "openREC");
+		return -ERESTARTSYS;
+	}
+	return __qeth_l2_open(dev);
+}
+
+static int qeth_l2_stop(struct net_device *dev)
+{
+	struct qeth_card *card = dev->ml_priv;
+
+	QETH_CARD_TEXT(card, 4, "qethstop");
+	netif_tx_disable(dev);
+	if (card->state == CARD_STATE_UP) {
+		card->state = CARD_STATE_SOFTSETUP;
+		napi_disable(&card->napi);
+	}
+	return 0;
+}
+
+static const struct device_type qeth_l2_devtype = {
+	.name = "qeth_layer2",
+	.groups = qeth_l2_attr_groups,
+};
+
+static int qeth_l2_probe_device(struct ccwgroup_device *gdev)
+{
+	struct qeth_card *card = dev_get_drvdata(&gdev->dev);
+	int rc;
+
+	if (gdev->dev.type == &qeth_generic_devtype) {
+		rc = qeth_l2_create_device_attributes(&gdev->dev);
+		if (rc)
+			return rc;
+	}
+	INIT_LIST_HEAD(&card->vid_list);
+	hash_init(card->mac_htable);
+	card->options.layer2 = 1;
+	card->info.hwtrap = 0;
+	qeth_l2_vnicc_set_defaults(card);
+	return 0;
+}
+
+static void qeth_l2_remove_device(struct ccwgroup_device *cgdev)
+{
+	struct qeth_card *card = dev_get_drvdata(&cgdev->dev);
+
+	if (cgdev->dev.type == &qeth_generic_devtype)
+		qeth_l2_remove_device_attributes(&cgdev->dev);
+	qeth_set_allowed_threads(card, 0, 1);
+	wait_event(card->wait_q, qeth_threads_running(card, 0xffffffff) == 0);
+
+	if (cgdev->state == CCWGROUP_ONLINE)
+		qeth_l2_set_offline(cgdev);
+	if (qeth_netdev_is_registered(card->dev))
+		unregister_netdev(card->dev);
+}
+
+static const struct ethtool_ops qeth_l2_ethtool_ops = {
+	.get_link = ethtool_op_get_link,
+	.get_strings = qeth_core_get_strings,
+	.get_ethtool_stats = qeth_core_get_ethtool_stats,
+	.get_sset_count = qeth_core_get_sset_count,
+	.get_drvinfo = qeth_core_get_drvinfo,
+	.get_link_ksettings = qeth_core_ethtool_get_link_ksettings,
+};
+
+static const struct ethtool_ops qeth_l2_osn_ops = {
+	.get_strings = qeth_core_get_strings,
+	.get_ethtool_stats = qeth_core_get_ethtool_stats,
+	.get_sset_count = qeth_core_get_sset_count,
+	.get_drvinfo = qeth_core_get_drvinfo,
+};
+
+static const struct net_device_ops qeth_l2_netdev_ops = {
+	.ndo_open		= qeth_l2_open,
+	.ndo_stop		= qeth_l2_stop,
+	.ndo_get_stats		= qeth_get_stats,
+	.ndo_start_xmit		= qeth_l2_hard_start_xmit,
+	.ndo_features_check	= qeth_features_check,
+	.ndo_validate_addr	= eth_validate_addr,
+	.ndo_set_rx_mode	= qeth_l2_set_rx_mode,
+	.ndo_do_ioctl		= qeth_do_ioctl,
+	.ndo_set_mac_address    = qeth_l2_set_mac_address,
+	.ndo_vlan_rx_add_vid	= qeth_l2_vlan_rx_add_vid,
+	.ndo_vlan_rx_kill_vid   = qeth_l2_vlan_rx_kill_vid,
+	.ndo_tx_timeout	   	= qeth_tx_timeout,
+	.ndo_fix_features	= qeth_fix_features,
+	.ndo_set_features	= qeth_set_features
+};
+
+static int qeth_l2_setup_netdev(struct qeth_card *card)
+{
+	int rc;
+
+	if (qeth_netdev_is_registered(card->dev))
+		return 0;
+
+	card->dev->priv_flags |= IFF_UNICAST_FLT;
+	card->dev->netdev_ops = &qeth_l2_netdev_ops;
+	if (card->info.type == QETH_CARD_TYPE_OSN) {
+		card->dev->ethtool_ops = &qeth_l2_osn_ops;
+		card->dev->flags |= IFF_NOARP;
+	} else {
+		card->dev->ethtool_ops = &qeth_l2_ethtool_ops;
+		card->dev->needed_headroom = sizeof(struct qeth_hdr);
+	}
+
+	if (card->info.type == QETH_CARD_TYPE_OSM)
+		card->dev->features |= NETIF_F_VLAN_CHALLENGED;
+	else
+		card->dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
+
+	if (card->info.type == QETH_CARD_TYPE_OSD && !card->info.guestlan) {
+		card->dev->features |= NETIF_F_SG;
+		/* OSA 3S and earlier have no RX/TX checksum offload support */
+		if (qeth_is_supported(card, IPA_OUTBOUND_CHECKSUM)) {
+			card->dev->hw_features |= NETIF_F_IP_CSUM;
+			card->dev->vlan_features |= NETIF_F_IP_CSUM;
+		}
+	}
+	if (qeth_is_supported6(card, IPA_OUTBOUND_CHECKSUM_V6)) {
+		card->dev->hw_features |= NETIF_F_IPV6_CSUM;
+		card->dev->vlan_features |= NETIF_F_IPV6_CSUM;
+	}
+	if (qeth_is_supported(card, IPA_INBOUND_CHECKSUM) ||
+	    qeth_is_supported6(card, IPA_INBOUND_CHECKSUM_V6)) {
+		card->dev->hw_features |= NETIF_F_RXCSUM;
+		card->dev->vlan_features |= NETIF_F_RXCSUM;
+	}
+
+	qeth_l2_request_initial_mac(card);
+	netif_napi_add(card->dev, &card->napi, qeth_poll, QETH_NAPI_WEIGHT);
+	rc = register_netdev(card->dev);
+	if (rc)
+		card->dev->netdev_ops = NULL;
+	return rc;
+}
+
+static int qeth_l2_start_ipassists(struct qeth_card *card)
+{
+	/* configure isolation level */
+	if (qeth_set_access_ctrl_online(card, 0))
+		return -ENODEV;
+	return 0;
+}
+
+static void qeth_l2_trace_features(struct qeth_card *card)
+{
+	/* Set BridgePort features */
+	QETH_CARD_TEXT(card, 2, "featuSBP");
+	QETH_CARD_HEX(card, 2, &card->options.sbp.supported_funcs,
+		      sizeof(card->options.sbp.supported_funcs));
+	/* VNIC Characteristics features */
+	QETH_CARD_TEXT(card, 2, "feaVNICC");
+	QETH_CARD_HEX(card, 2, &card->options.vnicc.sup_chars,
+		      sizeof(card->options.vnicc.sup_chars));
+}
+
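+/* Bring the device online: hardsetup the CCW channels, register the netdev,
+ * register the MAC, (re-)apply Bridge Port and VNICC settings, then run
+ * softsetup (IP assists, VLANs, QDIO queues) before opening the interface.
+ */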
+static int __qeth_l2_set_online(struct ccwgroup_device *gdev, int recovery_mode)
+{
+	struct qeth_card *card = dev_get_drvdata(&gdev->dev);
+	int rc = 0;
+	enum qeth_card_states recover_flag;
+
+	mutex_lock(&card->discipline_mutex);
+	mutex_lock(&card->conf_mutex);
+	QETH_DBF_TEXT(SETUP, 2, "setonlin");
+	QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *));
+
+	recover_flag = card->state;
+	rc = qeth_core_hardsetup_card(card);
+	if (rc) {
+		QETH_DBF_TEXT_(SETUP, 2, "2err%04x", rc);
+		rc = -ENODEV;
+		goto out_remove;
+	}
+	qeth_bridgeport_query_support(card);
+	if (card->options.sbp.supported_funcs)
+		dev_info(&card->gdev->dev,
+		"The device represents a Bridge Capable Port\n");
+
+	rc = qeth_l2_setup_netdev(card);
+	if (rc)
+		goto out_remove;
+
+	if (card->info.type != QETH_CARD_TYPE_OSN &&
+	    !qeth_l2_send_setmac(card, card->dev->dev_addr))
+		card->info.mac_bits |= QETH_LAYER2_MAC_REGISTERED;
+
+	if (qeth_is_diagass_supported(card, QETH_DIAGS_CMD_TRAP)) {
+		if (card->info.hwtrap &&
+		    qeth_hw_trap(card, QETH_DIAGS_TRAP_ARM))
+			card->info.hwtrap = 0;
+	} else
+		card->info.hwtrap = 0;
+
+	/* for the rx_bcast characteristic, init VNICC after setmac */
+	qeth_l2_vnicc_init(card);
+
+	qeth_trace_features(card);
+	qeth_l2_trace_features(card);
+
+	qeth_l2_setup_bridgeport_attrs(card);
+
+	card->state = CARD_STATE_HARDSETUP;
+	qeth_print_status_message(card);
+
+	/* softsetup */
+	QETH_DBF_TEXT(SETUP, 2, "softsetp");
+
+	if ((card->info.type == QETH_CARD_TYPE_OSD) ||
+	    (card->info.type == QETH_CARD_TYPE_OSX)) {
+		rc = qeth_l2_start_ipassists(card);
+		if (rc)
+			goto out_remove;
+	}
+
+	if (card->info.type != QETH_CARD_TYPE_OSN)
+		qeth_l2_process_vlans(card);
+
+	netif_tx_disable(card->dev);
+
+	rc = qeth_init_qdio_queues(card);
+	if (rc) {
+		QETH_DBF_TEXT_(SETUP, 2, "6err%d", rc);
+		rc = -ENODEV;
+		goto out_remove;
+	}
+	card->state = CARD_STATE_SOFTSETUP;
+	if (card->lan_online)
+		netif_carrier_on(card->dev);
+	else
+		netif_carrier_off(card->dev);
+
+	qeth_set_allowed_threads(card, 0xffffffff, 0);
+
+	qeth_enable_hw_features(card->dev);
+	if (recover_flag == CARD_STATE_RECOVER) {
+		if (recovery_mode &&
+		    card->info.type != QETH_CARD_TYPE_OSN) {
+			__qeth_l2_open(card->dev);
+			qeth_l2_set_rx_mode(card->dev);
+		} else {
+			rtnl_lock();
+			dev_open(card->dev);
+			rtnl_unlock();
+		}
+	}
+	/* let user_space know that device is online */
+	kobject_uevent(&gdev->dev.kobj, KOBJ_CHANGE);
+	mutex_unlock(&card->conf_mutex);
+	mutex_unlock(&card->discipline_mutex);
+	return 0;
+
+out_remove:
+	qeth_l2_stop_card(card, 0);
+	ccw_device_set_offline(CARD_DDEV(card));
+	ccw_device_set_offline(CARD_WDEV(card));
+	ccw_device_set_offline(CARD_RDEV(card));
+	qdio_free(CARD_DDEV(card));
+	if (recover_flag == CARD_STATE_RECOVER)
+		card->state = CARD_STATE_RECOVER;
+	else
+		card->state = CARD_STATE_DOWN;
+	mutex_unlock(&card->conf_mutex);
+	mutex_unlock(&card->discipline_mutex);
+	return rc;
+}
+
+static int qeth_l2_set_online(struct ccwgroup_device *gdev)
+{
+	return __qeth_l2_set_online(gdev, 0);
+}
+
+static int __qeth_l2_set_offline(struct ccwgroup_device *cgdev,
+					int recovery_mode)
+{
+	struct qeth_card *card = dev_get_drvdata(&cgdev->dev);
+	int rc = 0, rc2 = 0, rc3 = 0;
+	enum qeth_card_states recover_flag;
+
+	mutex_lock(&card->discipline_mutex);
+	mutex_lock(&card->conf_mutex);
+	QETH_DBF_TEXT(SETUP, 3, "setoffl");
+	QETH_DBF_HEX(SETUP, 3, &card, sizeof(void *));
+
+	netif_carrier_off(card->dev);
+	recover_flag = card->state;
+	if ((!recovery_mode && card->info.hwtrap) || card->info.hwtrap == 2) {
+		qeth_hw_trap(card, QETH_DIAGS_TRAP_DISARM);
+		card->info.hwtrap = 1;
+	}
+	qeth_l2_stop_card(card, recovery_mode);
+	rc  = ccw_device_set_offline(CARD_DDEV(card));
+	rc2 = ccw_device_set_offline(CARD_WDEV(card));
+	rc3 = ccw_device_set_offline(CARD_RDEV(card));
+	if (!rc)
+		rc = (rc2) ? rc2 : rc3;
+	if (rc)
+		QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc);
+	qdio_free(CARD_DDEV(card));
+	if (recover_flag == CARD_STATE_UP)
+		card->state = CARD_STATE_RECOVER;
+	/* let user_space know that device is offline */
+	kobject_uevent(&cgdev->dev.kobj, KOBJ_CHANGE);
+	mutex_unlock(&card->conf_mutex);
+	mutex_unlock(&card->discipline_mutex);
+	return 0;
+}
+
+static int qeth_l2_set_offline(struct ccwgroup_device *cgdev)
+{
+	return __qeth_l2_set_offline(cgdev, 0);
+}
+
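+/* Recovery thread: take the device offline and back online in recovery mode
+ * (recovery_mode=1), which keeps the registered netdev and reopens the
+ * interface directly instead of going through dev_open().
+ */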
+static int qeth_l2_recover(void *ptr)
+{
+	struct qeth_card *card;
+	int rc = 0;
+
+	card = (struct qeth_card *) ptr;
+	QETH_CARD_TEXT(card, 2, "recover1");
+	if (!qeth_do_run_thread(card, QETH_RECOVER_THREAD))
+		return 0;
+	QETH_CARD_TEXT(card, 2, "recover2");
+	dev_warn(&card->gdev->dev,
+		"A recovery process has been started for the device\n");
+	qeth_set_recovery_task(card);
+	__qeth_l2_set_offline(card->gdev, 1);
+	rc = __qeth_l2_set_online(card->gdev, 1);
+	if (!rc)
+		dev_info(&card->gdev->dev,
+			"Device successfully recovered!\n");
+	else {
+		qeth_close_dev(card);
+		dev_warn(&card->gdev->dev, "The qeth device driver "
+				"failed to recover an error on the device\n");
+	}
+	qeth_clear_recovery_task(card);
+	qeth_clear_thread_start_bit(card, QETH_RECOVER_THREAD);
+	qeth_clear_thread_running_bit(card, QETH_RECOVER_THREAD);
+	return 0;
+}
+
+static int __init qeth_l2_init(void)
+{
+	pr_info("register layer 2 discipline\n");
+	return 0;
+}
+
+static void __exit qeth_l2_exit(void)
+{
+	pr_info("unregister layer 2 discipline\n");
+}
+
+static int qeth_l2_pm_suspend(struct ccwgroup_device *gdev)
+{
+	struct qeth_card *card = dev_get_drvdata(&gdev->dev);
+
+	netif_device_detach(card->dev);
+	qeth_set_allowed_threads(card, 0, 1);
+	wait_event(card->wait_q, qeth_threads_running(card, 0xffffffff) == 0);
+	if (gdev->state == CCWGROUP_OFFLINE)
+		return 0;
+	if (card->state == CARD_STATE_UP) {
+		if (card->info.hwtrap)
+			qeth_hw_trap(card, QETH_DIAGS_TRAP_DISARM);
+		__qeth_l2_set_offline(card->gdev, 1);
+	} else
+		__qeth_l2_set_offline(card->gdev, 0);
+	return 0;
+}
+
+static int qeth_l2_pm_resume(struct ccwgroup_device *gdev)
+{
+	struct qeth_card *card = dev_get_drvdata(&gdev->dev);
+	int rc = 0;
+
+	if (gdev->state == CCWGROUP_OFFLINE)
+		goto out;
+
+	if (card->state == CARD_STATE_RECOVER) {
+		rc = __qeth_l2_set_online(card->gdev, 1);
+		if (rc) {
+			rtnl_lock();
+			dev_close(card->dev);
+			rtnl_unlock();
+		}
+	} else
+		rc = __qeth_l2_set_online(card->gdev, 0);
+out:
+	qeth_set_allowed_threads(card, 0xffffffff, 0);
+	netif_device_attach(card->dev);
+	if (rc)
+		dev_warn(&card->gdev->dev, "The qeth device driver "
+			"failed to recover an error on the device\n");
+	return rc;
+}
+
+/* Returns zero if the command is successfully "consumed" */
+static int qeth_l2_control_event(struct qeth_card *card,
+					struct qeth_ipa_cmd *cmd)
+{
+	switch (cmd->hdr.command) {
+	case IPA_CMD_SETBRIDGEPORT_OSA:
+	case IPA_CMD_SETBRIDGEPORT_IQD:
+		if (cmd->data.sbp.hdr.command_code ==
+				IPA_SBP_BRIDGE_PORT_STATE_CHANGE) {
+			qeth_bridge_state_change(card, cmd);
+			return 0;
+		} else
+			return 1;
+	case IPA_CMD_ADDRESS_CHANGE_NOTIF:
+		qeth_bridge_host_event(card, cmd);
+		return 0;
+	default:
+		return 1;
+	}
+}
+
+struct qeth_discipline qeth_l2_discipline = {
+	.devtype = &qeth_l2_devtype,
+	.process_rx_buffer = qeth_l2_process_inbound_buffer,
+	.recover = qeth_l2_recover,
+	.setup = qeth_l2_probe_device,
+	.remove = qeth_l2_remove_device,
+	.set_online = qeth_l2_set_online,
+	.set_offline = qeth_l2_set_offline,
+	.freeze = qeth_l2_pm_suspend,
+	.thaw = qeth_l2_pm_resume,
+	.restore = qeth_l2_pm_resume,
+	.do_ioctl = NULL,
+	.control_event_handler = qeth_l2_control_event,
+};
+EXPORT_SYMBOL_GPL(qeth_l2_discipline);
+
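+/* Synchronous control I/O for OSN: wait for exclusive use of the channel
+ * (irq_pending), then issue the prepared CCW under the ccwdev lock.
+ */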
+static int qeth_osn_send_control_data(struct qeth_card *card, int len,
+			   struct qeth_cmd_buffer *iob)
+{
+	struct qeth_channel *channel = iob->channel;
+	unsigned long flags;
+	int rc = 0;
+
+	QETH_CARD_TEXT(card, 5, "osndctrd");
+
+	wait_event(card->wait_q,
+		   atomic_cmpxchg(&channel->irq_pending, 0, 1) == 0);
+	qeth_prepare_control_data(card, len, iob);
+	QETH_CARD_TEXT(card, 6, "osnoirqp");
+	spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
+	rc = ccw_device_start_timeout(channel->ccwdev, channel->ccw,
+				      (addr_t) iob, 0, 0, QETH_IPA_TIMEOUT);
+	spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);
+	if (rc) {
+		QETH_DBF_MESSAGE(2, "qeth_osn_send_control_data: "
+			   "ccw_device_start rc = %i\n", rc);
+		QETH_CARD_TEXT_(card, 2, " err%d", rc);
+		qeth_release_buffer(channel, iob);
+		atomic_set(&channel->irq_pending, 0);
+		wake_up(&card->wait_q);
+	}
+	return rc;
+}
+
+static int qeth_osn_send_ipa_cmd(struct qeth_card *card,
+			struct qeth_cmd_buffer *iob, int data_len)
+{
+	u16 s1, s2;
+
+	QETH_CARD_TEXT(card, 4, "osndipa");
+
+	qeth_prepare_ipa_cmd(card, iob);
+	s1 = (u16)(IPA_PDU_HEADER_SIZE + data_len);
+	s2 = (u16)data_len;
+	memcpy(QETH_IPA_PDU_LEN_TOTAL(iob->data), &s1, 2);
+	memcpy(QETH_IPA_PDU_LEN_PDU1(iob->data), &s2, 2);
+	memcpy(QETH_IPA_PDU_LEN_PDU2(iob->data), &s2, 2);
+	memcpy(QETH_IPA_PDU_LEN_PDU3(iob->data), &s2, 2);
+	return qeth_osn_send_control_data(card, s1, iob);
+}
+
+int qeth_osn_assist(struct net_device *dev, void *data, int data_len)
+{
+	struct qeth_cmd_buffer *iob;
+	struct qeth_card *card;
+
+	if (!dev)
+		return -ENODEV;
+	card = dev->ml_priv;
+	if (!card)
+		return -ENODEV;
+	QETH_CARD_TEXT(card, 2, "osnsdmc");
+	if (!qeth_card_hw_is_reachable(card))
+		return -ENODEV;
+	iob = qeth_wait_for_buffer(&card->write);
+	memcpy(__ipa_cmd(iob), data, data_len);
+	return qeth_osn_send_ipa_cmd(card, iob, data_len);
+}
+EXPORT_SYMBOL(qeth_osn_assist);
+
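+/* Look up the L2 netdev by the read channel's device number and install the
+ * peer driver's assist/data callbacks; both callbacks are mandatory.
+ */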
+int qeth_osn_register(unsigned char *read_dev_no, struct net_device **dev,
+		  int (*assist_cb)(struct net_device *, void *),
+		  int (*data_cb)(struct sk_buff *))
+{
+	struct qeth_card *card;
+
+	*dev = qeth_l2_netdev_by_devno(read_dev_no);
+	if (*dev == NULL)
+		return -ENODEV;
+	card = (*dev)->ml_priv;
+	if (!card)
+		return -ENODEV;
+	QETH_CARD_TEXT(card, 2, "osnreg");
+	if ((assist_cb == NULL) || (data_cb == NULL))
+		return -EINVAL;
+	card->osn_info.assist_cb = assist_cb;
+	card->osn_info.data_cb = data_cb;
+	return 0;
+}
+EXPORT_SYMBOL(qeth_osn_register);
+
+void qeth_osn_deregister(struct net_device *dev)
+{
+	struct qeth_card *card;
+
+	if (!dev)
+		return;
+	card = dev->ml_priv;
+	if (!card)
+		return;
+	QETH_CARD_TEXT(card, 2, "osndereg");
+	card->osn_info.assist_cb = NULL;
+	card->osn_info.data_cb = NULL;
+	return;
+}
+EXPORT_SYMBOL(qeth_osn_deregister);
+
+/* SETBRIDGEPORT support, async notifications */
+
+enum qeth_an_event_type {anev_reg_unreg, anev_abort, anev_reset};
+
+/**
+ * qeth_bridge_emit_host_event() - bridgeport address change notification
+ * @card:  qeth_card structure pointer, for udev events.
+ * @evtype:  "normal" register/unregister, or abort, or reset. For abort
+ *	      and reset, token and addr_lnid are unused and may be NULL.
+ * @code:  event bitmask: high-order bit 0x80 set means removal of an
+ *			  object, clear means addition of an object.
+ *			  Low bits: 0x01 - VLAN, 0x02 - MAC, 0x03 - VLAN and MAC.
+ * @token: "network token" structure identifying physical address of the port.
+ * @addr_lnid: pointer to structure with MAC address and VLAN ID.
+ *
+ * This function is called when registrations and deregistrations are
+ * reported by the hardware, and also when notifications are enabled -
+ * for all currently registered addresses.
+ */
+static void qeth_bridge_emit_host_event(struct qeth_card *card,
+	enum qeth_an_event_type evtype,
+	u8 code, struct net_if_token *token, struct mac_addr_lnid *addr_lnid)
+{
+	char str[7][32];
+	char *env[8];
+	int i = 0;
+
+	switch (evtype) {
+	case anev_reg_unreg:
+		snprintf(str[i], sizeof(str[i]), "BRIDGEDHOST=%s",
+				(code & IPA_ADDR_CHANGE_CODE_REMOVAL)
+				? "deregister" : "register");
+		env[i] = str[i]; i++;
+		if (code & IPA_ADDR_CHANGE_CODE_VLANID) {
+			snprintf(str[i], sizeof(str[i]), "VLAN=%d",
+				addr_lnid->lnid);
+			env[i] = str[i]; i++;
+		}
+		if (code & IPA_ADDR_CHANGE_CODE_MACADDR) {
+			snprintf(str[i], sizeof(str[i]), "MAC=%pM",
+				addr_lnid->mac);
+			env[i] = str[i]; i++;
+		}
+		snprintf(str[i], sizeof(str[i]), "NTOK_BUSID=%x.%x.%04x",
+			token->cssid, token->ssid, token->devnum);
+		env[i] = str[i]; i++;
+		snprintf(str[i], sizeof(str[i]), "NTOK_IID=%02x", token->iid);
+		env[i] = str[i]; i++;
+		snprintf(str[i], sizeof(str[i]), "NTOK_CHPID=%02x",
+				token->chpid);
+		env[i] = str[i]; i++;
+		snprintf(str[i], sizeof(str[i]), "NTOK_CHID=%04x", token->chid);
+		env[i] = str[i]; i++;
+		break;
+	case anev_abort:
+		snprintf(str[i], sizeof(str[i]), "BRIDGEDHOST=abort");
+		env[i] = str[i]; i++;
+		break;
+	case anev_reset:
+		snprintf(str[i], sizeof(str[i]), "BRIDGEDHOST=reset");
+		env[i] = str[i]; i++;
+		break;
+	}
+	env[i] = NULL;
+	kobject_uevent_env(&card->gdev->dev.kobj, KOBJ_CHANGE, env);
+}
+
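+/* State-change notifications arrive in the IPA command handler, so the event
+ * data is copied into a qeth_bridge_state_data snapshot and the udev event
+ * is emitted from a workqueue worker.
+ */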
+struct qeth_bridge_state_data {
+	struct work_struct worker;
+	struct qeth_card *card;
+	struct qeth_sbp_state_change qports;
+};
+
+static void qeth_bridge_state_change_worker(struct work_struct *work)
+{
+	struct qeth_bridge_state_data *data =
+		container_of(work, struct qeth_bridge_state_data, worker);
+	/* We are only interested in the first entry - local port */
+	struct qeth_sbp_port_entry *entry = &data->qports.entry[0];
+	char env_locrem[32];
+	char env_role[32];
+	char env_state[32];
+	char *env[] = {
+		env_locrem,
+		env_role,
+		env_state,
+		NULL
+	};
+
+	/* Role should not change by itself, but if it did, */
+	/* information from the hardware is authoritative.  */
+	mutex_lock(&data->card->conf_mutex);
+	data->card->options.sbp.role = entry->role;
+	mutex_unlock(&data->card->conf_mutex);
+
+	snprintf(env_locrem, sizeof(env_locrem), "BRIDGEPORT=statechange");
+	snprintf(env_role, sizeof(env_role), "ROLE=%s",
+		(entry->role == QETH_SBP_ROLE_NONE) ? "none" :
+		(entry->role == QETH_SBP_ROLE_PRIMARY) ? "primary" :
+		(entry->role == QETH_SBP_ROLE_SECONDARY) ? "secondary" :
+		"<INVALID>");
+	snprintf(env_state, sizeof(env_state), "STATE=%s",
+		(entry->state == QETH_SBP_STATE_INACTIVE) ? "inactive" :
+		(entry->state == QETH_SBP_STATE_STANDBY) ? "standby" :
+		(entry->state == QETH_SBP_STATE_ACTIVE) ? "active" :
+		"<INVALID>");
+	kobject_uevent_env(&data->card->gdev->dev.kobj,
+				KOBJ_CHANGE, env);
+	kfree(data);
+}
+
+static void qeth_bridge_state_change(struct qeth_card *card,
+					struct qeth_ipa_cmd *cmd)
+{
+	struct qeth_sbp_state_change *qports =
+		 &cmd->data.sbp.data.state_change;
+	struct qeth_bridge_state_data *data;
+	int extrasize;
+
+	QETH_CARD_TEXT(card, 2, "brstchng");
+	if (qports->entry_length != sizeof(struct qeth_sbp_port_entry)) {
+		QETH_CARD_TEXT_(card, 2, "BPsz%04x", qports->entry_length);
+		return;
+	}
+	extrasize = sizeof(struct qeth_sbp_port_entry) * qports->num_entries;
+	data = kzalloc(sizeof(struct qeth_bridge_state_data) + extrasize,
+		GFP_ATOMIC);
+	if (!data) {
+		QETH_CARD_TEXT(card, 2, "BPSalloc");
+		return;
+	}
+	INIT_WORK(&data->worker, qeth_bridge_state_change_worker);
+	data->card = card;
+	memcpy(&data->qports, qports,
+			sizeof(struct qeth_sbp_state_change) + extrasize);
+	queue_work(qeth_wq, &data->worker);
+}
+
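+/* Host (address-change) notifications use the same deferral scheme; a
+ * non-zero lost_event_mask means notifications were lost, and the worker
+ * disables hostnotification and emits an "abort" event instead.
+ */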
+struct qeth_bridge_host_data {
+	struct work_struct worker;
+	struct qeth_card *card;
+	struct qeth_ipacmd_addr_change hostevs;
+};
+
+static void qeth_bridge_host_event_worker(struct work_struct *work)
+{
+	struct qeth_bridge_host_data *data =
+		container_of(work, struct qeth_bridge_host_data, worker);
+	int i;
+
+	if (data->hostevs.lost_event_mask) {
+		dev_info(&data->card->gdev->dev,
+"Address notification from the Bridge Port stopped %s (%s)\n",
+			data->card->dev->name,
+			(data->hostevs.lost_event_mask == 0x01)
+			? "Overflow"
+			: (data->hostevs.lost_event_mask == 0x02)
+			? "Bridge port state change"
+			: "Unknown reason");
+		mutex_lock(&data->card->conf_mutex);
+		data->card->options.sbp.hostnotification = 0;
+		mutex_unlock(&data->card->conf_mutex);
+		qeth_bridge_emit_host_event(data->card, anev_abort,
+			0, NULL, NULL);
+	} else
+		for (i = 0; i < data->hostevs.num_entries; i++) {
+			struct qeth_ipacmd_addr_change_entry *entry =
+					&data->hostevs.entry[i];
+			qeth_bridge_emit_host_event(data->card,
+					anev_reg_unreg,
+					entry->change_code,
+					&entry->token, &entry->addr_lnid);
+		}
+	kfree(data);
+}
+
+static void qeth_bridge_host_event(struct qeth_card *card,
+					struct qeth_ipa_cmd *cmd)
+{
+	struct qeth_ipacmd_addr_change *hostevs =
+		 &cmd->data.addrchange;
+	struct qeth_bridge_host_data *data;
+	int extrasize;
+
+	QETH_CARD_TEXT(card, 2, "brhostev");
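+	/* rc 0x0010 presumably flags lost events; force a non-zero mask so
+	 * the worker reports the loss even when no reason was supplied.
+	 */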
+	if (cmd->hdr.return_code != 0x0000) {
+		if (cmd->hdr.return_code == 0x0010) {
+			if (hostevs->lost_event_mask == 0x00)
+				hostevs->lost_event_mask = 0xff;
+		} else {
+			QETH_CARD_TEXT_(card, 2, "BPHe%04x",
+				cmd->hdr.return_code);
+			return;
+		}
+	}
+	extrasize = sizeof(struct qeth_ipacmd_addr_change_entry) *
+						hostevs->num_entries;
+	data = kzalloc(sizeof(struct qeth_bridge_host_data) + extrasize,
+		GFP_ATOMIC);
+	if (!data) {
+		QETH_CARD_TEXT(card, 2, "BPHalloc");
+		return;
+	}
+	INIT_WORK(&data->worker, qeth_bridge_host_event_worker);
+	data->card = card;
+	memcpy(&data->hostevs, hostevs,
+			sizeof(struct qeth_ipacmd_addr_change) + extrasize);
+	queue_work(qeth_wq, &data->worker);
+}
+
+/* SETBRIDGEPORT support; sending commands */
+
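+/* Callback control for SETBRIDGEPORT commands: collects the transport-level
+ * (ipa_rc) and command-level (cmd_rc) return codes plus, depending on the
+ * sub-command, pointers for the queried results.
+ */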
+struct _qeth_sbp_cbctl {
+	u16 ipa_rc;
+	u16 cmd_rc;
+	union {
+		u32 supported;
+		struct {
+			enum qeth_sbp_roles *role;
+			enum qeth_sbp_states *state;
+		} qports;
+	} data;
+};
+
+/**
+ * qeth_bridgeport_makerc() - derive "traditional" error from hardware codes.
+ * @card:		      qeth_card structure pointer, for debug messages.
+ * @cbctl:		      state structure with hardware return codes.
+ * @setcmd:		      IPA command code
+ *
+ * Returns negative errno-compatible error indication or 0 on success.
+ */
+static int qeth_bridgeport_makerc(struct qeth_card *card,
+	struct _qeth_sbp_cbctl *cbctl, enum qeth_ipa_sbp_cmd setcmd)
+{
+	int rc;
+	int is_iqd = (card->info.type == QETH_CARD_TYPE_IQD);
+
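+	/* cmd_rc is only meaningful when the transport-level rc indicates
+	 * success: IQD reports IPA_RC_SUCCESS there, while OSA mirrors cmd_rc.
+	 */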
+	if ((is_iqd && (cbctl->ipa_rc == IPA_RC_SUCCESS)) ||
+	    (!is_iqd && (cbctl->ipa_rc == cbctl->cmd_rc)))
+		switch (cbctl->cmd_rc) {
+		case IPA_RC_SUCCESS:
+			rc = 0;
+			break;
+		case IPA_RC_L2_UNSUPPORTED_CMD:
+		case IPA_RC_UNSUPPORTED_COMMAND:
+			rc = -EOPNOTSUPP;
+			break;
+		case IPA_RC_SBP_OSA_NOT_CONFIGURED:
+		case IPA_RC_SBP_IQD_NOT_CONFIGURED:
+			rc = -ENODEV; /* maybe not the best code here? */
+			dev_err(&card->gdev->dev,
+	"The device is not configured as a Bridge Port\n");
+			break;
+		case IPA_RC_SBP_OSA_OS_MISMATCH:
+		case IPA_RC_SBP_IQD_OS_MISMATCH:
+			rc = -EPERM;
+			dev_err(&card->gdev->dev,
+	"A Bridge Port is already configured by a different operating system\n");
+			break;
+		case IPA_RC_SBP_OSA_ANO_DEV_PRIMARY:
+		case IPA_RC_SBP_IQD_ANO_DEV_PRIMARY:
+			switch (setcmd) {
+			case IPA_SBP_SET_PRIMARY_BRIDGE_PORT:
+				rc = -EEXIST;
+				dev_err(&card->gdev->dev,
+	"The LAN already has a primary Bridge Port\n");
+				break;
+			case IPA_SBP_SET_SECONDARY_BRIDGE_PORT:
+				rc = -EBUSY;
+				dev_err(&card->gdev->dev,
+	"The device is already a primary Bridge Port\n");
+				break;
+			default:
+				rc = -EIO;
+			}
+			break;
+		case IPA_RC_SBP_OSA_CURRENT_SECOND:
+		case IPA_RC_SBP_IQD_CURRENT_SECOND:
+			rc = -EBUSY;
+			dev_err(&card->gdev->dev,
+	"The device is already a secondary Bridge Port\n");
+			break;
+		case IPA_RC_SBP_OSA_LIMIT_SECOND:
+		case IPA_RC_SBP_IQD_LIMIT_SECOND:
+			rc = -EEXIST;
+			dev_err(&card->gdev->dev,
+	"The LAN cannot have more secondary Bridge Ports\n");
+			break;
+		case IPA_RC_SBP_OSA_CURRENT_PRIMARY:
+		case IPA_RC_SBP_IQD_CURRENT_PRIMARY:
+			rc = -EBUSY;
+			dev_err(&card->gdev->dev,
+	"The device is already a primary Bridge Port\n");
+			break;
+		case IPA_RC_SBP_OSA_NOT_AUTHD_BY_ZMAN:
+		case IPA_RC_SBP_IQD_NOT_AUTHD_BY_ZMAN:
+			rc = -EACCES;
+			dev_err(&card->gdev->dev,
+	"The device is not authorized to be a Bridge Port\n");
+			break;
+		default:
+			rc = -EIO;
+		}
+	else
+		switch (cbctl->ipa_rc) {
+		case IPA_RC_NOTSUPP:
+			rc = -EOPNOTSUPP;
+			break;
+		case IPA_RC_UNSUPPORTED_COMMAND:
+			rc = -EOPNOTSUPP;
+			break;
+		default:
+			rc = -EIO;
+		}
+
+	if (rc) {
+		QETH_CARD_TEXT_(card, 2, "SBPi%04x", cbctl->ipa_rc);
+		QETH_CARD_TEXT_(card, 2, "SBPc%04x", cbctl->cmd_rc);
+	}
+	return rc;
+}
+
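+/* Allocate an IPA command buffer and fill in the common SETBRIDGEPORT
+ * header; IQD and OSA cards use distinct IPA command codes for the same
+ * sub-commands.
+ */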
+static struct qeth_cmd_buffer *qeth_sbp_build_cmd(struct qeth_card *card,
+						  enum qeth_ipa_sbp_cmd sbp_cmd,
+						  unsigned int cmd_length)
+{
+	enum qeth_ipa_cmds ipa_cmd = (card->info.type == QETH_CARD_TYPE_IQD) ?
+					IPA_CMD_SETBRIDGEPORT_IQD :
+					IPA_CMD_SETBRIDGEPORT_OSA;
+	struct qeth_cmd_buffer *iob;
+	struct qeth_ipa_cmd *cmd;
+
+	iob = qeth_get_ipacmd_buffer(card, ipa_cmd, 0);
+	if (!iob)
+		return iob;
+	cmd = __ipa_cmd(iob);
+	cmd->data.sbp.hdr.cmdlength = sizeof(struct qeth_ipacmd_sbp_hdr) +
+				      cmd_length;
+	cmd->data.sbp.hdr.command_code = sbp_cmd;
+	cmd->data.sbp.hdr.used_total = 1;
+	cmd->data.sbp.hdr.seq_no = 1;
+	return iob;
+}
+
+static int qeth_bridgeport_query_support_cb(struct qeth_card *card,
+	struct qeth_reply *reply, unsigned long data)
+{
+	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
+	struct _qeth_sbp_cbctl *cbctl = (struct _qeth_sbp_cbctl *)reply->param;
+
+	QETH_CARD_TEXT(card, 2, "brqsupcb");
+	cbctl->ipa_rc = cmd->hdr.return_code;
+	cbctl->cmd_rc = cmd->data.sbp.hdr.return_code;
+	if ((cbctl->ipa_rc == 0) && (cbctl->cmd_rc == 0)) {
+		cbctl->data.supported =
+			cmd->data.sbp.data.query_cmds_supp.supported_cmds;
+	} else {
+		cbctl->data.supported = 0;
+	}
+	return 0;
+}
+
+/**
+ * qeth_bridgeport_query_support() - store bitmask of supported subfunctions.
+ * @card:			     qeth_card structure pointer.
+ *
+ * Sets the bitmask of supported setbridgeport subfunctions in the qeth_card
+ * structure: card->options.sbp.supported_funcs.
+ */
+static void qeth_bridgeport_query_support(struct qeth_card *card)
+{
+	struct qeth_cmd_buffer *iob;
+	struct _qeth_sbp_cbctl cbctl;
+
+	QETH_CARD_TEXT(card, 2, "brqsuppo");
+	iob = qeth_sbp_build_cmd(card, IPA_SBP_QUERY_COMMANDS_SUPPORTED,
+				 sizeof(struct qeth_sbp_query_cmds_supp));
+	if (!iob)
+		return;
+	if (qeth_send_ipa_cmd(card, iob, qeth_bridgeport_query_support_cb,
+							(void *)&cbctl) ||
+	    qeth_bridgeport_makerc(card, &cbctl,
+					IPA_SBP_QUERY_COMMANDS_SUPPORTED)) {
+		/* non-zero makerc signifies failure and has produced messages */
+		card->options.sbp.role = QETH_SBP_ROLE_NONE;
+		return;
+	}
+	card->options.sbp.supported_funcs = cbctl.data.supported;
+}
+
+static int qeth_bridgeport_query_ports_cb(struct qeth_card *card,
+	struct qeth_reply *reply, unsigned long data)
+{
+	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
+	struct qeth_sbp_query_ports *qports = &cmd->data.sbp.data.query_ports;
+	struct _qeth_sbp_cbctl *cbctl = (struct _qeth_sbp_cbctl *)reply->param;
+
+	QETH_CARD_TEXT(card, 2, "brqprtcb");
+	cbctl->ipa_rc = cmd->hdr.return_code;
+	cbctl->cmd_rc = cmd->data.sbp.hdr.return_code;
+	if ((cbctl->ipa_rc != 0) || (cbctl->cmd_rc != 0))
+		return 0;
+	if (qports->entry_length != sizeof(struct qeth_sbp_port_entry)) {
+		cbctl->cmd_rc = 0xffff;
+		QETH_CARD_TEXT_(card, 2, "SBPs%04x", qports->entry_length);
+		return 0;
+	}
+	/* first entry contains the state of the local port */
+	if (qports->num_entries > 0) {
+		if (cbctl->data.qports.role)
+			*cbctl->data.qports.role = qports->entry[0].role;
+		if (cbctl->data.qports.state)
+			*cbctl->data.qports.state = qports->entry[0].state;
+	}
+	return 0;
+}
+
+/**
+ * qeth_bridgeport_query_ports() - query local bridgeport status.
+ * @card:			   qeth_card structure pointer.
+ * @role:   Role of the port: 0-none, 1-primary, 2-secondary.
+ * @state:  State of the port: 0-inactive, 1-standby, 2-active.
+ *
+ * Returns negative errno-compatible error indication or 0 on success.
+ *
+ * 'role' and 'state' are not updated in case of hardware operation failure.
+ */
+int qeth_bridgeport_query_ports(struct qeth_card *card,
+	enum qeth_sbp_roles *role, enum qeth_sbp_states *state)
+{
+	int rc = 0;
+	struct qeth_cmd_buffer *iob;
+	struct _qeth_sbp_cbctl cbctl = {
+		.data = {
+			.qports = {
+				.role = role,
+				.state = state,
+			},
+		},
+	};
+
+	QETH_CARD_TEXT(card, 2, "brqports");
+	if (!(card->options.sbp.supported_funcs & IPA_SBP_QUERY_BRIDGE_PORTS))
+		return -EOPNOTSUPP;
+	iob = qeth_sbp_build_cmd(card, IPA_SBP_QUERY_BRIDGE_PORTS, 0);
+	if (!iob)
+		return -ENOMEM;
+	rc = qeth_send_ipa_cmd(card, iob, qeth_bridgeport_query_ports_cb,
+				(void *)&cbctl);
+	if (rc < 0)
+		return rc;
+	return qeth_bridgeport_makerc(card, &cbctl, IPA_SBP_QUERY_BRIDGE_PORTS);
+}
+
+static int qeth_bridgeport_set_cb(struct qeth_card *card,
+	struct qeth_reply *reply, unsigned long data)
+{
+	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *)data;
+	struct _qeth_sbp_cbctl *cbctl = (struct _qeth_sbp_cbctl *)reply->param;
+
+	QETH_CARD_TEXT(card, 2, "brsetrcb");
+	cbctl->ipa_rc = cmd->hdr.return_code;
+	cbctl->cmd_rc = cmd->data.sbp.hdr.return_code;
+	return 0;
+}
+
+/**
+ * qeth_bridgeport_setrole() - Assign a new role to the port.
+ * @card:		       qeth_card structure pointer.
+ * @role:		       Role to assign.
+ *
+ * Returns negative errno-compatible error indication or 0 on success.
+ */
+int qeth_bridgeport_setrole(struct qeth_card *card, enum qeth_sbp_roles role)
+{
+	int rc = 0;
+	int cmdlength;
+	struct qeth_cmd_buffer *iob;
+	struct _qeth_sbp_cbctl cbctl;
+	enum qeth_ipa_sbp_cmd setcmd;
+
+	QETH_CARD_TEXT(card, 2, "brsetrol");
+	switch (role) {
+	case QETH_SBP_ROLE_NONE:
+		setcmd = IPA_SBP_RESET_BRIDGE_PORT_ROLE;
+		cmdlength = sizeof(struct qeth_sbp_reset_role);
+		break;
+	case QETH_SBP_ROLE_PRIMARY:
+		setcmd = IPA_SBP_SET_PRIMARY_BRIDGE_PORT;
+		cmdlength = sizeof(struct qeth_sbp_set_primary);
+		break;
+	case QETH_SBP_ROLE_SECONDARY:
+		setcmd = IPA_SBP_SET_SECONDARY_BRIDGE_PORT;
+		cmdlength = sizeof(struct qeth_sbp_set_secondary);
+		break;
+	default:
+		return -EINVAL;
+	}
+	if (!(card->options.sbp.supported_funcs & setcmd))
+		return -EOPNOTSUPP;
+	iob = qeth_sbp_build_cmd(card, setcmd, cmdlength);
+	if (!iob)
+		return -ENOMEM;
+	rc = qeth_send_ipa_cmd(card, iob, qeth_bridgeport_set_cb,
+				(void *)&cbctl);
+	if (rc < 0)
+		return rc;
+	return qeth_bridgeport_makerc(card, &cbctl, setcmd);
+}
+
+/**
+ * qeth_anset_makerc() - derive "traditional" error from hardware codes.
+ * @card:		      qeth_card structure pointer, for debug messages.
+ * @pnso_rc:		      return code of the PNSO operation.
+ * @response:		      response code reported by the hardware.
+ *
+ * Returns negative errno-compatible error indication or 0 on success.
+ */
+static int qeth_anset_makerc(struct qeth_card *card, int pnso_rc, u16 response)
+{
+	int rc;
+
+	if (pnso_rc == 0)
+		switch (response) {
+		case 0x0001:
+			rc = 0;
+			break;
+		case 0x0004:
+		case 0x0100:
+		case 0x0106:
+			rc = -EOPNOTSUPP;
+			dev_err(&card->gdev->dev,
+				"Setting address notification failed\n");
+			break;
+		case 0x0107:
+			rc = -EAGAIN;
+			break;
+		default:
+			rc = -EIO;
+		}
+	else
+		rc = -EIO;
+
+	if (rc) {
+		QETH_CARD_TEXT_(card, 2, "SBPp%04x", pnso_rc);
+		QETH_CARD_TEXT_(card, 2, "SBPr%04x", response);
+	}
+	return rc;
+}
+
+static void qeth_bridgeport_an_set_cb(void *priv,
+		enum qdio_brinfo_entry_type type, void *entry)
+{
+	struct qeth_card *card = (struct qeth_card *)priv;
+	struct qdio_brinfo_entry_l2 *l2entry;
+	u8 code;
+
+	if (type != l2_addr_lnid) {
+		WARN_ON_ONCE(1);
+		return;
+	}
+
+	l2entry = (struct qdio_brinfo_entry_l2 *)entry;
+	code = IPA_ADDR_CHANGE_CODE_MACADDR;
+	if (l2entry->addr_lnid.lnid)
+		code |= IPA_ADDR_CHANGE_CODE_VLANID;
+	qeth_bridge_emit_host_event(card, anev_reg_unreg, code,
+		(struct net_if_token *)&l2entry->nit,
+		(struct mac_addr_lnid *)&l2entry->addr_lnid);
+}
+
+/**
+ * qeth_bridgeport_an_set() - Enable or disable bridgeport address notification
+ * @card:		      qeth_card structure pointer.
+ * @enable:		      0 - disable, non-zero - enable notifications
+ *
+ * Returns negative errno-compatible error indication or 0 on success.
+ *
+ * On enable, emits a series of address notification udev events for all
+ * currently registered hosts.
+ */
+int qeth_bridgeport_an_set(struct qeth_card *card, int enable)
+{
+	int rc;
+	u16 response;
+	struct ccw_device *ddev;
+	struct subchannel_id schid;
+
+	if (!card)
+		return -EINVAL;
+	if (!card->options.sbp.supported_funcs)
+		return -EOPNOTSUPP;
+	ddev = CARD_DDEV(card);
+	ccw_device_get_schid(ddev, &schid);
+
+	if (enable) {
+		qeth_bridge_emit_host_event(card, anev_reset, 0, NULL, NULL);
+		rc = qdio_pnso_brinfo(schid, 1, &response,
+			qeth_bridgeport_an_set_cb, card);
+	} else
+		rc = qdio_pnso_brinfo(schid, 0, &response, NULL, NULL);
+	return qeth_anset_makerc(card, rc, response);
+}
+
+static bool qeth_bridgeport_is_in_use(struct qeth_card *card)
+{
+	return (card->options.sbp.role || card->options.sbp.reflect_promisc ||
+		card->options.sbp.hostnotification);
+}
+
+/* VNIC Characteristics support */
+
+/* handle VNICC IPA command return codes; convert to error codes */
+static int qeth_l2_vnicc_makerc(struct qeth_card *card, int ipa_rc)
+{
+	int rc;
+
+	switch (ipa_rc) {
+	case IPA_RC_SUCCESS:
+		return ipa_rc;
+	case IPA_RC_L2_UNSUPPORTED_CMD:
+	case IPA_RC_NOTSUPP:
+		rc = -EOPNOTSUPP;
+		break;
+	case IPA_RC_VNICC_OOSEQ:
+		rc = -EALREADY;
+		break;
+	case IPA_RC_VNICC_VNICBP:
+		rc = -EBUSY;
+		break;
+	case IPA_RC_L2_ADDR_TABLE_FULL:
+		rc = -ENOSPC;
+		break;
+	case IPA_RC_L2_MAC_NOT_AUTH_BY_ADP:
+		rc = -EACCES;
+		break;
+	default:
+		rc = -EIO;
+	}
+
+	QETH_CARD_TEXT_(card, 2, "err%04x", ipa_rc);
+	return rc;
+}
+
+/* generic VNICC request call back control */
+struct _qeth_l2_vnicc_request_cbctl {
+	u32 sub_cmd;
+	struct {
+		u32 vnic_char;
+		u32 timeout;
+	} param;
+	struct {
+		union {
+			u32 *sup_cmds;
+			u32 *timeout;
+		};
+	} result;
+};
+
+/* generic VNICC request call back */
+static int qeth_l2_vnicc_request_cb(struct qeth_card *card,
+				    struct qeth_reply *reply,
+				    unsigned long data)
+{
+	struct _qeth_l2_vnicc_request_cbctl *cbctl =
+		(struct _qeth_l2_vnicc_request_cbctl *) reply->param;
+	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
+	struct qeth_ipacmd_vnicc *rep = &cmd->data.vnicc;
+
+	QETH_CARD_TEXT(card, 2, "vniccrcb");
+	if (cmd->hdr.return_code)
+		return 0;
+	/* return results to caller */
+	card->options.vnicc.sup_chars = rep->hdr.sup;
+	card->options.vnicc.cur_chars = rep->hdr.cur;
+
+	if (cbctl->sub_cmd == IPA_VNICC_QUERY_CMDS)
+		*cbctl->result.sup_cmds = rep->query_cmds.sup_cmds;
+
+	if (cbctl->sub_cmd == IPA_VNICC_GET_TIMEOUT)
+		*cbctl->result.timeout = rep->getset_timeout.timeout;
+
+	return 0;
+}
+
+/* generic VNICC request */
+static int qeth_l2_vnicc_request(struct qeth_card *card,
+				 struct _qeth_l2_vnicc_request_cbctl *cbctl)
+{
+	struct qeth_ipacmd_vnicc *req;
+	struct qeth_cmd_buffer *iob;
+	struct qeth_ipa_cmd *cmd;
+	int rc;
+
+	QETH_CARD_TEXT(card, 2, "vniccreq");
+
+	/* get new buffer for request */
+	iob = qeth_get_ipacmd_buffer(card, IPA_CMD_VNICC, 0);
+	if (!iob)
+		return -ENOMEM;
+
+	/* create header for request */
+	cmd = __ipa_cmd(iob);
+	req = &cmd->data.vnicc;
+
+	/* create sub command header for request */
+	req->sub_hdr.data_length = sizeof(req->sub_hdr);
+	req->sub_hdr.sub_command = cbctl->sub_cmd;
+
+	/* create sub command specific request fields */
+	switch (cbctl->sub_cmd) {
+	case IPA_VNICC_QUERY_CHARS:
+		break;
+	case IPA_VNICC_QUERY_CMDS:
+		req->sub_hdr.data_length += sizeof(req->query_cmds);
+		req->query_cmds.vnic_char = cbctl->param.vnic_char;
+		break;
+	case IPA_VNICC_ENABLE:
+	case IPA_VNICC_DISABLE:
+		req->sub_hdr.data_length += sizeof(req->set_char);
+		req->set_char.vnic_char = cbctl->param.vnic_char;
+		break;
+	case IPA_VNICC_SET_TIMEOUT:
+		req->getset_timeout.timeout = cbctl->param.timeout;
+		/* fallthrough */
+	case IPA_VNICC_GET_TIMEOUT:
+		req->sub_hdr.data_length += sizeof(req->getset_timeout);
+		req->getset_timeout.vnic_char = cbctl->param.vnic_char;
+		break;
+	default:
+		qeth_release_buffer(iob->channel, iob);
+		return -EOPNOTSUPP;
+	}
+
+	/* send request */
+	rc = qeth_send_ipa_cmd(card, iob, qeth_l2_vnicc_request_cb,
+			       (void *) cbctl);
+
+	return qeth_l2_vnicc_makerc(card, rc);
+}
+
+/* VNICC query VNIC characteristics request */
+static int qeth_l2_vnicc_query_chars(struct qeth_card *card)
+{
+	struct _qeth_l2_vnicc_request_cbctl cbctl;
+
+	/* prepare callback control */
+	cbctl.sub_cmd = IPA_VNICC_QUERY_CHARS;
+
+	QETH_CARD_TEXT(card, 2, "vniccqch");
+	return qeth_l2_vnicc_request(card, &cbctl);
+}
+
+/* VNICC query sub commands request */
+static int qeth_l2_vnicc_query_cmds(struct qeth_card *card, u32 vnic_char,
+				    u32 *sup_cmds)
+{
+	struct _qeth_l2_vnicc_request_cbctl cbctl;
+
+	/* prepare callback control */
+	cbctl.sub_cmd = IPA_VNICC_QUERY_CMDS;
+	cbctl.param.vnic_char = vnic_char;
+	cbctl.result.sup_cmds = sup_cmds;
+
+	QETH_CARD_TEXT(card, 2, "vniccqcm");
+	return qeth_l2_vnicc_request(card, &cbctl);
+}
+
+/* VNICC enable/disable characteristic request */
+static int qeth_l2_vnicc_set_char(struct qeth_card *card, u32 vnic_char,
+				      u32 cmd)
+{
+	struct _qeth_l2_vnicc_request_cbctl cbctl;
+
+	/* prepare callback control */
+	cbctl.sub_cmd = cmd;
+	cbctl.param.vnic_char = vnic_char;
+
+	QETH_CARD_TEXT(card, 2, "vniccedc");
+	return qeth_l2_vnicc_request(card, &cbctl);
+}
+
+/* VNICC get/set timeout for characteristic request */
+static int qeth_l2_vnicc_getset_timeout(struct qeth_card *card, u32 vnicc,
+					u32 cmd, u32 *timeout)
+{
+	struct _qeth_l2_vnicc_request_cbctl cbctl;
+
+	/* prepare callback control */
+	cbctl.sub_cmd = cmd;
+	cbctl.param.vnic_char = vnicc;
+	if (cmd == IPA_VNICC_SET_TIMEOUT)
+		cbctl.param.timeout = *timeout;
+	if (cmd == IPA_VNICC_GET_TIMEOUT)
+		cbctl.result.timeout = timeout;
+
+	QETH_CARD_TEXT(card, 2, "vniccgst");
+	return qeth_l2_vnicc_request(card, &cbctl);
+}
+
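+/* options.vnicc.wanted_chars tracks the configuration requested via sysfs,
+ * while cur_chars mirrors the card's current state; changes made while the
+ * card is offline only update the cached bits and are replayed by
+ * qeth_l2_vnicc_init() on the next set_online.
+ */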
+/* set current VNICC flag state; called from sysfs store function */
+int qeth_l2_vnicc_set_state(struct qeth_card *card, u32 vnicc, bool state)
+{
+	int rc = 0;
+	u32 cmd;
+
+	QETH_CARD_TEXT(card, 2, "vniccsch");
+
+	/* do not change anything if BridgePort is enabled */
+	if (qeth_bridgeport_is_in_use(card))
+		return -EBUSY;
+
+	/* check if characteristic and enable/disable are supported */
+	if (!(card->options.vnicc.sup_chars & vnicc) ||
+	    !(card->options.vnicc.set_char_sup & vnicc))
+		return -EOPNOTSUPP;
+
+	/* set enable/disable command and store wanted characteristic */
+	if (state) {
+		cmd = IPA_VNICC_ENABLE;
+		card->options.vnicc.wanted_chars |= vnicc;
+	} else {
+		cmd = IPA_VNICC_DISABLE;
+		card->options.vnicc.wanted_chars &= ~vnicc;
+	}
+
+	/* do we need to do anything? */
+	if (card->options.vnicc.cur_chars == card->options.vnicc.wanted_chars)
+		return rc;
+
+	/* if card is not ready, just update the cached state and return */
+	if (!qeth_card_hw_is_reachable(card)) {
+		if (state)
+			card->options.vnicc.cur_chars |= vnicc;
+		else
+			card->options.vnicc.cur_chars &= ~vnicc;
+		return rc;
+	}
+
+	rc = qeth_l2_vnicc_set_char(card, vnicc, cmd);
+	if (rc)
+		card->options.vnicc.wanted_chars =
+			card->options.vnicc.cur_chars;
+	else {
+		/* successful online VNICC change; handle special cases */
+		if (state && vnicc == QETH_VNICC_RX_BCAST)
+			card->options.vnicc.rx_bcast_enabled = true;
+		if (!state && vnicc == QETH_VNICC_LEARNING)
+			qeth_l2_vnicc_recover_timeout(card, vnicc,
+					&card->options.vnicc.learning_timeout);
+	}
+
+	return rc;
+}
+
+/* get current VNICC flag state; called from sysfs show function */
+int qeth_l2_vnicc_get_state(struct qeth_card *card, u32 vnicc, bool *state)
+{
+	int rc = 0;
+
+	QETH_CARD_TEXT(card, 2, "vniccgch");
+
+	/* do not get anything if BridgePort is enabled */
+	if (qeth_bridgeport_is_in_use(card))
+		return -EBUSY;
+
+	/* check if characteristic is supported */
+	if (!(card->options.vnicc.sup_chars & vnicc))
+		return -EOPNOTSUPP;
+
+	/* if card is ready, query current VNICC state */
+	if (qeth_card_hw_is_reachable(card))
+		rc = qeth_l2_vnicc_query_chars(card);
+
+	*state = (card->options.vnicc.cur_chars & vnicc) ? true : false;
+	return rc;
+}
+
+/* set VNICC timeout; called from sysfs store function. Currently, only the
+ * learning characteristic supports a timeout.
+ */
+int qeth_l2_vnicc_set_timeout(struct qeth_card *card, u32 timeout)
+{
+	int rc = 0;
+
+	QETH_CARD_TEXT(card, 2, "vniccsto");
+
+	/* do not change anything if BridgePort is enabled */
+	if (qeth_bridgeport_is_in_use(card))
+		return -EBUSY;
+
+	/* check if characteristic and set_timeout are supported */
+	if (!(card->options.vnicc.sup_chars & QETH_VNICC_LEARNING) ||
+	    !(card->options.vnicc.getset_timeout_sup & QETH_VNICC_LEARNING))
+		return -EOPNOTSUPP;
+
+	/* do we need to do anything? */
+	if (card->options.vnicc.learning_timeout == timeout)
+		return rc;
+
+	/* if card is not ready, simply store the value internally and return */
+	if (!qeth_card_hw_is_reachable(card)) {
+		card->options.vnicc.learning_timeout = timeout;
+		return rc;
+	}
+
+	/* send timeout value to card; if successful, store value internally */
+	rc = qeth_l2_vnicc_getset_timeout(card, QETH_VNICC_LEARNING,
+					  IPA_VNICC_SET_TIMEOUT, &timeout);
+	if (!rc)
+		card->options.vnicc.learning_timeout = timeout;
+
+	return rc;
+}
+
+/* get current VNICC timeout; called from sysfs show function. Currently, only
+ * the learning characteristic supports a timeout.
+ */
+int qeth_l2_vnicc_get_timeout(struct qeth_card *card, u32 *timeout)
+{
+	int rc = 0;
+
+	QETH_CARD_TEXT(card, 2, "vniccgto");
+
+	/* do not get anything if BridgePort is enabled */
+	if (qeth_bridgeport_is_in_use(card))
+		return -EBUSY;
+
+	/* check if characteristic and get_timeout are supported */
+	if (!(card->options.vnicc.sup_chars & QETH_VNICC_LEARNING) ||
+	    !(card->options.vnicc.getset_timeout_sup & QETH_VNICC_LEARNING))
+		return -EOPNOTSUPP;
+	/* if card is ready, get timeout. Otherwise, just return stored value */
+	*timeout = card->options.vnicc.learning_timeout;
+	if (qeth_card_hw_is_reachable(card))
+		rc = qeth_l2_vnicc_getset_timeout(card, QETH_VNICC_LEARNING,
+						  IPA_VNICC_GET_TIMEOUT,
+						  timeout);
+
+	return rc;
+}
+
+/* check if VNICC is currently enabled */
+bool qeth_l2_vnicc_is_in_use(struct qeth_card *card)
+{
+	/* if everything is turned off, VNICC is not active */
+	if (!card->options.vnicc.cur_chars)
+		return false;
+	/* default values are only OK if rx_bcast was not enabled by user
+	 * or the card is offline.
+	 */
+	if (card->options.vnicc.cur_chars == QETH_VNICC_DEFAULT) {
+		if (!card->options.vnicc.rx_bcast_enabled ||
+		    !qeth_card_hw_is_reachable(card))
+			return false;
+	}
+	return true;
+}
+
+/* recover user timeout setting */
+static bool qeth_l2_vnicc_recover_timeout(struct qeth_card *card, u32 vnicc,
+					  u32 *timeout)
+{
+	if (card->options.vnicc.sup_chars & vnicc &&
+	    card->options.vnicc.getset_timeout_sup & vnicc &&
+	    !qeth_l2_vnicc_getset_timeout(card, vnicc, IPA_VNICC_SET_TIMEOUT,
+					  timeout))
+		return false;
+	*timeout = QETH_VNICC_DEFAULT_TIMEOUT;
+	return true;
+}
+
+/* recover user characteristic setting */
+static bool qeth_l2_vnicc_recover_char(struct qeth_card *card, u32 vnicc,
+				       bool enable)
+{
+	u32 cmd = enable ? IPA_VNICC_ENABLE : IPA_VNICC_DISABLE;
+
+	if (card->options.vnicc.sup_chars & vnicc &&
+	    card->options.vnicc.set_char_sup & vnicc &&
+	    !qeth_l2_vnicc_set_char(card, vnicc, cmd))
+		return false;
+	card->options.vnicc.wanted_chars &= ~vnicc;
+	card->options.vnicc.wanted_chars |= QETH_VNICC_DEFAULT & vnicc;
+	return true;
+}
+
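+/* On every set_online the characteristics are re-queried: unsupported
+ * sub-commands are pruned from the support masks, and any non-default user
+ * settings (wanted_chars, learning timeout) are re-applied to the card.
+ */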
+/* (re-)initialize VNICC */
+static void qeth_l2_vnicc_init(struct qeth_card *card)
+{
+	u32 *timeout = &card->options.vnicc.learning_timeout;
+	unsigned int chars_len, i;
+	unsigned long chars_tmp;
+	u32 sup_cmds, vnicc;
+	bool enable, error;
+
+	QETH_CARD_TEXT(card, 2, "vniccini");
+	/* reset rx_bcast */
+	card->options.vnicc.rx_bcast_enabled = 0;
+	/* initial query and storage of VNIC characteristics */
+	if (qeth_l2_vnicc_query_chars(card)) {
+		if (card->options.vnicc.wanted_chars != QETH_VNICC_DEFAULT ||
+		    *timeout != QETH_VNICC_DEFAULT_TIMEOUT)
+			dev_err(&card->gdev->dev, "Configuring the VNIC characteristics failed\n");
+		/* fail quietly if user didn't change the default config */
+		card->options.vnicc.sup_chars = 0;
+		card->options.vnicc.cur_chars = 0;
+		card->options.vnicc.wanted_chars = QETH_VNICC_DEFAULT;
+		return;
+	}
+	/* get supported commands for each supported characteristic */
+	chars_tmp = card->options.vnicc.sup_chars;
+	chars_len = sizeof(card->options.vnicc.sup_chars) * BITS_PER_BYTE;
+	for_each_set_bit(i, &chars_tmp, chars_len) {
+		vnicc = BIT(i);
+		qeth_l2_vnicc_query_cmds(card, vnicc, &sup_cmds);
+		if (!(sup_cmds & IPA_VNICC_SET_TIMEOUT) ||
+		    !(sup_cmds & IPA_VNICC_GET_TIMEOUT))
+			card->options.vnicc.getset_timeout_sup &= ~vnicc;
+		if (!(sup_cmds & IPA_VNICC_ENABLE) ||
+		    !(sup_cmds & IPA_VNICC_DISABLE))
+			card->options.vnicc.set_char_sup &= ~vnicc;
+	}
+	/* enforce assumed default values and recover settings, if changed  */
+	error = qeth_l2_vnicc_recover_timeout(card, QETH_VNICC_LEARNING,
+					      timeout);
+	chars_tmp = card->options.vnicc.wanted_chars ^ QETH_VNICC_DEFAULT;
+	chars_tmp |= QETH_VNICC_BRIDGE_INVISIBLE;
+	chars_len = sizeof(card->options.vnicc.wanted_chars) * BITS_PER_BYTE;
+	for_each_set_bit(i, &chars_tmp, chars_len) {
+		vnicc = BIT(i);
+		enable = card->options.vnicc.wanted_chars & vnicc;
+		error |= qeth_l2_vnicc_recover_char(card, vnicc, enable);
+	}
+	if (error)
+		dev_err(&card->gdev->dev, "Configuring the VNIC characteristics failed\n");
+}
+
+/* configure default values of VNIC characteristics */
+static void qeth_l2_vnicc_set_defaults(struct qeth_card *card)
+{
+	/* characteristics values */
+	card->options.vnicc.sup_chars = QETH_VNICC_ALL;
+	card->options.vnicc.cur_chars = QETH_VNICC_DEFAULT;
+	card->options.vnicc.learning_timeout = QETH_VNICC_DEFAULT_TIMEOUT;
+	/* supported commands */
+	card->options.vnicc.set_char_sup = QETH_VNICC_ALL;
+	card->options.vnicc.getset_timeout_sup = QETH_VNICC_LEARNING;
+	/* settings wanted by users */
+	card->options.vnicc.wanted_chars = QETH_VNICC_DEFAULT;
+}
+
+module_init(qeth_l2_init);
+module_exit(qeth_l2_exit);
+MODULE_AUTHOR("Frank Blaschka <frank.blaschka@de.ibm.com>");
+MODULE_DESCRIPTION("qeth layer 2 discipline");
+MODULE_LICENSE("GPL");
diff --git a/drivers/s390/net/qeth_l2_sys.c b/drivers/s390/net/qeth_l2_sys.c
new file mode 100644
index 0000000..f2c3b12
--- /dev/null
+++ b/drivers/s390/net/qeth_l2_sys.c
@@ -0,0 +1,451 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ *    Copyright IBM Corp. 2013
+ *    Author(s): Eugene Crosser <eugene.crosser@ru.ibm.com>
+ */
+
+#include <linux/slab.h>
+#include <asm/ebcdic.h>
+#include "qeth_core.h"
+#include "qeth_l2.h"
+
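+/* Common backend for the bridge_role and bridge_state attributes: queries
+ * the local port entry and formats either the role or the state, as
+ * selected by show_state.
+ */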
+static ssize_t qeth_bridge_port_role_state_show(struct device *dev,
+				struct device_attribute *attr, char *buf,
+				int show_state)
+{
+	struct qeth_card *card = dev_get_drvdata(dev);
+	enum qeth_sbp_states state = QETH_SBP_STATE_INACTIVE;
+	int rc = 0;
+	char *word;
+
+	if (!card)
+		return -EINVAL;
+
+	if (qeth_l2_vnicc_is_in_use(card))
+		return sprintf(buf, "n/a (VNIC characteristics)\n");
+
+	if (qeth_card_hw_is_reachable(card) &&
+					card->options.sbp.supported_funcs)
+		rc = qeth_bridgeport_query_ports(card,
+			&card->options.sbp.role, &state);
+	if (!rc) {
+		if (show_state)
+			switch (state) {
+			case QETH_SBP_STATE_INACTIVE:
+				word = "inactive"; break;
+			case QETH_SBP_STATE_STANDBY:
+				word = "standby"; break;
+			case QETH_SBP_STATE_ACTIVE:
+				word = "active"; break;
+			default:
+				rc = -EIO;
+			}
+		else
+			switch (card->options.sbp.role) {
+			case QETH_SBP_ROLE_NONE:
+				word = "none"; break;
+			case QETH_SBP_ROLE_PRIMARY:
+				word = "primary"; break;
+			case QETH_SBP_ROLE_SECONDARY:
+				word = "secondary"; break;
+			default:
+				rc = -EIO;
+			}
+		if (rc)
+			QETH_CARD_TEXT_(card, 2, "SBP%02x:%02x",
+				card->options.sbp.role, state);
+		else
+			rc = sprintf(buf, "%s\n", word);
+	}
+
+	return rc;
+}
+
+static ssize_t qeth_bridge_port_role_show(struct device *dev,
+				struct device_attribute *attr, char *buf)
+{
+	struct qeth_card *card = dev_get_drvdata(dev);
+
+	if (qeth_l2_vnicc_is_in_use(card))
+		return sprintf(buf, "n/a (VNIC characteristics)\n");
+
+	return qeth_bridge_port_role_state_show(dev, attr, buf, 0);
+}
+
+static ssize_t qeth_bridge_port_role_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct qeth_card *card = dev_get_drvdata(dev);
+	int rc = 0;
+	enum qeth_sbp_roles role;
+
+	if (!card)
+		return -EINVAL;
+	if (sysfs_streq(buf, "primary"))
+		role = QETH_SBP_ROLE_PRIMARY;
+	else if (sysfs_streq(buf, "secondary"))
+		role = QETH_SBP_ROLE_SECONDARY;
+	else if (sysfs_streq(buf, "none"))
+		role = QETH_SBP_ROLE_NONE;
+	else
+		return -EINVAL;
+
+	mutex_lock(&card->conf_mutex);
+
+	if (qeth_l2_vnicc_is_in_use(card))
+		rc = -EBUSY;
+	else if (card->options.sbp.reflect_promisc)
+		/* Forbid direct manipulation */
+		rc = -EPERM;
+	else if (qeth_card_hw_is_reachable(card)) {
+		rc = qeth_bridgeport_setrole(card, role);
+		if (!rc)
+			card->options.sbp.role = role;
+	} else
+		card->options.sbp.role = role;
+
+	mutex_unlock(&card->conf_mutex);
+
+	return rc ? rc : count;
+}
+
+static DEVICE_ATTR(bridge_role, 0644, qeth_bridge_port_role_show,
+		   qeth_bridge_port_role_store);
+
+static ssize_t qeth_bridge_port_state_show(struct device *dev,
+				struct device_attribute *attr, char *buf)
+{
+	struct qeth_card *card = dev_get_drvdata(dev);
+
+	if (qeth_l2_vnicc_is_in_use(card))
+		return sprintf(buf, "n/a (VNIC characteristics)\n");
+
+	return qeth_bridge_port_role_state_show(dev, attr, buf, 1);
+}
+
+static DEVICE_ATTR(bridge_state, 0444, qeth_bridge_port_state_show,
+		   NULL);
+
+static ssize_t qeth_bridgeport_hostnotification_show(struct device *dev,
+				struct device_attribute *attr, char *buf)
+{
+	struct qeth_card *card = dev_get_drvdata(dev);
+	int enabled;
+
+	if (!card)
+		return -EINVAL;
+
+	if (qeth_l2_vnicc_is_in_use(card))
+		return sprintf(buf, "n/a (VNIC characteristics)\n");
+
+	enabled = card->options.sbp.hostnotification;
+
+	return sprintf(buf, "%d\n", enabled);
+}
+
+static ssize_t qeth_bridgeport_hostnotification_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct qeth_card *card = dev_get_drvdata(dev);
+	bool enable;
+	int rc;
+
+	if (!card)
+		return -EINVAL;
+
+	rc = kstrtobool(buf, &enable);
+	if (rc)
+		return rc;
+
+	mutex_lock(&card->conf_mutex);
+
+	if (qeth_l2_vnicc_is_in_use(card))
+		rc = -EBUSY;
+	else if (qeth_card_hw_is_reachable(card)) {
+		rc = qeth_bridgeport_an_set(card, enable);
+		if (!rc)
+			card->options.sbp.hostnotification = enable;
+	} else
+		card->options.sbp.hostnotification = enable;
+
+	mutex_unlock(&card->conf_mutex);
+
+	return rc ? rc : count;
+}
+
+static DEVICE_ATTR(bridge_hostnotify, 0644,
+			qeth_bridgeport_hostnotification_show,
+			qeth_bridgeport_hostnotification_store);
+
+static ssize_t qeth_bridgeport_reflect_show(struct device *dev,
+				struct device_attribute *attr, char *buf)
+{
+	struct qeth_card *card = dev_get_drvdata(dev);
+	char *state;
+
+	if (!card)
+		return -EINVAL;
+
+	if (qeth_l2_vnicc_is_in_use(card))
+		return sprintf(buf, "n/a (VNIC characteristics)\n");
+
+	if (card->options.sbp.reflect_promisc) {
+		if (card->options.sbp.reflect_promisc_primary)
+			state = "primary";
+		else
+			state = "secondary";
+	} else
+		state = "none";
+
+	return sprintf(buf, "%s\n", state);
+}
+
+static ssize_t qeth_bridgeport_reflect_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct qeth_card *card = dev_get_drvdata(dev);
+	int enable, primary;
+	int rc = 0;
+
+	if (!card)
+		return -EINVAL;
+
+	if (sysfs_streq(buf, "none")) {
+		enable = 0;
+		primary = 0;
+	} else if (sysfs_streq(buf, "primary")) {
+		enable = 1;
+		primary = 1;
+	} else if (sysfs_streq(buf, "secondary")) {
+		enable = 1;
+		primary = 0;
+	} else
+		return -EINVAL;
+
+	mutex_lock(&card->conf_mutex);
+
+	if (qeth_l2_vnicc_is_in_use(card))
+		rc = -EBUSY;
+	else if (card->options.sbp.role != QETH_SBP_ROLE_NONE)
+		rc = -EPERM;
+	else {
+		card->options.sbp.reflect_promisc = enable;
+		card->options.sbp.reflect_promisc_primary = primary;
+		rc = 0;
+	}
+
+	mutex_unlock(&card->conf_mutex);
+
+	return rc ? rc : count;
+}
+
+static DEVICE_ATTR(bridge_reflect_promisc, 0644,
+			qeth_bridgeport_reflect_show,
+			qeth_bridgeport_reflect_store);
+
+static struct attribute *qeth_l2_bridgeport_attrs[] = {
+	&dev_attr_bridge_role.attr,
+	&dev_attr_bridge_state.attr,
+	&dev_attr_bridge_hostnotify.attr,
+	&dev_attr_bridge_reflect_promisc.attr,
+	NULL,
+};
+
+static struct attribute_group qeth_l2_bridgeport_attr_group = {
+	.attrs = qeth_l2_bridgeport_attrs,
+};
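+
+/* Illustration (not part of the driver): once this group is registered,
+ * the Bridge Port role and state can be driven from userspace via sysfs.
+ * Assuming an example bus-ID of 0.0.f500:
+ *
+ *   echo primary > /sys/bus/ccwgroup/devices/0.0.f500/bridge_role
+ *   cat /sys/bus/ccwgroup/devices/0.0.f500/bridge_state
+ *
+ * A store of "primary", "secondary" or "none" maps to the corresponding
+ * QETH_SBP_ROLE_* value in qeth_bridge_port_role_store() above.
+ */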
+
+/**
+ * qeth_l2_setup_bridgeport_attrs() - set/restore attrs when turning online.
+ * @card:			      qeth_card structure pointer
+ *
+ * Note: this function is called with conf_mutex held by the caller
+ */
+void qeth_l2_setup_bridgeport_attrs(struct qeth_card *card)
+{
+	int rc;
+
+	if (!card)
+		return;
+	if (!card->options.sbp.supported_funcs)
+		return;
+	if (card->options.sbp.role != QETH_SBP_ROLE_NONE) {
+		/* Conditional to avoid spurious error messages */
+		qeth_bridgeport_setrole(card, card->options.sbp.role);
+		/* Let the callback function refresh the stored role value. */
+		qeth_bridgeport_query_ports(card,
+			&card->options.sbp.role, NULL);
+	}
+	if (card->options.sbp.hostnotification) {
+		rc = qeth_bridgeport_an_set(card, 1);
+		if (rc)
+			card->options.sbp.hostnotification = 0;
+	} else
+		qeth_bridgeport_an_set(card, 0);
+}
+
+/* VNIC CHARS support */
+
+/* convert sysfs attr name to VNIC characteristic */
+static u32 qeth_l2_vnicc_sysfs_attr_to_char(const char *attr_name)
+{
+	if (sysfs_streq(attr_name, "flooding"))
+		return QETH_VNICC_FLOODING;
+	else if (sysfs_streq(attr_name, "mcast_flooding"))
+		return QETH_VNICC_MCAST_FLOODING;
+	else if (sysfs_streq(attr_name, "learning"))
+		return QETH_VNICC_LEARNING;
+	else if (sysfs_streq(attr_name, "takeover_setvmac"))
+		return QETH_VNICC_TAKEOVER_SETVMAC;
+	else if (sysfs_streq(attr_name, "takeover_learning"))
+		return QETH_VNICC_TAKEOVER_LEARNING;
+	else if (sysfs_streq(attr_name, "bridge_invisible"))
+		return QETH_VNICC_BRIDGE_INVISIBLE;
+	else if (sysfs_streq(attr_name, "rx_bcast"))
+		return QETH_VNICC_RX_BCAST;
+
+	return 0;
+}
+
+/* get current timeout setting */
+static ssize_t qeth_vnicc_timeout_show(struct device *dev,
+				       struct device_attribute *attr, char *buf)
+{
+	struct qeth_card *card = dev_get_drvdata(dev);
+	u32 timeout;
+	int rc;
+
+	if (!card)
+		return -EINVAL;
+
+	rc = qeth_l2_vnicc_get_timeout(card, &timeout);
+	if (rc == -EBUSY)
+		return sprintf(buf, "n/a (BridgePort)\n");
+	if (rc == -EOPNOTSUPP)
+		return sprintf(buf, "n/a\n");
+	return rc ? rc : sprintf(buf, "%d\n", timeout);
+}
+
+/* change timeout setting */
+static ssize_t qeth_vnicc_timeout_store(struct device *dev,
+					struct device_attribute *attr,
+					const char *buf, size_t count)
+{
+	struct qeth_card *card = dev_get_drvdata(dev);
+	u32 timeout;
+	int rc;
+
+	if (!card)
+		return -EINVAL;
+
+	rc = kstrtou32(buf, 10, &timeout);
+	if (rc)
+		return rc;
+
+	mutex_lock(&card->conf_mutex);
+	rc = qeth_l2_vnicc_set_timeout(card, timeout);
+	mutex_unlock(&card->conf_mutex);
+	return rc ? rc : count;
+}
+
+/* get current setting of characteristic */
+static ssize_t qeth_vnicc_char_show(struct device *dev,
+				    struct device_attribute *attr, char *buf)
+{
+	struct qeth_card *card = dev_get_drvdata(dev);
+	bool state;
+	u32 vnicc;
+	int rc;
+
+	if (!card)
+		return -EINVAL;
+
+	vnicc = qeth_l2_vnicc_sysfs_attr_to_char(attr->attr.name);
+	rc = qeth_l2_vnicc_get_state(card, vnicc, &state);
+
+	if (rc == -EBUSY)
+		return sprintf(buf, "n/a (BridgePort)\n");
+	if (rc == -EOPNOTSUPP)
+		return sprintf(buf, "n/a\n");
+	return rc ? rc : sprintf(buf, "%d\n", state);
+}
+
+/* change setting of characteristic */
+static ssize_t qeth_vnicc_char_store(struct device *dev,
+				     struct device_attribute *attr,
+				     const char *buf, size_t count)
+{
+	struct qeth_card *card = dev_get_drvdata(dev);
+	bool state;
+	u32 vnicc;
+	int rc;
+
+	if (!card)
+		return -EINVAL;
+
+	if (kstrtobool(buf, &state))
+		return -EINVAL;
+
+	vnicc = qeth_l2_vnicc_sysfs_attr_to_char(attr->attr.name);
+	mutex_lock(&card->conf_mutex);
+	rc = qeth_l2_vnicc_set_state(card, vnicc, state);
+	mutex_unlock(&card->conf_mutex);
+
+	return rc ? rc : count;
+}
+
+static DEVICE_ATTR(flooding, 0644, qeth_vnicc_char_show, qeth_vnicc_char_store);
+static DEVICE_ATTR(mcast_flooding, 0644, qeth_vnicc_char_show,
+		   qeth_vnicc_char_store);
+static DEVICE_ATTR(learning, 0644, qeth_vnicc_char_show, qeth_vnicc_char_store);
+static DEVICE_ATTR(learning_timeout, 0644, qeth_vnicc_timeout_show,
+		   qeth_vnicc_timeout_store);
+static DEVICE_ATTR(takeover_setvmac, 0644, qeth_vnicc_char_show,
+		   qeth_vnicc_char_store);
+static DEVICE_ATTR(takeover_learning, 0644, qeth_vnicc_char_show,
+		   qeth_vnicc_char_store);
+static DEVICE_ATTR(bridge_invisible, 0644, qeth_vnicc_char_show,
+		   qeth_vnicc_char_store);
+static DEVICE_ATTR(rx_bcast, 0644, qeth_vnicc_char_show, qeth_vnicc_char_store);
+
+static struct attribute *qeth_l2_vnicc_attrs[] = {
+	&dev_attr_flooding.attr,
+	&dev_attr_mcast_flooding.attr,
+	&dev_attr_learning.attr,
+	&dev_attr_learning_timeout.attr,
+	&dev_attr_takeover_setvmac.attr,
+	&dev_attr_takeover_learning.attr,
+	&dev_attr_bridge_invisible.attr,
+	&dev_attr_rx_bcast.attr,
+	NULL,
+};
+
+static struct attribute_group qeth_l2_vnicc_attr_group = {
+	.attrs = qeth_l2_vnicc_attrs,
+	.name = "vnicc",
+};
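+
+/* Illustration (not part of the driver): since the group is named "vnicc",
+ * its attributes appear in a subdirectory of the device. Assuming an
+ * example bus-ID of 0.0.f500:
+ *
+ *   echo 1 > /sys/bus/ccwgroup/devices/0.0.f500/vnicc/learning
+ *   cat /sys/bus/ccwgroup/devices/0.0.f500/vnicc/learning_timeout
+ *
+ * qeth_l2_vnicc_sysfs_attr_to_char() maps each file name back to its
+ * QETH_VNICC_* bit, so all characteristics share one show/store pair.
+ */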
+
+static const struct attribute_group *qeth_l2_only_attr_groups[] = {
+	&qeth_l2_bridgeport_attr_group,
+	&qeth_l2_vnicc_attr_group,
+	NULL,
+};
+
+int qeth_l2_create_device_attributes(struct device *dev)
+{
+	return sysfs_create_groups(&dev->kobj, qeth_l2_only_attr_groups);
+}
+
+void qeth_l2_remove_device_attributes(struct device *dev)
+{
+	sysfs_remove_groups(&dev->kobj, qeth_l2_only_attr_groups);
+}
+
+const struct attribute_group *qeth_l2_attr_groups[] = {
+	&qeth_device_attr_group,
+	&qeth_device_blkt_group,
+	/* l2 specific, see qeth_l2_only_attr_groups: */
+	&qeth_l2_bridgeport_attr_group,
+	&qeth_l2_vnicc_attr_group,
+	NULL,
+};
diff --git a/drivers/s390/net/qeth_l3.h b/drivers/s390/net/qeth_l3.h
new file mode 100644
index 0000000..87659cf
--- /dev/null
+++ b/drivers/s390/net/qeth_l3.h
@@ -0,0 +1,133 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ *    Copyright IBM Corp. 2007
+ *    Author(s): Utz Bacher <utz.bacher@de.ibm.com>,
+ *		 Frank Pavlic <fpavlic@de.ibm.com>,
+ *		 Thomas Spatzier <tspat@de.ibm.com>,
+ *		 Frank Blaschka <frank.blaschka@de.ibm.com>
+ */
+
+#ifndef __QETH_L3_H__
+#define __QETH_L3_H__
+
+#include "qeth_core.h"
+#include <linux/hashtable.h>
+
+#define QETH_SNIFF_AVAIL	0x0008
+
+enum qeth_ip_types {
+	QETH_IP_TYPE_NORMAL,
+	QETH_IP_TYPE_VIPA,
+	QETH_IP_TYPE_RXIP,
+};
+
+struct qeth_ipaddr {
+	struct hlist_node hnode;
+	enum qeth_ip_types type;
+	unsigned char mac[ETH_ALEN];
+	u8 is_multicast:1;
+	u8 in_progress:1;
+	u8 disp_flag:2;
+	u8 ipato:1;			/* ucast only */
+
+	/* ref_counter is only maintained for NORMAL addresses;
+	 * for all other address types it is always 1
+	 */
+	int  ref_counter;
+	enum qeth_prot_versions proto;
+	union {
+		struct {
+			unsigned int addr;
+			unsigned int mask;
+		} a4;
+		struct {
+			struct in6_addr addr;
+			unsigned int pfxlen;
+		} a6;
+	} u;
+};
+
+static inline void qeth_l3_init_ipaddr(struct qeth_ipaddr *addr,
+				       enum qeth_ip_types type,
+				       enum qeth_prot_versions proto)
+{
+	memset(addr, 0, sizeof(*addr));
+	addr->type = type;
+	addr->proto = proto;
+	addr->disp_flag = QETH_DISP_ADDR_DO_NOTHING;
+}
+
+static inline bool qeth_l3_addr_match_ip(struct qeth_ipaddr *a1,
+					 struct qeth_ipaddr *a2)
+{
+	if (a1->proto != a2->proto)
+		return false;
+	if (a1->proto == QETH_PROT_IPV6)
+		return ipv6_addr_equal(&a1->u.a6.addr, &a2->u.a6.addr);
+	return a1->u.a4.addr == a2->u.a4.addr;
+}
+
+static inline bool qeth_l3_addr_match_all(struct qeth_ipaddr *a1,
+					  struct qeth_ipaddr *a2)
+{
+	/* Assumes that the pair was obtained via qeth_l3_addr_find_by_ip(),
+	 * so 'proto' and 'addr' match for sure.
+	 *
+	 * For ucast:
+	 * -	'mac' is always 0.
+	 * -	'mask'/'pfxlen' for RXIP/VIPA is always 0. For NORMAL, matching
+	 *	values are required to avoid mixups in takeover eligibility.
+	 *
+	 * For mcast,
+	 * -	'mac' is mapped from the IP, and thus always matches.
+	 * -	'mask'/'pfxlen' is always 0.
+	 */
+	if (a1->type != a2->type)
+		return false;
+	if (a1->proto == QETH_PROT_IPV6)
+		return a1->u.a6.pfxlen == a2->u.a6.pfxlen;
+	return a1->u.a4.mask == a2->u.a4.mask;
+}
+
+static inline u64 qeth_l3_ipaddr_hash(struct qeth_ipaddr *addr)
+{
+	u64 ret = 0;
+	u8 *point;
+
+	if (addr->proto == QETH_PROT_IPV6) {
+		point = (u8 *) &addr->u.a6.addr;
+		ret = get_unaligned((u64 *)point) ^
+			get_unaligned((u64 *) (point + 8));
+	}
+	if (addr->proto == QETH_PROT_IPV4) {
+		point = (u8 *) &addr->u.a4.addr;
+		ret = get_unaligned((u32 *) point);
+	}
+	return ret;
+}
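+
+/* Worked example for the hash above (on big-endian s390): an IPv6 address
+ * is folded by XORing its two 64-bit halves, e.g. for fe80::1
+ *
+ *	0xfe80000000000000 ^ 0x0000000000000001 = 0xfe80000000000001
+ *
+ * while an IPv4 address contributes its raw 32 bits. hash_add() then
+ * reduces this key to a bucket index.
+ */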
+
+struct qeth_ipato_entry {
+	struct list_head entry;
+	enum qeth_prot_versions proto;
+	char addr[16];
+	int mask_bits;
+};
+
+extern const struct attribute_group *qeth_l3_attr_groups[];
+
+void qeth_l3_ipaddr_to_string(enum qeth_prot_versions, const __u8 *, char *);
+int qeth_l3_create_device_attributes(struct device *);
+void qeth_l3_remove_device_attributes(struct device *);
+int qeth_l3_setrouting_v4(struct qeth_card *);
+int qeth_l3_setrouting_v6(struct qeth_card *);
+int qeth_l3_add_ipato_entry(struct qeth_card *, struct qeth_ipato_entry *);
+int qeth_l3_del_ipato_entry(struct qeth_card *card,
+			    enum qeth_prot_versions proto, u8 *addr,
+			    int mask_bits);
+void qeth_l3_update_ipato(struct qeth_card *card);
+int qeth_l3_modify_hsuid(struct qeth_card *card, bool add);
+int qeth_l3_modify_rxip_vipa(struct qeth_card *card, bool add, const u8 *ip,
+			     enum qeth_ip_types type,
+			     enum qeth_prot_versions proto);
+
+#endif /* __QETH_L3_H__ */
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
new file mode 100644
index 0000000..b7f6a83
--- /dev/null
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -0,0 +1,2994 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ *    Copyright IBM Corp. 2007, 2009
+ *    Author(s): Utz Bacher <utz.bacher@de.ibm.com>,
+ *		 Frank Pavlic <fpavlic@de.ibm.com>,
+ *		 Thomas Spatzier <tspat@de.ibm.com>,
+ *		 Frank Blaschka <frank.blaschka@de.ibm.com>
+ */
+
+#define KMSG_COMPONENT "qeth"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/bitops.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/etherdevice.h>
+#include <linux/ip.h>
+#include <linux/in.h>
+#include <linux/ipv6.h>
+#include <linux/inetdevice.h>
+#include <linux/igmp.h>
+#include <linux/slab.h>
+#include <linux/if_ether.h>
+#include <linux/if_vlan.h>
+#include <linux/skbuff.h>
+
+#include <net/ip.h>
+#include <net/arp.h>
+#include <net/route.h>
+#include <net/ipv6.h>
+#include <net/ip6_route.h>
+#include <net/ip6_fib.h>
+#include <net/ip6_checksum.h>
+#include <net/iucv/af_iucv.h>
+#include <linux/hashtable.h>
+
+#include "qeth_l3.h"
+
+
+static int qeth_l3_set_offline(struct ccwgroup_device *);
+static int qeth_l3_stop(struct net_device *);
+static void qeth_l3_set_rx_mode(struct net_device *dev);
+static int qeth_l3_register_addr_entry(struct qeth_card *,
+		struct qeth_ipaddr *);
+static int qeth_l3_deregister_addr_entry(struct qeth_card *,
+		struct qeth_ipaddr *);
+
+static void qeth_l3_ipaddr4_to_string(const __u8 *addr, char *buf)
+{
+	sprintf(buf, "%pI4", addr);
+}
+
+static void qeth_l3_ipaddr6_to_string(const __u8 *addr, char *buf)
+{
+	sprintf(buf, "%pI6", addr);
+}
+
+void qeth_l3_ipaddr_to_string(enum qeth_prot_versions proto, const __u8 *addr,
+				char *buf)
+{
+	if (proto == QETH_PROT_IPV4)
+		qeth_l3_ipaddr4_to_string(addr, buf);
+	else if (proto == QETH_PROT_IPV6)
+		qeth_l3_ipaddr6_to_string(addr, buf);
+}
+
+static struct qeth_ipaddr *qeth_l3_get_addr_buffer(enum qeth_prot_versions prot)
+{
+	struct qeth_ipaddr *addr = kmalloc(sizeof(*addr), GFP_ATOMIC);
+
+	if (addr)
+		qeth_l3_init_ipaddr(addr, QETH_IP_TYPE_NORMAL, prot);
+	return addr;
+}
+
+static struct qeth_ipaddr *qeth_l3_find_addr_by_ip(struct qeth_card *card,
+						   struct qeth_ipaddr *query)
+{
+	u64 key = qeth_l3_ipaddr_hash(query);
+	struct qeth_ipaddr *addr;
+
+	if (query->is_multicast) {
+		hash_for_each_possible(card->ip_mc_htable, addr, hnode, key)
+			if (qeth_l3_addr_match_ip(addr, query))
+				return addr;
+	} else {
+		hash_for_each_possible(card->ip_htable, addr, hnode, key)
+			if (qeth_l3_addr_match_ip(addr, query))
+				return addr;
+	}
+	return NULL;
+}
+
+static void qeth_l3_convert_addr_to_bits(u8 *addr, u8 *bits, int len)
+{
+	int i, j;
+	u8 octet;
+
+	for (i = 0; i < len; ++i) {
+		octet = addr[i];
+		for (j = 7; j >= 0; --j) {
+			bits[i*8 + j] = octet & 1;
+			octet >>= 1;
+		}
+	}
+}
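+
+/* Illustration: the helper above expands each address byte into eight
+ * 0/1 bytes, MSB first - 0xc0 becomes 1 1 0 0 0 0 0 0. A prefix match
+ * then reduces to a plain memcmp over the first mask_bits "bit" bytes,
+ * as done in qeth_l3_is_addr_covered_by_ipato() below.
+ */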
+
+static bool qeth_l3_is_addr_covered_by_ipato(struct qeth_card *card,
+					     struct qeth_ipaddr *addr)
+{
+	struct qeth_ipato_entry *ipatoe;
+	u8 addr_bits[128] = {0, };
+	u8 ipatoe_bits[128] = {0, };
+	int rc = 0;
+
+	if (!card->ipato.enabled)
+		return false;
+	if (addr->type != QETH_IP_TYPE_NORMAL)
+		return false;
+
+	qeth_l3_convert_addr_to_bits((u8 *) &addr->u, addr_bits,
+				  (addr->proto == QETH_PROT_IPV4) ? 4 : 16);
+	list_for_each_entry(ipatoe, &card->ipato.entries, entry) {
+		if (addr->proto != ipatoe->proto)
+			continue;
+		qeth_l3_convert_addr_to_bits(ipatoe->addr, ipatoe_bits,
+					  (ipatoe->proto == QETH_PROT_IPV4) ?
+					  4 : 16);
+		if (addr->proto == QETH_PROT_IPV4)
+			rc = !memcmp(addr_bits, ipatoe_bits,
+				     min(32, ipatoe->mask_bits));
+		else
+			rc = !memcmp(addr_bits, ipatoe_bits,
+				     min(128, ipatoe->mask_bits));
+		if (rc)
+			break;
+	}
+	/* invert? */
+	if ((addr->proto == QETH_PROT_IPV4) && card->ipato.invert4)
+		rc = !rc;
+	else if ((addr->proto == QETH_PROT_IPV6) && card->ipato.invert6)
+		rc = !rc;
+
+	return rc;
+}
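+
+/* Worked example (assumed values): with IPATO enabled and an entry of
+ * 192.168.0.0/16, a NORMAL address 192.168.1.1 matches on the first
+ * 16 bits and is marked for takeover - unless invert4 is set, which
+ * flips the verdict for all IPv4 addresses.
+ */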
+
+static int qeth_l3_delete_ip(struct qeth_card *card,
+			     struct qeth_ipaddr *tmp_addr)
+{
+	int rc = 0;
+	struct qeth_ipaddr *addr;
+
+	if (tmp_addr->type == QETH_IP_TYPE_RXIP)
+		QETH_CARD_TEXT(card, 2, "delrxip");
+	else if (tmp_addr->type == QETH_IP_TYPE_VIPA)
+		QETH_CARD_TEXT(card, 2, "delvipa");
+	else
+		QETH_CARD_TEXT(card, 2, "delip");
+
+	if (tmp_addr->proto == QETH_PROT_IPV4)
+		QETH_CARD_HEX(card, 4, &tmp_addr->u.a4.addr, 4);
+	else {
+		QETH_CARD_HEX(card, 4, &tmp_addr->u.a6.addr, 8);
+		QETH_CARD_HEX(card, 4, ((char *)&tmp_addr->u.a6.addr) + 8, 8);
+	}
+
+	addr = qeth_l3_find_addr_by_ip(card, tmp_addr);
+	if (!addr || !qeth_l3_addr_match_all(addr, tmp_addr))
+		return -ENOENT;
+
+	addr->ref_counter--;
+	if (addr->type == QETH_IP_TYPE_NORMAL && addr->ref_counter > 0)
+		return rc;
+	if (addr->in_progress)
+		return -EINPROGRESS;
+
+	if (qeth_card_hw_is_reachable(card))
+		rc = qeth_l3_deregister_addr_entry(card, addr);
+
+	hash_del(&addr->hnode);
+	kfree(addr);
+
+	return rc;
+}
+
+static int qeth_l3_add_ip(struct qeth_card *card, struct qeth_ipaddr *tmp_addr)
+{
+	int rc = 0;
+	struct qeth_ipaddr *addr;
+	char buf[40];
+
+	if (tmp_addr->type == QETH_IP_TYPE_RXIP)
+		QETH_CARD_TEXT(card, 2, "addrxip");
+	else if (tmp_addr->type == QETH_IP_TYPE_VIPA)
+		QETH_CARD_TEXT(card, 2, "addvipa");
+	else
+		QETH_CARD_TEXT(card, 2, "addip");
+
+	if (tmp_addr->proto == QETH_PROT_IPV4)
+		QETH_CARD_HEX(card, 4, &tmp_addr->u.a4.addr, 4);
+	else {
+		QETH_CARD_HEX(card, 4, &tmp_addr->u.a6.addr, 8);
+		QETH_CARD_HEX(card, 4, ((char *)&tmp_addr->u.a6.addr) + 8, 8);
+	}
+
+	addr = qeth_l3_find_addr_by_ip(card, tmp_addr);
+	if (addr) {
+		if (tmp_addr->type != QETH_IP_TYPE_NORMAL)
+			return -EADDRINUSE;
+		if (qeth_l3_addr_match_all(addr, tmp_addr)) {
+			addr->ref_counter++;
+			return 0;
+		}
+		qeth_l3_ipaddr_to_string(tmp_addr->proto, (u8 *)&tmp_addr->u,
+					 buf);
+		dev_warn(&card->gdev->dev,
+			 "Registering IP address %s failed\n", buf);
+		return -EADDRINUSE;
+	} else {
+		addr = qeth_l3_get_addr_buffer(tmp_addr->proto);
+		if (!addr)
+			return -ENOMEM;
+
+		memcpy(addr, tmp_addr, sizeof(struct qeth_ipaddr));
+		addr->ref_counter = 1;
+
+		if (qeth_l3_is_addr_covered_by_ipato(card, addr)) {
+			QETH_CARD_TEXT(card, 2, "tkovaddr");
+			addr->ipato = 1;
+		}
+		hash_add(card->ip_htable, &addr->hnode,
+				qeth_l3_ipaddr_hash(addr));
+
+		if (!qeth_card_hw_is_reachable(card)) {
+			addr->disp_flag = QETH_DISP_ADDR_ADD;
+			return 0;
+		}
+
+		/* qeth_l3_register_addr_entry can sleep when adding an IPv4
+		 * address, because the SETIP IPA command triggers ARP
+		 * processing for it. So drop the spinlock around the call,
+		 * and use in_progress to flag that a hardware operation on
+		 * this IPv4 address is still pending.
+		 */
+		if (addr->proto == QETH_PROT_IPV4) {
+			addr->in_progress = 1;
+			spin_unlock_bh(&card->ip_lock);
+			rc = qeth_l3_register_addr_entry(card, addr);
+			spin_lock_bh(&card->ip_lock);
+			addr->in_progress = 0;
+		} else
+			rc = qeth_l3_register_addr_entry(card, addr);
+
+		if (!rc || (rc == IPA_RC_DUPLICATE_IP_ADDRESS) ||
+				(rc == IPA_RC_LAN_OFFLINE)) {
+			addr->disp_flag = QETH_DISP_ADDR_DO_NOTHING;
+			if (addr->ref_counter < 1) {
+				qeth_l3_deregister_addr_entry(card, addr);
+				hash_del(&addr->hnode);
+				kfree(addr);
+			}
+		} else {
+			hash_del(&addr->hnode);
+			kfree(addr);
+		}
+	}
+	return rc;
+}
+
+static void qeth_l3_clear_ip_htable(struct qeth_card *card, int recover)
+{
+	struct qeth_ipaddr *addr;
+	struct hlist_node *tmp;
+	int i;
+
+	QETH_CARD_TEXT(card, 4, "clearip");
+
+	spin_lock_bh(&card->ip_lock);
+
+	hash_for_each_safe(card->ip_htable, i, tmp, addr, hnode) {
+		if (!recover) {
+			hash_del(&addr->hnode);
+			kfree(addr);
+			continue;
+		}
+		addr->disp_flag = QETH_DISP_ADDR_ADD;
+	}
+
+	spin_unlock_bh(&card->ip_lock);
+
+	spin_lock_bh(&card->mclock);
+
+	hash_for_each_safe(card->ip_mc_htable, i, tmp, addr, hnode) {
+		hash_del(&addr->hnode);
+		kfree(addr);
+	}
+
+	spin_unlock_bh(&card->mclock);
+}
+
+static void qeth_l3_recover_ip(struct qeth_card *card)
+{
+	struct qeth_ipaddr *addr;
+	struct hlist_node *tmp;
+	int i;
+	int rc;
+
+	QETH_CARD_TEXT(card, 4, "recovrip");
+
+	spin_lock_bh(&card->ip_lock);
+
+	hash_for_each_safe(card->ip_htable, i, tmp, addr, hnode) {
+		if (addr->disp_flag == QETH_DISP_ADDR_ADD) {
+			if (addr->proto == QETH_PROT_IPV4) {
+				addr->in_progress = 1;
+				spin_unlock_bh(&card->ip_lock);
+				rc = qeth_l3_register_addr_entry(card, addr);
+				spin_lock_bh(&card->ip_lock);
+				addr->in_progress = 0;
+			} else
+				rc = qeth_l3_register_addr_entry(card, addr);
+
+			if (!rc) {
+				addr->disp_flag = QETH_DISP_ADDR_DO_NOTHING;
+				if (addr->ref_counter < 1)
+					qeth_l3_delete_ip(card, addr);
+			} else {
+				hash_del(&addr->hnode);
+				kfree(addr);
+			}
+		}
+	}
+
+	spin_unlock_bh(&card->ip_lock);
+}
+
+static int qeth_l3_send_setdelmc(struct qeth_card *card,
+			struct qeth_ipaddr *addr, int ipacmd)
+{
+	int rc;
+	struct qeth_cmd_buffer *iob;
+	struct qeth_ipa_cmd *cmd;
+
+	QETH_CARD_TEXT(card, 4, "setdelmc");
+
+	iob = qeth_get_ipacmd_buffer(card, ipacmd, addr->proto);
+	if (!iob)
+		return -ENOMEM;
+	cmd = __ipa_cmd(iob);
+	ether_addr_copy(cmd->data.setdelipm.mac, addr->mac);
+	if (addr->proto == QETH_PROT_IPV6)
+		memcpy(cmd->data.setdelipm.ip6, &addr->u.a6.addr,
+		       sizeof(struct in6_addr));
+	else
+		memcpy(&cmd->data.setdelipm.ip4, &addr->u.a4.addr, 4);
+
+	rc = qeth_send_ipa_cmd(card, iob, NULL, NULL);
+
+	return rc;
+}
+
+static void qeth_l3_fill_netmask(u8 *netmask, unsigned int len)
+{
+	int i, j;
+
+	for (i = 0; i < 16; i++) {
+		j = len - (i * 8);
+		if (j >= 8)
+			netmask[i] = 0xff;
+		else if (j > 0)
+			netmask[i] = (u8)(0xFF00 >> j);
+		else
+			netmask[i] = 0;
+	}
+}
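+
+/* Illustration: for a /20 prefix the loop above produces
+ *
+ *	netmask[] = ff ff f0 00 00 00 00 00 00 00 00 00 00 00 00 00
+ *
+ * i.e. full bytes while at least 8 prefix bits remain, one partial byte
+ * (0xFF00 >> remaining bits), then zeros.
+ */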
+
+static u32 qeth_l3_get_setdelip_flags(struct qeth_ipaddr *addr, bool set)
+{
+	switch (addr->type) {
+	case QETH_IP_TYPE_RXIP:
+		return (set) ? QETH_IPA_SETIP_TAKEOVER_FLAG : 0;
+	case QETH_IP_TYPE_VIPA:
+		return (set) ? QETH_IPA_SETIP_VIPA_FLAG :
+			       QETH_IPA_DELIP_VIPA_FLAG;
+	default:
+		return (set && addr->ipato) ? QETH_IPA_SETIP_TAKEOVER_FLAG : 0;
+	}
+}
+
+static int qeth_l3_send_setdelip(struct qeth_card *card,
+				 struct qeth_ipaddr *addr,
+				 enum qeth_ipa_cmds ipacmd)
+{
+	struct qeth_cmd_buffer *iob;
+	struct qeth_ipa_cmd *cmd;
+	__u8 netmask[16];
+	u32 flags;
+
+	QETH_CARD_TEXT(card, 4, "setdelip");
+
+	iob = qeth_get_ipacmd_buffer(card, ipacmd, addr->proto);
+	if (!iob)
+		return -ENOMEM;
+	cmd = __ipa_cmd(iob);
+
+	flags = qeth_l3_get_setdelip_flags(addr, ipacmd == IPA_CMD_SETIP);
+	QETH_CARD_TEXT_(card, 4, "flags%02X", flags);
+
+	if (addr->proto == QETH_PROT_IPV6) {
+		memcpy(cmd->data.setdelip6.ip_addr, &addr->u.a6.addr,
+		       sizeof(struct in6_addr));
+		qeth_l3_fill_netmask(netmask, addr->u.a6.pfxlen);
+		memcpy(cmd->data.setdelip6.mask, netmask,
+		       sizeof(struct in6_addr));
+		cmd->data.setdelip6.flags = flags;
+	} else {
+		memcpy(cmd->data.setdelip4.ip_addr, &addr->u.a4.addr, 4);
+		memcpy(cmd->data.setdelip4.mask, &addr->u.a4.mask, 4);
+		cmd->data.setdelip4.flags = flags;
+	}
+
+	return qeth_send_ipa_cmd(card, iob, NULL, NULL);
+}
+
+static int qeth_l3_send_setrouting(struct qeth_card *card,
+	enum qeth_routing_types type, enum qeth_prot_versions prot)
+{
+	int rc;
+	struct qeth_ipa_cmd *cmd;
+	struct qeth_cmd_buffer *iob;
+
+	QETH_CARD_TEXT(card, 4, "setroutg");
+	iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SETRTG, prot);
+	if (!iob)
+		return -ENOMEM;
+	cmd = __ipa_cmd(iob);
+	cmd->data.setrtg.type = type;
+	rc = qeth_send_ipa_cmd(card, iob, NULL, NULL);
+
+	return rc;
+}
+
+static int qeth_l3_correct_routing_type(struct qeth_card *card,
+		enum qeth_routing_types *type, enum qeth_prot_versions prot)
+{
+	if (card->info.type == QETH_CARD_TYPE_IQD) {
+		switch (*type) {
+		case NO_ROUTER:
+		case PRIMARY_CONNECTOR:
+		case SECONDARY_CONNECTOR:
+		case MULTICAST_ROUTER:
+			return 0;
+		default:
+			goto out_inval;
+		}
+	} else {
+		switch (*type) {
+		case NO_ROUTER:
+		case PRIMARY_ROUTER:
+		case SECONDARY_ROUTER:
+			return 0;
+		case MULTICAST_ROUTER:
+			if (qeth_is_ipafunc_supported(card, prot,
+						      IPA_OSA_MC_ROUTER))
+				return 0;
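+			/* fall through */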
+		default:
+			goto out_inval;
+		}
+	}
+out_inval:
+	*type = NO_ROUTER;
+	return -EINVAL;
+}
+
+int qeth_l3_setrouting_v4(struct qeth_card *card)
+{
+	int rc;
+
+	QETH_CARD_TEXT(card, 3, "setrtg4");
+
+	rc = qeth_l3_correct_routing_type(card, &card->options.route4.type,
+				  QETH_PROT_IPV4);
+	if (rc)
+		return rc;
+
+	rc = qeth_l3_send_setrouting(card, card->options.route4.type,
+				  QETH_PROT_IPV4);
+	if (rc) {
+		card->options.route4.type = NO_ROUTER;
+		QETH_DBF_MESSAGE(2, "Error (0x%04x) while setting routing type"
+			" on %s. Type set to 'no router'.\n", rc,
+			QETH_CARD_IFNAME(card));
+	}
+	return rc;
+}
+
+int qeth_l3_setrouting_v6(struct qeth_card *card)
+{
+	int rc = 0;
+
+	QETH_CARD_TEXT(card, 3, "setrtg6");
+
+	if (!qeth_is_supported(card, IPA_IPV6))
+		return 0;
+	rc = qeth_l3_correct_routing_type(card, &card->options.route6.type,
+				  QETH_PROT_IPV6);
+	if (rc)
+		return rc;
+
+	rc = qeth_l3_send_setrouting(card, card->options.route6.type,
+				  QETH_PROT_IPV6);
+	if (rc) {
+		card->options.route6.type = NO_ROUTER;
+		QETH_DBF_MESSAGE(2, "Error (0x%04x) while setting routing type"
+			" on %s. Type set to 'no router'.\n", rc,
+			QETH_CARD_IFNAME(card));
+	}
+	return rc;
+}
+
+/*
+ * IP address takeover related functions
+ */
+
+/**
+ * qeth_l3_update_ipato() - Update the 'takeover' property of all NORMAL IPs.
+ * @card: the qeth_card whose IP table is walked
+ *
+ * Caller must hold ip_lock.
+ */
+void qeth_l3_update_ipato(struct qeth_card *card)
+{
+	struct qeth_ipaddr *addr;
+	unsigned int i;
+
+	hash_for_each(card->ip_htable, i, addr, hnode) {
+		if (addr->type != QETH_IP_TYPE_NORMAL)
+			continue;
+		addr->ipato = qeth_l3_is_addr_covered_by_ipato(card, addr);
+	}
+}
+
+static void qeth_l3_clear_ipato_list(struct qeth_card *card)
+{
+	struct qeth_ipato_entry *ipatoe, *tmp;
+
+	spin_lock_bh(&card->ip_lock);
+
+	list_for_each_entry_safe(ipatoe, tmp, &card->ipato.entries, entry) {
+		list_del(&ipatoe->entry);
+		kfree(ipatoe);
+	}
+
+	qeth_l3_update_ipato(card);
+	spin_unlock_bh(&card->ip_lock);
+}
+
+int qeth_l3_add_ipato_entry(struct qeth_card *card,
+				struct qeth_ipato_entry *new)
+{
+	struct qeth_ipato_entry *ipatoe;
+	int rc = 0;
+
+	QETH_CARD_TEXT(card, 2, "addipato");
+
+	spin_lock_bh(&card->ip_lock);
+
+	list_for_each_entry(ipatoe, &card->ipato.entries, entry) {
+		if (ipatoe->proto != new->proto)
+			continue;
+		if (!memcmp(ipatoe->addr, new->addr,
+			    (ipatoe->proto == QETH_PROT_IPV4) ? 4 : 16) &&
+		    (ipatoe->mask_bits == new->mask_bits)) {
+			rc = -EEXIST;
+			break;
+		}
+	}
+
+	if (!rc) {
+		list_add_tail(&new->entry, &card->ipato.entries);
+		qeth_l3_update_ipato(card);
+	}
+
+	spin_unlock_bh(&card->ip_lock);
+
+	return rc;
+}
+
+int qeth_l3_del_ipato_entry(struct qeth_card *card,
+			    enum qeth_prot_versions proto, u8 *addr,
+			    int mask_bits)
+{
+	struct qeth_ipato_entry *ipatoe, *tmp;
+	int rc = -ENOENT;
+
+	QETH_CARD_TEXT(card, 2, "delipato");
+
+	spin_lock_bh(&card->ip_lock);
+
+	list_for_each_entry_safe(ipatoe, tmp, &card->ipato.entries, entry) {
+		if (ipatoe->proto != proto)
+			continue;
+		if (!memcmp(ipatoe->addr, addr,
+			    (proto == QETH_PROT_IPV4) ? 4 : 16) &&
+		    (ipatoe->mask_bits == mask_bits)) {
+			list_del(&ipatoe->entry);
+			qeth_l3_update_ipato(card);
+			kfree(ipatoe);
+			rc = 0;
+		}
+	}
+
+	spin_unlock_bh(&card->ip_lock);
+	return rc;
+}
+
+int qeth_l3_modify_rxip_vipa(struct qeth_card *card, bool add, const u8 *ip,
+			     enum qeth_ip_types type,
+			     enum qeth_prot_versions proto)
+{
+	struct qeth_ipaddr addr;
+	int rc;
+
+	qeth_l3_init_ipaddr(&addr, type, proto);
+	if (proto == QETH_PROT_IPV4)
+		memcpy(&addr.u.a4.addr, ip, 4);
+	else
+		memcpy(&addr.u.a6.addr, ip, 16);
+
+	spin_lock_bh(&card->ip_lock);
+	rc = add ? qeth_l3_add_ip(card, &addr) : qeth_l3_delete_ip(card, &addr);
+	spin_unlock_bh(&card->ip_lock);
+	return rc;
+}
+
+int qeth_l3_modify_hsuid(struct qeth_card *card, bool add)
+{
+	struct qeth_ipaddr addr;
+	int rc, i;
+
+	qeth_l3_init_ipaddr(&addr, QETH_IP_TYPE_NORMAL, QETH_PROT_IPV6);
+	addr.u.a6.addr.s6_addr[0] = 0xfe;
+	addr.u.a6.addr.s6_addr[1] = 0x80;
+	for (i = 0; i < 8; i++)
+		addr.u.a6.addr.s6_addr[8+i] = card->options.hsuid[i];
+
+	spin_lock_bh(&card->ip_lock);
+	rc = add ? qeth_l3_add_ip(card, &addr) : qeth_l3_delete_ip(card, &addr);
+	spin_unlock_bh(&card->ip_lock);
+	return rc;
+}
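+
+/* Note: the address built above is the IPv6 link-local address fe80::/64
+ * with the 8-byte HiperSockets user ID (hsuid) as interface identifier;
+ * it is added or removed like any other NORMAL address.
+ */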
+
+static int qeth_l3_register_addr_entry(struct qeth_card *card,
+				struct qeth_ipaddr *addr)
+{
+	char buf[50];
+	int rc = 0;
+	int cnt = 3;
+
+	if (card->options.sniffer)
+		return 0;
+
+	if (addr->proto == QETH_PROT_IPV4) {
+		QETH_CARD_TEXT(card, 2, "setaddr4");
+		QETH_CARD_HEX(card, 3, &addr->u.a4.addr, sizeof(int));
+	} else if (addr->proto == QETH_PROT_IPV6) {
+		QETH_CARD_TEXT(card, 2, "setaddr6");
+		QETH_CARD_HEX(card, 3, &addr->u.a6.addr, 8);
+		QETH_CARD_HEX(card, 3, ((char *)&addr->u.a6.addr) + 8, 8);
+	} else {
+		QETH_CARD_TEXT(card, 2, "setaddr?");
+		QETH_CARD_HEX(card, 3, addr, sizeof(struct qeth_ipaddr));
+	}
+	do {
+		if (addr->is_multicast)
+			rc =  qeth_l3_send_setdelmc(card, addr, IPA_CMD_SETIPM);
+		else
+			rc = qeth_l3_send_setdelip(card, addr, IPA_CMD_SETIP);
+		if (rc)
+			QETH_CARD_TEXT(card, 2, "failed");
+	} while ((--cnt > 0) && rc);
+	if (rc) {
+		QETH_CARD_TEXT(card, 2, "FAILED");
+		qeth_l3_ipaddr_to_string(addr->proto, (u8 *)&addr->u, buf);
+		dev_warn(&card->gdev->dev,
+			"Registering IP address %s failed\n", buf);
+	}
+	return rc;
+}
+
+static int qeth_l3_deregister_addr_entry(struct qeth_card *card,
+						struct qeth_ipaddr *addr)
+{
+	int rc = 0;
+
+	if (card->options.sniffer)
+		return 0;
+
+	if (addr->proto == QETH_PROT_IPV4) {
+		QETH_CARD_TEXT(card, 2, "deladdr4");
+		QETH_CARD_HEX(card, 3, &addr->u.a4.addr, sizeof(int));
+	} else if (addr->proto == QETH_PROT_IPV6) {
+		QETH_CARD_TEXT(card, 2, "deladdr6");
+		QETH_CARD_HEX(card, 3, &addr->u.a6.addr, 8);
+		QETH_CARD_HEX(card, 3, ((char *)&addr->u.a6.addr) + 8, 8);
+	} else {
+		QETH_CARD_TEXT(card, 2, "deladdr?");
+		QETH_CARD_HEX(card, 3, addr, sizeof(struct qeth_ipaddr));
+	}
+	if (addr->is_multicast)
+		rc = qeth_l3_send_setdelmc(card, addr, IPA_CMD_DELIPM);
+	else
+		rc = qeth_l3_send_setdelip(card, addr, IPA_CMD_DELIP);
+	if (rc)
+		QETH_CARD_TEXT(card, 2, "failed");
+
+	return rc;
+}
+
+static int qeth_l3_setadapter_parms(struct qeth_card *card)
+{
+	int rc = 0;
+
+	QETH_DBF_TEXT(SETUP, 2, "setadprm");
+
+	if (qeth_adp_supported(card, IPA_SETADP_ALTER_MAC_ADDRESS)) {
+		rc = qeth_setadpparms_change_macaddr(card);
+		if (rc)
+			dev_warn(&card->gdev->dev, "Reading the adapter MAC"
+				" address failed\n");
+	}
+
+	return rc;
+}
+
+static int qeth_l3_start_ipa_arp_processing(struct qeth_card *card)
+{
+	int rc;
+
+	QETH_CARD_TEXT(card, 3, "ipaarp");
+
+	if (!qeth_is_supported(card, IPA_ARP_PROCESSING)) {
+		dev_info(&card->gdev->dev,
+			"ARP processing not supported on %s!\n",
+			QETH_CARD_IFNAME(card));
+		return 0;
+	}
+	rc = qeth_send_simple_setassparms(card, IPA_ARP_PROCESSING,
+					  IPA_CMD_ASS_START, 0);
+	if (rc) {
+		dev_warn(&card->gdev->dev,
+			"Starting ARP processing support for %s failed\n",
+			QETH_CARD_IFNAME(card));
+	}
+	return rc;
+}
+
+static int qeth_l3_start_ipa_source_mac(struct qeth_card *card)
+{
+	int rc;
+
+	QETH_CARD_TEXT(card, 3, "stsrcmac");
+
+	if (!qeth_is_supported(card, IPA_SOURCE_MAC)) {
+		dev_info(&card->gdev->dev,
+			"Inbound source MAC-address not supported on %s\n",
+			QETH_CARD_IFNAME(card));
+		return -EOPNOTSUPP;
+	}
+
+	rc = qeth_send_simple_setassparms(card, IPA_SOURCE_MAC,
+					  IPA_CMD_ASS_START, 0);
+	if (rc)
+		dev_warn(&card->gdev->dev,
+			"Starting source MAC-address support for %s failed\n",
+			QETH_CARD_IFNAME(card));
+	return rc;
+}
+
+static int qeth_l3_start_ipa_vlan(struct qeth_card *card)
+{
+	int rc = 0;
+
+	QETH_CARD_TEXT(card, 3, "strtvlan");
+
+	if (!qeth_is_supported(card, IPA_FULL_VLAN)) {
+		dev_info(&card->gdev->dev,
+			"VLAN not supported on %s\n", QETH_CARD_IFNAME(card));
+		return -EOPNOTSUPP;
+	}
+
+	rc = qeth_send_simple_setassparms(card, IPA_VLAN_PRIO,
+					  IPA_CMD_ASS_START, 0);
+	if (rc) {
+		dev_warn(&card->gdev->dev,
+			"Starting VLAN support for %s failed\n",
+			QETH_CARD_IFNAME(card));
+	} else {
+		dev_info(&card->gdev->dev, "VLAN enabled\n");
+	}
+	return rc;
+}
+
+static int qeth_l3_start_ipa_multicast(struct qeth_card *card)
+{
+	int rc;
+
+	QETH_CARD_TEXT(card, 3, "stmcast");
+
+	if (!qeth_is_supported(card, IPA_MULTICASTING)) {
+		dev_info(&card->gdev->dev,
+			"Multicast not supported on %s\n",
+			QETH_CARD_IFNAME(card));
+		return -EOPNOTSUPP;
+	}
+
+	rc = qeth_send_simple_setassparms(card, IPA_MULTICASTING,
+					  IPA_CMD_ASS_START, 0);
+	if (rc) {
+		dev_warn(&card->gdev->dev,
+			"Starting multicast support for %s failed\n",
+			QETH_CARD_IFNAME(card));
+	} else {
+		dev_info(&card->gdev->dev, "Multicast enabled\n");
+		card->dev->flags |= IFF_MULTICAST;
+	}
+	return rc;
+}
+
+static int qeth_l3_softsetup_ipv6(struct qeth_card *card)
+{
+	int rc;
+
+	QETH_CARD_TEXT(card, 3, "softipv6");
+
+	if (card->info.type == QETH_CARD_TYPE_IQD)
+		goto out;
+
+	rc = qeth_send_simple_setassparms(card, IPA_IPV6,
+					  IPA_CMD_ASS_START, 3);
+	if (rc) {
+		dev_err(&card->gdev->dev,
+			"Activating IPv6 support for %s failed\n",
+			QETH_CARD_IFNAME(card));
+		return rc;
+	}
+	rc = qeth_send_simple_setassparms_v6(card, IPA_IPV6,
+					     IPA_CMD_ASS_START, 0);
+	if (rc) {
+		dev_err(&card->gdev->dev,
+			"Activating IPv6 support for %s failed\n",
+			 QETH_CARD_IFNAME(card));
+		return rc;
+	}
+	rc = qeth_send_simple_setassparms_v6(card, IPA_PASSTHRU,
+					     IPA_CMD_ASS_START, 0);
+	if (rc) {
+		dev_warn(&card->gdev->dev,
+			"Enabling the passthrough mode for %s failed\n",
+			QETH_CARD_IFNAME(card));
+		return rc;
+	}
+out:
+	dev_info(&card->gdev->dev, "IPV6 enabled\n");
+	return 0;
+}
+
+static int qeth_l3_start_ipa_ipv6(struct qeth_card *card)
+{
+	QETH_CARD_TEXT(card, 3, "strtipv6");
+
+	if (!qeth_is_supported(card, IPA_IPV6)) {
+		dev_info(&card->gdev->dev,
+			"IPv6 not supported on %s\n", QETH_CARD_IFNAME(card));
+		return 0;
+	}
+	return qeth_l3_softsetup_ipv6(card);
+}
+
+static int qeth_l3_start_ipa_broadcast(struct qeth_card *card)
+{
+	int rc;
+
+	QETH_CARD_TEXT(card, 3, "stbrdcst");
+	card->info.broadcast_capable = 0;
+	if (!qeth_is_supported(card, IPA_FILTERING)) {
+		dev_info(&card->gdev->dev,
+			"Broadcast not supported on %s\n",
+			QETH_CARD_IFNAME(card));
+		rc = -EOPNOTSUPP;
+		goto out;
+	}
+	rc = qeth_send_simple_setassparms(card, IPA_FILTERING,
+					  IPA_CMD_ASS_START, 0);
+	if (rc) {
+		dev_warn(&card->gdev->dev, "Enabling broadcast filtering for "
+			"%s failed\n", QETH_CARD_IFNAME(card));
+		goto out;
+	}
+
+	rc = qeth_send_simple_setassparms(card, IPA_FILTERING,
+					  IPA_CMD_ASS_CONFIGURE, 1);
+	if (rc) {
+		dev_warn(&card->gdev->dev,
+			"Setting up broadcast filtering for %s failed\n",
+			QETH_CARD_IFNAME(card));
+		goto out;
+	}
+	card->info.broadcast_capable = QETH_BROADCAST_WITH_ECHO;
+	dev_info(&card->gdev->dev, "Broadcast enabled\n");
+	rc = qeth_send_simple_setassparms(card, IPA_FILTERING,
+					  IPA_CMD_ASS_ENABLE, 1);
+	if (rc) {
+		dev_warn(&card->gdev->dev, "Setting up broadcast echo "
+			"filtering for %s failed\n", QETH_CARD_IFNAME(card));
+		goto out;
+	}
+	card->info.broadcast_capable = QETH_BROADCAST_WITHOUT_ECHO;
+out:
+	if (card->info.broadcast_capable)
+		card->dev->flags |= IFF_BROADCAST;
+	else
+		card->dev->flags &= ~IFF_BROADCAST;
+	return rc;
+}
+
+static int qeth_l3_start_ipassists(struct qeth_card *card)
+{
+	QETH_CARD_TEXT(card, 3, "strtipas");
+
+	if (qeth_set_access_ctrl_online(card, 0))
+		return -EIO;
+	qeth_l3_start_ipa_arp_processing(card);	/* go on */
+	qeth_l3_start_ipa_source_mac(card);	/* go on */
+	qeth_l3_start_ipa_vlan(card);		/* go on */
+	qeth_l3_start_ipa_multicast(card);	/* go on */
+	qeth_l3_start_ipa_ipv6(card);		/* go on */
+	qeth_l3_start_ipa_broadcast(card);	/* go on */
+	return 0;
+}
+
+static int qeth_l3_iqd_read_initial_mac_cb(struct qeth_card *card,
+		struct qeth_reply *reply, unsigned long data)
+{
+	struct qeth_ipa_cmd *cmd;
+
+	cmd = (struct qeth_ipa_cmd *) data;
+	if (cmd->hdr.return_code == 0)
+		ether_addr_copy(card->dev->dev_addr,
+				cmd->data.create_destroy_addr.unique_id);
+	else
+		eth_random_addr(card->dev->dev_addr);
+
+	return 0;
+}
+
+static int qeth_l3_iqd_read_initial_mac(struct qeth_card *card)
+{
+	int rc = 0;
+	struct qeth_cmd_buffer *iob;
+	struct qeth_ipa_cmd *cmd;
+
+	QETH_DBF_TEXT(SETUP, 2, "hsrmac");
+
+	iob = qeth_get_ipacmd_buffer(card, IPA_CMD_CREATE_ADDR,
+				     QETH_PROT_IPV6);
+	if (!iob)
+		return -ENOMEM;
+	cmd = __ipa_cmd(iob);
+	*((__u16 *) &cmd->data.create_destroy_addr.unique_id[6]) =
+			card->info.unique_id;
+
+	rc = qeth_send_ipa_cmd(card, iob, qeth_l3_iqd_read_initial_mac_cb,
+				NULL);
+	return rc;
+}
+
+static int qeth_l3_get_unique_id_cb(struct qeth_card *card,
+		struct qeth_reply *reply, unsigned long data)
+{
+	struct qeth_ipa_cmd *cmd;
+
+	cmd = (struct qeth_ipa_cmd *) data;
+	if (cmd->hdr.return_code == 0)
+		card->info.unique_id = *((__u16 *)
+				&cmd->data.create_destroy_addr.unique_id[6]);
+	else {
+		card->info.unique_id = UNIQUE_ID_IF_CREATE_ADDR_FAILED |
+					UNIQUE_ID_NOT_BY_CARD;
+		dev_warn(&card->gdev->dev, "The network adapter failed to "
+			"generate a unique ID\n");
+	}
+	return 0;
+}
+
+static int qeth_l3_get_unique_id(struct qeth_card *card)
+{
+	int rc = 0;
+	struct qeth_cmd_buffer *iob;
+	struct qeth_ipa_cmd *cmd;
+
+	QETH_DBF_TEXT(SETUP, 2, "guniqeid");
+
+	if (!qeth_is_supported(card, IPA_IPV6)) {
+		card->info.unique_id = UNIQUE_ID_IF_CREATE_ADDR_FAILED |
+					UNIQUE_ID_NOT_BY_CARD;
+		return 0;
+	}
+
+	iob = qeth_get_ipacmd_buffer(card, IPA_CMD_CREATE_ADDR,
+				     QETH_PROT_IPV6);
+	if (!iob)
+		return -ENOMEM;
+	cmd = __ipa_cmd(iob);
+	*((__u16 *) &cmd->data.create_destroy_addr.unique_id[6]) =
+			card->info.unique_id;
+
+	rc = qeth_send_ipa_cmd(card, iob, qeth_l3_get_unique_id_cb, NULL);
+	return rc;
+}
+
+static int
+qeth_diags_trace_cb(struct qeth_card *card, struct qeth_reply *reply,
+			    unsigned long data)
+{
+	struct qeth_ipa_cmd *cmd;
+	__u16 rc;
+
+	QETH_DBF_TEXT(SETUP, 2, "diastrcb");
+
+	cmd = (struct qeth_ipa_cmd *)data;
+	rc = cmd->hdr.return_code;
+	if (rc)
+		QETH_CARD_TEXT_(card, 2, "dxter%x", rc);
+	switch (cmd->data.diagass.action) {
+	case QETH_DIAGS_CMD_TRACE_QUERY:
+		break;
+	case QETH_DIAGS_CMD_TRACE_DISABLE:
+		switch (rc) {
+		case 0:
+		case IPA_RC_INVALID_SUBCMD:
+			card->info.promisc_mode = SET_PROMISC_MODE_OFF;
+			dev_info(&card->gdev->dev, "The HiperSockets network "
+				"traffic analyzer is deactivated\n");
+			break;
+		default:
+			break;
+		}
+		break;
+	case QETH_DIAGS_CMD_TRACE_ENABLE:
+		switch (rc) {
+		case 0:
+			card->info.promisc_mode = SET_PROMISC_MODE_ON;
+			dev_info(&card->gdev->dev, "The HiperSockets network "
+				"traffic analyzer is activated\n");
+			break;
+		case IPA_RC_HARDWARE_AUTH_ERROR:
+			dev_warn(&card->gdev->dev, "The device is not "
+				"authorized to run as a HiperSockets network "
+				"traffic analyzer\n");
+			break;
+		case IPA_RC_TRACE_ALREADY_ACTIVE:
+			dev_warn(&card->gdev->dev, "A HiperSockets "
+				"network traffic analyzer is already "
+				"active in the HiperSockets LAN\n");
+			break;
+		default:
+			break;
+		}
+		break;
+	default:
+		QETH_DBF_MESSAGE(2, "Unknown sniffer action (0x%04x) on %s\n",
+			cmd->data.diagass.action, QETH_CARD_IFNAME(card));
+	}
+
+	return 0;
+}
+
+static int
+qeth_diags_trace(struct qeth_card *card, enum qeth_diags_trace_cmds diags_cmd)
+{
+	struct qeth_cmd_buffer *iob;
+	struct qeth_ipa_cmd *cmd;
+
+	QETH_DBF_TEXT(SETUP, 2, "diagtrac");
+
+	iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SET_DIAG_ASS, 0);
+	if (!iob)
+		return -ENOMEM;
+	cmd = __ipa_cmd(iob);
+	cmd->data.diagass.subcmd_len = 16;
+	cmd->data.diagass.subcmd = QETH_DIAGS_CMD_TRACE;
+	cmd->data.diagass.type = QETH_DIAGS_TYPE_HIPERSOCKET;
+	cmd->data.diagass.action = diags_cmd;
+	return qeth_send_ipa_cmd(card, iob, qeth_diags_trace_cb, NULL);
+}
+
+static void
+qeth_l3_add_mc_to_hash(struct qeth_card *card, struct in_device *in4_dev)
+{
+	struct ip_mc_list *im4;
+	struct qeth_ipaddr *tmp, *ipm;
+
+	QETH_CARD_TEXT(card, 4, "addmc");
+
+	tmp = qeth_l3_get_addr_buffer(QETH_PROT_IPV4);
+	if (!tmp)
+		return;
+
+	for (im4 = rcu_dereference(in4_dev->mc_list); im4 != NULL;
+	     im4 = rcu_dereference(im4->next_rcu)) {
+		ip_eth_mc_map(im4->multiaddr, tmp->mac);
+		tmp->u.a4.addr = be32_to_cpu(im4->multiaddr);
+		tmp->is_multicast = 1;
+
+		ipm = qeth_l3_find_addr_by_ip(card, tmp);
+		if (ipm) {
+			/* for mcast, by-IP match means full match */
+			ipm->disp_flag = QETH_DISP_ADDR_DO_NOTHING;
+		} else {
+			ipm = qeth_l3_get_addr_buffer(QETH_PROT_IPV4);
+			if (!ipm)
+				continue;
+			ether_addr_copy(ipm->mac, tmp->mac);
+			ipm->u.a4.addr = be32_to_cpu(im4->multiaddr);
+			ipm->is_multicast = 1;
+			ipm->disp_flag = QETH_DISP_ADDR_ADD;
+			hash_add(card->ip_mc_htable,
+					&ipm->hnode, qeth_l3_ipaddr_hash(ipm));
+		}
+	}
+
+	kfree(tmp);
+}
+
+/* called with rcu_read_lock */
+static void qeth_l3_add_vlan_mc(struct qeth_card *card)
+{
+	struct in_device *in_dev;
+	u16 vid;
+
+	QETH_CARD_TEXT(card, 4, "addmcvl");
+
+	if (!qeth_is_supported(card, IPA_FULL_VLAN))
+		return;
+
+	for_each_set_bit(vid, card->active_vlans, VLAN_N_VID) {
+		struct net_device *netdev;
+
+		netdev = __vlan_find_dev_deep_rcu(card->dev, htons(ETH_P_8021Q),
+					      vid);
+		if (netdev == NULL ||
+		    !(netdev->flags & IFF_UP))
+			continue;
+		in_dev = __in_dev_get_rcu(netdev);
+		if (!in_dev)
+			continue;
+		qeth_l3_add_mc_to_hash(card, in_dev);
+	}
+}
+
+static void qeth_l3_add_multicast_ipv4(struct qeth_card *card)
+{
+	struct in_device *in4_dev;
+
+	QETH_CARD_TEXT(card, 4, "chkmcv4");
+
+	rcu_read_lock();
+	in4_dev = __in_dev_get_rcu(card->dev);
+	if (in4_dev == NULL)
+		goto unlock;
+	qeth_l3_add_mc_to_hash(card, in4_dev);
+	qeth_l3_add_vlan_mc(card);
+unlock:
+	rcu_read_unlock();
+}
+
+static void qeth_l3_add_mc6_to_hash(struct qeth_card *card,
+				    struct inet6_dev *in6_dev)
+{
+	struct qeth_ipaddr *ipm;
+	struct ifmcaddr6 *im6;
+	struct qeth_ipaddr *tmp;
+
+	QETH_CARD_TEXT(card, 4, "addmc6");
+
+	tmp = qeth_l3_get_addr_buffer(QETH_PROT_IPV6);
+	if (!tmp)
+		return;
+
+	for (im6 = in6_dev->mc_list; im6 != NULL; im6 = im6->next) {
+		ipv6_eth_mc_map(&im6->mca_addr, tmp->mac);
+		memcpy(&tmp->u.a6.addr, &im6->mca_addr.s6_addr,
+		       sizeof(struct in6_addr));
+		tmp->is_multicast = 1;
+
+		ipm = qeth_l3_find_addr_by_ip(card, tmp);
+		if (ipm) {
+			/* for mcast, by-IP match means full match */
+			ipm->disp_flag = QETH_DISP_ADDR_DO_NOTHING;
+			continue;
+		}
+
+		ipm = qeth_l3_get_addr_buffer(QETH_PROT_IPV6);
+		if (!ipm)
+			continue;
+
+		ether_addr_copy(ipm->mac, tmp->mac);
+		memcpy(&ipm->u.a6.addr, &im6->mca_addr.s6_addr,
+		       sizeof(struct in6_addr));
+		ipm->is_multicast = 1;
+		ipm->disp_flag = QETH_DISP_ADDR_ADD;
+		hash_add(card->ip_mc_htable,
+				&ipm->hnode, qeth_l3_ipaddr_hash(ipm));
+
+	}
+	kfree(tmp);
+}
+
+/* called with rcu_read_lock */
+static void qeth_l3_add_vlan_mc6(struct qeth_card *card)
+{
+	struct inet6_dev *in_dev;
+	u16 vid;
+
+	QETH_CARD_TEXT(card, 4, "admc6vl");
+
+	if (!qeth_is_supported(card, IPA_FULL_VLAN))
+		return;
+
+	for_each_set_bit(vid, card->active_vlans, VLAN_N_VID) {
+		struct net_device *netdev;
+
+		netdev = __vlan_find_dev_deep_rcu(card->dev, htons(ETH_P_8021Q),
+					      vid);
+		if (netdev == NULL ||
+		    !(netdev->flags & IFF_UP))
+			continue;
+		in_dev = in6_dev_get(netdev);
+		if (!in_dev)
+			continue;
+		read_lock_bh(&in_dev->lock);
+		qeth_l3_add_mc6_to_hash(card, in_dev);
+		read_unlock_bh(&in_dev->lock);
+		in6_dev_put(in_dev);
+	}
+}
+
+static void qeth_l3_add_multicast_ipv6(struct qeth_card *card)
+{
+	struct inet6_dev *in6_dev;
+
+	QETH_CARD_TEXT(card, 4, "chkmcv6");
+
+	if (!qeth_is_supported(card, IPA_IPV6))
+		return;
+	in6_dev = in6_dev_get(card->dev);
+	if (!in6_dev)
+		return;
+
+	rcu_read_lock();
+	read_lock_bh(&in6_dev->lock);
+	qeth_l3_add_mc6_to_hash(card, in6_dev);
+	qeth_l3_add_vlan_mc6(card);
+	read_unlock_bh(&in6_dev->lock);
+	rcu_read_unlock();
+	in6_dev_put(in6_dev);
+}
+
+static int qeth_l3_vlan_rx_add_vid(struct net_device *dev,
+				   __be16 proto, u16 vid)
+{
+	struct qeth_card *card = dev->ml_priv;
+
+	set_bit(vid, card->active_vlans);
+	return 0;
+}
+
+static int qeth_l3_vlan_rx_kill_vid(struct net_device *dev,
+				    __be16 proto, u16 vid)
+{
+	struct qeth_card *card = dev->ml_priv;
+
+	QETH_CARD_TEXT_(card, 4, "kid:%d", vid);
+
+	if (qeth_wait_for_threads(card, QETH_RECOVER_THREAD)) {
+		QETH_CARD_TEXT(card, 3, "kidREC");
+		return 0;
+	}
+	clear_bit(vid, card->active_vlans);
+	qeth_l3_set_rx_mode(dev);
+	return 0;
+}
+
+static void qeth_l3_rebuild_skb(struct qeth_card *card, struct sk_buff *skb,
+				struct qeth_hdr *hdr)
+{
+	if (!(hdr->hdr.l3.flags & QETH_HDR_PASSTHRU)) {
+		u16 prot = (hdr->hdr.l3.flags & QETH_HDR_IPV6) ? ETH_P_IPV6 :
+								 ETH_P_IP;
+		unsigned char tg_addr[ETH_ALEN];
+
+		skb_reset_network_header(skb);
+		switch (hdr->hdr.l3.flags & QETH_HDR_CAST_MASK) {
+		case QETH_CAST_MULTICAST:
+			if (prot == ETH_P_IP)
+				ip_eth_mc_map(ip_hdr(skb)->daddr, tg_addr);
+			else
+				ipv6_eth_mc_map(&ipv6_hdr(skb)->daddr, tg_addr);
+
+			card->stats.multicast++;
+			break;
+		case QETH_CAST_BROADCAST:
+			ether_addr_copy(tg_addr, card->dev->broadcast);
+			card->stats.multicast++;
+			break;
+		default:
+			if (card->options.sniffer)
+				skb->pkt_type = PACKET_OTHERHOST;
+			ether_addr_copy(tg_addr, card->dev->dev_addr);
+		}
+
+		if (hdr->hdr.l3.ext_flags & QETH_HDR_EXT_SRC_MAC_ADDR)
+			card->dev->header_ops->create(skb, card->dev, prot,
+				tg_addr, &hdr->hdr.l3.next_hop.rx.src_mac,
+				skb->len);
+		else
+			card->dev->header_ops->create(skb, card->dev, prot,
+				tg_addr, "FAKELL", skb->len);
+	}
+
+	skb->protocol = eth_type_trans(skb, card->dev);
+
+	/* copy VLAN tag from hdr into skb */
+	if (!card->options.sniffer &&
+	    (hdr->hdr.l3.ext_flags & (QETH_HDR_EXT_VLAN_FRAME |
+				      QETH_HDR_EXT_INCLUDE_VLAN_TAG))) {
+		u16 tag = (hdr->hdr.l3.ext_flags & QETH_HDR_EXT_VLAN_FRAME) ?
+				hdr->hdr.l3.vlan_id :
+				hdr->hdr.l3.next_hop.rx.vlan_id;
+		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), tag);
+	}
+
+	qeth_rx_csum(card, skb, hdr->hdr.l3.ext_flags);
+}
+
+static int qeth_l3_process_inbound_buffer(struct qeth_card *card,
+				int budget, int *done)
+{
+	int work_done = 0;
+	struct sk_buff *skb;
+	struct qeth_hdr *hdr;
+	unsigned int len;
+	__u16 magic;
+
+	*done = 0;
+	WARN_ON_ONCE(!budget);
+	while (budget) {
+		skb = qeth_core_get_next_skb(card,
+			&card->qdio.in_q->bufs[card->rx.b_index],
+			&card->rx.b_element, &card->rx.e_offset, &hdr);
+		if (!skb) {
+			*done = 1;
+			break;
+		}
+		switch (hdr->hdr.l3.id) {
+		case QETH_HEADER_TYPE_LAYER3:
+			magic = *(__u16 *)skb->data;
+			if ((card->info.type == QETH_CARD_TYPE_IQD) &&
+			    (magic == ETH_P_AF_IUCV)) {
+				skb->protocol = cpu_to_be16(ETH_P_AF_IUCV);
+				len = skb->len;
+				card->dev->header_ops->create(skb, card->dev, 0,
+					card->dev->dev_addr, "FAKELL", len);
+				skb_reset_mac_header(skb);
+				netif_receive_skb(skb);
+			} else {
+				qeth_l3_rebuild_skb(card, skb, hdr);
+				len = skb->len;
+				napi_gro_receive(&card->napi, skb);
+			}
+			break;
+		case QETH_HEADER_TYPE_LAYER2: /* for HiperSockets sniffer */
+			skb->protocol = eth_type_trans(skb, skb->dev);
+			len = skb->len;
+			netif_receive_skb(skb);
+			break;
+		default:
+			dev_kfree_skb_any(skb);
+			QETH_CARD_TEXT(card, 3, "inbunkno");
+			QETH_DBF_HEX(CTRL, 3, hdr, sizeof(*hdr));
+			continue;
+		}
+		work_done++;
+		budget--;
+		card->stats.rx_packets++;
+		card->stats.rx_bytes += len;
+	}
+	return work_done;
+}
+
+static void qeth_l3_stop_card(struct qeth_card *card, int recovery_mode)
+{
+	QETH_DBF_TEXT(SETUP, 2, "stopcard");
+	QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *));
+
+	qeth_set_allowed_threads(card, 0, 1);
+	if (card->options.sniffer &&
+	    (card->info.promisc_mode == SET_PROMISC_MODE_ON))
+		qeth_diags_trace(card, QETH_DIAGS_CMD_TRACE_DISABLE);
+	if (card->read.state == CH_STATE_UP &&
+	    card->write.state == CH_STATE_UP &&
+	    (card->state == CARD_STATE_UP)) {
+		if (recovery_mode)
+			qeth_l3_stop(card->dev);
+		else {
+			rtnl_lock();
+			dev_close(card->dev);
+			rtnl_unlock();
+		}
+		card->state = CARD_STATE_SOFTSETUP;
+	}
+	if (card->state == CARD_STATE_SOFTSETUP) {
+		qeth_l3_clear_ip_htable(card, 1);
+		qeth_clear_ipacmd_list(card);
+		card->state = CARD_STATE_HARDSETUP;
+	}
+	if (card->state == CARD_STATE_HARDSETUP) {
+		qeth_qdio_clear_card(card, 0);
+		qeth_clear_qdio_buffers(card);
+		qeth_clear_working_pool_list(card);
+		card->state = CARD_STATE_DOWN;
+	}
+	if (card->state == CARD_STATE_DOWN) {
+		qeth_clear_cmd_buffers(&card->read);
+		qeth_clear_cmd_buffers(&card->write);
+	}
+}
+
+/*
+ * Test for and switch promiscuous mode (on or off),
+ * either for GuestLAN or for the HiperSockets sniffer.
+ */
+static void
+qeth_l3_handle_promisc_mode(struct qeth_card *card)
+{
+	struct net_device *dev = card->dev;
+
+	if (((dev->flags & IFF_PROMISC) &&
+	     (card->info.promisc_mode == SET_PROMISC_MODE_ON)) ||
+	    (!(dev->flags & IFF_PROMISC) &&
+	     (card->info.promisc_mode == SET_PROMISC_MODE_OFF)))
+		return;
+
+	if (card->info.guestlan) {		/* Guestlan trace */
+		if (qeth_adp_supported(card, IPA_SETADP_SET_PROMISC_MODE))
+			qeth_setadp_promisc_mode(card);
+	} else if (card->options.sniffer &&	/* HiperSockets trace */
+		   qeth_adp_supported(card, IPA_SETADP_SET_DIAG_ASSIST)) {
+		if (dev->flags & IFF_PROMISC) {
+			QETH_CARD_TEXT(card, 3, "+promisc");
+			qeth_diags_trace(card, QETH_DIAGS_CMD_TRACE_ENABLE);
+		} else {
+			QETH_CARD_TEXT(card, 3, "-promisc");
+			qeth_diags_trace(card, QETH_DIAGS_CMD_TRACE_DISABLE);
+		}
+	}
+}
+
+static void qeth_l3_set_rx_mode(struct net_device *dev)
+{
+	struct qeth_card *card = dev->ml_priv;
+	struct qeth_ipaddr *addr;
+	struct hlist_node *tmp;
+	int i, rc;
+
+	QETH_CARD_TEXT(card, 3, "setmulti");
+	if (qeth_threads_running(card, QETH_RECOVER_THREAD) &&
+	    (card->state != CARD_STATE_UP))
+		return;
+	if (!card->options.sniffer) {
+		spin_lock_bh(&card->mclock);
+
+		qeth_l3_add_multicast_ipv4(card);
+		qeth_l3_add_multicast_ipv6(card);
+
+		hash_for_each_safe(card->ip_mc_htable, i, tmp, addr, hnode) {
+			switch (addr->disp_flag) {
+			case QETH_DISP_ADDR_DELETE:
+				rc = qeth_l3_deregister_addr_entry(card, addr);
+				if (!rc || rc == IPA_RC_MC_ADDR_NOT_FOUND) {
+					hash_del(&addr->hnode);
+					kfree(addr);
+				}
+				break;
+			case QETH_DISP_ADDR_ADD:
+				rc = qeth_l3_register_addr_entry(card, addr);
+				if (rc && rc != IPA_RC_LAN_OFFLINE) {
+					hash_del(&addr->hnode);
+					kfree(addr);
+					break;
+				}
+				addr->ref_counter = 1;
+				/* fall through */
+			default:
+				/* for next call to set_rx_mode(): */
+				addr->disp_flag = QETH_DISP_ADDR_DELETE;
+			}
+		}
+
+		spin_unlock_bh(&card->mclock);
+
+		if (!qeth_adp_supported(card, IPA_SETADP_SET_PROMISC_MODE))
+			return;
+	}
+	qeth_l3_handle_promisc_mode(card);
+}
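+
+/* Sketch of the disp_flag cycle driven by the rx_mode walk above: the
+ * add_multicast passes re-mark every address still reported by the stack
+ * as DO_NOTHING and insert new ones as ADD. The walk then registers ADD
+ * entries, removes DELETE entries, and downgrades the survivors to
+ * DELETE, so addresses that vanish from the stack are deregistered on
+ * the next call.
+ */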
+
+static const char *qeth_l3_arp_get_error_cause(int *rc)
+{
+	switch (*rc) {
+	case QETH_IPA_ARP_RC_FAILED:
+		*rc = -EIO;
+		return "operation failed";
+	case QETH_IPA_ARP_RC_NOTSUPP:
+		*rc = -EOPNOTSUPP;
+		return "operation not supported";
+	case QETH_IPA_ARP_RC_OUT_OF_RANGE:
+		*rc = -EINVAL;
+		return "argument out of range";
+	case QETH_IPA_ARP_RC_Q_NOTSUPP:
+		*rc = -EOPNOTSUPP;
+		return "query operation not supported";
+	case QETH_IPA_ARP_RC_Q_NO_DATA:
+		*rc = -ENOENT;
+		return "no query data available";
+	default:
+		return "unknown error";
+	}
+}
+
+static int qeth_l3_arp_set_no_entries(struct qeth_card *card, int no_entries)
+{
+	int tmp;
+	int rc;
+
+	QETH_CARD_TEXT(card, 3, "arpstnoe");
+
+	/*
+	 * currently GuestLAN only supports the ARP assist function
+	 * IPA_CMD_ASS_ARP_QUERY_INFO, but not IPA_CMD_ASS_ARP_SET_NO_ENTRIES;
+	 * so return -EOPNOTSUPP for this ARP function
+	 */
+	if (card->info.guestlan)
+		return -EOPNOTSUPP;
+	if (!qeth_is_supported(card, IPA_ARP_PROCESSING))
+		return -EOPNOTSUPP;
+	rc = qeth_send_simple_setassparms(card, IPA_ARP_PROCESSING,
+					  IPA_CMD_ASS_ARP_SET_NO_ENTRIES,
+					  no_entries);
+	if (rc) {
+		tmp = rc;
+		QETH_DBF_MESSAGE(2, "Could not set number of ARP entries on "
+			"%s: %s (0x%x/%d)\n", QETH_CARD_IFNAME(card),
+			qeth_l3_arp_get_error_cause(&rc), tmp, tmp);
+	}
+	return rc;
+}
+
+static __u32 get_arp_entry_size(struct qeth_card *card,
+			struct qeth_arp_query_data *qdata,
+			struct qeth_arp_entrytype *type, __u8 strip_entries)
+{
+	__u32 rc;
+	__u8 is_hsi;
+
+	is_hsi = qdata->reply_bits == 5;
+	if (type->ip == QETHARP_IP_ADDR_V4) {
+		QETH_CARD_TEXT(card, 4, "arpev4");
+		if (strip_entries) {
+			rc = is_hsi ? sizeof(struct qeth_arp_qi_entry5_short) :
+				sizeof(struct qeth_arp_qi_entry7_short);
+		} else {
+			rc = is_hsi ? sizeof(struct qeth_arp_qi_entry5) :
+				sizeof(struct qeth_arp_qi_entry7);
+		}
+	} else if (type->ip == QETHARP_IP_ADDR_V6) {
+		QETH_CARD_TEXT(card, 4, "arpev6");
+		if (strip_entries) {
+			rc = is_hsi ?
+				sizeof(struct qeth_arp_qi_entry5_short_ipv6) :
+				sizeof(struct qeth_arp_qi_entry7_short_ipv6);
+		} else {
+			rc = is_hsi ?
+				sizeof(struct qeth_arp_qi_entry5_ipv6) :
+				sizeof(struct qeth_arp_qi_entry7_ipv6);
+		}
+	} else {
+		QETH_CARD_TEXT(card, 4, "arpinv");
+		rc = 0;
+	}
+
+	return rc;
+}
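+
+/* Note: the reply layout varies along two independent axes - HiperSockets
+ * (reply_bits == 5) vs. OSA, and whether the caller asked for the
+ * media-specific bytes to be stripped - giving four entry sizes per IP
+ * version.
+ */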
+
+static int arpentry_matches_prot(struct qeth_arp_entrytype *type, __u16 prot)
+{
+	return (type->ip == QETHARP_IP_ADDR_V4 && prot == QETH_PROT_IPV4) ||
+		(type->ip == QETHARP_IP_ADDR_V6 && prot == QETH_PROT_IPV6);
+}
+
+static int qeth_l3_arp_query_cb(struct qeth_card *card,
+		struct qeth_reply *reply, unsigned long data)
+{
+	struct qeth_ipa_cmd *cmd;
+	struct qeth_arp_query_data *qdata;
+	struct qeth_arp_query_info *qinfo;
+	int i;
+	int e;
+	int entrybytes_done;
+	int stripped_bytes;
+	__u8 do_strip_entries;
+
+	QETH_CARD_TEXT(card, 3, "arpquecb");
+
+	qinfo = (struct qeth_arp_query_info *) reply->param;
+	cmd = (struct qeth_ipa_cmd *) data;
+	QETH_CARD_TEXT_(card, 4, "%i", cmd->hdr.prot_version);
+	if (cmd->hdr.return_code) {
+		QETH_CARD_TEXT(card, 4, "arpcberr");
+		QETH_CARD_TEXT_(card, 4, "%i", cmd->hdr.return_code);
+		return 0;
+	}
+	if (cmd->data.setassparms.hdr.return_code) {
+		cmd->hdr.return_code = cmd->data.setassparms.hdr.return_code;
+		QETH_CARD_TEXT(card, 4, "setaperr");
+		QETH_CARD_TEXT_(card, 4, "%i", cmd->hdr.return_code);
+		return 0;
+	}
+	qdata = &cmd->data.setassparms.data.query_arp;
+	QETH_CARD_TEXT_(card, 4, "anoen%i", qdata->no_entries);
+
+	do_strip_entries = (qinfo->mask_bits & QETH_QARP_STRIP_ENTRIES) > 0;
+	stripped_bytes = do_strip_entries ? QETH_QARP_MEDIASPECIFIC_BYTES : 0;
+	entrybytes_done = 0;
+	for (e = 0; e < qdata->no_entries; ++e) {
+		char *cur_entry;
+		__u32 esize;
+		struct qeth_arp_entrytype *etype;
+
+		cur_entry = &qdata->data + entrybytes_done;
+		etype = &((struct qeth_arp_qi_entry5 *) cur_entry)->type;
+		if (!arpentry_matches_prot(etype, cmd->hdr.prot_version)) {
+			QETH_CARD_TEXT(card, 4, "pmis");
+			QETH_CARD_TEXT_(card, 4, "%i", etype->ip);
+			break;
+		}
+		esize = get_arp_entry_size(card, qdata, etype,
+			do_strip_entries);
+		QETH_CARD_TEXT_(card, 5, "esz%i", esize);
+		if (!esize)
+			break;
+
+		if ((qinfo->udata_len - qinfo->udata_offset) < esize) {
+			QETH_CARD_TEXT_(card, 4, "qaer3%i", -ENOMEM);
+			cmd->hdr.return_code = IPA_RC_ENOMEM;
+			goto out_error;
+		}
+
+		memcpy(qinfo->udata + qinfo->udata_offset,
+			&qdata->data + entrybytes_done + stripped_bytes,
+			esize);
+		entrybytes_done += esize + stripped_bytes;
+		qinfo->udata_offset += esize;
+		++qinfo->no_entries;
+	}
+	/* check if all replies received ... */
+	if (cmd->data.setassparms.hdr.seq_no <
+	    cmd->data.setassparms.hdr.number_of_replies)
+		return 1;
+	QETH_CARD_TEXT_(card, 4, "nove%i", qinfo->no_entries);
+	memcpy(qinfo->udata, &qinfo->no_entries, 4);
+	/* keep STRIP_ENTRIES flag so the user program can distinguish
+	 * stripped entries from normal ones */
+	if (qinfo->mask_bits & QETH_QARP_STRIP_ENTRIES)
+		qdata->reply_bits |= QETH_QARP_STRIP_ENTRIES;
+	memcpy(qinfo->udata + QETH_QARP_MASK_OFFSET, &qdata->reply_bits, 2);
+	QETH_CARD_TEXT_(card, 4, "rc%i", 0);
+	return 0;
+out_error:
+	i = 0;
+	memcpy(qinfo->udata, &i, 4);
+	return 0;
+}
+
+static int qeth_l3_send_ipa_arp_cmd(struct qeth_card *card,
+		struct qeth_cmd_buffer *iob, int len,
+		int (*reply_cb)(struct qeth_card *, struct qeth_reply *,
+			unsigned long),
+		void *reply_param)
+{
+	QETH_CARD_TEXT(card, 4, "sendarp");
+
+	memcpy(iob->data, IPA_PDU_HEADER, IPA_PDU_HEADER_SIZE);
+	memcpy(QETH_IPA_CMD_DEST_ADDR(iob->data),
+	       &card->token.ulp_connection_r, QETH_MPC_TOKEN_LENGTH);
+	return qeth_send_control_data(card, IPA_PDU_HEADER_SIZE + len, iob,
+				      reply_cb, reply_param);
+}
+
+static int qeth_l3_query_arp_cache_info(struct qeth_card *card,
+	enum qeth_prot_versions prot,
+	struct qeth_arp_query_info *qinfo)
+{
+	struct qeth_cmd_buffer *iob;
+	struct qeth_ipa_cmd *cmd;
+	int tmp;
+	int rc;
+
+	QETH_CARD_TEXT_(card, 3, "qarpipv%i", prot);
+
+	iob = qeth_get_setassparms_cmd(card, IPA_ARP_PROCESSING,
+				       IPA_CMD_ASS_ARP_QUERY_INFO,
+				       sizeof(struct qeth_arp_query_data)
+						- sizeof(char),
+				       prot);
+	if (!iob)
+		return -ENOMEM;
+	cmd = __ipa_cmd(iob);
+	cmd->data.setassparms.data.query_arp.request_bits = 0x000F;
+	cmd->data.setassparms.data.query_arp.reply_bits = 0;
+	cmd->data.setassparms.data.query_arp.no_entries = 0;
+	rc = qeth_l3_send_ipa_arp_cmd(card, iob,
+			   QETH_SETASS_BASE_LEN+QETH_ARP_CMD_LEN,
+			   qeth_l3_arp_query_cb, (void *)qinfo);
+	if (rc) {
+		tmp = rc;
+		QETH_DBF_MESSAGE(2,
+			"Error while querying ARP cache on %s: %s "
+			"(0x%x/%d)\n", QETH_CARD_IFNAME(card),
+			qeth_l3_arp_get_error_cause(&rc), tmp, tmp);
+	}
+
+	return rc;
+}
+
+static int qeth_l3_arp_query(struct qeth_card *card, char __user *udata)
+{
+	struct qeth_arp_query_info qinfo = {0, };
+	int rc;
+
+	QETH_CARD_TEXT(card, 3, "arpquery");
+
+	if (!qeth_is_supported(card, /*IPA_QUERY_ARP_ADDR_INFO*/
+			       IPA_ARP_PROCESSING)) {
+		QETH_CARD_TEXT(card, 3, "arpqnsup");
+		rc = -EOPNOTSUPP;
+		goto out;
+	}
+	/* get size of userspace buffer and mask_bits -> 6 bytes */
+	if (copy_from_user(&qinfo, udata, 6)) {
+		rc = -EFAULT;
+		goto out;
+	}
+	qinfo.udata = kzalloc(qinfo.udata_len, GFP_KERNEL);
+	if (!qinfo.udata) {
+		rc = -ENOMEM;
+		goto out;
+	}
+	qinfo.udata_offset = QETH_QARP_ENTRIES_OFFSET;
+	rc = qeth_l3_query_arp_cache_info(card, QETH_PROT_IPV4, &qinfo);
+	if (rc) {
+		if (copy_to_user(udata, qinfo.udata, 4))
+			rc = -EFAULT;
+		goto free_and_out;
+	}
+	if (qinfo.mask_bits & QETH_QARP_WITH_IPV6) {
+		/* fails in case of GuestLAN QDIO mode */
+		qeth_l3_query_arp_cache_info(card, QETH_PROT_IPV6, &qinfo);
+	}
+	if (copy_to_user(udata, qinfo.udata, qinfo.udata_len)) {
+		QETH_CARD_TEXT(card, 4, "qactf");
+		rc = -EFAULT;
+		goto free_and_out;
+	}
+	QETH_CARD_TEXT(card, 4, "qacts");
+
+free_and_out:
+	kfree(qinfo.udata);
+out:
+	return rc;
+}
+
+static int qeth_l3_arp_add_entry(struct qeth_card *card,
+				struct qeth_arp_cache_entry *entry)
+{
+	struct qeth_cmd_buffer *iob;
+	char buf[16];
+	int tmp;
+	int rc;
+
+	QETH_CARD_TEXT(card, 3, "arpadent");
+
+	/*
+	 * currently GuestLAN only supports the ARP assist function
+	 * IPA_CMD_ASS_ARP_QUERY_INFO, but not IPA_CMD_ASS_ARP_ADD_ENTRY;
+	 * thus we say EOPNOTSUPP for this ARP function
+	 */
+	if (card->info.guestlan)
+		return -EOPNOTSUPP;
+	if (!qeth_is_supported(card, IPA_ARP_PROCESSING))
+		return -EOPNOTSUPP;
+
+	iob = qeth_get_setassparms_cmd(card, IPA_ARP_PROCESSING,
+				       IPA_CMD_ASS_ARP_ADD_ENTRY,
+				       sizeof(struct qeth_arp_cache_entry),
+				       QETH_PROT_IPV4);
+	if (!iob)
+		return -ENOMEM;
+	rc = qeth_send_setassparms(card, iob,
+				   sizeof(struct qeth_arp_cache_entry),
+				   (unsigned long) entry,
+				   qeth_setassparms_cb, NULL);
+	if (rc) {
+		tmp = rc;
+		qeth_l3_ipaddr4_to_string((u8 *)entry->ipaddr, buf);
+		QETH_DBF_MESSAGE(2, "Could not add ARP entry for address %s "
+			"on %s: %s (0x%x/%d)\n", buf, QETH_CARD_IFNAME(card),
+			qeth_l3_arp_get_error_cause(&rc), tmp, tmp);
+	}
+	return rc;
+}
+
+static int qeth_l3_arp_remove_entry(struct qeth_card *card,
+				struct qeth_arp_cache_entry *entry)
+{
+	struct qeth_cmd_buffer *iob;
+	char buf[16] = {0, };
+	int tmp;
+	int rc;
+
+	QETH_CARD_TEXT(card, 3, "arprment");
+
+	/*
+	 * currently GuestLAN only supports the ARP assist function
+	 * IPA_CMD_ASS_ARP_QUERY_INFO, but not IPA_CMD_ASS_ARP_REMOVE_ENTRY;
+	 * thus we say EOPNOTSUPP for this ARP function
+	 */
+	if (card->info.guestlan)
+		return -EOPNOTSUPP;
+	if (!qeth_is_supported(card, IPA_ARP_PROCESSING))
+		return -EOPNOTSUPP;
+	memcpy(buf, entry, 12);
+	iob = qeth_get_setassparms_cmd(card, IPA_ARP_PROCESSING,
+				       IPA_CMD_ASS_ARP_REMOVE_ENTRY,
+				       12,
+				       QETH_PROT_IPV4);
+	if (!iob)
+		return -ENOMEM;
+	rc = qeth_send_setassparms(card, iob,
+				   12, (unsigned long)buf,
+				   qeth_setassparms_cb, NULL);
+	if (rc) {
+		tmp = rc;
+		memset(buf, 0, 16);
+		qeth_l3_ipaddr4_to_string((u8 *)entry->ipaddr, buf);
+		QETH_DBF_MESSAGE(2, "Could not delete ARP entry for address %s"
+			" on %s: %s (0x%x/%d)\n", buf, QETH_CARD_IFNAME(card),
+			qeth_l3_arp_get_error_cause(&rc), tmp, tmp);
+	}
+	return rc;
+}
+
+static int qeth_l3_arp_flush_cache(struct qeth_card *card)
+{
+	int rc;
+	int tmp;
+
+	QETH_CARD_TEXT(card, 3, "arpflush");
+
+	/*
+	 * currently GuestLAN only supports the ARP assist function
+	 * IPA_CMD_ASS_ARP_QUERY_INFO, but not IPA_CMD_ASS_ARP_FLUSH_CACHE;
+	 * thus we say EOPNOTSUPP for this ARP function
+	 */
+	if (card->info.guestlan || (card->info.type == QETH_CARD_TYPE_IQD))
+		return -EOPNOTSUPP;
+	if (!qeth_is_supported(card, IPA_ARP_PROCESSING))
+		return -EOPNOTSUPP;
+	rc = qeth_send_simple_setassparms(card, IPA_ARP_PROCESSING,
+					  IPA_CMD_ASS_ARP_FLUSH_CACHE, 0);
+	if (rc) {
+		tmp = rc;
+		QETH_DBF_MESSAGE(2, "Could not flush ARP cache on %s: %s "
+			"(0x%x/%d)\n", QETH_CARD_IFNAME(card),
+			qeth_l3_arp_get_error_cause(&rc), tmp, tmp);
+	}
+	return rc;
+}
+
+static int qeth_l3_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+	struct qeth_card *card = dev->ml_priv;
+	struct qeth_arp_cache_entry arp_entry;
+	int rc = 0;
+
+	switch (cmd) {
+	case SIOC_QETH_ARP_SET_NO_ENTRIES:
+		if (!capable(CAP_NET_ADMIN)) {
+			rc = -EPERM;
+			break;
+		}
+		rc = qeth_l3_arp_set_no_entries(card, rq->ifr_ifru.ifru_ivalue);
+		break;
+	case SIOC_QETH_ARP_QUERY_INFO:
+		if (!capable(CAP_NET_ADMIN)) {
+			rc = -EPERM;
+			break;
+		}
+		rc = qeth_l3_arp_query(card, rq->ifr_ifru.ifru_data);
+		break;
+	case SIOC_QETH_ARP_ADD_ENTRY:
+		if (!capable(CAP_NET_ADMIN)) {
+			rc = -EPERM;
+			break;
+		}
+		if (copy_from_user(&arp_entry, rq->ifr_ifru.ifru_data,
+				   sizeof(struct qeth_arp_cache_entry)))
+			rc = -EFAULT;
+		else
+			rc = qeth_l3_arp_add_entry(card, &arp_entry);
+		break;
+	case SIOC_QETH_ARP_REMOVE_ENTRY:
+		if (!capable(CAP_NET_ADMIN)) {
+			rc = -EPERM;
+			break;
+		}
+		if (copy_from_user(&arp_entry, rq->ifr_ifru.ifru_data,
+				   sizeof(struct qeth_arp_cache_entry)))
+			rc = -EFAULT;
+		else
+			rc = qeth_l3_arp_remove_entry(card, &arp_entry);
+		break;
+	case SIOC_QETH_ARP_FLUSH_CACHE:
+		if (!capable(CAP_NET_ADMIN)) {
+			rc = -EPERM;
+			break;
+		}
+		rc = qeth_l3_arp_flush_cache(card);
+		break;
+	default:
+		rc = -EOPNOTSUPP;
+	}
+	return rc;
+}
+
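+/* Classify an outgoing skb: prefer the neighbour type attached to its
+ * route, then fall back to the destination IP address, and finally to
+ * the destination MAC address. */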
+static int qeth_l3_get_cast_type(struct sk_buff *skb)
+{
+	struct neighbour *n = NULL;
+	struct dst_entry *dst;
+
+	rcu_read_lock();
+	dst = skb_dst(skb);
+	if (dst)
+		n = dst_neigh_lookup_skb(dst, skb);
+	if (n) {
+		int cast_type = n->type;
+
+		rcu_read_unlock();
+		neigh_release(n);
+		if ((cast_type == RTN_BROADCAST) ||
+		    (cast_type == RTN_MULTICAST) ||
+		    (cast_type == RTN_ANYCAST))
+			return cast_type;
+		return RTN_UNICAST;
+	}
+	rcu_read_unlock();
+
+	/* no neighbour (e.g. AF_PACKET), fall back to the target's IP address */
+	if (be16_to_cpu(skb->protocol) == ETH_P_IPV6)
+		return ipv6_addr_is_multicast(&ipv6_hdr(skb)->daddr) ?
+				RTN_MULTICAST : RTN_UNICAST;
+	else if (be16_to_cpu(skb->protocol) == ETH_P_IP)
+		return ipv4_is_multicast(ip_hdr(skb)->daddr) ?
+				RTN_MULTICAST : RTN_UNICAST;
+
+	/* ... and MAC address */
+	if (ether_addr_equal_64bits(eth_hdr(skb)->h_dest, skb->dev->broadcast))
+		return RTN_BROADCAST;
+	if (is_multicast_ether_addr(eth_hdr(skb)->h_dest))
+		return RTN_MULTICAST;
+
+	/* default to unicast */
+	return RTN_UNICAST;
+}
+
+static void qeth_l3_fill_af_iucv_hdr(struct qeth_hdr *hdr, struct sk_buff *skb,
+				     unsigned int data_len)
+{
+	char daddr[16];
+	struct af_iucv_trans_hdr *iucv_hdr;
+
+	memset(hdr, 0, sizeof(struct qeth_hdr));
+	hdr->hdr.l3.id = QETH_HEADER_TYPE_LAYER3;
+	hdr->hdr.l3.length = data_len;
+	hdr->hdr.l3.flags = QETH_HDR_IPV6 | QETH_CAST_UNICAST;
+
+	iucv_hdr = (struct af_iucv_trans_hdr *)(skb_mac_header(skb) + ETH_HLEN);
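+	/* Build a link-local IPv6 next hop (fe80::/64) whose interface
+	 * identifier carries the z/VM user ID of the AF_IUCV peer. */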
+	memset(daddr, 0, sizeof(daddr));
+	daddr[0] = 0xfe;
+	daddr[1] = 0x80;
+	memcpy(&daddr[8], iucv_hdr->destUserID, 8);
+	memcpy(hdr->hdr.l3.next_hop.ipv6_addr, daddr, 16);
+}
+
+static u8 qeth_l3_cast_type_to_flag(int cast_type)
+{
+	if (cast_type == RTN_MULTICAST)
+		return QETH_CAST_MULTICAST;
+	if (cast_type == RTN_ANYCAST)
+		return QETH_CAST_ANYCAST;
+	if (cast_type == RTN_BROADCAST)
+		return QETH_CAST_BROADCAST;
+	return QETH_CAST_UNICAST;
+}
+
+static void qeth_l3_fill_header(struct qeth_card *card, struct qeth_hdr *hdr,
+				struct sk_buff *skb, int ipv, int cast_type,
+				unsigned int data_len)
+{
+	memset(hdr, 0, sizeof(struct qeth_hdr));
+	hdr->hdr.l3.id = QETH_HEADER_TYPE_LAYER3;
+	hdr->hdr.l3.length = data_len;
+
+	/*
+	 * Fill in the VLAN information before this area is overwritten by
+	 * the next-hop IP: IPv6 frames use passthrough (the tag stays in
+	 * the frame), while IPv4 and HiperSockets set it in the QDIO
+	 * header.
+	 */
+	if (skb_vlan_tag_present(skb)) {
+		if ((ipv == 4) || (card->info.type == QETH_CARD_TYPE_IQD))
+			hdr->hdr.l3.ext_flags = QETH_HDR_EXT_VLAN_FRAME;
+		else
+			hdr->hdr.l3.ext_flags = QETH_HDR_EXT_INCLUDE_VLAN_TAG;
+		hdr->hdr.l3.vlan_id = skb_vlan_tag_get(skb);
+	}
+
+	if (!skb_is_gso(skb) && skb->ip_summed == CHECKSUM_PARTIAL) {
+		qeth_tx_csum(skb, &hdr->hdr.l3.ext_flags, ipv);
+		if (card->options.performance_stats)
+			card->perf_stats.tx_csum++;
+	}
+
+	/* OSA only: */
+	if (!ipv) {
+		hdr->hdr.l3.flags = QETH_HDR_PASSTHRU;
+		if (ether_addr_equal_64bits(eth_hdr(skb)->h_dest,
+					    skb->dev->broadcast))
+			hdr->hdr.l3.flags |= QETH_CAST_BROADCAST;
+		else
+			hdr->hdr.l3.flags |= (cast_type == RTN_MULTICAST) ?
+				QETH_CAST_MULTICAST : QETH_CAST_UNICAST;
+		return;
+	}
+
+	hdr->hdr.l3.flags = qeth_l3_cast_type_to_flag(cast_type);
+	rcu_read_lock();
+	if (ipv == 4) {
+		struct rtable *rt = skb_rtable(skb);
+
+		*((__be32 *) &hdr->hdr.l3.next_hop.ipv4.addr) = (rt) ?
+				rt_nexthop(rt, ip_hdr(skb)->daddr) :
+				ip_hdr(skb)->daddr;
+	} else {
+		/* IPv6 */
+		const struct rt6_info *rt = skb_rt6_info(skb);
+		const struct in6_addr *next_hop;
+
+		if (rt && !ipv6_addr_any(&rt->rt6i_gateway))
+			next_hop = &rt->rt6i_gateway;
+		else
+			next_hop = &ipv6_hdr(skb)->daddr;
+		memcpy(hdr->hdr.l3.next_hop.ipv6_addr, next_hop, 16);
+
+		hdr->hdr.l3.flags |= QETH_HDR_IPV6;
+		if (card->info.type != QETH_CARD_TYPE_IQD)
+			hdr->hdr.l3.flags |= QETH_HDR_PASSTHRU;
+	}
+	rcu_read_unlock();
+}
+
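+/* Prepare the TSO variant of the qeth header: fill in the fixed extension
+ * fields, prime the TCP checksum with the pseudo-header sum (length 0),
+ * and clear the length fields so the adapter can complete them for each
+ * segment. */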
+static void qeth_tso_fill_header(struct qeth_card *card,
+		struct qeth_hdr *qhdr, struct sk_buff *skb)
+{
+	struct qeth_hdr_tso *hdr = (struct qeth_hdr_tso *)qhdr;
+	struct tcphdr *tcph = tcp_hdr(skb);
+	struct iphdr *iph = ip_hdr(skb);
+	struct ipv6hdr *ip6h = ipv6_hdr(skb);
+
+	/* fix header to TSO values */
+	hdr->hdr.hdr.l3.id = QETH_HEADER_TYPE_TSO;
+	/* set values that are fixed for the first approach */
+	hdr->ext.hdr_tot_len = (__u16) sizeof(struct qeth_hdr_ext_tso);
+	hdr->ext.imb_hdr_no  = 1;
+	hdr->ext.hdr_type    = 1;
+	hdr->ext.hdr_version = 1;
+	hdr->ext.hdr_len     = 28;
+	/* insert non-fixed values */
+	hdr->ext.mss = skb_shinfo(skb)->gso_size;
+	hdr->ext.dg_hdr_len = (__u16)(ip_hdrlen(skb) + tcp_hdrlen(skb));
+	hdr->ext.payload_len = (__u16)(skb->len - hdr->ext.dg_hdr_len -
+				       sizeof(struct qeth_hdr_tso));
+	tcph->check = 0;
+	if (be16_to_cpu(skb->protocol) == ETH_P_IPV6) {
+		ip6h->payload_len = 0;
+		tcph->check = ~csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
+					       0, IPPROTO_TCP, 0);
+	} else {
+		/* OSA wants us to set these values */
+		tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
+					 0, IPPROTO_TCP, 0);
+		iph->tot_len = 0;
+		iph->check = 0;
+	}
+}
+
+/**
+ * qeth_l3_get_elements_no_tso() - find number of SBALEs for skb data for tso
+ * @card:			   qeth card structure, to check max. elems.
+ * @skb:			   SKB address
+ * @extra_elems:		   extra elems needed, to check against max.
+ *
+ * Returns the number of pages, and thus QDIO buffer elements, needed to cover
+ * skb data, including linear part and fragments, but excluding TCP header.
+ * (Exclusion of TCP header distinguishes it from qeth_get_elements_no().)
+ * Checks if the result plus extra_elems fits under the limit for the card.
+ * Returns 0 if it does not.
+ * Note: extra_elems is not included in the returned result.
+ */
+static int qeth_l3_get_elements_no_tso(struct qeth_card *card,
+			struct sk_buff *skb, int extra_elems)
+{
+	addr_t start = (addr_t)tcp_hdr(skb) + tcp_hdrlen(skb);
+	addr_t end = (addr_t)skb->data + skb_headlen(skb);
+	int elements = qeth_get_elements_for_frags(skb);
+
+	if (start != end)
+		elements += qeth_get_elements_for_range(start, end);
+
+	if ((elements + extra_elems) > QETH_MAX_BUFFER_ELEMENTS(card)) {
+		QETH_DBF_MESSAGE(2,
+	"Invalid size of TSO IP packet (Number=%d / Length=%d). Discarded.\n",
+				elements + extra_elems, skb->len);
+		return 0;
+	}
+	return elements;
+}
+
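+/* Fast TX path, used for IQD devices and for non-GSO IPv4 on OSA: the
+ * qeth header re-uses the L2 header area in place where possible, and on
+ * -EBUSY the original Ethernet header is restored so the stack can
+ * requeue the skb. */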
+static int qeth_l3_xmit_offload(struct qeth_card *card, struct sk_buff *skb,
+				struct qeth_qdio_out_q *queue, int ipv,
+				int cast_type)
+{
+	const unsigned int hw_hdr_len = sizeof(struct qeth_hdr);
+	unsigned int frame_len, elements;
+	unsigned char eth_hdr[ETH_HLEN];
+	struct qeth_hdr *hdr = NULL;
+	unsigned int hd_len = 0;
+	int push_len, rc;
+	bool is_sg;
+
+	/* re-use the L2 header area for the HW header: */
+	rc = skb_cow_head(skb, hw_hdr_len - ETH_HLEN);
+	if (rc)
+		return rc;
+	skb_copy_from_linear_data(skb, eth_hdr, ETH_HLEN);
+	skb_pull(skb, ETH_HLEN);
+	frame_len = skb->len;
+
+	push_len = qeth_add_hw_header(card, skb, &hdr, hw_hdr_len, 0,
+				      &elements);
+	if (push_len < 0)
+		return push_len;
+	if (!push_len) {
+		/* hdr was allocated separately, not pushed into skb->data */
+		hd_len = hw_hdr_len;
+	}
+
+	if (skb->protocol == htons(ETH_P_AF_IUCV))
+		qeth_l3_fill_af_iucv_hdr(hdr, skb, frame_len);
+	else
+		qeth_l3_fill_header(card, hdr, skb, ipv, cast_type, frame_len);
+
+	is_sg = skb_is_nonlinear(skb);
+	if (IS_IQD(card)) {
+		rc = qeth_do_send_packet_fast(queue, skb, hdr, 0, hd_len);
+	} else {
+		/* TODO: drop skb_orphan() once TX completion is fast enough */
+		skb_orphan(skb);
+		rc = qeth_do_send_packet(card, queue, skb, hdr, 0, hd_len,
+					 elements);
+	}
+
+	if (!rc) {
+		if (card->options.performance_stats) {
+			card->perf_stats.buf_elements_sent += elements;
+			if (is_sg)
+				card->perf_stats.sg_skbs_sent++;
+		}
+	} else {
+		if (!push_len)
+			kmem_cache_free(qeth_core_header_cache, hdr);
+		if (rc == -EBUSY) {
+			/* roll back to ETH header */
+			skb_pull(skb, push_len);
+			skb_push(skb, ETH_HLEN);
+			skb_copy_to_linear_data(skb, eth_hdr, ETH_HLEN);
+		}
+	}
+	return rc;
+}
+
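+/* Slow TX path (TSO, and IPv6 on OSA): works on a headroom-expanded copy
+ * of the skb, pushes a (TSO) qeth header into the linear data, and
+ * linearizes the skb when it would need more buffer elements than a
+ * single transfer allows. */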
+static int qeth_l3_xmit(struct qeth_card *card, struct sk_buff *skb,
+			struct qeth_qdio_out_q *queue, int ipv, int cast_type)
+{
+	int elements, len, rc;
+	__be16 *tag;
+	struct qeth_hdr *hdr = NULL;
+	int hdr_elements = 0;
+	struct sk_buff *new_skb = NULL;
+	int tx_bytes = skb->len;
+	unsigned int hd_len;
+	bool use_tso, is_sg;
+
+	/* Ignore segment size from skb_is_gso(), 1 page is always used. */
+	use_tso = skb_is_gso(skb) &&
+		  (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4);
+
+	/* create a clone with writeable headroom */
+	new_skb = skb_realloc_headroom(skb, sizeof(struct qeth_hdr_tso) +
+					    VLAN_HLEN);
+	if (!new_skb)
+		return -ENOMEM;
+
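+	/* IPv4 frames lose their Ethernet header entirely; for other
+	 * protocols with a VLAN tag, make 4 bytes of room, shift the two
+	 * MAC addresses up and write the 802.1Q tag into the gap by hand. */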
+	if (ipv == 4) {
+		skb_pull(new_skb, ETH_HLEN);
+	} else if (skb_vlan_tag_present(new_skb)) {
+		skb_push(new_skb, VLAN_HLEN);
+		skb_copy_to_linear_data(new_skb, new_skb->data + 4, 4);
+		skb_copy_to_linear_data_offset(new_skb, 4,
+					       new_skb->data + 8, 4);
+		skb_copy_to_linear_data_offset(new_skb, 8,
+					       new_skb->data + 12, 4);
+		tag = (__be16 *)(new_skb->data + 12);
+		*tag = cpu_to_be16(ETH_P_8021Q);
+		*(tag + 1) = cpu_to_be16(skb_vlan_tag_get(new_skb));
+	}
+
+	/* work around a hardware limitation: as long as we do not have
+	 * SBAL chaining we cannot send long frag lists
+	 */
+	if ((use_tso && !qeth_l3_get_elements_no_tso(card, new_skb, 1)) ||
+	    (!use_tso && !qeth_get_elements_no(card, new_skb, 0, 0))) {
+		rc = skb_linearize(new_skb);
+
+		if (card->options.performance_stats) {
+			if (rc)
+				card->perf_stats.tx_linfail++;
+			else
+				card->perf_stats.tx_lin++;
+		}
+		if (rc)
+			goto out;
+	}
+
+	if (use_tso) {
+		hdr = skb_push(new_skb, sizeof(struct qeth_hdr_tso));
+		memset(hdr, 0, sizeof(struct qeth_hdr_tso));
+		qeth_l3_fill_header(card, hdr, new_skb, ipv, cast_type,
+				    new_skb->len - sizeof(struct qeth_hdr_tso));
+		qeth_tso_fill_header(card, hdr, new_skb);
+		hdr_elements++;
+	} else {
+		hdr = skb_push(new_skb, sizeof(struct qeth_hdr));
+		qeth_l3_fill_header(card, hdr, new_skb, ipv, cast_type,
+				    new_skb->len - sizeof(struct qeth_hdr));
+	}
+
+	elements = use_tso ?
+		   qeth_l3_get_elements_no_tso(card, new_skb, hdr_elements) :
+		   qeth_get_elements_no(card, new_skb, hdr_elements, 0);
+	if (!elements) {
+		rc = -E2BIG;
+		goto out;
+	}
+	elements += hdr_elements;
+
+	if (use_tso) {
+		hd_len = sizeof(struct qeth_hdr_tso) +
+			 ip_hdrlen(new_skb) + tcp_hdrlen(new_skb);
+		len = hd_len;
+	} else {
+		hd_len = 0;
+		len = sizeof(struct qeth_hdr_layer3);
+	}
+
+	if (qeth_hdr_chk_and_bounce(new_skb, &hdr, len)) {
+		rc = -EINVAL;
+		goto out;
+	}
+
+	is_sg = skb_is_nonlinear(new_skb);
+	rc = qeth_do_send_packet(card, queue, new_skb, hdr, hd_len, hd_len,
+				 elements);
+out:
+	if (!rc) {
+		if (new_skb != skb)
+			dev_kfree_skb_any(skb);
+		if (card->options.performance_stats) {
+			card->perf_stats.buf_elements_sent += elements;
+			if (is_sg)
+				card->perf_stats.sg_skbs_sent++;
+			if (use_tso) {
+				card->perf_stats.large_send_bytes += tx_bytes;
+				card->perf_stats.large_send_cnt++;
+			}
+		}
+	} else {
+		if (new_skb != skb)
+			dev_kfree_skb_any(new_skb);
+	}
+	return rc;
+}
+
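+/* ndo_start_xmit: pick the TX implementation. IQD devices and non-GSO
+ * IPv4 take qeth_l3_xmit_offload(); everything else goes through
+ * qeth_l3_xmit(). -EBUSY is the only error reported as NETDEV_TX_BUSY;
+ * all other failures drop the skb. */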
+static netdev_tx_t qeth_l3_hard_start_xmit(struct sk_buff *skb,
+					   struct net_device *dev)
+{
+	int cast_type = qeth_l3_get_cast_type(skb);
+	struct qeth_card *card = dev->ml_priv;
+	int ipv = qeth_get_ip_version(skb);
+	struct qeth_qdio_out_q *queue;
+	int tx_bytes = skb->len;
+	int rc;
+
+	if (IS_IQD(card)) {
+		if (card->options.sniffer)
+			goto tx_drop;
+		if ((card->options.cq != QETH_CQ_ENABLED && !ipv) ||
+		    (card->options.cq == QETH_CQ_ENABLED &&
+		     skb->protocol != htons(ETH_P_AF_IUCV)))
+			goto tx_drop;
+	}
+
+	if (card->state != CARD_STATE_UP || !card->lan_online) {
+		card->stats.tx_carrier_errors++;
+		goto tx_drop;
+	}
+
+	if (cast_type == RTN_BROADCAST && !card->info.broadcast_capable)
+		goto tx_drop;
+
+	queue = qeth_get_tx_queue(card, skb, ipv, cast_type);
+
+	if (card->options.performance_stats) {
+		card->perf_stats.outbound_cnt++;
+		card->perf_stats.outbound_start_time = qeth_get_micros();
+	}
+	netif_stop_queue(dev);
+
+	if (IS_IQD(card) || (!skb_is_gso(skb) && ipv == 4))
+		rc = qeth_l3_xmit_offload(card, skb, queue, ipv, cast_type);
+	else
+		rc = qeth_l3_xmit(card, skb, queue, ipv, cast_type);
+
+	if (!rc) {
+		card->stats.tx_packets++;
+		card->stats.tx_bytes += tx_bytes;
+		if (card->options.performance_stats)
+			card->perf_stats.outbound_time += qeth_get_micros() -
+				card->perf_stats.outbound_start_time;
+		netif_wake_queue(dev);
+		return NETDEV_TX_OK;
+	} else if (rc == -EBUSY) {
+		return NETDEV_TX_BUSY;
+	} /* else fall through */
+
+tx_drop:
+	card->stats.tx_dropped++;
+	card->stats.tx_errors++;
+	dev_kfree_skb_any(skb);
+	netif_wake_queue(dev);
+	return NETDEV_TX_OK;
+}
+
+static int __qeth_l3_open(struct net_device *dev)
+{
+	struct qeth_card *card = dev->ml_priv;
+	int rc = 0;
+
+	QETH_CARD_TEXT(card, 4, "qethopen");
+	if (card->state == CARD_STATE_UP)
+		return rc;
+	if (card->state != CARD_STATE_SOFTSETUP)
+		return -ENODEV;
+	card->data.state = CH_STATE_UP;
+	card->state = CARD_STATE_UP;
+	netif_start_queue(dev);
+
+	if (qdio_stop_irq(card->data.ccwdev, 0) >= 0) {
+		napi_enable(&card->napi);
+		napi_schedule(&card->napi);
+	} else
+		rc = -EIO;
+	return rc;
+}
+
+static int qeth_l3_open(struct net_device *dev)
+{
+	struct qeth_card *card = dev->ml_priv;
+
+	QETH_CARD_TEXT(card, 5, "qethope_");
+	if (qeth_wait_for_threads(card, QETH_RECOVER_THREAD)) {
+		QETH_CARD_TEXT(card, 3, "openREC");
+		return -ERESTARTSYS;
+	}
+	return __qeth_l3_open(dev);
+}
+
+static int qeth_l3_stop(struct net_device *dev)
+{
+	struct qeth_card *card = dev->ml_priv;
+
+	QETH_CARD_TEXT(card, 4, "qethstop");
+	netif_tx_disable(dev);
+	if (card->state == CARD_STATE_UP) {
+		card->state = CARD_STATE_SOFTSETUP;
+		napi_disable(&card->napi);
+	}
+	return 0;
+}
+
+static const struct ethtool_ops qeth_l3_ethtool_ops = {
+	.get_link = ethtool_op_get_link,
+	.get_strings = qeth_core_get_strings,
+	.get_ethtool_stats = qeth_core_get_ethtool_stats,
+	.get_sset_count = qeth_core_get_sset_count,
+	.get_drvinfo = qeth_core_get_drvinfo,
+	.get_link_ksettings = qeth_core_ethtool_get_link_ksettings,
+};
+
+/*
+ * We need NOARP for IPv4, but we want neighbor solicitation for IPv6.
+ * Setting NOARP on the netdevice is not an option, because that also turns
+ * off neighbor solicitation. So for IPv4 we install a neighbor_setup
+ * function instead: we don't want ARP resolution, but we do want the hard
+ * header, so packet sockets (e.g. tcpdump) keep working.
+ */
+static int qeth_l3_neigh_setup_noarp(struct neighbour *n)
+{
+	n->nud_state = NUD_NOARP;
+	memcpy(n->ha, "FAKELL", 6);
+	n->output = n->ops->connected_output;
+	return 0;
+}
+
+static int
+qeth_l3_neigh_setup(struct net_device *dev, struct neigh_parms *np)
+{
+	if (np->tbl->family == AF_INET)
+		np->neigh_setup = qeth_l3_neigh_setup_noarp;
+
+	return 0;
+}
+
+static const struct net_device_ops qeth_l3_netdev_ops = {
+	.ndo_open		= qeth_l3_open,
+	.ndo_stop		= qeth_l3_stop,
+	.ndo_get_stats		= qeth_get_stats,
+	.ndo_start_xmit		= qeth_l3_hard_start_xmit,
+	.ndo_validate_addr	= eth_validate_addr,
+	.ndo_set_rx_mode	= qeth_l3_set_rx_mode,
+	.ndo_do_ioctl		= qeth_do_ioctl,
+	.ndo_fix_features	= qeth_fix_features,
+	.ndo_set_features	= qeth_set_features,
+	.ndo_vlan_rx_add_vid	= qeth_l3_vlan_rx_add_vid,
+	.ndo_vlan_rx_kill_vid   = qeth_l3_vlan_rx_kill_vid,
+	.ndo_tx_timeout		= qeth_tx_timeout,
+};
+
+static const struct net_device_ops qeth_l3_osa_netdev_ops = {
+	.ndo_open		= qeth_l3_open,
+	.ndo_stop		= qeth_l3_stop,
+	.ndo_get_stats		= qeth_get_stats,
+	.ndo_start_xmit		= qeth_l3_hard_start_xmit,
+	.ndo_features_check	= qeth_features_check,
+	.ndo_validate_addr	= eth_validate_addr,
+	.ndo_set_rx_mode	= qeth_l3_set_rx_mode,
+	.ndo_do_ioctl		= qeth_do_ioctl,
+	.ndo_fix_features	= qeth_fix_features,
+	.ndo_set_features	= qeth_set_features,
+	.ndo_vlan_rx_add_vid	= qeth_l3_vlan_rx_add_vid,
+	.ndo_vlan_rx_kill_vid   = qeth_l3_vlan_rx_kill_vid,
+	.ndo_tx_timeout		= qeth_tx_timeout,
+	.ndo_neigh_setup	= qeth_l3_neigh_setup,
+};
+
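+/* One-time netdev setup: select the ndo set and feature flags by card
+ * type (OSD/OSX with checksum/TSO offloads vs. NOARP HiperSockets), then
+ * register the net device. */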
+static int qeth_l3_setup_netdev(struct qeth_card *card)
+{
+	int rc;
+
+	if (qeth_netdev_is_registered(card->dev))
+		return 0;
+
+	if (card->info.type == QETH_CARD_TYPE_OSD ||
+	    card->info.type == QETH_CARD_TYPE_OSX) {
+		if ((card->info.link_type == QETH_LINK_TYPE_LANE_TR) ||
+		    (card->info.link_type == QETH_LINK_TYPE_HSTR)) {
+			pr_info("qeth_l3: ignoring TR device\n");
+			return -ENODEV;
+		}
+
+		card->dev->netdev_ops = &qeth_l3_osa_netdev_ops;
+
+		/* IPv6 address autoconfiguration */
+		qeth_l3_get_unique_id(card);
+		if (!(card->info.unique_id & UNIQUE_ID_NOT_BY_CARD))
+			card->dev->dev_id = card->info.unique_id & 0xffff;
+
+		if (!card->info.guestlan) {
+			card->dev->features |= NETIF_F_SG;
+			card->dev->hw_features |= NETIF_F_TSO |
+				NETIF_F_RXCSUM | NETIF_F_IP_CSUM;
+			card->dev->vlan_features |= NETIF_F_TSO |
+				NETIF_F_RXCSUM | NETIF_F_IP_CSUM;
+		}
+
+		if (qeth_is_supported6(card, IPA_OUTBOUND_CHECKSUM_V6)) {
+			card->dev->hw_features |= NETIF_F_IPV6_CSUM;
+			card->dev->vlan_features |= NETIF_F_IPV6_CSUM;
+		}
+	} else if (card->info.type == QETH_CARD_TYPE_IQD) {
+		card->dev->flags |= IFF_NOARP;
+		card->dev->netdev_ops = &qeth_l3_netdev_ops;
+
+		rc = qeth_l3_iqd_read_initial_mac(card);
+		if (rc)
+			goto out;
+
+		if (card->options.hsuid[0])
+			memcpy(card->dev->perm_addr, card->options.hsuid, 9);
+	} else
+		return -ENODEV;
+
+	card->dev->ethtool_ops = &qeth_l3_ethtool_ops;
+	card->dev->needed_headroom = sizeof(struct qeth_hdr) - ETH_HLEN;
+	card->dev->features |=	NETIF_F_HW_VLAN_CTAG_TX |
+				NETIF_F_HW_VLAN_CTAG_RX |
+				NETIF_F_HW_VLAN_CTAG_FILTER;
+
+	netif_keep_dst(card->dev);
+	if (card->dev->hw_features & NETIF_F_TSO)
+		netif_set_gso_max_size(card->dev,
+				       PAGE_SIZE * (QETH_MAX_BUFFER_ELEMENTS(card) - 1));
+
+	netif_napi_add(card->dev, &card->napi, qeth_poll, QETH_NAPI_WEIGHT);
+	rc = register_netdev(card->dev);
+out:
+	if (rc)
+		card->dev->netdev_ops = NULL;
+	return rc;
+}
+
+static const struct device_type qeth_l3_devtype = {
+	.name = "qeth_layer3",
+	.groups = qeth_l3_attr_groups,
+};
+
+static int qeth_l3_probe_device(struct ccwgroup_device *gdev)
+{
+	struct qeth_card *card = dev_get_drvdata(&gdev->dev);
+	int rc;
+
+	if (gdev->dev.type == &qeth_generic_devtype) {
+		rc = qeth_l3_create_device_attributes(&gdev->dev);
+		if (rc)
+			return rc;
+	}
+	hash_init(card->ip_htable);
+	hash_init(card->ip_mc_htable);
+	card->options.layer2 = 0;
+	card->info.hwtrap = 0;
+	return 0;
+}
+
+static void qeth_l3_remove_device(struct ccwgroup_device *cgdev)
+{
+	struct qeth_card *card = dev_get_drvdata(&cgdev->dev);
+
+	if (cgdev->dev.type == &qeth_generic_devtype)
+		qeth_l3_remove_device_attributes(&cgdev->dev);
+
+	qeth_set_allowed_threads(card, 0, 1);
+	wait_event(card->wait_q, qeth_threads_running(card, 0xffffffff) == 0);
+
+	if (cgdev->state == CCWGROUP_ONLINE)
+		qeth_l3_set_offline(cgdev);
+
+	if (qeth_netdev_is_registered(card->dev))
+		unregister_netdev(card->dev);
+	qeth_l3_clear_ip_htable(card, 0);
+	qeth_l3_clear_ipato_list(card);
+}
+
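+/* Bring the device online: hardware setup, netdev registration, trap
+ * arming, adapter parameters, IP assists and routing (unless sniffing),
+ * QDIO queue initialization, and finally recovery of the stored IP
+ * addresses. */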
+static int __qeth_l3_set_online(struct ccwgroup_device *gdev, int recovery_mode)
+{
+	struct qeth_card *card = dev_get_drvdata(&gdev->dev);
+	int rc = 0;
+	enum qeth_card_states recover_flag;
+
+	mutex_lock(&card->discipline_mutex);
+	mutex_lock(&card->conf_mutex);
+	QETH_DBF_TEXT(SETUP, 2, "setonlin");
+	QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *));
+
+	recover_flag = card->state;
+	rc = qeth_core_hardsetup_card(card);
+	if (rc) {
+		QETH_DBF_TEXT_(SETUP, 2, "2err%04x", rc);
+		rc = -ENODEV;
+		goto out_remove;
+	}
+
+	rc = qeth_l3_setup_netdev(card);
+	if (rc)
+		goto out_remove;
+
+	if (qeth_is_diagass_supported(card, QETH_DIAGS_CMD_TRAP)) {
+		if (card->info.hwtrap &&
+		    qeth_hw_trap(card, QETH_DIAGS_TRAP_ARM))
+			card->info.hwtrap = 0;
+	} else
+		card->info.hwtrap = 0;
+
+	card->state = CARD_STATE_HARDSETUP;
+	qeth_print_status_message(card);
+
+	/* softsetup */
+	QETH_DBF_TEXT(SETUP, 2, "softsetp");
+
+	rc = qeth_l3_setadapter_parms(card);
+	if (rc)
+		QETH_DBF_TEXT_(SETUP, 2, "2err%04x", rc);
+	if (!card->options.sniffer) {
+		rc = qeth_l3_start_ipassists(card);
+		if (rc) {
+			QETH_DBF_TEXT_(SETUP, 2, "3err%d", rc);
+			goto out_remove;
+		}
+		rc = qeth_l3_setrouting_v4(card);
+		if (rc)
+			QETH_DBF_TEXT_(SETUP, 2, "4err%04x", rc);
+		rc = qeth_l3_setrouting_v6(card);
+		if (rc)
+			QETH_DBF_TEXT_(SETUP, 2, "5err%04x", rc);
+	}
+	netif_tx_disable(card->dev);
+
+	rc = qeth_init_qdio_queues(card);
+	if (rc) {
+		QETH_DBF_TEXT_(SETUP, 2, "6err%d", rc);
+		rc = -ENODEV;
+		goto out_remove;
+	}
+	card->state = CARD_STATE_SOFTSETUP;
+
+	qeth_set_allowed_threads(card, 0xffffffff, 0);
+	qeth_l3_recover_ip(card);
+	if (card->lan_online)
+		netif_carrier_on(card->dev);
+	else
+		netif_carrier_off(card->dev);
+
+	qeth_enable_hw_features(card->dev);
+	if (recover_flag == CARD_STATE_RECOVER) {
+		rtnl_lock();
+		if (recovery_mode) {
+			__qeth_l3_open(card->dev);
+			qeth_l3_set_rx_mode(card->dev);
+		} else {
+			dev_open(card->dev);
+		}
+		rtnl_unlock();
+	}
+	qeth_trace_features(card);
+	/* let user_space know that device is online */
+	kobject_uevent(&gdev->dev.kobj, KOBJ_CHANGE);
+	mutex_unlock(&card->conf_mutex);
+	mutex_unlock(&card->discipline_mutex);
+	return 0;
+out_remove:
+	qeth_l3_stop_card(card, 0);
+	ccw_device_set_offline(CARD_DDEV(card));
+	ccw_device_set_offline(CARD_WDEV(card));
+	ccw_device_set_offline(CARD_RDEV(card));
+	qdio_free(CARD_DDEV(card));
+	if (recover_flag == CARD_STATE_RECOVER)
+		card->state = CARD_STATE_RECOVER;
+	else
+		card->state = CARD_STATE_DOWN;
+	mutex_unlock(&card->conf_mutex);
+	mutex_unlock(&card->discipline_mutex);
+	return rc;
+}
+
+static int qeth_l3_set_online(struct ccwgroup_device *gdev)
+{
+	return __qeth_l3_set_online(gdev, 0);
+}
+
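+/* Take the device offline: disarm the hardware trap, stop the card,
+ * send NETDEV_REBOOT when a completion queue is active (so that e.g.
+ * AF_IUCV sockets can react), and set the CCW devices offline. */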
+static int __qeth_l3_set_offline(struct ccwgroup_device *cgdev,
+			int recovery_mode)
+{
+	struct qeth_card *card = dev_get_drvdata(&cgdev->dev);
+	int rc = 0, rc2 = 0, rc3 = 0;
+	enum qeth_card_states recover_flag;
+
+	mutex_lock(&card->discipline_mutex);
+	mutex_lock(&card->conf_mutex);
+	QETH_DBF_TEXT(SETUP, 3, "setoffl");
+	QETH_DBF_HEX(SETUP, 3, &card, sizeof(void *));
+
+	netif_carrier_off(card->dev);
+	recover_flag = card->state;
+	if ((!recovery_mode && card->info.hwtrap) || card->info.hwtrap == 2) {
+		qeth_hw_trap(card, QETH_DIAGS_TRAP_DISARM);
+		card->info.hwtrap = 1;
+	}
+	qeth_l3_stop_card(card, recovery_mode);
+	if ((card->options.cq == QETH_CQ_ENABLED) && card->dev) {
+		rtnl_lock();
+		call_netdevice_notifiers(NETDEV_REBOOT, card->dev);
+		rtnl_unlock();
+	}
+	rc  = ccw_device_set_offline(CARD_DDEV(card));
+	rc2 = ccw_device_set_offline(CARD_WDEV(card));
+	rc3 = ccw_device_set_offline(CARD_RDEV(card));
+	if (!rc)
+		rc = (rc2) ? rc2 : rc3;
+	if (rc)
+		QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc);
+	qdio_free(CARD_DDEV(card));
+	if (recover_flag == CARD_STATE_UP)
+		card->state = CARD_STATE_RECOVER;
+	/* let user_space know that device is offline */
+	kobject_uevent(&cgdev->dev.kobj, KOBJ_CHANGE);
+	mutex_unlock(&card->conf_mutex);
+	mutex_unlock(&card->discipline_mutex);
+	return 0;
+}
+
+static int qeth_l3_set_offline(struct ccwgroup_device *cgdev)
+{
+	return __qeth_l3_set_offline(cgdev, 0);
+}
+
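+/* Recovery thread body: take the device offline and back online in
+ * recovery mode; if that fails, the device is closed for good. */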
+static int qeth_l3_recover(void *ptr)
+{
+	struct qeth_card *card;
+	int rc = 0;
+
+	card = (struct qeth_card *) ptr;
+	QETH_CARD_TEXT(card, 2, "recover1");
+	QETH_CARD_HEX(card, 2, &card, sizeof(void *));
+	if (!qeth_do_run_thread(card, QETH_RECOVER_THREAD))
+		return 0;
+	QETH_CARD_TEXT(card, 2, "recover2");
+	dev_warn(&card->gdev->dev,
+		"A recovery process has been started for the device\n");
+	qeth_set_recovery_task(card);
+	__qeth_l3_set_offline(card->gdev, 1);
+	rc = __qeth_l3_set_online(card->gdev, 1);
+	if (!rc)
+		dev_info(&card->gdev->dev,
+			"Device successfully recovered!\n");
+	else {
+		qeth_close_dev(card);
+		dev_warn(&card->gdev->dev, "The qeth device driver "
+				"failed to recover an error on the device\n");
+	}
+	qeth_clear_recovery_task(card);
+	qeth_clear_thread_start_bit(card, QETH_RECOVER_THREAD);
+	qeth_clear_thread_running_bit(card, QETH_RECOVER_THREAD);
+	return 0;
+}
+
+static int qeth_l3_pm_suspend(struct ccwgroup_device *gdev)
+{
+	struct qeth_card *card = dev_get_drvdata(&gdev->dev);
+
+	netif_device_detach(card->dev);
+	qeth_set_allowed_threads(card, 0, 1);
+	wait_event(card->wait_q, qeth_threads_running(card, 0xffffffff) == 0);
+	if (gdev->state == CCWGROUP_OFFLINE)
+		return 0;
+	if (card->state == CARD_STATE_UP) {
+		if (card->info.hwtrap)
+			qeth_hw_trap(card, QETH_DIAGS_TRAP_DISARM);
+		__qeth_l3_set_offline(card->gdev, 1);
+	} else
+		__qeth_l3_set_offline(card->gdev, 0);
+	return 0;
+}
+
+static int qeth_l3_pm_resume(struct ccwgroup_device *gdev)
+{
+	struct qeth_card *card = dev_get_drvdata(&gdev->dev);
+	int rc = 0;
+
+	if (gdev->state == CCWGROUP_OFFLINE)
+		goto out;
+
+	if (card->state == CARD_STATE_RECOVER) {
+		rc = __qeth_l3_set_online(card->gdev, 1);
+		if (rc) {
+			rtnl_lock();
+			dev_close(card->dev);
+			rtnl_unlock();
+		}
+	} else
+		rc = __qeth_l3_set_online(card->gdev, 0);
+out:
+	qeth_set_allowed_threads(card, 0xffffffff, 0);
+	netif_device_attach(card->dev);
+	if (rc)
+		dev_warn(&card->gdev->dev, "The qeth device driver "
+			"failed to recover an error on the device\n");
+	return rc;
+}
+
+/* Returns zero if the command is successfully "consumed" */
+static int qeth_l3_control_event(struct qeth_card *card,
+					struct qeth_ipa_cmd *cmd)
+{
+	return 1;
+}
+
+struct qeth_discipline qeth_l3_discipline = {
+	.devtype = &qeth_l3_devtype,
+	.process_rx_buffer = qeth_l3_process_inbound_buffer,
+	.recover = qeth_l3_recover,
+	.setup = qeth_l3_probe_device,
+	.remove = qeth_l3_remove_device,
+	.set_online = qeth_l3_set_online,
+	.set_offline = qeth_l3_set_offline,
+	.freeze = qeth_l3_pm_suspend,
+	.thaw = qeth_l3_pm_resume,
+	.restore = qeth_l3_pm_resume,
+	.do_ioctl = qeth_l3_do_ioctl,
+	.control_event_handler = qeth_l3_control_event,
+};
+EXPORT_SYMBOL_GPL(qeth_l3_discipline);
+
+static int qeth_l3_handle_ip_event(struct qeth_card *card,
+				   struct qeth_ipaddr *addr,
+				   unsigned long event)
+{
+	switch (event) {
+	case NETDEV_UP:
+		spin_lock_bh(&card->ip_lock);
+		qeth_l3_add_ip(card, addr);
+		spin_unlock_bh(&card->ip_lock);
+		return NOTIFY_OK;
+	case NETDEV_DOWN:
+		spin_lock_bh(&card->ip_lock);
+		qeth_l3_delete_ip(card, addr);
+		spin_unlock_bh(&card->ip_lock);
+		return NOTIFY_OK;
+	default:
+		return NOTIFY_DONE;
+	}
+}
+
+static struct qeth_card *qeth_l3_get_card_from_dev(struct net_device *dev)
+{
+	if (is_vlan_dev(dev))
+		dev = vlan_dev_real_dev(dev);
+	if (dev->netdev_ops == &qeth_l3_osa_netdev_ops ||
+	    dev->netdev_ops == &qeth_l3_netdev_ops)
+		return (struct qeth_card *) dev->ml_priv;
+	return NULL;
+}
+
+static int qeth_l3_ip_event(struct notifier_block *this,
+			    unsigned long event, void *ptr)
+{
+	struct in_ifaddr *ifa = (struct in_ifaddr *)ptr;
+	struct net_device *dev = ifa->ifa_dev->dev;
+	struct qeth_ipaddr addr;
+	struct qeth_card *card;
+
+	if (dev_net(dev) != &init_net)
+		return NOTIFY_DONE;
+
+	card = qeth_l3_get_card_from_dev(dev);
+	if (!card)
+		return NOTIFY_DONE;
+	QETH_CARD_TEXT(card, 3, "ipevent");
+
+	qeth_l3_init_ipaddr(&addr, QETH_IP_TYPE_NORMAL, QETH_PROT_IPV4);
+	addr.u.a4.addr = be32_to_cpu(ifa->ifa_address);
+	addr.u.a4.mask = be32_to_cpu(ifa->ifa_mask);
+
+	return qeth_l3_handle_ip_event(card, &addr, event);
+}
+
+static struct notifier_block qeth_l3_ip_notifier = {
+	.notifier_call = qeth_l3_ip_event,
+};
+
+static int qeth_l3_ip6_event(struct notifier_block *this,
+			     unsigned long event, void *ptr)
+{
+	struct inet6_ifaddr *ifa = (struct inet6_ifaddr *)ptr;
+	struct net_device *dev = ifa->idev->dev;
+	struct qeth_ipaddr addr;
+	struct qeth_card *card;
+
+	card = qeth_l3_get_card_from_dev(dev);
+	if (!card)
+		return NOTIFY_DONE;
+	QETH_CARD_TEXT(card, 3, "ip6event");
+	if (!qeth_is_supported(card, IPA_IPV6))
+		return NOTIFY_DONE;
+
+	qeth_l3_init_ipaddr(&addr, QETH_IP_TYPE_NORMAL, QETH_PROT_IPV6);
+	addr.u.a6.addr = ifa->addr;
+	addr.u.a6.pfxlen = ifa->prefix_len;
+
+	return qeth_l3_handle_ip_event(card, &addr, event);
+}
+
+static struct notifier_block qeth_l3_ip6_notifier = {
+	.notifier_call = qeth_l3_ip6_event,
+};
+
+static int qeth_l3_register_notifiers(void)
+{
+	int rc;
+
+	QETH_DBF_TEXT(SETUP, 5, "regnotif");
+	rc = register_inetaddr_notifier(&qeth_l3_ip_notifier);
+	if (rc)
+		return rc;
+	rc = register_inet6addr_notifier(&qeth_l3_ip6_notifier);
+	if (rc) {
+		unregister_inetaddr_notifier(&qeth_l3_ip_notifier);
+		return rc;
+	}
+	return 0;
+}
+
+static void qeth_l3_unregister_notifiers(void)
+{
+	QETH_DBF_TEXT(SETUP, 5, "unregnot");
+	WARN_ON(unregister_inetaddr_notifier(&qeth_l3_ip_notifier));
+	WARN_ON(unregister_inet6addr_notifier(&qeth_l3_ip6_notifier));
+}
+
+static int __init qeth_l3_init(void)
+{
+	pr_info("register layer 3 discipline\n");
+	return qeth_l3_register_notifiers();
+}
+
+static void __exit qeth_l3_exit(void)
+{
+	qeth_l3_unregister_notifiers();
+	pr_info("unregister layer 3 discipline\n");
+}
+
+module_init(qeth_l3_init);
+module_exit(qeth_l3_exit);
+MODULE_AUTHOR("Frank Blaschka <frank.blaschka@de.ibm.com>");
+MODULE_DESCRIPTION("qeth layer 3 discipline");
+MODULE_LICENSE("GPL");
diff --git a/drivers/s390/net/qeth_l3_sys.c b/drivers/s390/net/qeth_l3_sys.c
new file mode 100644
index 0000000..45ac6d8
--- /dev/null
+++ b/drivers/s390/net/qeth_l3_sys.c
@@ -0,0 +1,1007 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ *    Copyright IBM Corp. 2007
+ *    Author(s): Utz Bacher <utz.bacher@de.ibm.com>,
+ *		 Frank Pavlic <fpavlic@de.ibm.com>,
+ *		 Thomas Spatzier <tspat@de.ibm.com>,
+ *		 Frank Blaschka <frank.blaschka@de.ibm.com>
+ */
+
+#include <linux/slab.h>
+#include <asm/ebcdic.h>
+#include <linux/hashtable.h>
+#include <linux/inet.h>
+#include "qeth_l3.h"
+
+#define QETH_DEVICE_ATTR(_id, _name, _mode, _show, _store) \
+struct device_attribute dev_attr_##_id = __ATTR(_name, _mode, _show, _store)
+
+static int qeth_l3_string_to_ipaddr(const char *buf,
+				    enum qeth_prot_versions proto, u8 *addr)
+{
+	const char *end;
+
+	if ((proto == QETH_PROT_IPV4 && !in4_pton(buf, -1, addr, -1, &end)) ||
+	    (proto == QETH_PROT_IPV6 && !in6_pton(buf, -1, addr, -1, &end)))
+		return -EINVAL;
+	return 0;
+}
+
+static ssize_t qeth_l3_dev_route_show(struct qeth_card *card,
+			struct qeth_routing_info *route, char *buf)
+{
+	switch (route->type) {
+	case PRIMARY_ROUTER:
+		return sprintf(buf, "%s\n", "primary router");
+	case SECONDARY_ROUTER:
+		return sprintf(buf, "%s\n", "secondary router");
+	case MULTICAST_ROUTER:
+		if (card->info.broadcast_capable == QETH_BROADCAST_WITHOUT_ECHO)
+			return sprintf(buf, "%s\n", "multicast router+");
+		else
+			return sprintf(buf, "%s\n", "multicast router");
+	case PRIMARY_CONNECTOR:
+		if (card->info.broadcast_capable == QETH_BROADCAST_WITHOUT_ECHO)
+			return sprintf(buf, "%s\n", "primary connector+");
+		else
+			return sprintf(buf, "%s\n", "primary connector");
+	case SECONDARY_CONNECTOR:
+		if (card->info.broadcast_capable == QETH_BROADCAST_WITHOUT_ECHO)
+			return sprintf(buf, "%s\n", "secondary connector+");
+		else
+			return sprintf(buf, "%s\n", "secondary connector");
+	default:
+		return sprintf(buf, "%s\n", "no");
+	}
+}
+
+static ssize_t qeth_l3_dev_route4_show(struct device *dev,
+			struct device_attribute *attr, char *buf)
+{
+	struct qeth_card *card = dev_get_drvdata(dev);
+
+	if (!card)
+		return -EINVAL;
+
+	return qeth_l3_dev_route_show(card, &card->options.route4, buf);
+}
+
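+/* Map the sysfs keyword to a routing type and, if the card is reachable,
+ * push the new setting to the card right away; roll back on error. */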
+static ssize_t qeth_l3_dev_route_store(struct qeth_card *card,
+		struct qeth_routing_info *route, enum qeth_prot_versions prot,
+		const char *buf, size_t count)
+{
+	enum qeth_routing_types old_route_type = route->type;
+	int rc = 0;
+
+	mutex_lock(&card->conf_mutex);
+	if (sysfs_streq(buf, "no_router")) {
+		route->type = NO_ROUTER;
+	} else if (sysfs_streq(buf, "primary_connector")) {
+		route->type = PRIMARY_CONNECTOR;
+	} else if (sysfs_streq(buf, "secondary_connector")) {
+		route->type = SECONDARY_CONNECTOR;
+	} else if (sysfs_streq(buf, "primary_router")) {
+		route->type = PRIMARY_ROUTER;
+	} else if (sysfs_streq(buf, "secondary_router")) {
+		route->type = SECONDARY_ROUTER;
+	} else if (sysfs_streq(buf, "multicast_router")) {
+		route->type = MULTICAST_ROUTER;
+	} else {
+		rc = -EINVAL;
+		goto out;
+	}
+	if (qeth_card_hw_is_reachable(card) &&
+	    (old_route_type != route->type)) {
+		if (prot == QETH_PROT_IPV4)
+			rc = qeth_l3_setrouting_v4(card);
+		else if (prot == QETH_PROT_IPV6)
+			rc = qeth_l3_setrouting_v6(card);
+	}
+out:
+	if (rc)
+		route->type = old_route_type;
+	mutex_unlock(&card->conf_mutex);
+	return rc ? rc : count;
+}
+
+static ssize_t qeth_l3_dev_route4_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct qeth_card *card = dev_get_drvdata(dev);
+
+	if (!card)
+		return -EINVAL;
+
+	return qeth_l3_dev_route_store(card, &card->options.route4,
+				QETH_PROT_IPV4, buf, count);
+}
+
+static DEVICE_ATTR(route4, 0644, qeth_l3_dev_route4_show,
+			qeth_l3_dev_route4_store);
+
+static ssize_t qeth_l3_dev_route6_show(struct device *dev,
+			struct device_attribute *attr, char *buf)
+{
+	struct qeth_card *card = dev_get_drvdata(dev);
+
+	if (!card)
+		return -EINVAL;
+
+	return qeth_l3_dev_route_show(card, &card->options.route6, buf);
+}
+
+static ssize_t qeth_l3_dev_route6_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct qeth_card *card = dev_get_drvdata(dev);
+
+	if (!card)
+		return -EINVAL;
+
+	return qeth_l3_dev_route_store(card, &card->options.route6,
+				QETH_PROT_IPV6, buf, count);
+}
+
+static DEVICE_ATTR(route6, 0644, qeth_l3_dev_route6_show,
+			qeth_l3_dev_route6_store);
+
+static ssize_t qeth_l3_dev_fake_broadcast_show(struct device *dev,
+			struct device_attribute *attr, char *buf)
+{
+	struct qeth_card *card = dev_get_drvdata(dev);
+
+	if (!card)
+		return -EINVAL;
+
+	return sprintf(buf, "%i\n", card->options.fake_broadcast ? 1 : 0);
+}
+
+static ssize_t qeth_l3_dev_fake_broadcast_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct qeth_card *card = dev_get_drvdata(dev);
+	char *tmp;
+	int i, rc = 0;
+
+	if (!card)
+		return -EINVAL;
+
+	mutex_lock(&card->conf_mutex);
+	if ((card->state != CARD_STATE_DOWN) &&
+	    (card->state != CARD_STATE_RECOVER)) {
+		rc = -EPERM;
+		goto out;
+	}
+
+	i = simple_strtoul(buf, &tmp, 16);
+	if ((i == 0) || (i == 1))
+		card->options.fake_broadcast = i;
+	else
+		rc = -EINVAL;
+out:
+	mutex_unlock(&card->conf_mutex);
+	return rc ? rc : count;
+}
+
+static DEVICE_ATTR(fake_broadcast, 0644, qeth_l3_dev_fake_broadcast_show,
+		   qeth_l3_dev_fake_broadcast_store);
+
+static ssize_t qeth_l3_dev_sniffer_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct qeth_card *card = dev_get_drvdata(dev);
+
+	if (!card)
+		return -EINVAL;
+
+	return sprintf(buf, "%i\n", card->options.sniffer ? 1 : 0);
+}
+
+static ssize_t qeth_l3_dev_sniffer_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct qeth_card *card = dev_get_drvdata(dev);
+	int rc = 0;
+	unsigned long i;
+
+	if (!card)
+		return -EINVAL;
+
+	if (card->info.type != QETH_CARD_TYPE_IQD)
+		return -EPERM;
+	if (card->options.cq == QETH_CQ_ENABLED)
+		return -EPERM;
+
+	mutex_lock(&card->conf_mutex);
+	if ((card->state != CARD_STATE_DOWN) &&
+	    (card->state != CARD_STATE_RECOVER)) {
+		rc = -EPERM;
+		goto out;
+	}
+
+	rc = kstrtoul(buf, 16, &i);
+	if (rc) {
+		rc = -EINVAL;
+		goto out;
+	}
+	switch (i) {
+	case 0:
+		card->options.sniffer = i;
+		break;
+	case 1:
+		qdio_get_ssqd_desc(CARD_DDEV(card), &card->ssqd);
+		if (card->ssqd.qdioac2 & QETH_SNIFF_AVAIL) {
+			card->options.sniffer = i;
+			if (card->qdio.init_pool.buf_count !=
+					QETH_IN_BUF_COUNT_MAX)
+				qeth_realloc_buffer_pool(card,
+					QETH_IN_BUF_COUNT_MAX);
+		} else
+			rc = -EPERM;
+		break;
+	default:
+		rc = -EINVAL;
+	}
+out:
+	mutex_unlock(&card->conf_mutex);
+	return rc ? rc : count;
+}
+
+static DEVICE_ATTR(sniffer, 0644, qeth_l3_dev_sniffer_show,
+		qeth_l3_dev_sniffer_store);
+
+
+static ssize_t qeth_l3_dev_hsuid_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct qeth_card *card = dev_get_drvdata(dev);
+	char tmp_hsuid[9];
+
+	if (!card)
+		return -EINVAL;
+
+	if (card->info.type != QETH_CARD_TYPE_IQD)
+		return -EPERM;
+
+	memcpy(tmp_hsuid, card->options.hsuid, sizeof(tmp_hsuid));
+	EBCASC(tmp_hsuid, 8);
+	return sprintf(buf, "%s\n", tmp_hsuid);
+}
+
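+/* Store a new HiperSockets user ID: IQD only, and only while a completion
+ * queue can be configured. The value is space-padded to 8 characters,
+ * converted from ASCII to EBCDIC and mirrored into the netdev's
+ * perm_addr; an IP address registered for the old hsuid is removed
+ * first. */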
+static ssize_t qeth_l3_dev_hsuid_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct qeth_card *card = dev_get_drvdata(dev);
+	char *tmp;
+	int rc;
+
+	if (!card)
+		return -EINVAL;
+
+	if (card->info.type != QETH_CARD_TYPE_IQD)
+		return -EPERM;
+	if (card->state != CARD_STATE_DOWN &&
+	    card->state != CARD_STATE_RECOVER)
+		return -EPERM;
+	if (card->options.sniffer)
+		return -EPERM;
+	if (card->options.cq == QETH_CQ_NOTAVAILABLE)
+		return -EPERM;
+
+	tmp = strsep((char **)&buf, "\n");
+	if (strlen(tmp) > 8)
+		return -EINVAL;
+
+	if (card->options.hsuid[0])
+		/* delete old ip address */
+		qeth_l3_modify_hsuid(card, false);
+
+	if (strlen(tmp) == 0) {
+		/* delete ip address only */
+		card->options.hsuid[0] = '\0';
+		memcpy(card->dev->perm_addr, card->options.hsuid, 9);
+		qeth_configure_cq(card, QETH_CQ_DISABLED);
+		return count;
+	}
+
+	if (qeth_configure_cq(card, QETH_CQ_ENABLED))
+		return -EPERM;
+
+	snprintf(card->options.hsuid, sizeof(card->options.hsuid),
+		 "%-8s", tmp);
+	ASCEBC(card->options.hsuid, 8);
+	memcpy(card->dev->perm_addr, card->options.hsuid, 9);
+
+	rc = qeth_l3_modify_hsuid(card, true);
+
+	return rc ? rc : count;
+}
+
+static DEVICE_ATTR(hsuid, 0644, qeth_l3_dev_hsuid_show,
+		   qeth_l3_dev_hsuid_store);
+
+
+static struct attribute *qeth_l3_device_attrs[] = {
+	&dev_attr_route4.attr,
+	&dev_attr_route6.attr,
+	&dev_attr_fake_broadcast.attr,
+	&dev_attr_sniffer.attr,
+	&dev_attr_hsuid.attr,
+	NULL,
+};
+
+static const struct attribute_group qeth_l3_device_attr_group = {
+	.attrs = qeth_l3_device_attrs,
+};
+
+static ssize_t qeth_l3_dev_ipato_enable_show(struct device *dev,
+			struct device_attribute *attr, char *buf)
+{
+	struct qeth_card *card = dev_get_drvdata(dev);
+
+	if (!card)
+		return -EINVAL;
+
+	return sprintf(buf, "%i\n", card->ipato.enabled ? 1 : 0);
+}
+
+static ssize_t qeth_l3_dev_ipato_enable_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct qeth_card *card = dev_get_drvdata(dev);
+	bool enable;
+	int rc = 0;
+
+	if (!card)
+		return -EINVAL;
+
+	mutex_lock(&card->conf_mutex);
+	if ((card->state != CARD_STATE_DOWN) &&
+	    (card->state != CARD_STATE_RECOVER)) {
+		rc = -EPERM;
+		goto out;
+	}
+
+	if (sysfs_streq(buf, "toggle")) {
+		enable = !card->ipato.enabled;
+	} else if (kstrtobool(buf, &enable)) {
+		rc = -EINVAL;
+		goto out;
+	}
+
+	if (card->ipato.enabled != enable) {
+		card->ipato.enabled = enable;
+		spin_lock_bh(&card->ip_lock);
+		qeth_l3_update_ipato(card);
+		spin_unlock_bh(&card->ip_lock);
+	}
+out:
+	mutex_unlock(&card->conf_mutex);
+	return rc ? rc : count;
+}
+
+static QETH_DEVICE_ATTR(ipato_enable, enable, 0644,
+			qeth_l3_dev_ipato_enable_show,
+			qeth_l3_dev_ipato_enable_store);
+
+static ssize_t qeth_l3_dev_ipato_invert4_show(struct device *dev,
+				struct device_attribute *attr, char *buf)
+{
+	struct qeth_card *card = dev_get_drvdata(dev);
+
+	if (!card)
+		return -EINVAL;
+
+	return sprintf(buf, "%i\n", card->ipato.invert4 ? 1 : 0);
+}
+
+static ssize_t qeth_l3_dev_ipato_invert4_store(struct device *dev,
+				struct device_attribute *attr,
+				const char *buf, size_t count)
+{
+	struct qeth_card *card = dev_get_drvdata(dev);
+	bool invert;
+	int rc = 0;
+
+	if (!card)
+		return -EINVAL;
+
+	mutex_lock(&card->conf_mutex);
+	if (sysfs_streq(buf, "toggle")) {
+		invert = !card->ipato.invert4;
+	} else if (kstrtobool(buf, &invert)) {
+		rc = -EINVAL;
+		goto out;
+	}
+
+	if (card->ipato.invert4 != invert) {
+		card->ipato.invert4 = invert;
+		spin_lock_bh(&card->ip_lock);
+		qeth_l3_update_ipato(card);
+		spin_unlock_bh(&card->ip_lock);
+	}
+out:
+	mutex_unlock(&card->conf_mutex);
+	return rc ? rc : count;
+}
+
+static QETH_DEVICE_ATTR(ipato_invert4, invert4, 0644,
+			qeth_l3_dev_ipato_invert4_show,
+			qeth_l3_dev_ipato_invert4_store);
+
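+/* List the configured takeover entries; output stops once the next
+ * (worst-case IPv6) entry would no longer fit into PAGE_SIZE. */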
+static ssize_t qeth_l3_dev_ipato_add_show(char *buf, struct qeth_card *card,
+			enum qeth_prot_versions proto)
+{
+	struct qeth_ipato_entry *ipatoe;
+	char addr_str[40];
+	int entry_len; /* length of 1 entry string, differs between v4 and v6 */
+	int i = 0;
+
+	entry_len = (proto == QETH_PROT_IPV4) ? 12 : 40;
+	/* add strlen for "/<mask>\n" */
+	entry_len += (proto == QETH_PROT_IPV4) ? 5 : 6;
+	spin_lock_bh(&card->ip_lock);
+	list_for_each_entry(ipatoe, &card->ipato.entries, entry) {
+		if (ipatoe->proto != proto)
+			continue;
+		/* String must not be longer than PAGE_SIZE. So we check if
+		 * string length gets near PAGE_SIZE. Then we can safely
+		 * display the next IPv6 address (worst case, compared to
+		 * IPv4). */
+		if ((PAGE_SIZE - i) <= entry_len)
+			break;
+		qeth_l3_ipaddr_to_string(proto, ipatoe->addr, addr_str);
+		i += snprintf(buf + i, PAGE_SIZE - i,
+			      "%s/%i\n", addr_str, ipatoe->mask_bits);
+	}
+	spin_unlock_bh(&card->ip_lock);
+	i += snprintf(buf + i, PAGE_SIZE - i, "\n");
+
+	return i;
+}
+
+static ssize_t qeth_l3_dev_ipato_add4_show(struct device *dev,
+				struct device_attribute *attr, char *buf)
+{
+	struct qeth_card *card = dev_get_drvdata(dev);
+
+	if (!card)
+		return -EINVAL;
+
+	return qeth_l3_dev_ipato_add_show(buf, card, QETH_PROT_IPV4);
+}
+
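+/* Parse a takeover entry of the form "<address>/<prefix length>",
+ * e.g. "192.168.0.0/16" for IPv4. */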
+static int qeth_l3_parse_ipatoe(const char *buf, enum qeth_prot_versions proto,
+		  u8 *addr, int *mask_bits)
+{
+	const char *start, *end;
+	char *tmp;
+	char buffer[40] = {0, };
+
+	start = buf;
+	/* get address string */
+	end = strchr(start, '/');
+	if (!end || (end - start >= 40))
+		return -EINVAL;
+	strncpy(buffer, start, end - start);
+	if (qeth_l3_string_to_ipaddr(buffer, proto, addr))
+		return -EINVAL;
+	start = end + 1;
+	*mask_bits = simple_strtoul(start, &tmp, 10);
+	if (!strlen(start) ||
+	    (tmp == start) ||
+	    (*mask_bits > ((proto == QETH_PROT_IPV4) ? 32 : 128))) {
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static ssize_t qeth_l3_dev_ipato_add_store(const char *buf, size_t count,
+			 struct qeth_card *card, enum qeth_prot_versions proto)
+{
+	struct qeth_ipato_entry *ipatoe;
+	u8 addr[16];
+	int mask_bits;
+	int rc = 0;
+
+	mutex_lock(&card->conf_mutex);
+	rc = qeth_l3_parse_ipatoe(buf, proto, addr, &mask_bits);
+	if (rc)
+		goto out;
+
+	ipatoe = kzalloc(sizeof(struct qeth_ipato_entry), GFP_KERNEL);
+	if (!ipatoe) {
+		rc = -ENOMEM;
+		goto out;
+	}
+	ipatoe->proto = proto;
+	memcpy(ipatoe->addr, addr, (proto == QETH_PROT_IPV4) ? 4 : 16);
+	ipatoe->mask_bits = mask_bits;
+
+	rc = qeth_l3_add_ipato_entry(card, ipatoe);
+	if (rc)
+		kfree(ipatoe);
+out:
+	mutex_unlock(&card->conf_mutex);
+	return rc ? rc : count;
+}
+
+static ssize_t qeth_l3_dev_ipato_add4_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct qeth_card *card = dev_get_drvdata(dev);
+
+	if (!card)
+		return -EINVAL;
+
+	return qeth_l3_dev_ipato_add_store(buf, count, card, QETH_PROT_IPV4);
+}
+
+static QETH_DEVICE_ATTR(ipato_add4, add4, 0644,
+			qeth_l3_dev_ipato_add4_show,
+			qeth_l3_dev_ipato_add4_store);
+
+static ssize_t qeth_l3_dev_ipato_del_store(const char *buf, size_t count,
+			 struct qeth_card *card, enum qeth_prot_versions proto)
+{
+	u8 addr[16];
+	int mask_bits;
+	int rc = 0;
+
+	mutex_lock(&card->conf_mutex);
+	rc = qeth_l3_parse_ipatoe(buf, proto, addr, &mask_bits);
+	if (!rc)
+		rc = qeth_l3_del_ipato_entry(card, proto, addr, mask_bits);
+	mutex_unlock(&card->conf_mutex);
+	return rc ? rc : count;
+}
+
+static ssize_t qeth_l3_dev_ipato_del4_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct qeth_card *card = dev_get_drvdata(dev);
+
+	if (!card)
+		return -EINVAL;
+
+	return qeth_l3_dev_ipato_del_store(buf, count, card, QETH_PROT_IPV4);
+}
+
+static QETH_DEVICE_ATTR(ipato_del4, del4, 0200, NULL,
+			qeth_l3_dev_ipato_del4_store);
+
+static ssize_t qeth_l3_dev_ipato_invert6_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct qeth_card *card = dev_get_drvdata(dev);
+
+	if (!card)
+		return -EINVAL;
+
+	return sprintf(buf, "%i\n", card->ipato.invert6 ? 1 : 0);
+}
+
+static ssize_t qeth_l3_dev_ipato_invert6_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct qeth_card *card = dev_get_drvdata(dev);
+	bool invert;
+	int rc = 0;
+
+	if (!card)
+		return -EINVAL;
+
+	mutex_lock(&card->conf_mutex);
+	if (sysfs_streq(buf, "toggle")) {
+		invert = !card->ipato.invert6;
+	} else if (kstrtobool(buf, &invert)) {
+		rc = -EINVAL;
+		goto out;
+	}
+
+	if (card->ipato.invert6 != invert) {
+		card->ipato.invert6 = invert;
+		spin_lock_bh(&card->ip_lock);
+		qeth_l3_update_ipato(card);
+		spin_unlock_bh(&card->ip_lock);
+	}
+out:
+	mutex_unlock(&card->conf_mutex);
+	return rc ? rc : count;
+}
+
+static QETH_DEVICE_ATTR(ipato_invert6, invert6, 0644,
+			qeth_l3_dev_ipato_invert6_show,
+			qeth_l3_dev_ipato_invert6_store);
+
+
+static ssize_t qeth_l3_dev_ipato_add6_show(struct device *dev,
+				struct device_attribute *attr, char *buf)
+{
+	struct qeth_card *card = dev_get_drvdata(dev);
+
+	if (!card)
+		return -EINVAL;
+
+	return qeth_l3_dev_ipato_add_show(buf, card, QETH_PROT_IPV6);
+}
+
+static ssize_t qeth_l3_dev_ipato_add6_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct qeth_card *card = dev_get_drvdata(dev);
+
+	if (!card)
+		return -EINVAL;
+
+	return qeth_l3_dev_ipato_add_store(buf, count, card, QETH_PROT_IPV6);
+}
+
+static QETH_DEVICE_ATTR(ipato_add6, add6, 0644,
+			qeth_l3_dev_ipato_add6_show,
+			qeth_l3_dev_ipato_add6_store);
+
+static ssize_t qeth_l3_dev_ipato_del6_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct qeth_card *card = dev_get_drvdata(dev);
+
+	if (!card)
+		return -EINVAL;
+
+	return qeth_l3_dev_ipato_del_store(buf, count, card, QETH_PROT_IPV6);
+}
+
+static QETH_DEVICE_ATTR(ipato_del6, del6, 0200, NULL,
+			qeth_l3_dev_ipato_del6_store);
+
+static struct attribute *qeth_ipato_device_attrs[] = {
+	&dev_attr_ipato_enable.attr,
+	&dev_attr_ipato_invert4.attr,
+	&dev_attr_ipato_add4.attr,
+	&dev_attr_ipato_del4.attr,
+	&dev_attr_ipato_invert6.attr,
+	&dev_attr_ipato_add6.attr,
+	&dev_attr_ipato_del6.attr,
+	NULL,
+};
+
+static const struct attribute_group qeth_device_ipato_group = {
+	.name = "ipa_takeover",
+	.attrs = qeth_ipato_device_attrs,
+};
+
+static ssize_t qeth_l3_dev_ip_add_show(struct device *dev, char *buf,
+				       enum qeth_prot_versions proto,
+				       enum qeth_ip_types type)
+{
+	struct qeth_card *card = dev_get_drvdata(dev);
+	struct qeth_ipaddr *ipaddr;
+	char addr_str[40];
+	int str_len = 0;
+	int entry_len; /* length of 1 entry string, differs between v4 and v6 */
+	int i;
+
+	if (!card)
+		return -EINVAL;
+
+	entry_len = (proto == QETH_PROT_IPV4) ? 12 : 40;
+	entry_len += 2; /* \n + terminator */
+	spin_lock_bh(&card->ip_lock);
+	hash_for_each(card->ip_htable, i, ipaddr, hnode) {
+		if (ipaddr->proto != proto || ipaddr->type != type)
+			continue;
+		/* String must not be longer than PAGE_SIZE. So we check if
+		 * string length gets near PAGE_SIZE. Then we can safely
+		 * display the next IPv6 address (worst case, compared to
+		 * IPv4). */
+		if ((PAGE_SIZE - str_len) <= entry_len)
+			break;
+		qeth_l3_ipaddr_to_string(proto, (const u8 *)&ipaddr->u,
+			addr_str);
+		str_len += snprintf(buf + str_len, PAGE_SIZE - str_len, "%s\n",
+				    addr_str);
+	}
+	spin_unlock_bh(&card->ip_lock);
+	str_len += snprintf(buf + str_len, PAGE_SIZE - str_len, "\n");
+
+	return str_len;
+}
+
+static ssize_t qeth_l3_dev_vipa_add4_show(struct device *dev,
+					  struct device_attribute *attr,
+					  char *buf)
+{
+	return qeth_l3_dev_ip_add_show(dev, buf, QETH_PROT_IPV4,
+				       QETH_IP_TYPE_VIPA);
+}
+
+static int qeth_l3_parse_vipae(const char *buf, enum qeth_prot_versions proto,
+		 u8 *addr)
+{
+	if (qeth_l3_string_to_ipaddr(buf, proto, addr))
+		return -EINVAL;
+	return 0;
+}
+
+static ssize_t qeth_l3_dev_vipa_add_store(const char *buf, size_t count,
+			struct qeth_card *card, enum qeth_prot_versions proto)
+{
+	u8 addr[16] = {0, };
+	int rc;
+
+	mutex_lock(&card->conf_mutex);
+	rc = qeth_l3_parse_vipae(buf, proto, addr);
+	if (!rc)
+		rc = qeth_l3_modify_rxip_vipa(card, true, addr,
+					      QETH_IP_TYPE_VIPA, proto);
+	mutex_unlock(&card->conf_mutex);
+	return rc ? rc : count;
+}
+
+static ssize_t qeth_l3_dev_vipa_add4_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct qeth_card *card = dev_get_drvdata(dev);
+
+	if (!card)
+		return -EINVAL;
+
+	return qeth_l3_dev_vipa_add_store(buf, count, card, QETH_PROT_IPV4);
+}
+
+static QETH_DEVICE_ATTR(vipa_add4, add4, 0644,
+			qeth_l3_dev_vipa_add4_show,
+			qeth_l3_dev_vipa_add4_store);
+
+static ssize_t qeth_l3_dev_vipa_del_store(const char *buf, size_t count,
+			 struct qeth_card *card, enum qeth_prot_versions proto)
+{
+	u8 addr[16];
+	int rc;
+
+	mutex_lock(&card->conf_mutex);
+	rc = qeth_l3_parse_vipae(buf, proto, addr);
+	if (!rc)
+		rc = qeth_l3_modify_rxip_vipa(card, false, addr,
+					      QETH_IP_TYPE_VIPA, proto);
+	mutex_unlock(&card->conf_mutex);
+	return rc ? rc : count;
+}
+
+static ssize_t qeth_l3_dev_vipa_del4_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct qeth_card *card = dev_get_drvdata(dev);
+
+	if (!card)
+		return -EINVAL;
+
+	return qeth_l3_dev_vipa_del_store(buf, count, card, QETH_PROT_IPV4);
+}
+
+static QETH_DEVICE_ATTR(vipa_del4, del4, 0200, NULL,
+			qeth_l3_dev_vipa_del4_store);
+
+static ssize_t qeth_l3_dev_vipa_add6_show(struct device *dev,
+					  struct device_attribute *attr,
+					  char *buf)
+{
+	return qeth_l3_dev_ip_add_show(dev, buf, QETH_PROT_IPV6,
+				       QETH_IP_TYPE_VIPA);
+}
+
+static ssize_t qeth_l3_dev_vipa_add6_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct qeth_card *card = dev_get_drvdata(dev);
+
+	if (!card)
+		return -EINVAL;
+
+	return qeth_l3_dev_vipa_add_store(buf, count, card, QETH_PROT_IPV6);
+}
+
+static QETH_DEVICE_ATTR(vipa_add6, add6, 0644,
+			qeth_l3_dev_vipa_add6_show,
+			qeth_l3_dev_vipa_add6_store);
+
+static ssize_t qeth_l3_dev_vipa_del6_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct qeth_card *card = dev_get_drvdata(dev);
+
+	if (!card)
+		return -EINVAL;
+
+	return qeth_l3_dev_vipa_del_store(buf, count, card, QETH_PROT_IPV6);
+}
+
+static QETH_DEVICE_ATTR(vipa_del6, del6, 0200, NULL,
+			qeth_l3_dev_vipa_del6_store);
+
+static struct attribute *qeth_vipa_device_attrs[] = {
+	&dev_attr_vipa_add4.attr,
+	&dev_attr_vipa_del4.attr,
+	&dev_attr_vipa_add6.attr,
+	&dev_attr_vipa_del6.attr,
+	NULL,
+};
+
+static const struct attribute_group qeth_device_vipa_group = {
+	.name = "vipa",
+	.attrs = qeth_vipa_device_attrs,
+};
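+
+/*
+ * Illustrative sysfs usage of the group above (the device path is an
+ * assumption and depends on the configured qeth device):
+ *
+ *	echo 192.168.10.1 > /sys/devices/qeth/0.0.f5f0/vipa/add4
+ *	cat /sys/devices/qeth/0.0.f5f0/vipa/add4
+ *	echo 192.168.10.1 > /sys/devices/qeth/0.0.f5f0/vipa/del4
+ */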
+
+static ssize_t qeth_l3_dev_rxip_add4_show(struct device *dev,
+					  struct device_attribute *attr,
+					  char *buf)
+{
+	return qeth_l3_dev_ip_add_show(dev, buf, QETH_PROT_IPV4,
+				       QETH_IP_TYPE_RXIP);
+}
+
+static int qeth_l3_parse_rxipe(const char *buf, enum qeth_prot_versions proto,
+		 u8 *addr)
+{
+	__be32 ipv4_addr;
+	struct in6_addr ipv6_addr;
+
+	if (qeth_l3_string_to_ipaddr(buf, proto, addr))
+		return -EINVAL;
+	if (proto == QETH_PROT_IPV4) {
+		memcpy(&ipv4_addr, addr, sizeof(ipv4_addr));
+		if (ipv4_is_multicast(ipv4_addr)) {
+			QETH_DBF_MESSAGE(2, "multicast rxip not supported.\n");
+			return -EINVAL;
+		}
+	} else if (proto == QETH_PROT_IPV6) {
+		memcpy(&ipv6_addr, addr, sizeof(ipv6_addr));
+		if (ipv6_addr_is_multicast(&ipv6_addr)) {
+			QETH_DBF_MESSAGE(2, "multicast rxip not supported.\n");
+			return -EINVAL;
+		}
+	}
+
+	return 0;
+}
+
+static ssize_t qeth_l3_dev_rxip_add_store(const char *buf, size_t count,
+			struct qeth_card *card, enum qeth_prot_versions proto)
+{
+	u8 addr[16] = {0, };
+	int rc;
+
+	mutex_lock(&card->conf_mutex);
+	rc = qeth_l3_parse_rxipe(buf, proto, addr);
+	if (!rc)
+		rc = qeth_l3_modify_rxip_vipa(card, true, addr,
+					      QETH_IP_TYPE_RXIP, proto);
+	mutex_unlock(&card->conf_mutex);
+	return rc ? rc : count;
+}
+
+static ssize_t qeth_l3_dev_rxip_add4_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct qeth_card *card = dev_get_drvdata(dev);
+
+	if (!card)
+		return -EINVAL;
+
+	return qeth_l3_dev_rxip_add_store(buf, count, card, QETH_PROT_IPV4);
+}
+
+static QETH_DEVICE_ATTR(rxip_add4, add4, 0644,
+			qeth_l3_dev_rxip_add4_show,
+			qeth_l3_dev_rxip_add4_store);
+
+static ssize_t qeth_l3_dev_rxip_del_store(const char *buf, size_t count,
+			struct qeth_card *card, enum qeth_prot_versions proto)
+{
+	u8 addr[16];
+	int rc;
+
+	mutex_lock(&card->conf_mutex);
+	rc = qeth_l3_parse_rxipe(buf, proto, addr);
+	if (!rc)
+		rc = qeth_l3_modify_rxip_vipa(card, false, addr,
+					      QETH_IP_TYPE_RXIP, proto);
+	mutex_unlock(&card->conf_mutex);
+	return rc ? rc : count;
+}
+
+static ssize_t qeth_l3_dev_rxip_del4_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct qeth_card *card = dev_get_drvdata(dev);
+
+	if (!card)
+		return -EINVAL;
+
+	return qeth_l3_dev_rxip_del_store(buf, count, card, QETH_PROT_IPV4);
+}
+
+static QETH_DEVICE_ATTR(rxip_del4, del4, 0200, NULL,
+			qeth_l3_dev_rxip_del4_store);
+
+static ssize_t qeth_l3_dev_rxip_add6_show(struct device *dev,
+					  struct device_attribute *attr,
+					  char *buf)
+{
+	return qeth_l3_dev_ip_add_show(dev, buf, QETH_PROT_IPV6,
+				       QETH_IP_TYPE_RXIP);
+}
+
+static ssize_t qeth_l3_dev_rxip_add6_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct qeth_card *card = dev_get_drvdata(dev);
+
+	if (!card)
+		return -EINVAL;
+
+	return qeth_l3_dev_rxip_add_store(buf, count, card, QETH_PROT_IPV6);
+}
+
+static QETH_DEVICE_ATTR(rxip_add6, add6, 0644,
+			qeth_l3_dev_rxip_add6_show,
+			qeth_l3_dev_rxip_add6_store);
+
+static ssize_t qeth_l3_dev_rxip_del6_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct qeth_card *card = dev_get_drvdata(dev);
+
+	if (!card)
+		return -EINVAL;
+
+	return qeth_l3_dev_rxip_del_store(buf, count, card, QETH_PROT_IPV6);
+}
+
+static QETH_DEVICE_ATTR(rxip_del6, del6, 0200, NULL,
+			qeth_l3_dev_rxip_del6_store);
+
+static struct attribute *qeth_rxip_device_attrs[] = {
+	&dev_attr_rxip_add4.attr,
+	&dev_attr_rxip_del4.attr,
+	&dev_attr_rxip_add6.attr,
+	&dev_attr_rxip_del6.attr,
+	NULL,
+};
+
+static const struct attribute_group qeth_device_rxip_group = {
+	.name = "rxip",
+	.attrs = qeth_rxip_device_attrs,
+};
+
+static const struct attribute_group *qeth_l3_only_attr_groups[] = {
+	&qeth_l3_device_attr_group,
+	&qeth_device_ipato_group,
+	&qeth_device_vipa_group,
+	&qeth_device_rxip_group,
+	NULL,
+};
+
+int qeth_l3_create_device_attributes(struct device *dev)
+{
+	return sysfs_create_groups(&dev->kobj, qeth_l3_only_attr_groups);
+}
+
+void qeth_l3_remove_device_attributes(struct device *dev)
+{
+	sysfs_remove_groups(&dev->kobj, qeth_l3_only_attr_groups);
+}
+
+const struct attribute_group *qeth_l3_attr_groups[] = {
+	&qeth_device_attr_group,
+	&qeth_device_blkt_group,
+	/* l3 specific, see qeth_l3_only_attr_groups: */
+	&qeth_l3_device_attr_group,
+	&qeth_device_ipato_group,
+	&qeth_device_vipa_group,
+	&qeth_device_rxip_group,
+	NULL,
+};
diff --git a/drivers/s390/net/smsgiucv.c b/drivers/s390/net/smsgiucv.c
new file mode 100644
index 0000000..066b5c3
--- /dev/null
+++ b/drivers/s390/net/smsgiucv.c
@@ -0,0 +1,245 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * IUCV special message driver
+ *
+ * Copyright IBM Corp. 2003, 2009
+ *
+ * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/device.h>
+#include <linux/slab.h>
+#include <net/iucv/iucv.h>
+#include <asm/cpcmd.h>
+#include <asm/ebcdic.h>
+#include "smsgiucv.h"
+
+struct smsg_callback {
+	struct list_head list;
+	const char *prefix;
+	int len;
+	void (*callback)(const char *from, char *str);
+};
+
+MODULE_AUTHOR("(C) 2003 IBM Corporation by Martin Schwidefsky (schwidefsky@de.ibm.com)");
+MODULE_DESCRIPTION("Linux for S/390 IUCV special message driver");
+
+static struct iucv_path *smsg_path;
+/* dummy device used as trigger for PM functions */
+static struct device *smsg_dev;
+
+static DEFINE_SPINLOCK(smsg_list_lock);
+static LIST_HEAD(smsg_list);
+static int iucv_path_connected;
+
+static int smsg_path_pending(struct iucv_path *, u8 *, u8 *);
+static void smsg_message_pending(struct iucv_path *, struct iucv_message *);
+
+static struct iucv_handler smsg_handler = {
+	.path_pending	 = smsg_path_pending,
+	.message_pending = smsg_message_pending,
+};
+
+static int smsg_path_pending(struct iucv_path *path, u8 *ipvmid, u8 *ipuser)
+{
+	if (strncmp(ipvmid, "*MSG    ", 8) != 0)
+		return -EINVAL;
+	/* Path pending from *MSG. */
+	return iucv_path_accept(path, &smsg_handler, "SMSGIUCV        ", NULL);
+}
+
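+/*
+ * An incoming *MSG buffer starts with the blank-padded 8-character z/VM
+ * sender user ID, immediately followed by the message text. Illustrative
+ * example (assumed content) after EBCDIC-to-ASCII conversion:
+ *
+ *	"LINUX1  APP HELLO" -> sender "LINUX1", text "APP HELLO"
+ */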
+static void smsg_message_pending(struct iucv_path *path,
+				 struct iucv_message *msg)
+{
+	struct smsg_callback *cb;
+	unsigned char *buffer;
+	unsigned char sender[9];
+	int rc, i;
+
+	buffer = kmalloc(msg->length + 1, GFP_ATOMIC | GFP_DMA);
+	if (!buffer) {
+		iucv_message_reject(path, msg);
+		return;
+	}
+	rc = iucv_message_receive(path, msg, 0, buffer, msg->length, NULL);
+	if (rc == 0) {
+		buffer[msg->length] = 0;
+		EBCASC(buffer, msg->length);
+		memcpy(sender, buffer, 8);
+		sender[8] = 0;
+		/* Remove trailing whitespace from the sender name. */
+		for (i = 7; i >= 0; i--) {
+			if (sender[i] != ' ' && sender[i] != '\t')
+				break;
+			sender[i] = 0;
+		}
+		spin_lock(&smsg_list_lock);
+		list_for_each_entry(cb, &smsg_list, list)
+			if (strncmp(buffer + 8, cb->prefix, cb->len) == 0) {
+				cb->callback(sender, buffer + 8);
+				break;
+			}
+		spin_unlock(&smsg_list_lock);
+	}
+	kfree(buffer);
+}
+
+int smsg_register_callback(const char *prefix,
+			   void (*callback)(const char *from, char *str))
+{
+	struct smsg_callback *cb;
+
+	cb = kmalloc(sizeof(struct smsg_callback), GFP_KERNEL);
+	if (!cb)
+		return -ENOMEM;
+	cb->prefix = prefix;
+	cb->len = strlen(prefix);
+	cb->callback = callback;
+	spin_lock_bh(&smsg_list_lock);
+	list_add_tail(&cb->list, &smsg_list);
+	spin_unlock_bh(&smsg_list_lock);
+	return 0;
+}
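+
+/*
+ * Minimal sketch of a caller (hypothetical prefix and handler; see
+ * smsgiucv_app.c for a real user). The prefix is matched against the
+ * start of the message text, and the callback runs in atomic context
+ * under smsg_list_lock:
+ *
+ *	static void my_handler(const char *from, char *str)
+ *	{
+ *		pr_info("SMSG from %s: %s\n", from, str);
+ *	}
+ *
+ *	rc = smsg_register_callback("MYPFX", my_handler);
+ */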
+
+void smsg_unregister_callback(const char *prefix,
+			      void (*callback)(const char *from,
+					       char *str))
+{
+	struct smsg_callback *cb, *tmp;
+
+	spin_lock_bh(&smsg_list_lock);
+	cb = NULL;
+	list_for_each_entry(tmp, &smsg_list, list)
+		if (tmp->callback == callback &&
+		    strcmp(tmp->prefix, prefix) == 0) {
+			cb = tmp;
+			list_del(&cb->list);
+			break;
+		}
+	spin_unlock_bh(&smsg_list_lock);
+	kfree(cb);
+}
+
+static int smsg_pm_freeze(struct device *dev)
+{
+#ifdef CONFIG_PM_DEBUG
+	printk(KERN_WARNING "smsg_pm_freeze\n");
+#endif
+	if (smsg_path && iucv_path_connected) {
+		iucv_path_sever(smsg_path, NULL);
+		iucv_path_connected = 0;
+	}
+	return 0;
+}
+
+static int smsg_pm_restore_thaw(struct device *dev)
+{
+	int rc;
+
+#ifdef CONFIG_PM_DEBUG
+	printk(KERN_WARNING "smsg_pm_restore_thaw\n");
+#endif
+	if (smsg_path && !iucv_path_connected) {
+		memset(smsg_path, 0, sizeof(*smsg_path));
+		smsg_path->msglim = 255;
+		smsg_path->flags = 0;
+		rc = iucv_path_connect(smsg_path, &smsg_handler, "*MSG    ",
+				       NULL, NULL, NULL);
+#ifdef CONFIG_PM_DEBUG
+		if (rc)
+			printk(KERN_ERR
+			       "iucv_path_connect returned with rc %i\n", rc);
+#endif
+		if (!rc)
+			iucv_path_connected = 1;
+		cpcmd("SET SMSG IUCV", NULL, 0, NULL);
+	}
+	return 0;
+}
+
+static const struct dev_pm_ops smsg_pm_ops = {
+	.freeze = smsg_pm_freeze,
+	.thaw = smsg_pm_restore_thaw,
+	.restore = smsg_pm_restore_thaw,
+};
+
+static struct device_driver smsg_driver = {
+	.owner = THIS_MODULE,
+	.name = SMSGIUCV_DRV_NAME,
+	.bus  = &iucv_bus,
+	.pm = &smsg_pm_ops,
+};
+
+static void __exit smsg_exit(void)
+{
+	cpcmd("SET SMSG OFF", NULL, 0, NULL);
+	device_unregister(smsg_dev);
+	iucv_unregister(&smsg_handler, 1);
+	driver_unregister(&smsg_driver);
+}
+
+static int __init smsg_init(void)
+{
+	int rc;
+
+	if (!MACHINE_IS_VM) {
+		rc = -EPROTONOSUPPORT;
+		goto out;
+	}
+	rc = driver_register(&smsg_driver);
+	if (rc != 0)
+		goto out;
+	rc = iucv_register(&smsg_handler, 1);
+	if (rc)
+		goto out_driver;
+	smsg_path = iucv_path_alloc(255, 0, GFP_KERNEL);
+	if (!smsg_path) {
+		rc = -ENOMEM;
+		goto out_register;
+	}
+	rc = iucv_path_connect(smsg_path, &smsg_handler, "*MSG    ",
+			       NULL, NULL, NULL);
+	if (rc)
+		goto out_free_path;
+	else
+		iucv_path_connected = 1;
+	smsg_dev = kzalloc(sizeof(struct device), GFP_KERNEL);
+	if (!smsg_dev) {
+		rc = -ENOMEM;
+		goto out_free_path;
+	}
+	dev_set_name(smsg_dev, "smsg_iucv");
+	smsg_dev->bus = &iucv_bus;
+	smsg_dev->parent = iucv_root;
+	smsg_dev->release = (void (*)(struct device *))kfree;
+	smsg_dev->driver = &smsg_driver;
+	rc = device_register(smsg_dev);
+	if (rc)
+		goto out_put;
+
+	cpcmd("SET SMSG IUCV", NULL, 0, NULL);
+	return 0;
+
+out_put:
+	put_device(smsg_dev);
+out_free_path:
+	iucv_path_free(smsg_path);
+	smsg_path = NULL;
+out_register:
+	iucv_unregister(&smsg_handler, 1);
+out_driver:
+	driver_unregister(&smsg_driver);
+out:
+	return rc;
+}
+
+module_init(smsg_init);
+module_exit(smsg_exit);
+MODULE_LICENSE("GPL");
+
+EXPORT_SYMBOL(smsg_register_callback);
+EXPORT_SYMBOL(smsg_unregister_callback);
diff --git a/drivers/s390/net/smsgiucv.h b/drivers/s390/net/smsgiucv.h
new file mode 100644
index 0000000..a0d6c61
--- /dev/null
+++ b/drivers/s390/net/smsgiucv.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * IUCV special message driver
+ *
+ * Copyright IBM Corp. 2003
+ * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
+ */
+
+#define SMSGIUCV_DRV_NAME     "SMSGIUCV"
+
+int  smsg_register_callback(const char *,
+			    void (*)(const char *, char *));
+void smsg_unregister_callback(const char *,
+			      void (*)(const char *, char *));
+
diff --git a/drivers/s390/net/smsgiucv_app.c b/drivers/s390/net/smsgiucv_app.c
new file mode 100644
index 0000000..0a26399
--- /dev/null
+++ b/drivers/s390/net/smsgiucv_app.c
@@ -0,0 +1,219 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Deliver z/VM CP special messages (SMSG) as uevents.
+ *
+ * The driver registers for z/VM CP special messages with the
+ * "APP" prefix. Incoming messages are delivered to user space
+ * as uevents.
+ *
+ * Copyright IBM Corp. 2010
+ * Author(s): Hendrik Brueckner <brueckner@linux.vnet.ibm.com>
+ *
+ */
+#define KMSG_COMPONENT		"smsgiucv_app"
+#define pr_fmt(fmt)		KMSG_COMPONENT ": " fmt
+
+#include <linux/ctype.h>
+#include <linux/err.h>
+#include <linux/device.h>
+#include <linux/list.h>
+#include <linux/kobject.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/workqueue.h>
+#include <net/iucv/iucv.h>
+#include "smsgiucv.h"
+
+/* prefix used for SMSG registration */
+#define SMSG_PREFIX		"APP"
+
+/* SMSG related uevent environment variables */
+#define ENV_SENDER_STR		"SMSG_SENDER="
+#define ENV_SENDER_LEN		(strlen(ENV_SENDER_STR) + 8 + 1)
+#define ENV_PREFIX_STR		"SMSG_ID="
+#define ENV_PREFIX_LEN		(strlen(ENV_PREFIX_STR) + \
+				 strlen(SMSG_PREFIX) + 1)
+#define ENV_TEXT_STR		"SMSG_TEXT="
+#define ENV_TEXT_LEN(msg)	(strlen(ENV_TEXT_STR) + strlen((msg)) + 1)
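+
+/*
+ * Illustrative uevent environment (assumed values): an SMSG "APP HELLO"
+ * sent by z/VM user ID LINUX1 is delivered as a KOBJ_CHANGE uevent with
+ *
+ *	SMSG_SENDER=LINUX1
+ *	SMSG_ID=APP
+ *	SMSG_TEXT=HELLO
+ */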
+
+/* z/VM user ID which is permitted to send SMSGs
+ * If the value is undefined or empty (""), special messages are
+ * accepted from any z/VM user ID. */
+static char *sender;
+module_param(sender, charp, 0400);
+MODULE_PARM_DESC(sender, "z/VM user ID from which CP SMSGs are accepted");
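+/* Example (assumed user ID): "modprobe smsgiucv_app sender=LNXADMIN" accepts
+ * special messages from z/VM user ID LNXADMIN only. */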
+
+/* SMSG device representation */
+static struct device *smsg_app_dev;
+
+/* list element for queuing received messages for delivery */
+struct smsg_app_event {
+	struct list_head list;
+	char *buf;
+	char *envp[4];
+};
+
+/* queue for outgoing uevents */
+static LIST_HEAD(smsg_event_queue);
+static DEFINE_SPINLOCK(smsg_event_queue_lock);
+
+static void smsg_app_event_free(struct smsg_app_event *ev)
+{
+	kfree(ev->buf);
+	kfree(ev);
+}
+
+static struct smsg_app_event *smsg_app_event_alloc(const char *from,
+						   const char *msg)
+{
+	struct smsg_app_event *ev;
+
+	ev = kzalloc(sizeof(*ev), GFP_ATOMIC);
+	if (!ev)
+		return NULL;
+
+	ev->buf = kzalloc(ENV_SENDER_LEN + ENV_PREFIX_LEN +
+			  ENV_TEXT_LEN(msg), GFP_ATOMIC);
+	if (!ev->buf) {
+		kfree(ev);
+		return NULL;
+	}
+
+	/* setting up environment pointers into buf */
+	ev->envp[0] = ev->buf;
+	ev->envp[1] = ev->envp[0] + ENV_SENDER_LEN;
+	ev->envp[2] = ev->envp[1] + ENV_PREFIX_LEN;
+	ev->envp[3] = NULL;
+
+	/* setting up environment: sender, prefix name, and message text */
+	snprintf(ev->envp[0], ENV_SENDER_LEN, ENV_SENDER_STR "%s", from);
+	snprintf(ev->envp[1], ENV_PREFIX_LEN, ENV_PREFIX_STR "%s", SMSG_PREFIX);
+	snprintf(ev->envp[2], ENV_TEXT_LEN(msg), ENV_TEXT_STR "%s", msg);
+
+	return ev;
+}
+
+static void smsg_event_work_fn(struct work_struct *work)
+{
+	LIST_HEAD(event_queue);
+	struct smsg_app_event *p, *n;
+	struct device *dev;
+
+	dev = get_device(smsg_app_dev);
+	if (!dev)
+		return;
+
+	spin_lock_bh(&smsg_event_queue_lock);
+	list_splice_init(&smsg_event_queue, &event_queue);
+	spin_unlock_bh(&smsg_event_queue_lock);
+
+	list_for_each_entry_safe(p, n, &event_queue, list) {
+		list_del(&p->list);
+		kobject_uevent_env(&dev->kobj, KOBJ_CHANGE, p->envp);
+		smsg_app_event_free(p);
+	}
+
+	put_device(dev);
+}
+static DECLARE_WORK(smsg_event_work, smsg_event_work_fn);
+
+static void smsg_app_callback(const char *from, char *msg)
+{
+	struct smsg_app_event *se;
+
+	/* check if the originating z/VM user ID matches
+	 * the configured sender. */
+	if (sender && strlen(sender) > 0 && strcmp(from, sender) != 0)
+		return;
+
+	/* get start of message text (skip prefix and leading blanks) */
+	msg += strlen(SMSG_PREFIX);
+	while (*msg && isspace(*msg))
+		msg++;
+	if (*msg == '\0')
+		return;
+
+	/* allocate event list element and its environment */
+	se = smsg_app_event_alloc(from, msg);
+	if (!se)
+		return;
+
+	/* queue event and schedule work function */
+	spin_lock(&smsg_event_queue_lock);
+	list_add_tail(&se->list, &smsg_event_queue);
+	spin_unlock(&smsg_event_queue_lock);
+
+	schedule_work(&smsg_event_work);
+}
+
+static int __init smsgiucv_app_init(void)
+{
+	struct device_driver *smsgiucv_drv;
+	int rc;
+
+	if (!MACHINE_IS_VM)
+		return -ENODEV;
+
+	smsg_app_dev = kzalloc(sizeof(*smsg_app_dev), GFP_KERNEL);
+	if (!smsg_app_dev)
+		return -ENOMEM;
+
+	smsgiucv_drv = driver_find(SMSGIUCV_DRV_NAME, &iucv_bus);
+	if (!smsgiucv_drv) {
+		kfree(smsg_app_dev);
+		return -ENODEV;
+	}
+
+	rc = dev_set_name(smsg_app_dev, KMSG_COMPONENT);
+	if (rc) {
+		kfree(smsg_app_dev);
+		goto fail;
+	}
+	smsg_app_dev->bus = &iucv_bus;
+	smsg_app_dev->parent = iucv_root;
+	smsg_app_dev->release = (void (*)(struct device *)) kfree;
+	smsg_app_dev->driver = smsgiucv_drv;
+	rc = device_register(smsg_app_dev);
+	if (rc) {
+		put_device(smsg_app_dev);
+		goto fail;
+	}
+
+	/* convert sender to uppercase characters */
+	if (sender) {
+		int len = strlen(sender);
+		while (len--)
+			sender[len] = toupper(sender[len]);
+	}
+
+	/* register with the smsgiucv device driver */
+	rc = smsg_register_callback(SMSG_PREFIX, smsg_app_callback);
+	if (rc) {
+		device_unregister(smsg_app_dev);
+		goto fail;
+	}
+
+	rc = 0;
+fail:
+	return rc;
+}
+module_init(smsgiucv_app_init);
+
+static void __exit smsgiucv_app_exit(void)
+{
+	/* unregister callback */
+	smsg_unregister_callback(SMSG_PREFIX, smsg_app_callback);
+
+	/* cancel pending work and flush any queued event work */
+	cancel_work_sync(&smsg_event_work);
+	smsg_event_work_fn(&smsg_event_work);
+
+	device_unregister(smsg_app_dev);
+}
+module_exit(smsgiucv_app_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Deliver z/VM CP SMSG as uevents");
+MODULE_AUTHOR("Hendrik Brueckner <brueckner@linux.vnet.ibm.com>");
diff --git a/drivers/s390/scsi/Makefile b/drivers/s390/scsi/Makefile
new file mode 100644
index 0000000..9dda431
--- /dev/null
+++ b/drivers/s390/scsi/Makefile
@@ -0,0 +1,10 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for the S/390 specific device drivers
+#
+
+zfcp-objs := zfcp_aux.o zfcp_ccw.o zfcp_dbf.o zfcp_erp.o \
+	     zfcp_fc.o zfcp_fsf.o zfcp_qdio.o zfcp_scsi.o zfcp_sysfs.o \
+	     zfcp_unit.o
+
+obj-$(CONFIG_ZFCP) += zfcp.o
diff --git a/drivers/s390/scsi/zfcp_aux.c b/drivers/s390/scsi/zfcp_aux.c
new file mode 100644
index 0000000..94f4d8f
--- /dev/null
+++ b/drivers/s390/scsi/zfcp_aux.c
@@ -0,0 +1,586 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * zfcp device driver
+ *
+ * Module interface and handling of zfcp data structures.
+ *
+ * Copyright IBM Corp. 2002, 2013
+ */
+
+/*
+ * Driver authors:
+ *            Martin Peschke (originator of the driver)
+ *            Raimund Schroeder
+ *            Aron Zeh
+ *            Wolfgang Taphorn
+ *            Stefan Bader
+ *            Heiko Carstens (kernel 2.6 port of the driver)
+ *            Andreas Herrmann
+ *            Maxim Shchetynin
+ *            Volker Sameske
+ *            Ralph Wuerthner
+ *            Michael Loehr
+ *            Swen Schillig
+ *            Christof Schmitt
+ *            Martin Petermann
+ *            Sven Schuetz
+ *            Steffen Maier
+ */
+
+#define KMSG_COMPONENT "zfcp"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include "zfcp_ext.h"
+#include "zfcp_fc.h"
+#include "zfcp_reqlist.h"
+
+#define ZFCP_BUS_ID_SIZE	20
+
+MODULE_AUTHOR("IBM Deutschland Entwicklung GmbH - linux390@de.ibm.com");
+MODULE_DESCRIPTION("FCP HBA driver");
+MODULE_LICENSE("GPL");
+
+static char *init_device;
+module_param_named(device, init_device, charp, 0400);
+MODULE_PARM_DESC(device, "specify initial device");
+
+static struct kmem_cache * __init zfcp_cache_hw_align(const char *name,
+						      unsigned long size)
+{
+	return kmem_cache_create(name, size, roundup_pow_of_two(size), 0, NULL);
+}
+
+static void __init zfcp_init_device_configure(char *busid, u64 wwpn, u64 lun)
+{
+	struct ccw_device *cdev;
+	struct zfcp_adapter *adapter;
+	struct zfcp_port *port;
+
+	cdev = get_ccwdev_by_busid(&zfcp_ccw_driver, busid);
+	if (!cdev)
+		return;
+
+	if (ccw_device_set_online(cdev))
+		goto out_ccw_device;
+
+	adapter = zfcp_ccw_adapter_by_cdev(cdev);
+	if (!adapter)
+		goto out_ccw_device;
+
+	port = zfcp_get_port_by_wwpn(adapter, wwpn);
+	if (!port)
+		goto out_port;
+	flush_work(&port->rport_work);
+
+	zfcp_unit_add(port, lun);
+	put_device(&port->dev);
+
+out_port:
+	zfcp_ccw_adapter_put(adapter);
+out_ccw_device:
+	put_device(&cdev->dev);
+}
+
+static void __init zfcp_init_device_setup(char *devstr)
+{
+	char *token;
+	char *str, *str_saved;
+	char busid[ZFCP_BUS_ID_SIZE];
+	u64 wwpn, lun;
+
+	/* duplicate devstr and keep the original for sysfs presentation */
+	str_saved = kstrdup(devstr, GFP_KERNEL);
+	str = str_saved;
+	if (!str)
+		return;
+
+	token = strsep(&str, ",");
+	if (!token || strlen(token) >= ZFCP_BUS_ID_SIZE)
+		goto err_out;
+	strlcpy(busid, token, ZFCP_BUS_ID_SIZE);
+
+	token = strsep(&str, ",");
+	if (!token || kstrtoull(token, 0, (unsigned long long *) &wwpn))
+		goto err_out;
+
+	token = strsep(&str, ",");
+	if (!token || kstrtoull(token, 0, (unsigned long long *) &lun))
+		goto err_out;
+
+	kfree(str_saved);
+	zfcp_init_device_configure(busid, wwpn, lun);
+	return;
+
+err_out:
+	kfree(str_saved);
+	pr_err("%s is not a valid SCSI device\n", devstr);
+}
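+
+/*
+ * Example (assumed values): a parameter of the form
+ *
+ *	zfcp.device=0.0.18fe,0x500507630300c562,0x4010403200000000
+ *
+ * is split into bus ID, WWPN and LUN; the ccw device is then set online
+ * and the unit is added, as implemented above.
+ */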
+
+static int __init zfcp_module_init(void)
+{
+	int retval = -ENOMEM;
+
+	zfcp_fsf_qtcb_cache = zfcp_cache_hw_align("zfcp_fsf_qtcb",
+						  sizeof(struct fsf_qtcb));
+	if (!zfcp_fsf_qtcb_cache)
+		goto out_qtcb_cache;
+
+	zfcp_fc_req_cache = zfcp_cache_hw_align("zfcp_fc_req",
+						sizeof(struct zfcp_fc_req));
+	if (!zfcp_fc_req_cache)
+		goto out_fc_cache;
+
+	zfcp_scsi_transport_template =
+		fc_attach_transport(&zfcp_transport_functions);
+	if (!zfcp_scsi_transport_template)
+		goto out_transport;
+	scsi_transport_reserve_device(zfcp_scsi_transport_template,
+				      sizeof(struct zfcp_scsi_dev));
+
+	retval = ccw_driver_register(&zfcp_ccw_driver);
+	if (retval) {
+		pr_err("The zfcp device driver could not register with "
+		       "the common I/O layer\n");
+		goto out_ccw_register;
+	}
+
+	if (init_device)
+		zfcp_init_device_setup(init_device);
+	return 0;
+
+out_ccw_register:
+	fc_release_transport(zfcp_scsi_transport_template);
+out_transport:
+	kmem_cache_destroy(zfcp_fc_req_cache);
+out_fc_cache:
+	kmem_cache_destroy(zfcp_fsf_qtcb_cache);
+out_qtcb_cache:
+	return retval;
+}
+
+module_init(zfcp_module_init);
+
+static void __exit zfcp_module_exit(void)
+{
+	ccw_driver_unregister(&zfcp_ccw_driver);
+	fc_release_transport(zfcp_scsi_transport_template);
+	kmem_cache_destroy(zfcp_fc_req_cache);
+	kmem_cache_destroy(zfcp_fsf_qtcb_cache);
+}
+
+module_exit(zfcp_module_exit);
+
+/**
+ * zfcp_get_port_by_wwpn - find port in port list of adapter by wwpn
+ * @adapter: pointer to adapter to search for port
+ * @wwpn: wwpn to search for
+ *
+ * Returns: pointer to zfcp_port or NULL
+ */
+struct zfcp_port *zfcp_get_port_by_wwpn(struct zfcp_adapter *adapter,
+					u64 wwpn)
+{
+	unsigned long flags;
+	struct zfcp_port *port;
+
+	read_lock_irqsave(&adapter->port_list_lock, flags);
+	list_for_each_entry(port, &adapter->port_list, list)
+		if (port->wwpn == wwpn) {
+			if (!get_device(&port->dev))
+				port = NULL;
+			read_unlock_irqrestore(&adapter->port_list_lock, flags);
+			return port;
+		}
+	read_unlock_irqrestore(&adapter->port_list_lock, flags);
+	return NULL;
+}
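+
+/*
+ * Note: on success a reference to the port's device is held via
+ * get_device(); callers are expected to drop it with put_device(&port->dev)
+ * when done, as zfcp_init_device_configure() above does.
+ */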
+
+static int zfcp_allocate_low_mem_buffers(struct zfcp_adapter *adapter)
+{
+	adapter->pool.erp_req =
+		mempool_create_kmalloc_pool(1, sizeof(struct zfcp_fsf_req));
+	if (!adapter->pool.erp_req)
+		return -ENOMEM;
+
+	adapter->pool.gid_pn_req =
+		mempool_create_kmalloc_pool(1, sizeof(struct zfcp_fsf_req));
+	if (!adapter->pool.gid_pn_req)
+		return -ENOMEM;
+
+	adapter->pool.scsi_req =
+		mempool_create_kmalloc_pool(1, sizeof(struct zfcp_fsf_req));
+	if (!adapter->pool.scsi_req)
+		return -ENOMEM;
+
+	adapter->pool.scsi_abort =
+		mempool_create_kmalloc_pool(1, sizeof(struct zfcp_fsf_req));
+	if (!adapter->pool.scsi_abort)
+		return -ENOMEM;
+
+	adapter->pool.status_read_req =
+		mempool_create_kmalloc_pool(FSF_STATUS_READS_RECOM,
+					    sizeof(struct zfcp_fsf_req));
+	if (!adapter->pool.status_read_req)
+		return -ENOMEM;
+
+	adapter->pool.qtcb_pool =
+		mempool_create_slab_pool(4, zfcp_fsf_qtcb_cache);
+	if (!adapter->pool.qtcb_pool)
+		return -ENOMEM;
+
+	BUILD_BUG_ON(sizeof(struct fsf_status_read_buffer) > PAGE_SIZE);
+	adapter->pool.sr_data =
+		mempool_create_page_pool(FSF_STATUS_READS_RECOM, 0);
+	if (!adapter->pool.sr_data)
+		return -ENOMEM;
+
+	adapter->pool.gid_pn =
+		mempool_create_slab_pool(1, zfcp_fc_req_cache);
+	if (!adapter->pool.gid_pn)
+		return -ENOMEM;
+
+	return 0;
+}
+
+static void zfcp_free_low_mem_buffers(struct zfcp_adapter *adapter)
+{
+	if (adapter->pool.erp_req)
+		mempool_destroy(adapter->pool.erp_req);
+	if (adapter->pool.scsi_req)
+		mempool_destroy(adapter->pool.scsi_req);
+	if (adapter->pool.scsi_abort)
+		mempool_destroy(adapter->pool.scsi_abort);
+	if (adapter->pool.qtcb_pool)
+		mempool_destroy(adapter->pool.qtcb_pool);
+	if (adapter->pool.status_read_req)
+		mempool_destroy(adapter->pool.status_read_req);
+	if (adapter->pool.sr_data)
+		mempool_destroy(adapter->pool.sr_data);
+	if (adapter->pool.gid_pn)
+		mempool_destroy(adapter->pool.gid_pn);
+}
+
+/**
+ * zfcp_status_read_refill - refill the long running status_read_requests
+ * @adapter: ptr to struct zfcp_adapter for which the buffers should be refilled
+ *
+ * Returns: 0 on success, 1 otherwise
+ *
+ * If there are 16 or more status_read requests missing, an adapter_reopen
+ * is triggered.
+ */
+int zfcp_status_read_refill(struct zfcp_adapter *adapter)
+{
+	while (atomic_read(&adapter->stat_miss) > 0)
+		if (zfcp_fsf_status_read(adapter->qdio)) {
+			if (atomic_read(&adapter->stat_miss) >=
+			    adapter->stat_read_buf_num) {
+				zfcp_erp_adapter_reopen(adapter, 0, "axsref1");
+				return 1;
+			}
+			break;
+		} else
+			atomic_dec(&adapter->stat_miss);
+	return 0;
+}
+
+static void _zfcp_status_read_scheduler(struct work_struct *work)
+{
+	zfcp_status_read_refill(container_of(work, struct zfcp_adapter,
+					     stat_work));
+}
+
+static void zfcp_print_sl(struct seq_file *m, struct service_level *sl)
+{
+	struct zfcp_adapter *adapter =
+		container_of(sl, struct zfcp_adapter, service_level);
+
+	seq_printf(m, "zfcp: %s microcode level %x\n",
+		   dev_name(&adapter->ccw_device->dev),
+		   adapter->fsf_lic_version);
+}
+
+static int zfcp_setup_adapter_work_queue(struct zfcp_adapter *adapter)
+{
+	char name[TASK_COMM_LEN];
+
+	snprintf(name, sizeof(name), "zfcp_q_%s",
+		 dev_name(&adapter->ccw_device->dev));
+	adapter->work_queue = alloc_ordered_workqueue(name, WQ_MEM_RECLAIM);
+
+	if (adapter->work_queue)
+		return 0;
+	return -ENOMEM;
+}
+
+static void zfcp_destroy_adapter_work_queue(struct zfcp_adapter *adapter)
+{
+	if (adapter->work_queue)
+		destroy_workqueue(adapter->work_queue);
+	adapter->work_queue = NULL;
+}
+
+/**
+ * zfcp_adapter_enqueue - enqueue a new adapter to the list
+ * @ccw_device: pointer to the struct ccw_device
+ *
+ * Returns:	struct zfcp_adapter* on success, ERR_PTR on error
+ * Enqueues an adapter at the end of the adapter list in the driver data.
+ * All adapter internal structures are set up.
+ * Sysfs entries are also created.
+ */
+struct zfcp_adapter *zfcp_adapter_enqueue(struct ccw_device *ccw_device)
+{
+	struct zfcp_adapter *adapter;
+
+	if (!get_device(&ccw_device->dev))
+		return ERR_PTR(-ENODEV);
+
+	adapter = kzalloc(sizeof(struct zfcp_adapter), GFP_KERNEL);
+	if (!adapter) {
+		put_device(&ccw_device->dev);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	kref_init(&adapter->ref);
+
+	ccw_device->handler = NULL;
+	adapter->ccw_device = ccw_device;
+
+	INIT_WORK(&adapter->stat_work, _zfcp_status_read_scheduler);
+	INIT_DELAYED_WORK(&adapter->scan_work, zfcp_fc_scan_ports);
+	INIT_WORK(&adapter->ns_up_work, zfcp_fc_sym_name_update);
+
+	adapter->next_port_scan = jiffies;
+
+	adapter->erp_action.adapter = adapter;
+
+	if (zfcp_qdio_setup(adapter))
+		goto failed;
+
+	if (zfcp_allocate_low_mem_buffers(adapter))
+		goto failed;
+
+	adapter->req_list = zfcp_reqlist_alloc();
+	if (!adapter->req_list)
+		goto failed;
+
+	if (zfcp_dbf_adapter_register(adapter))
+		goto failed;
+
+	if (zfcp_setup_adapter_work_queue(adapter))
+		goto failed;
+
+	if (zfcp_fc_gs_setup(adapter))
+		goto failed;
+
+	rwlock_init(&adapter->port_list_lock);
+	INIT_LIST_HEAD(&adapter->port_list);
+
+	INIT_LIST_HEAD(&adapter->events.list);
+	INIT_WORK(&adapter->events.work, zfcp_fc_post_event);
+	spin_lock_init(&adapter->events.list_lock);
+
+	init_waitqueue_head(&adapter->erp_ready_wq);
+	init_waitqueue_head(&adapter->erp_done_wqh);
+
+	INIT_LIST_HEAD(&adapter->erp_ready_head);
+	INIT_LIST_HEAD(&adapter->erp_running_head);
+
+	rwlock_init(&adapter->erp_lock);
+	rwlock_init(&adapter->abort_lock);
+
+	if (zfcp_erp_thread_setup(adapter))
+		goto failed;
+
+	adapter->service_level.seq_print = zfcp_print_sl;
+
+	dev_set_drvdata(&ccw_device->dev, adapter);
+
+	if (sysfs_create_group(&ccw_device->dev.kobj,
+			       &zfcp_sysfs_adapter_attrs))
+		goto failed;
+
+	/* report size limit per scatter-gather segment */
+	adapter->dma_parms.max_segment_size = ZFCP_QDIO_SBALE_LEN;
+	adapter->ccw_device->dev.dma_parms = &adapter->dma_parms;
+
+	adapter->stat_read_buf_num = FSF_STATUS_READS_RECOM;
+
+	if (!zfcp_scsi_adapter_register(adapter))
+		return adapter;
+
+failed:
+	zfcp_adapter_unregister(adapter);
+	return ERR_PTR(-ENOMEM);
+}
+
+void zfcp_adapter_unregister(struct zfcp_adapter *adapter)
+{
+	struct ccw_device *cdev = adapter->ccw_device;
+
+	cancel_delayed_work_sync(&adapter->scan_work);
+	cancel_work_sync(&adapter->stat_work);
+	cancel_work_sync(&adapter->ns_up_work);
+	zfcp_destroy_adapter_work_queue(adapter);
+
+	zfcp_fc_wka_ports_force_offline(adapter->gs);
+	zfcp_scsi_adapter_unregister(adapter);
+	sysfs_remove_group(&cdev->dev.kobj, &zfcp_sysfs_adapter_attrs);
+
+	zfcp_erp_thread_kill(adapter);
+	zfcp_dbf_adapter_unregister(adapter);
+	zfcp_qdio_destroy(adapter->qdio);
+
+	zfcp_ccw_adapter_put(adapter); /* final put to release */
+}
+
+/**
+ * zfcp_adapter_release - remove the adapter from the resource list
+ * @ref: pointer to struct kref
+ * locks:	adapter list write lock is assumed to be held by caller
+ */
+void zfcp_adapter_release(struct kref *ref)
+{
+	struct zfcp_adapter *adapter = container_of(ref, struct zfcp_adapter,
+						    ref);
+	struct ccw_device *cdev = adapter->ccw_device;
+
+	dev_set_drvdata(&adapter->ccw_device->dev, NULL);
+	zfcp_fc_gs_destroy(adapter);
+	zfcp_free_low_mem_buffers(adapter);
+	kfree(adapter->req_list);
+	kfree(adapter->fc_stats);
+	kfree(adapter->stats_reset_data);
+	kfree(adapter);
+	put_device(&cdev->dev);
+}
+
+static void zfcp_port_release(struct device *dev)
+{
+	struct zfcp_port *port = container_of(dev, struct zfcp_port, dev);
+
+	zfcp_ccw_adapter_put(port->adapter);
+	kfree(port);
+}
+
+/**
+ * zfcp_port_enqueue - enqueue port to port list of adapter
+ * @adapter: adapter where remote port is added
+ * @wwpn: WWPN of the remote port to be enqueued
+ * @status: initial status for the port
+ * @d_id: destination id of the remote port to be enqueued
+ * Returns: pointer to enqueued port on success, ERR_PTR on error
+ *
+ * All port internal structures are set up and the sysfs entry is generated.
+ * d_id is used to enqueue ports with a well known address like the Directory
+ * Service for nameserver lookup.
+ */
+struct zfcp_port *zfcp_port_enqueue(struct zfcp_adapter *adapter, u64 wwpn,
+				     u32 status, u32 d_id)
+{
+	struct zfcp_port *port;
+	int retval = -ENOMEM;
+
+	kref_get(&adapter->ref);
+
+	port = zfcp_get_port_by_wwpn(adapter, wwpn);
+	if (port) {
+		put_device(&port->dev);
+		retval = -EEXIST;
+		goto err_out;
+	}
+
+	port = kzalloc(sizeof(struct zfcp_port), GFP_KERNEL);
+	if (!port)
+		goto err_out;
+
+	rwlock_init(&port->unit_list_lock);
+	INIT_LIST_HEAD(&port->unit_list);
+	atomic_set(&port->units, 0);
+
+	INIT_WORK(&port->gid_pn_work, zfcp_fc_port_did_lookup);
+	INIT_WORK(&port->test_link_work, zfcp_fc_link_test_work);
+	INIT_WORK(&port->rport_work, zfcp_scsi_rport_work);
+
+	port->adapter = adapter;
+	port->d_id = d_id;
+	port->wwpn = wwpn;
+	port->rport_task = RPORT_NONE;
+	port->dev.parent = &adapter->ccw_device->dev;
+	port->dev.groups = zfcp_port_attr_groups;
+	port->dev.release = zfcp_port_release;
+
+	port->erp_action.adapter = adapter;
+	port->erp_action.port = port;
+
+	if (dev_set_name(&port->dev, "0x%016llx", (unsigned long long)wwpn)) {
+		kfree(port);
+		goto err_out;
+	}
+	retval = -EINVAL;
+
+	if (device_register(&port->dev)) {
+		put_device(&port->dev);
+		goto err_out;
+	}
+
+	write_lock_irq(&adapter->port_list_lock);
+	list_add_tail(&port->list, &adapter->port_list);
+	write_unlock_irq(&adapter->port_list_lock);
+
+	atomic_or(status | ZFCP_STATUS_COMMON_RUNNING, &port->status);
+
+	return port;
+
+err_out:
+	zfcp_ccw_adapter_put(adapter);
+	return ERR_PTR(retval);
+}
+
+/**
+ * zfcp_sg_free_table - free memory used by scatterlists
+ * @sg: pointer to scatterlist
+ * @count: number of scatterlist entries that are to be freed
+ * The scatterlist entries are expected to always reference pages.
+ */
+void zfcp_sg_free_table(struct scatterlist *sg, int count)
+{
+	int i;
+
+	for (i = 0; i < count; i++, sg++)
+		if (sg)
+			free_page((unsigned long) sg_virt(sg));
+		else
+			break;
+}
+
+/**
+ * zfcp_sg_setup_table - init scatterlist and allocate, assign buffers
+ * @sg: pointer to struct scatterlist
+ * @count: number of scatterlist entries that should be assigned
+ * page-sized buffers
+ *
+ * Returns: 0 on success, -ENOMEM otherwise
+ */
+int zfcp_sg_setup_table(struct scatterlist *sg, int count)
+{
+	void *addr;
+	int i;
+
+	sg_init_table(sg, count);
+	for (i = 0; i < count; i++, sg++) {
+		addr = (void *) get_zeroed_page(GFP_KERNEL);
+		if (!addr) {
+			zfcp_sg_free_table(sg, i);
+			return -ENOMEM;
+		}
+		sg_set_buf(sg, addr, PAGE_SIZE);
+	}
+	return 0;
+}
diff --git a/drivers/s390/scsi/zfcp_ccw.c b/drivers/s390/scsi/zfcp_ccw.c
new file mode 100644
index 0000000..49eda14
--- /dev/null
+++ b/drivers/s390/scsi/zfcp_ccw.c
@@ -0,0 +1,341 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * zfcp device driver
+ *
+ * Registration and callback for the s390 common I/O layer.
+ *
+ * Copyright IBM Corp. 2002, 2010
+ */
+
+#define KMSG_COMPONENT "zfcp"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <linux/module.h>
+#include "zfcp_ext.h"
+#include "zfcp_reqlist.h"
+
+#define ZFCP_MODEL_PRIV 0x4
+
+static DEFINE_SPINLOCK(zfcp_ccw_adapter_ref_lock);
+
+struct zfcp_adapter *zfcp_ccw_adapter_by_cdev(struct ccw_device *cdev)
+{
+	struct zfcp_adapter *adapter;
+	unsigned long flags;
+
+	spin_lock_irqsave(&zfcp_ccw_adapter_ref_lock, flags);
+	adapter = dev_get_drvdata(&cdev->dev);
+	if (adapter)
+		kref_get(&adapter->ref);
+	spin_unlock_irqrestore(&zfcp_ccw_adapter_ref_lock, flags);
+	return adapter;
+}
+
+void zfcp_ccw_adapter_put(struct zfcp_adapter *adapter)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&zfcp_ccw_adapter_ref_lock, flags);
+	kref_put(&adapter->ref, zfcp_adapter_release);
+	spin_unlock_irqrestore(&zfcp_ccw_adapter_ref_lock, flags);
+}
+
+/**
+ * zfcp_ccw_activate - activate adapter and wait for it to finish
+ * @cdev: pointer to belonging ccw device
+ * @clear: Status flags to clear.
+ * @tag: s390dbf trace record tag
+ */
+static int zfcp_ccw_activate(struct ccw_device *cdev, int clear, char *tag)
+{
+	struct zfcp_adapter *adapter = zfcp_ccw_adapter_by_cdev(cdev);
+
+	if (!adapter)
+		return 0;
+
+	zfcp_erp_clear_adapter_status(adapter, clear);
+	zfcp_erp_set_adapter_status(adapter, ZFCP_STATUS_COMMON_RUNNING);
+	zfcp_erp_adapter_reopen(adapter, ZFCP_STATUS_COMMON_ERP_FAILED,
+				tag);
+
+	/*
+	 * We want to scan ports here, with some random backoff and without
+	 * rate limit. Recovery has already scheduled a port scan for us,
+	 * but with both random delay and rate limit. Nevertheless we get
+	 * what we want here by flushing the scheduled work after sleeping
+	 * an equivalent random time.
+	 * Let the port scan random delay elapse first. If recovery finishes
+	 * up to that point in time, that would be perfect for both recovery
+	 * and port scan. If not, i.e. recovery takes ages, there was no
+	 * point in waiting a random delay on top of the time consumed by
+	 * recovery.
+	 */
+	msleep(zfcp_fc_port_scan_backoff());
+	zfcp_erp_wait(adapter);
+	flush_delayed_work(&adapter->scan_work);
+
+	zfcp_ccw_adapter_put(adapter);
+
+	return 0;
+}
+
+static struct ccw_device_id zfcp_ccw_device_id[] = {
+	{ CCW_DEVICE_DEVTYPE(0x1731, 0x3, 0x1732, 0x3) },
+	{ CCW_DEVICE_DEVTYPE(0x1731, 0x3, 0x1732, ZFCP_MODEL_PRIV) },
+	{},
+};
+MODULE_DEVICE_TABLE(ccw, zfcp_ccw_device_id);
+
+/**
+ * zfcp_ccw_probe - probe function of zfcp driver
+ * @cdev: pointer to belonging ccw device
+ *
+ * This function gets called by the common i/o layer for each FCP
+ * device found on the current system. This is only a stub to make cio
+ * work: To only allocate adapter resources for devices actually used,
+ * the allocation is deferred to the first call to ccw_set_online.
+ */
+static int zfcp_ccw_probe(struct ccw_device *cdev)
+{
+	return 0;
+}
+
+/**
+ * zfcp_ccw_remove - remove function of zfcp driver
+ * @cdev: pointer to belonging ccw device
+ *
+ * This function gets called by the common i/o layer and removes an adapter
+ * from the system. Task of this function is to get rid of all units and
+ * ports that belong to this adapter. And in addition all resources of this
+ * adapter will be freed too.
+ */
+static void zfcp_ccw_remove(struct ccw_device *cdev)
+{
+	struct zfcp_adapter *adapter;
+	struct zfcp_port *port, *p;
+	struct zfcp_unit *unit, *u;
+	LIST_HEAD(unit_remove_lh);
+	LIST_HEAD(port_remove_lh);
+
+	ccw_device_set_offline(cdev);
+
+	adapter = zfcp_ccw_adapter_by_cdev(cdev);
+	if (!adapter)
+		return;
+
+	write_lock_irq(&adapter->port_list_lock);
+	list_for_each_entry_safe(port, p, &adapter->port_list, list) {
+		write_lock(&port->unit_list_lock);
+		list_for_each_entry_safe(unit, u, &port->unit_list, list)
+			list_move(&unit->list, &unit_remove_lh);
+		write_unlock(&port->unit_list_lock);
+		list_move(&port->list, &port_remove_lh);
+	}
+	write_unlock_irq(&adapter->port_list_lock);
+	zfcp_ccw_adapter_put(adapter); /* put from zfcp_ccw_adapter_by_cdev */
+
+	list_for_each_entry_safe(unit, u, &unit_remove_lh, list)
+		device_unregister(&unit->dev);
+
+	list_for_each_entry_safe(port, p, &port_remove_lh, list)
+		device_unregister(&port->dev);
+
+	zfcp_adapter_unregister(adapter);
+}
+
+/**
+ * zfcp_ccw_set_online - set_online function of zfcp driver
+ * @cdev: pointer to belonging ccw device
+ *
+ * This function gets called by the common i/o layer and sets an
+ * adapter into state online.  The first call will allocate all
+ * adapter resources that will be retained until the device is removed
+ * via zfcp_ccw_remove.
+ *
+ * Setting an fcp device online means that it will be registered with
+ * the SCSI stack, that the QDIO queues will be set up and that the
+ * adapter will be opened.
+ */
+static int zfcp_ccw_set_online(struct ccw_device *cdev)
+{
+	struct zfcp_adapter *adapter = zfcp_ccw_adapter_by_cdev(cdev);
+
+	if (!adapter) {
+		adapter = zfcp_adapter_enqueue(cdev);
+
+		if (IS_ERR(adapter)) {
+			dev_err(&cdev->dev,
+				"Setting up data structures for the "
+				"FCP adapter failed\n");
+			return PTR_ERR(adapter);
+		}
+		kref_get(&adapter->ref);
+	}
+
+	/* initialize request counter */
+	BUG_ON(!zfcp_reqlist_isempty(adapter->req_list));
+	adapter->req_no = 0;
+
+	zfcp_ccw_activate(cdev, 0, "ccsonl1");
+
+	/*
+	 * We want to scan ports here, always, with some random delay and
+	 * without rate limit - basically what zfcp_ccw_activate() has
+	 * achieved for us. Not quite! That port scan depended on
+	 * !no_auto_port_rescan. So let's cover the no_auto_port_rescan
+	 * case here to make sure a port scan is done unconditionally.
+	 * Since zfcp_ccw_activate() has waited the desired random time,
+	 * we can immediately schedule and flush a port scan for the
+	 * remaining cases.
+	 */
+	zfcp_fc_inverse_conditional_port_scan(adapter);
+	flush_delayed_work(&adapter->scan_work);
+	zfcp_ccw_adapter_put(adapter);
+	return 0;
+}
+
+/**
+ * zfcp_ccw_offline_sync - shut down adapter and wait for it to finish
+ * @cdev: pointer to belonging ccw device
+ * @set: Status flags to set.
+ * @tag: s390dbf trace record tag
+ *
+ * This function gets called by the common i/o layer and sets an adapter
+ * into state offline.
+ */
+static int zfcp_ccw_offline_sync(struct ccw_device *cdev, int set, char *tag)
+{
+	struct zfcp_adapter *adapter = zfcp_ccw_adapter_by_cdev(cdev);
+
+	if (!adapter)
+		return 0;
+
+	zfcp_erp_set_adapter_status(adapter, set);
+	zfcp_erp_adapter_shutdown(adapter, 0, tag);
+	zfcp_erp_wait(adapter);
+
+	zfcp_ccw_adapter_put(adapter);
+	return 0;
+}
+
+/**
+ * zfcp_ccw_set_offline - set_offline function of zfcp driver
+ * @cdev: pointer to belonging ccw device
+ *
+ * This function gets called by the common i/o layer and sets an adapter
+ * into state offline.
+ */
+static int zfcp_ccw_set_offline(struct ccw_device *cdev)
+{
+	return zfcp_ccw_offline_sync(cdev, 0, "ccsoff1");
+}
+
+/**
+ * zfcp_ccw_notify - ccw notify function
+ * @cdev: pointer to belonging ccw device
+ * @event: indicates if adapter was detached or attached
+ *
+ * This function gets called by the common i/o layer if an adapter has gone
+ * or reappeared.
+ */
+static int zfcp_ccw_notify(struct ccw_device *cdev, int event)
+{
+	struct zfcp_adapter *adapter = zfcp_ccw_adapter_by_cdev(cdev);
+
+	if (!adapter)
+		return 1;
+
+	switch (event) {
+	case CIO_GONE:
+		if (atomic_read(&adapter->status) &
+		    ZFCP_STATUS_ADAPTER_SUSPENDED) { /* notification ignore */
+			zfcp_dbf_hba_basic("ccnigo1", adapter);
+			break;
+		}
+		dev_warn(&cdev->dev, "The FCP device has been detached\n");
+		zfcp_erp_adapter_shutdown(adapter, 0, "ccnoti1");
+		break;
+	case CIO_NO_PATH:
+		dev_warn(&cdev->dev,
+			 "The CHPID for the FCP device is offline\n");
+		zfcp_erp_adapter_shutdown(adapter, 0, "ccnoti2");
+		break;
+	case CIO_OPER:
+		if (atomic_read(&adapter->status) &
+		    ZFCP_STATUS_ADAPTER_SUSPENDED) { /* notification ignore */
+			zfcp_dbf_hba_basic("ccniop1", adapter);
+			break;
+		}
+		dev_info(&cdev->dev, "The FCP device is operational again\n");
+		zfcp_erp_set_adapter_status(adapter,
+					    ZFCP_STATUS_COMMON_RUNNING);
+		zfcp_erp_adapter_reopen(adapter, ZFCP_STATUS_COMMON_ERP_FAILED,
+					"ccnoti4");
+		break;
+	case CIO_BOXED:
+		dev_warn(&cdev->dev, "The FCP device did not respond within "
+				     "the specified time\n");
+		zfcp_erp_adapter_shutdown(adapter, 0, "ccnoti5");
+		break;
+	}
+
+	zfcp_ccw_adapter_put(adapter);
+	return 1;
+}
+
+/**
+ * zfcp_ccw_shutdown - handle shutdown from cio
+ * @cdev: device for adapter to shutdown.
+ */
+static void zfcp_ccw_shutdown(struct ccw_device *cdev)
+{
+	struct zfcp_adapter *adapter = zfcp_ccw_adapter_by_cdev(cdev);
+
+	if (!adapter)
+		return;
+
+	zfcp_erp_adapter_shutdown(adapter, 0, "ccshut1");
+	zfcp_erp_wait(adapter);
+	zfcp_erp_thread_kill(adapter);
+
+	zfcp_ccw_adapter_put(adapter);
+}
+
+static int zfcp_ccw_suspend(struct ccw_device *cdev)
+{
+	zfcp_ccw_offline_sync(cdev, ZFCP_STATUS_ADAPTER_SUSPENDED, "ccsusp1");
+	return 0;
+}
+
+static int zfcp_ccw_thaw(struct ccw_device *cdev)
+{
+	/*
+	 * Trace records for thaw and final shutdown during suspend can only
+	 * be found in a system dump until the end of suspend, but not after
+	 * resume, because the dump is based on the memory image taken right
+	 * after the very first suspend (freeze) callback.
+	 */
+	zfcp_ccw_activate(cdev, 0, "ccthaw1");
+	return 0;
+}
+
+static int zfcp_ccw_resume(struct ccw_device *cdev)
+{
+	zfcp_ccw_activate(cdev, ZFCP_STATUS_ADAPTER_SUSPENDED, "ccresu1");
+	return 0;
+}
+
+struct ccw_driver zfcp_ccw_driver = {
+	.driver = {
+		.owner	= THIS_MODULE,
+		.name	= "zfcp",
+	},
+	.ids         = zfcp_ccw_device_id,
+	.probe       = zfcp_ccw_probe,
+	.remove      = zfcp_ccw_remove,
+	.set_online  = zfcp_ccw_set_online,
+	.set_offline = zfcp_ccw_set_offline,
+	.notify      = zfcp_ccw_notify,
+	.shutdown    = zfcp_ccw_shutdown,
+	.freeze      = zfcp_ccw_suspend,
+	.thaw	     = zfcp_ccw_thaw,
+	.restore     = zfcp_ccw_resume,
+};
diff --git a/drivers/s390/scsi/zfcp_dbf.c b/drivers/s390/scsi/zfcp_dbf.c
new file mode 100644
index 0000000..3b368fc
--- /dev/null
+++ b/drivers/s390/scsi/zfcp_dbf.c
@@ -0,0 +1,819 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * zfcp device driver
+ *
+ * Debug traces for zfcp.
+ *
+ * Copyright IBM Corp. 2002, 2018
+ */
+
+#define KMSG_COMPONENT "zfcp"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <linux/module.h>
+#include <linux/ctype.h>
+#include <linux/slab.h>
+#include <asm/debug.h>
+#include "zfcp_dbf.h"
+#include "zfcp_ext.h"
+#include "zfcp_fc.h"
+
+static u32 dbfsize = 4;
+
+module_param(dbfsize, uint, 0400);
+MODULE_PARM_DESC(dbfsize,
+		 "number of pages for each debug feature area (default 4)");
+
+static u32 dbflevel = 3;
+
+module_param(dbflevel, uint, 0400);
+MODULE_PARM_DESC(dbflevel,
+		 "log level for each debug feature area "
+		 "(default 3, range 0..6)");
+
+static inline unsigned int zfcp_dbf_plen(unsigned int offset)
+{
+	return sizeof(struct zfcp_dbf_pay) + offset - ZFCP_DBF_PAY_MAX_REC;
+}
+
+static inline
+void zfcp_dbf_pl_write(struct zfcp_dbf *dbf, void *data, u16 length, char *area,
+		       u64 req_id)
+{
+	struct zfcp_dbf_pay *pl = &dbf->pay_buf;
+	u16 offset = 0, rec_length;
+
+	spin_lock(&dbf->pay_lock);
+	memset(pl, 0, sizeof(*pl));
+	pl->fsf_req_id = req_id;
+	memcpy(pl->area, area, ZFCP_DBF_TAG_LEN);
+
+	while (offset < length) {
+		rec_length = min((u16) ZFCP_DBF_PAY_MAX_REC,
+				 (u16) (length - offset));
+		memcpy(pl->data, data + offset, rec_length);
+		debug_event(dbf->pay, 1, pl, zfcp_dbf_plen(rec_length));
+
+		offset += rec_length;
+		pl->counter++;
+	}
+
+	spin_unlock(&dbf->pay_lock);
+}
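+
+/*
+ * Note on the loop above: payloads longer than ZFCP_DBF_PAY_MAX_REC are
+ * emitted as a sequence of pay records that share fsf_req_id and area,
+ * with pl->counter numbering the chunks so a reader can reassemble them.
+ */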
+
+/**
+ * zfcp_dbf_hba_fsf_res - trace event for fsf responses
+ * @tag: tag indicating which kind of FSF response has been received
+ * @level: trace level to be used for the event
+ * @req: request for which a response was received
+ */
+void zfcp_dbf_hba_fsf_res(char *tag, int level, struct zfcp_fsf_req *req)
+{
+	struct zfcp_dbf *dbf = req->adapter->dbf;
+	struct fsf_qtcb_prefix *q_pref = &req->qtcb->prefix;
+	struct fsf_qtcb_header *q_head = &req->qtcb->header;
+	struct zfcp_dbf_hba *rec = &dbf->hba_buf;
+	unsigned long flags;
+
+	spin_lock_irqsave(&dbf->hba_lock, flags);
+	memset(rec, 0, sizeof(*rec));
+
+	memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN);
+	rec->id = ZFCP_DBF_HBA_RES;
+	rec->fsf_req_id = req->req_id;
+	rec->fsf_req_status = req->status;
+	rec->fsf_cmd = req->fsf_command;
+	rec->fsf_seq_no = req->seq_no;
+	rec->u.res.req_issued = req->issued;
+	rec->u.res.prot_status = q_pref->prot_status;
+	rec->u.res.fsf_status = q_head->fsf_status;
+	rec->u.res.port_handle = q_head->port_handle;
+	rec->u.res.lun_handle = q_head->lun_handle;
+
+	memcpy(rec->u.res.prot_status_qual, &q_pref->prot_status_qual,
+	       FSF_PROT_STATUS_QUAL_SIZE);
+	memcpy(rec->u.res.fsf_status_qual, &q_head->fsf_status_qual,
+	       FSF_STATUS_QUALIFIER_SIZE);
+
+	if (req->fsf_command != FSF_QTCB_FCP_CMND) {
+		rec->pl_len = q_head->log_length;
+		zfcp_dbf_pl_write(dbf, (char *)q_pref + q_head->log_start,
+				  rec->pl_len, "fsf_res", req->req_id);
+	}
+
+	debug_event(dbf->hba, level, rec, sizeof(*rec));
+	spin_unlock_irqrestore(&dbf->hba_lock, flags);
+}
+
+/**
+ * zfcp_dbf_hba_fsf_uss - trace event for an unsolicited status buffer
+ * @tag: tag indicating which kind of unsolicited status has been received
+ * @req: request providing the unsolicited status
+ */
+void zfcp_dbf_hba_fsf_uss(char *tag, struct zfcp_fsf_req *req)
+{
+	struct zfcp_dbf *dbf = req->adapter->dbf;
+	struct fsf_status_read_buffer *srb = req->data;
+	struct zfcp_dbf_hba *rec = &dbf->hba_buf;
+	static int const level = 2;
+	unsigned long flags;
+
+	if (unlikely(!debug_level_enabled(dbf->hba, level)))
+		return;
+
+	spin_lock_irqsave(&dbf->hba_lock, flags);
+	memset(rec, 0, sizeof(*rec));
+
+	memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN);
+	rec->id = ZFCP_DBF_HBA_USS;
+	rec->fsf_req_id = req->req_id;
+	rec->fsf_req_status = req->status;
+	rec->fsf_cmd = req->fsf_command;
+
+	if (!srb)
+		goto log;
+
+	rec->u.uss.status_type = srb->status_type;
+	rec->u.uss.status_subtype = srb->status_subtype;
+	rec->u.uss.d_id = ntoh24(srb->d_id);
+	rec->u.uss.lun = srb->fcp_lun;
+	memcpy(&rec->u.uss.queue_designator, &srb->queue_designator,
+	       sizeof(rec->u.uss.queue_designator));
+
+	/* status read buffer payload length */
+	rec->pl_len = (!srb->length) ? 0 : srb->length -
+			offsetof(struct fsf_status_read_buffer, payload);
+
+	if (rec->pl_len)
+		zfcp_dbf_pl_write(dbf, srb->payload.data, rec->pl_len,
+				  "fsf_uss", req->req_id);
+log:
+	debug_event(dbf->hba, level, rec, sizeof(*rec));
+	spin_unlock_irqrestore(&dbf->hba_lock, flags);
+}
+
+/**
+ * zfcp_dbf_hba_bit_err - trace event for bit error conditions
+ * @tag: tag indicating which kind of unsolicited status has been received
+ * @req: request which caused the bit_error condition
+ */
+void zfcp_dbf_hba_bit_err(char *tag, struct zfcp_fsf_req *req)
+{
+	struct zfcp_dbf *dbf = req->adapter->dbf;
+	struct zfcp_dbf_hba *rec = &dbf->hba_buf;
+	struct fsf_status_read_buffer *sr_buf = req->data;
+	static int const level = 1;
+	unsigned long flags;
+
+	if (unlikely(!debug_level_enabled(dbf->hba, level)))
+		return;
+
+	spin_lock_irqsave(&dbf->hba_lock, flags);
+	memset(rec, 0, sizeof(*rec));
+
+	memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN);
+	rec->id = ZFCP_DBF_HBA_BIT;
+	rec->fsf_req_id = req->req_id;
+	rec->fsf_req_status = req->status;
+	rec->fsf_cmd = req->fsf_command;
+	memcpy(&rec->u.be, &sr_buf->payload.bit_error,
+	       sizeof(struct fsf_bit_error_payload));
+
+	debug_event(dbf->hba, level, rec, sizeof(*rec));
+	spin_unlock_irqrestore(&dbf->hba_lock, flags);
+}
+
+/**
+ * zfcp_dbf_hba_def_err - trace event for deferred error messages
+ * @adapter: pointer to struct zfcp_adapter
+ * @req_id: request id which caused the deferred error message
+ * @scount: number of sbals incl. the signaling sbal
+ * @pl: array of all involved sbals
+ */
+void zfcp_dbf_hba_def_err(struct zfcp_adapter *adapter, u64 req_id, u16 scount,
+			  void **pl)
+{
+	struct zfcp_dbf *dbf = adapter->dbf;
+	struct zfcp_dbf_pay *payload = &dbf->pay_buf;
+	unsigned long flags;
+	static int const level = 1;
+	u16 length;
+
+	if (unlikely(!debug_level_enabled(dbf->pay, level)))
+		return;
+
+	if (!pl)
+		return;
+
+	spin_lock_irqsave(&dbf->pay_lock, flags);
+	memset(payload, 0, sizeof(*payload));
+
+	memcpy(payload->area, "def_err", 7);
+	payload->fsf_req_id = req_id;
+	payload->counter = 0;
+	length = min((u16)sizeof(struct qdio_buffer),
+		     (u16)ZFCP_DBF_PAY_MAX_REC);
+
+	while (payload->counter < scount && (char *)pl[payload->counter]) {
+		memcpy(payload->data, (char *)pl[payload->counter], length);
+		debug_event(dbf->pay, level, payload, zfcp_dbf_plen(length));
+		payload->counter++;
+	}
+
+	spin_unlock_irqrestore(&dbf->pay_lock, flags);
+}
+
+/**
+ * zfcp_dbf_hba_basic - trace event for basic adapter events
+ * @tag: identifier for event
+ * @adapter: pointer to struct zfcp_adapter
+ */
+void zfcp_dbf_hba_basic(char *tag, struct zfcp_adapter *adapter)
+{
+	struct zfcp_dbf *dbf = adapter->dbf;
+	struct zfcp_dbf_hba *rec = &dbf->hba_buf;
+	static int const level = 1;
+	unsigned long flags;
+
+	if (unlikely(!debug_level_enabled(dbf->hba, level)))
+		return;
+
+	spin_lock_irqsave(&dbf->hba_lock, flags);
+	memset(rec, 0, sizeof(*rec));
+
+	memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN);
+	rec->id = ZFCP_DBF_HBA_BASIC;
+
+	debug_event(dbf->hba, level, rec, sizeof(*rec));
+	spin_unlock_irqrestore(&dbf->hba_lock, flags);
+}
+
+static void zfcp_dbf_set_common(struct zfcp_dbf_rec *rec,
+				struct zfcp_adapter *adapter,
+				struct zfcp_port *port,
+				struct scsi_device *sdev)
+{
+	rec->adapter_status = atomic_read(&adapter->status);
+	if (port) {
+		rec->port_status = atomic_read(&port->status);
+		rec->wwpn = port->wwpn;
+		rec->d_id = port->d_id;
+	}
+	if (sdev) {
+		rec->lun_status = atomic_read(&sdev_to_zfcp(sdev)->status);
+		rec->lun = zfcp_scsi_dev_lun(sdev);
+	} else
+		rec->lun = ZFCP_DBF_INVALID_LUN;
+}
+
+/**
+ * zfcp_dbf_rec_trig - trace event related to triggered recovery
+ * @tag: identifier for event
+ * @adapter: adapter on which the erp_action should run
+ * @port: remote port involved in the erp_action
+ * @sdev: scsi device involved in the erp_action
+ * @want: wanted erp_action
+ * @need: required erp_action
+ *
+ * The adapter->erp_lock has to be held.
+ */
+void zfcp_dbf_rec_trig(char *tag, struct zfcp_adapter *adapter,
+		       struct zfcp_port *port, struct scsi_device *sdev,
+		       u8 want, u8 need)
+{
+	struct zfcp_dbf *dbf = adapter->dbf;
+	struct zfcp_dbf_rec *rec = &dbf->rec_buf;
+	static int const level = 1;
+	struct list_head *entry;
+	unsigned long flags;
+
+	lockdep_assert_held(&adapter->erp_lock);
+
+	if (unlikely(!debug_level_enabled(dbf->rec, level)))
+		return;
+
+	spin_lock_irqsave(&dbf->rec_lock, flags);
+	memset(rec, 0, sizeof(*rec));
+
+	rec->id = ZFCP_DBF_REC_TRIG;
+	memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN);
+	zfcp_dbf_set_common(rec, adapter, port, sdev);
+
+	list_for_each(entry, &adapter->erp_ready_head)
+		rec->u.trig.ready++;
+
+	list_for_each(entry, &adapter->erp_running_head)
+		rec->u.trig.running++;
+
+	rec->u.trig.want = want;
+	rec->u.trig.need = need;
+
+	debug_event(dbf->rec, level, rec, sizeof(*rec));
+	spin_unlock_irqrestore(&dbf->rec_lock, flags);
+}
+
+/**
+ * zfcp_dbf_rec_trig_lock - trace event related to triggered recovery with lock
+ * @tag: identifier for event
+ * @adapter: adapter on which the erp_action should run
+ * @port: remote port involved in the erp_action
+ * @sdev: scsi device involved in the erp_action
+ * @want: wanted erp_action
+ * @need: required erp_action
+ *
+ * The adapter->erp_lock must not be held.
+ */
+void zfcp_dbf_rec_trig_lock(char *tag, struct zfcp_adapter *adapter,
+			    struct zfcp_port *port, struct scsi_device *sdev,
+			    u8 want, u8 need)
+{
+	unsigned long flags;
+
+	read_lock_irqsave(&adapter->erp_lock, flags);
+	zfcp_dbf_rec_trig(tag, adapter, port, sdev, want, need);
+	read_unlock_irqrestore(&adapter->erp_lock, flags);
+}
+
+/**
+ * zfcp_dbf_rec_run_lvl - trace event related to running recovery
+ * @level: trace level to be used for event
+ * @tag: identifier for event
+ * @erp: erp_action running
+ */
+void zfcp_dbf_rec_run_lvl(int level, char *tag, struct zfcp_erp_action *erp)
+{
+	struct zfcp_dbf *dbf = erp->adapter->dbf;
+	struct zfcp_dbf_rec *rec = &dbf->rec_buf;
+	unsigned long flags;
+
+	if (!debug_level_enabled(dbf->rec, level))
+		return;
+
+	spin_lock_irqsave(&dbf->rec_lock, flags);
+	memset(rec, 0, sizeof(*rec));
+
+	rec->id = ZFCP_DBF_REC_RUN;
+	memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN);
+	zfcp_dbf_set_common(rec, erp->adapter, erp->port, erp->sdev);
+
+	rec->u.run.fsf_req_id = erp->fsf_req_id;
+	rec->u.run.rec_status = erp->status;
+	rec->u.run.rec_step = erp->step;
+	rec->u.run.rec_action = erp->action;
+
+	if (erp->sdev)
+		rec->u.run.rec_count =
+			atomic_read(&sdev_to_zfcp(erp->sdev)->erp_counter);
+	else if (erp->port)
+		rec->u.run.rec_count = atomic_read(&erp->port->erp_counter);
+	else
+		rec->u.run.rec_count = atomic_read(&erp->adapter->erp_counter);
+
+	debug_event(dbf->rec, level, rec, sizeof(*rec));
+	spin_unlock_irqrestore(&dbf->rec_lock, flags);
+}
+
+/**
+ * zfcp_dbf_rec_run - trace event related to running recovery
+ * @tag: identifier for event
+ * @erp: erp_action running
+ */
+void zfcp_dbf_rec_run(char *tag, struct zfcp_erp_action *erp)
+{
+	zfcp_dbf_rec_run_lvl(1, tag, erp);
+}
+
+/**
+ * zfcp_dbf_rec_run_wka - trace wka port event with info like running recovery
+ * @tag: identifier for event
+ * @wka_port: well known address port
+ * @req_id: request ID to correlate with potential HBA trace record
+ */
+void zfcp_dbf_rec_run_wka(char *tag, struct zfcp_fc_wka_port *wka_port,
+			  u64 req_id)
+{
+	struct zfcp_dbf *dbf = wka_port->adapter->dbf;
+	struct zfcp_dbf_rec *rec = &dbf->rec_buf;
+	static int const level = 1;
+	unsigned long flags;
+
+	if (unlikely(!debug_level_enabled(dbf->rec, level)))
+		return;
+
+	spin_lock_irqsave(&dbf->rec_lock, flags);
+	memset(rec, 0, sizeof(*rec));
+
+	rec->id = ZFCP_DBF_REC_RUN;
+	memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN);
+	rec->port_status = wka_port->status;
+	rec->d_id = wka_port->d_id;
+	rec->lun = ZFCP_DBF_INVALID_LUN;
+
+	rec->u.run.fsf_req_id = req_id;
+	rec->u.run.rec_status = ~0;
+	rec->u.run.rec_step = ~0;
+	rec->u.run.rec_action = ~0;
+	rec->u.run.rec_count = ~0;
+
+	debug_event(dbf->rec, level, rec, sizeof(*rec));
+	spin_unlock_irqrestore(&dbf->rec_lock, flags);
+}
+
+#define ZFCP_DBF_SAN_LEVEL 1
+
+static inline
+void zfcp_dbf_san(char *tag, struct zfcp_dbf *dbf,
+		  char *paytag, struct scatterlist *sg, u8 id, u16 len,
+		  u64 req_id, u32 d_id, u16 cap_len)
+{
+	struct zfcp_dbf_san *rec = &dbf->san_buf;
+	u16 rec_len;
+	unsigned long flags;
+	struct zfcp_dbf_pay *payload = &dbf->pay_buf;
+	u16 pay_sum = 0;
+
+	spin_lock_irqsave(&dbf->san_lock, flags);
+	memset(rec, 0, sizeof(*rec));
+
+	rec->id = id;
+	rec->fsf_req_id = req_id;
+	rec->d_id = d_id;
+	memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN);
+	rec->pl_len = len; /* full length even if we cap pay below */
+	if (!sg)
+		goto out;
+	rec_len = min_t(unsigned int, sg->length, ZFCP_DBF_SAN_MAX_PAYLOAD);
+	memcpy(rec->payload, sg_virt(sg), rec_len); /* part of 1st sg entry */
+	if (len <= rec_len)
+		goto out; /* skip pay record if full content in rec->payload */
+
+	/* if (len > rec_len):
+	 * dump data up to cap_len ignoring small duplicate in rec->payload
+	 */
+	spin_lock(&dbf->pay_lock);
+	memset(payload, 0, sizeof(*payload));
+	memcpy(payload->area, paytag, ZFCP_DBF_TAG_LEN);
+	payload->fsf_req_id = req_id;
+	payload->counter = 0;
+	for (; sg && pay_sum < cap_len; sg = sg_next(sg)) {
+		u16 pay_len, offset = 0;
+
+		while (offset < sg->length && pay_sum < cap_len) {
+			pay_len = min((u16)ZFCP_DBF_PAY_MAX_REC,
+				      (u16)(sg->length - offset));
+			/* cap_len <= pay_sum < cap_len+ZFCP_DBF_PAY_MAX_REC */
+			memcpy(payload->data, sg_virt(sg) + offset, pay_len);
+			debug_event(dbf->pay, ZFCP_DBF_SAN_LEVEL, payload,
+				    zfcp_dbf_plen(pay_len));
+			payload->counter++;
+			offset += pay_len;
+			pay_sum += pay_len;
+		}
+	}
+	spin_unlock(&dbf->pay_lock);
+
+out:
+	debug_event(dbf->san, ZFCP_DBF_SAN_LEVEL, rec, sizeof(*rec));
+	spin_unlock_irqrestore(&dbf->san_lock, flags);
+}
+
+/**
+ * zfcp_dbf_san_req - trace event for issued SAN request
+ * @tag: identifier for event
+ * @fsf: request containing issued CT data
+ * @d_id: destination ID
+ */
+void zfcp_dbf_san_req(char *tag, struct zfcp_fsf_req *fsf, u32 d_id)
+{
+	struct zfcp_dbf *dbf = fsf->adapter->dbf;
+	struct zfcp_fsf_ct_els *ct_els = fsf->data;
+	u16 length;
+
+	if (unlikely(!debug_level_enabled(dbf->san, ZFCP_DBF_SAN_LEVEL)))
+		return;
+
+	length = (u16)zfcp_qdio_real_bytes(ct_els->req);
+	zfcp_dbf_san(tag, dbf, "san_req", ct_els->req, ZFCP_DBF_SAN_REQ,
+		     length, fsf->req_id, d_id, length);
+}
+
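+/* A GPN_FT response from the fabric directory service carries one entry
+ * per zoned port and can therefore be very large. To keep the pay trace
+ * area usable, detect such responses by matching the original request
+ * and cap the traced length after the entry flagged FC_NS_FID_LAST.
+ */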
+static u16 zfcp_dbf_san_res_cap_len_if_gpn_ft(char *tag,
+					      struct zfcp_fsf_req *fsf,
+					      u16 len)
+{
+	struct zfcp_fsf_ct_els *ct_els = fsf->data;
+	struct fc_ct_hdr *reqh = sg_virt(ct_els->req);
+	struct fc_ns_gid_ft *reqn = (struct fc_ns_gid_ft *)(reqh + 1);
+	struct scatterlist *resp_entry = ct_els->resp;
+	struct fc_ct_hdr *resph;
+	struct fc_gpn_ft_resp *acc;
+	int max_entries, x, last = 0;
+
+	if (!(memcmp(tag, "fsscth2", 7) == 0
+	      && ct_els->d_id == FC_FID_DIR_SERV
+	      && reqh->ct_rev == FC_CT_REV
+	      && reqh->ct_in_id[0] == 0
+	      && reqh->ct_in_id[1] == 0
+	      && reqh->ct_in_id[2] == 0
+	      && reqh->ct_fs_type == FC_FST_DIR
+	      && reqh->ct_fs_subtype == FC_NS_SUBTYPE
+	      && reqh->ct_options == 0
+	      && reqh->_ct_resvd1 == 0
+	      && reqh->ct_cmd == cpu_to_be16(FC_NS_GPN_FT)
+	      /* reqh->ct_mr_size can vary so do not match but read below */
+	      && reqh->_ct_resvd2 == 0
+	      && reqh->ct_reason == 0
+	      && reqh->ct_explan == 0
+	      && reqh->ct_vendor == 0
+	      && reqn->fn_resvd == 0
+	      && reqn->fn_domain_id_scope == 0
+	      && reqn->fn_area_id_scope == 0
+	      && reqn->fn_fc4_type == FC_TYPE_FCP))
+		return len; /* not GPN_FT response so do not cap */
+
+	acc = sg_virt(resp_entry);
+
+	/* cap all but accepted (ACC) CT responses, but keep at least the CT header */
+	resph = (struct fc_ct_hdr *)acc;
+	if ((ct_els->status) ||
+	    (resph->ct_cmd != cpu_to_be16(FC_FS_ACC)))
+		return max(FC_CT_HDR_LEN, ZFCP_DBF_SAN_MAX_PAYLOAD);
+
+	max_entries = (be16_to_cpu(reqh->ct_mr_size) * 4 /
+		       sizeof(struct fc_gpn_ft_resp))
+		+ 1 /* zfcp_fc_scan_ports: bytes correct, entries off-by-one
+		     * to account for header as 1st pseudo "entry" */;
+
+	/* the basic CT_IU preamble is the same size as one entry in the GPN_FT
+	 * response, allowing us to skip special handling for it - just skip it
+	 */
+	for (x = 1; x < max_entries && !last; x++) {
+		if (x % (ZFCP_FC_GPN_FT_ENT_PAGE + 1))
+			acc++;
+		else
+			acc = sg_virt(++resp_entry);
+
+		last = acc->fp_flags & FC_NS_FID_LAST;
+	}
+	len = min(len, (u16)(x * sizeof(struct fc_gpn_ft_resp)));
+	return len; /* cap after last entry */
+}
+
+/**
+ * zfcp_dbf_san_res - trace event for received SAN response
+ * @tag: identifier for event
+ * @fsf: request containing received CT data
+ */
+void zfcp_dbf_san_res(char *tag, struct zfcp_fsf_req *fsf)
+{
+	struct zfcp_dbf *dbf = fsf->adapter->dbf;
+	struct zfcp_fsf_ct_els *ct_els = fsf->data;
+	u16 length;
+
+	if (unlikely(!debug_level_enabled(dbf->san, ZFCP_DBF_SAN_LEVEL)))
+		return;
+
+	length = (u16)zfcp_qdio_real_bytes(ct_els->resp);
+	zfcp_dbf_san(tag, dbf, "san_res", ct_els->resp, ZFCP_DBF_SAN_RES,
+		     length, fsf->req_id, ct_els->d_id,
+		     zfcp_dbf_san_res_cap_len_if_gpn_ft(tag, fsf, length));
+}
+
+/**
+ * zfcp_dbf_san_in_els - trace event for incoming ELS
+ * @tag: identifier for event
+ * @fsf: request containing received ELS data
+ */
+void zfcp_dbf_san_in_els(char *tag, struct zfcp_fsf_req *fsf)
+{
+	struct zfcp_dbf *dbf = fsf->adapter->dbf;
+	struct fsf_status_read_buffer *srb =
+		(struct fsf_status_read_buffer *) fsf->data;
+	u16 length;
+	struct scatterlist sg;
+
+	if (unlikely(!debug_level_enabled(dbf->san, ZFCP_DBF_SAN_LEVEL)))
+		return;
+
+	length = (u16)(srb->length -
+			offsetof(struct fsf_status_read_buffer, payload));
+	sg_init_one(&sg, srb->payload.data, length);
+	zfcp_dbf_san(tag, dbf, "san_els", &sg, ZFCP_DBF_SAN_ELS, length,
+		     fsf->req_id, ntoh24(srb->d_id), length);
+}
+
+/**
+ * zfcp_dbf_scsi_common() - Common trace event helper for scsi.
+ * @tag: Identifier for event.
+ * @level: trace level of event.
+ * @sdev: Pointer to SCSI device as context for this event.
+ * @sc: Pointer to SCSI command, or NULL with task management function (TMF).
+ * @fsf: Pointer to FSF request, or NULL.
+ */
+void zfcp_dbf_scsi_common(char *tag, int level, struct scsi_device *sdev,
+			  struct scsi_cmnd *sc, struct zfcp_fsf_req *fsf)
+{
+	struct zfcp_adapter *adapter =
+		(struct zfcp_adapter *) sdev->host->hostdata[0];
+	struct zfcp_dbf *dbf = adapter->dbf;
+	struct zfcp_dbf_scsi *rec = &dbf->scsi_buf;
+	struct fcp_resp_with_ext *fcp_rsp;
+	struct fcp_resp_rsp_info *fcp_rsp_info;
+	unsigned long flags;
+
+	spin_lock_irqsave(&dbf->scsi_lock, flags);
+	memset(rec, 0, sizeof(*rec));
+
+	memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN);
+	rec->id = ZFCP_DBF_SCSI_CMND;
+	if (sc) {
+		rec->scsi_result = sc->result;
+		rec->scsi_retries = sc->retries;
+		rec->scsi_allowed = sc->allowed;
+		rec->scsi_id = sc->device->id;
+		rec->scsi_lun = (u32)sc->device->lun;
+		rec->scsi_lun_64_hi = (u32)(sc->device->lun >> 32);
+		rec->host_scribble = (unsigned long)sc->host_scribble;
+
+		memcpy(rec->scsi_opcode, sc->cmnd,
+		       min_t(int, sc->cmd_len, ZFCP_DBF_SCSI_OPCODE));
+	} else {
+		rec->scsi_result = ~0;
+		rec->scsi_retries = ~0;
+		rec->scsi_allowed = ~0;
+		rec->scsi_id = sdev->id;
+		rec->scsi_lun = (u32)sdev->lun;
+		rec->scsi_lun_64_hi = (u32)(sdev->lun >> 32);
+		rec->host_scribble = ~0;
+
+		memset(rec->scsi_opcode, 0xff, ZFCP_DBF_SCSI_OPCODE);
+	}
+
+	if (fsf) {
+		rec->fsf_req_id = fsf->req_id;
+		rec->pl_len = FCP_RESP_WITH_EXT;
+		fcp_rsp = &(fsf->qtcb->bottom.io.fcp_rsp.iu);
+		/* mandatory parts of FCP_RSP IU in this SCSI record */
+		memcpy(&rec->fcp_rsp, fcp_rsp, FCP_RESP_WITH_EXT);
+		if (fcp_rsp->resp.fr_flags & FCP_RSP_LEN_VAL) {
+			fcp_rsp_info = (struct fcp_resp_rsp_info *) &fcp_rsp[1];
+			rec->fcp_rsp_info = fcp_rsp_info->rsp_code;
+			rec->pl_len += be32_to_cpu(fcp_rsp->ext.fr_rsp_len);
+		}
+		if (fcp_rsp->resp.fr_flags & FCP_SNS_LEN_VAL) {
+			rec->pl_len += be32_to_cpu(fcp_rsp->ext.fr_sns_len);
+		}
+		/* complete FCP_RSP IU in associated PAYload record
+		 * but only if there are optional parts
+		 */
+		if (fcp_rsp->resp.fr_flags != 0)
+			zfcp_dbf_pl_write(
+				dbf, fcp_rsp,
+				/* at least one full PAY record
+				 * but not beyond hardware response field
+				 */
+				min_t(u16, max_t(u16, rec->pl_len,
+						 ZFCP_DBF_PAY_MAX_REC),
+				      FSF_FCP_RSP_SIZE),
+				"fcp_riu", fsf->req_id);
+	}
+
+	debug_event(dbf->scsi, level, rec, sizeof(*rec));
+	spin_unlock_irqrestore(&dbf->scsi_lock, flags);
+}
+
+/**
+ * zfcp_dbf_scsi_eh() - Trace event for special cases of scsi_eh callbacks.
+ * @tag: Identifier for event.
+ * @adapter: Pointer to zfcp adapter as context for this event.
+ * @scsi_id: SCSI ID/target to indicate scope of task management function (TMF).
+ * @ret: Return value of calling function.
+ *
+ * This SCSI trace variant does not depend on any of:
+ * scsi_cmnd, zfcp_fsf_req, scsi_device.
+ */
+void zfcp_dbf_scsi_eh(char *tag, struct zfcp_adapter *adapter,
+		      unsigned int scsi_id, int ret)
+{
+	struct zfcp_dbf *dbf = adapter->dbf;
+	struct zfcp_dbf_scsi *rec = &dbf->scsi_buf;
+	unsigned long flags;
+	static int const level = 1;
+
+	if (unlikely(!debug_level_enabled(adapter->dbf->scsi, level)))
+		return;
+
+	spin_lock_irqsave(&dbf->scsi_lock, flags);
+	memset(rec, 0, sizeof(*rec));
+
+	memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN);
+	rec->id = ZFCP_DBF_SCSI_CMND;
+	rec->scsi_result = ret; /* re-use field, int is 4 bytes and fits */
+	rec->scsi_retries = ~0;
+	rec->scsi_allowed = ~0;
+	rec->fcp_rsp_info = ~0;
+	rec->scsi_id = scsi_id;
+	rec->scsi_lun = (u32)ZFCP_DBF_INVALID_LUN;
+	rec->scsi_lun_64_hi = (u32)(ZFCP_DBF_INVALID_LUN >> 32);
+	rec->host_scribble = ~0;
+	memset(rec->scsi_opcode, 0xff, ZFCP_DBF_SCSI_OPCODE);
+
+	debug_event(dbf->scsi, level, rec, sizeof(*rec));
+	spin_unlock_irqrestore(&dbf->scsi_lock, flags);
+}
+
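+/* Allocate one s390 debug feature area with fixed-size records, attach a
+ * hex/ASCII view, and apply the configured trace level (dbflevel).
+ */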
+static debug_info_t *zfcp_dbf_reg(const char *name, int size, int rec_size)
+{
+	struct debug_info *d;
+
+	d = debug_register(name, size, 1, rec_size);
+	if (!d)
+		return NULL;
+
+	debug_register_view(d, &debug_hex_ascii_view);
+	debug_set_level(d, dbflevel);
+
+	return d;
+}
+
+static void zfcp_dbf_unregister(struct zfcp_dbf *dbf)
+{
+	if (!dbf)
+		return;
+
+	debug_unregister(dbf->scsi);
+	debug_unregister(dbf->san);
+	debug_unregister(dbf->hba);
+	debug_unregister(dbf->pay);
+	debug_unregister(dbf->rec);
+	kfree(dbf);
+}
+
+/**
+ * zfcp_dbf_adapter_register - registers debug feature for an adapter
+ * @adapter: pointer to adapter for which debug features should be registered
+ * Return: -ENOMEM on error, 0 otherwise
+ */
+int zfcp_dbf_adapter_register(struct zfcp_adapter *adapter)
+{
+	char name[DEBUG_MAX_NAME_LEN];
+	struct zfcp_dbf *dbf;
+
+	dbf = kzalloc(sizeof(struct zfcp_dbf), GFP_KERNEL);
+	if (!dbf)
+		return -ENOMEM;
+
+	spin_lock_init(&dbf->pay_lock);
+	spin_lock_init(&dbf->hba_lock);
+	spin_lock_init(&dbf->san_lock);
+	spin_lock_init(&dbf->scsi_lock);
+	spin_lock_init(&dbf->rec_lock);
+
+	/* debug feature area which records recovery activity */
+	sprintf(name, "zfcp_%s_rec", dev_name(&adapter->ccw_device->dev));
+	dbf->rec = zfcp_dbf_reg(name, dbfsize, sizeof(struct zfcp_dbf_rec));
+	if (!dbf->rec)
+		goto err_out;
+
+	/* debug feature area which records HBA (FSF and QDIO) conditions */
+	sprintf(name, "zfcp_%s_hba", dev_name(&adapter->ccw_device->dev));
+	dbf->hba = zfcp_dbf_reg(name, dbfsize, sizeof(struct zfcp_dbf_hba));
+	if (!dbf->hba)
+		goto err_out;
+
+	/* debug feature area which records payload info */
+	sprintf(name, "zfcp_%s_pay", dev_name(&adapter->ccw_device->dev));
+	dbf->pay = zfcp_dbf_reg(name, dbfsize * 2, sizeof(struct zfcp_dbf_pay));
+	if (!dbf->pay)
+		goto err_out;
+
+	/* debug feature area which records SAN command failures and recovery */
+	sprintf(name, "zfcp_%s_san", dev_name(&adapter->ccw_device->dev));
+	dbf->san = zfcp_dbf_reg(name, dbfsize, sizeof(struct zfcp_dbf_san));
+	if (!dbf->san)
+		goto err_out;
+
+	/* debug feature area which records SCSI command failures and recovery */
+	sprintf(name, "zfcp_%s_scsi", dev_name(&adapter->ccw_device->dev));
+	dbf->scsi = zfcp_dbf_reg(name, dbfsize, sizeof(struct zfcp_dbf_scsi));
+	if (!dbf->scsi)
+		goto err_out;
+
+	adapter->dbf = dbf;
+
+	return 0;
+err_out:
+	zfcp_dbf_unregister(dbf);
+	return -ENOMEM;
+}
+
+/**
+ * zfcp_dbf_adapter_unregister - unregisters debug feature for an adapter
+ * @adapter: pointer to adapter for which debug features should be unregistered
+ */
+void zfcp_dbf_adapter_unregister(struct zfcp_adapter *adapter)
+{
+	struct zfcp_dbf *dbf = adapter->dbf;
+
+	adapter->dbf = NULL;
+	zfcp_dbf_unregister(dbf);
+}
+
diff --git a/drivers/s390/scsi/zfcp_dbf.h b/drivers/s390/scsi/zfcp_dbf.h
new file mode 100644
index 0000000..d116c07
--- /dev/null
+++ b/drivers/s390/scsi/zfcp_dbf.h
@@ -0,0 +1,443 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * zfcp device driver
+ * debug feature declarations
+ *
+ * Copyright IBM Corp. 2008, 2017
+ */
+
+#ifndef ZFCP_DBF_H
+#define ZFCP_DBF_H
+
+#include <scsi/fc/fc_fcp.h>
+#include "zfcp_ext.h"
+#include "zfcp_fsf.h"
+#include "zfcp_def.h"
+
+#define ZFCP_DBF_TAG_LEN       7
+
+#define ZFCP_DBF_INVALID_LUN	0xFFFFFFFFFFFFFFFFull
+
+enum zfcp_dbf_pseudo_erp_act_type {
+	ZFCP_PSEUDO_ERP_ACTION_RPORT_ADD = 0xff,
+	ZFCP_PSEUDO_ERP_ACTION_RPORT_DEL = 0xfe,
+};
+
+/**
+ * struct zfcp_dbf_rec_trigger - trace record for triggered recovery action
+ * @ready: number of ready recovery actions
+ * @running: number of running recovery actions
+ * @want: wanted recovery action
+ * @need: needed recovery action
+ */
+struct zfcp_dbf_rec_trigger {
+	u32 ready;
+	u32 running;
+	u8 want;
+	u8 need;
+} __packed;
+
+/**
+ * struct zfcp_dbf_rec_running - trace record for running recovery
+ * @fsf_req_id: request id for fsf requests
+ * @rec_status: status of the erp_action
+ * @rec_step: current step of the erp_action
+ * @rec_action: ERP action type
+ * @rec_count: recovery counter
+ */
+struct zfcp_dbf_rec_running {
+	u64 fsf_req_id;
+	u32 rec_status;
+	u16 rec_step;
+	u8 rec_action;
+	u8 rec_count;
+} __packed;
+
+/**
+ * enum zfcp_dbf_rec_id - recovery trace record id
+ * @ZFCP_DBF_REC_TRIG: triggered recovery identifier
+ * @ZFCP_DBF_REC_RUN: running recovery identifier
+ */
+enum zfcp_dbf_rec_id {
+	ZFCP_DBF_REC_TRIG	= 1,
+	ZFCP_DBF_REC_RUN	= 2,
+};
+
+/**
+ * struct zfcp_dbf_rec - trace record for error recovery actions
+ * @id: unique number of recovery record type
+ * @tag: identifier string specifying the location of initiation
+ * @lun: logical unit number
+ * @wwpn: worldwide port name (WWPN)
+ * @d_id: destination ID
+ * @adapter_status: current status of the adapter
+ * @port_status: current status of the port
+ * @lun_status: current status of the lun
+ * @u.trig: structure zfcp_dbf_rec_trigger
+ * @u.run: structure zfcp_dbf_rec_running
+ */
+struct zfcp_dbf_rec {
+	u8 id;
+	char tag[ZFCP_DBF_TAG_LEN];
+	u64 lun;
+	u64 wwpn;
+	u32 d_id;
+	u32 adapter_status;
+	u32 port_status;
+	u32 lun_status;
+	union {
+		struct zfcp_dbf_rec_trigger trig;
+		struct zfcp_dbf_rec_running run;
+	} u;
+} __packed;
+
+/**
+ * enum zfcp_dbf_san_id - SAN trace record identifier
+ * @ZFCP_DBF_SAN_REQ: request trace record id
+ * @ZFCP_DBF_SAN_RES: response trace record id
+ * @ZFCP_DBF_SAN_ELS: extended link service record id
+ */
+enum zfcp_dbf_san_id {
+	ZFCP_DBF_SAN_REQ	= 1,
+	ZFCP_DBF_SAN_RES	= 2,
+	ZFCP_DBF_SAN_ELS	= 3,
+};
+
+/**
+ * struct zfcp_dbf_san - trace record for SAN requests and responses
+ * @id: unique number of recovery record type
+ * @tag: identifier string specifying the location of initiation
+ * @fsf_req_id: request id for fsf requests
+ * @d_id: destination id
+ * @payload: unformatted information related to request/response
+ * @pl_len: length of payload stored as zfcp_dbf_pay
+ */
+struct zfcp_dbf_san {
+	u8 id;
+	char tag[ZFCP_DBF_TAG_LEN];
+	u64 fsf_req_id;
+	u32 d_id;
+#define ZFCP_DBF_SAN_MAX_PAYLOAD (FC_CT_HDR_LEN + 32)
+	char payload[ZFCP_DBF_SAN_MAX_PAYLOAD];
+	u16 pl_len;
+} __packed;
+
+/**
+ * struct zfcp_dbf_hba_res - trace record for hba responses
+ * @req_issued: timestamp when request was issued
+ * @prot_status: protocol status
+ * @prot_status_qual: protocol status qualifier
+ * @fsf_status: fsf status
+ * @fsf_status_qual: fsf status qualifier
+ * @port_handle: handle for port
+ * @lun_handle: handle for LUN
+ */
+struct zfcp_dbf_hba_res {
+	u64 req_issued;
+	u32 prot_status;
+	u8  prot_status_qual[FSF_PROT_STATUS_QUAL_SIZE];
+	u32 fsf_status;
+	u8  fsf_status_qual[FSF_STATUS_QUALIFIER_SIZE];
+	u32 port_handle;
+	u32 lun_handle;
+} __packed;
+
+/**
+ * struct zfcp_dbf_hba_uss - trace record for unsolicited status
+ * @status_type: type of unsolicited status
+ * @status_subtype: subtype of unsolicited status
+ * @d_id: destination ID
+ * @lun: logical unit number
+ * @queue_designator: queue designator
+ */
+struct zfcp_dbf_hba_uss {
+	u32 status_type;
+	u32 status_subtype;
+	u32 d_id;
+	u64 lun;
+	u64 queue_designator;
+} __packed;
+
+/**
+ * enum zfcp_dbf_hba_id - HBA trace record identifier
+ * @ZFCP_DBF_HBA_RES: response trace record
+ * @ZFCP_DBF_HBA_USS: unsolicited status trace record
+ * @ZFCP_DBF_HBA_BIT: bit error trace record
+ * @ZFCP_DBF_HBA_BASIC: basic adapter event trace record
+ */
+enum zfcp_dbf_hba_id {
+	ZFCP_DBF_HBA_RES	= 1,
+	ZFCP_DBF_HBA_USS	= 2,
+	ZFCP_DBF_HBA_BIT	= 3,
+	ZFCP_DBF_HBA_BASIC	= 4,
+};
+
+/**
+ * struct zfcp_dbf_hba - common trace record for HBA records
+ * @id: unique number of recovery record type
+ * @tag: identifier string specifying the location of initiation
+ * @fsf_req_id: request id for fsf requests
+ * @fsf_req_status: status of fsf request
+ * @fsf_cmd: fsf command
+ * @fsf_seq_no: fsf sequence number
+ * @pl_len: length of payload stored as zfcp_dbf_pay
+ * @u: record type specific data
+ */
+struct zfcp_dbf_hba {
+	u8 id;
+	char tag[ZFCP_DBF_TAG_LEN];
+	u64 fsf_req_id;
+	u32 fsf_req_status;
+	u32 fsf_cmd;
+	u32 fsf_seq_no;
+	u16 pl_len;
+	union {
+		struct zfcp_dbf_hba_res res;
+		struct zfcp_dbf_hba_uss uss;
+		struct fsf_bit_error_payload be;
+	} u;
+} __packed;
+
+/**
+ * enum zfcp_dbf_scsi_id - scsi trace record identifier
+ * @ZFCP_DBF_SCSI_CMND: scsi command trace record
+ */
+enum zfcp_dbf_scsi_id {
+	ZFCP_DBF_SCSI_CMND	= 1,
+};
+
+/**
+ * struct zfcp_dbf_scsi - common trace record for SCSI records
+ * @id: unique number of recovery record type
+ * @tag: identifier string specifying the location of initiation
+ * @scsi_id: scsi device id
+ * @scsi_lun: scsi device logical unit number, low part of 64 bit, old 32 bit
+ * @scsi_result: scsi result
+ * @scsi_retries: current retry number of scsi request
+ * @scsi_allowed: allowed retries
+ * @fcp_rsp_info: FCP response info code
+ * @scsi_opcode: scsi opcode
+ * @fsf_req_id: request id of fsf request
+ * @host_scribble: LLD specific data attached to SCSI request
+ * @pl_len: length of payload stored as zfcp_dbf_pay
+ * @fcp_rsp: response for FCP request
+ * @scsi_lun_64_hi: scsi device logical unit number, high part of 64 bit
+ */
+struct zfcp_dbf_scsi {
+	u8 id;
+	char tag[ZFCP_DBF_TAG_LEN];
+	u32 scsi_id;
+	u32 scsi_lun;
+	u32 scsi_result;
+	u8 scsi_retries;
+	u8 scsi_allowed;
+	u8 fcp_rsp_info;
+#define ZFCP_DBF_SCSI_OPCODE	16
+	u8 scsi_opcode[ZFCP_DBF_SCSI_OPCODE];
+	u64 fsf_req_id;
+	u64 host_scribble;
+	u16 pl_len;
+	struct fcp_resp_with_ext fcp_rsp;
+	u32 scsi_lun_64_hi;
+} __packed;
+
+/**
+ * struct zfcp_dbf_pay - trace record for unformatted payload information
+ * @counter: ascending record number
+ * @area: area this record originated from
+ * @fsf_req_id: request id of fsf request
+ * @data: unformatted data
+ */
+struct zfcp_dbf_pay {
+	u8 counter;
+	char area[ZFCP_DBF_TAG_LEN];
+	u64 fsf_req_id;
+#define ZFCP_DBF_PAY_MAX_REC 0x100
+	char data[ZFCP_DBF_PAY_MAX_REC];
+} __packed;
+
+/**
+ * struct zfcp_dbf - main dbf trace structure
+ * @pay: reference to payload trace area
+ * @rec: reference to recovery trace area
+ * @hba: reference to hba trace area
+ * @san: reference to san trace area
+ * @scsi: reference to scsi trace area
+ * @pay_lock: lock protecting payload trace buffer
+ * @rec_lock: lock protecting recovery trace buffer
+ * @hba_lock: lock protecting hba trace buffer
+ * @san_lock: lock protecting san trace buffer
+ * @scsi_lock: lock protecting scsi trace buffer
+ * @pay_buf: pre-allocated buffer for payload
+ * @rec_buf: pre-allocated buffer for recovery
+ * @hba_buf: pre-allocated buffer for hba
+ * @san_buf: pre-allocated buffer for san
+ * @scsi_buf: pre-allocated buffer for scsi
+ */
+struct zfcp_dbf {
+	debug_info_t			*pay;
+	debug_info_t			*rec;
+	debug_info_t			*hba;
+	debug_info_t			*san;
+	debug_info_t			*scsi;
+	spinlock_t			pay_lock;
+	spinlock_t			rec_lock;
+	spinlock_t			hba_lock;
+	spinlock_t			san_lock;
+	spinlock_t			scsi_lock;
+	struct zfcp_dbf_pay		pay_buf;
+	struct zfcp_dbf_rec		rec_buf;
+	struct zfcp_dbf_hba		hba_buf;
+	struct zfcp_dbf_san		san_buf;
+	struct zfcp_dbf_scsi		scsi_buf;
+};
+
+/**
+ * zfcp_dbf_hba_fsf_resp_suppress - true if we should not trace by default
+ * @req: request that has been completed
+ *
+ * Return: true if the FCP response reports only a benign residual under count.
+ */
+static inline
+bool zfcp_dbf_hba_fsf_resp_suppress(struct zfcp_fsf_req *req)
+{
+	struct fsf_qtcb *qtcb = req->qtcb;
+	u32 fsf_stat = qtcb->header.fsf_status;
+	struct fcp_resp *fcp_rsp;
+	u8 rsp_flags, fr_status;
+
+	if (qtcb->prefix.qtcb_type != FSF_IO_COMMAND)
+		return false; /* not an FCP response */
+	fcp_rsp = &qtcb->bottom.io.fcp_rsp.iu.resp;
+	rsp_flags = fcp_rsp->fr_flags;
+	fr_status = fcp_rsp->fr_status;
+	return (fsf_stat == FSF_FCP_RSP_AVAILABLE) &&
+		(rsp_flags == FCP_RESID_UNDER) &&
+		(fr_status == SAM_STAT_GOOD);
+}
+
+static inline
+void zfcp_dbf_hba_fsf_resp(char *tag, int level, struct zfcp_fsf_req *req)
+{
+	if (debug_level_enabled(req->adapter->dbf->hba, level))
+		zfcp_dbf_hba_fsf_res(tag, level, req);
+}
+
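+/* Trace levels follow the s390 debug feature convention: the lower the
+ * level, the more important the record. The completion paths below use
+ * low levels (1-3) for errors and higher levels (up to 6) for routine
+ * completions, so errors remain visible even at low configured trace
+ * levels.
+ */
+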
+/**
+ * zfcp_dbf_hba_fsf_response - trace event for request completion
+ * @req: request that has been completed
+ */
+static inline
+void zfcp_dbf_hba_fsf_response(struct zfcp_fsf_req *req)
+{
+	struct fsf_qtcb *qtcb = req->qtcb;
+
+	if (unlikely(req->status & (ZFCP_STATUS_FSFREQ_DISMISSED |
+				    ZFCP_STATUS_FSFREQ_ERROR))) {
+		zfcp_dbf_hba_fsf_resp("fs_rerr", 3, req);
+
+	} else if ((qtcb->prefix.prot_status != FSF_PROT_GOOD) &&
+	    (qtcb->prefix.prot_status != FSF_PROT_FSF_STATUS_PRESENTED)) {
+		zfcp_dbf_hba_fsf_resp("fs_perr", 1, req);
+
+	} else if (qtcb->header.fsf_status != FSF_GOOD) {
+		zfcp_dbf_hba_fsf_resp("fs_ferr",
+				      zfcp_dbf_hba_fsf_resp_suppress(req)
+				      ? 5 : 1, req);
+
+	} else if ((req->fsf_command == FSF_QTCB_OPEN_PORT_WITH_DID) ||
+		   (req->fsf_command == FSF_QTCB_OPEN_LUN)) {
+		zfcp_dbf_hba_fsf_resp("fs_open", 4, req);
+
+	} else if (qtcb->header.log_length) {
+		zfcp_dbf_hba_fsf_resp("fs_qtcb", 5, req);
+
+	} else {
+		zfcp_dbf_hba_fsf_resp("fs_norm", 6, req);
+	}
+}
+
+static inline
+void _zfcp_dbf_scsi(char *tag, int level, struct scsi_cmnd *scmd,
+		   struct zfcp_fsf_req *req)
+{
+	struct zfcp_adapter *adapter = (struct zfcp_adapter *)
+					scmd->device->host->hostdata[0];
+
+	if (debug_level_enabled(adapter->dbf->scsi, level))
+		zfcp_dbf_scsi_common(tag, level, scmd->device, scmd, req);
+}
+
+/**
+ * zfcp_dbf_scsi_result - trace event for SCSI command completion
+ * @scmd: SCSI command pointer
+ * @req: FSF request used to issue SCSI command
+ */
+static inline
+void zfcp_dbf_scsi_result(struct scsi_cmnd *scmd, struct zfcp_fsf_req *req)
+{
+	if (scmd->result != 0)
+		_zfcp_dbf_scsi("rsl_err", 3, scmd, req);
+	else if (scmd->retries > 0)
+		_zfcp_dbf_scsi("rsl_ret", 4, scmd, req);
+	else
+		_zfcp_dbf_scsi("rsl_nor", 6, scmd, req);
+}
+
+/**
+ * zfcp_dbf_scsi_fail_send - trace event for failure to send SCSI command
+ * @scmd: SCSI command pointer
+ */
+static inline
+void zfcp_dbf_scsi_fail_send(struct scsi_cmnd *scmd)
+{
+	_zfcp_dbf_scsi("rsl_fai", 4, scmd, NULL);
+}
+
+/**
+ * zfcp_dbf_scsi_abort - trace event for SCSI command abort
+ * @tag: tag indicating success or failure of abort operation
+ * @scmd: SCSI command to be aborted
+ * @fsf_req: request containing abort (might be NULL)
+ */
+static inline
+void zfcp_dbf_scsi_abort(char *tag, struct scsi_cmnd *scmd,
+			 struct zfcp_fsf_req *fsf_req)
+{
+	_zfcp_dbf_scsi(tag, 1, scmd, fsf_req);
+}
+
+/**
+ * zfcp_dbf_scsi_devreset() - Trace event for Logical Unit or Target Reset.
+ * @tag: Tag indicating success or failure of reset operation.
+ * @sdev: Pointer to SCSI device as context for this event.
+ * @flag: Indicates type of reset (Target Reset, Logical Unit Reset).
+ * @fsf_req: Pointer to FSF request representing the TMF, or NULL.
+ */
+static inline
+void zfcp_dbf_scsi_devreset(char *tag, struct scsi_device *sdev, u8 flag,
+			    struct zfcp_fsf_req *fsf_req)
+{
+	struct zfcp_adapter *adapter = (struct zfcp_adapter *)
+					sdev->host->hostdata[0];
+	char tmp_tag[ZFCP_DBF_TAG_LEN];
+	static int const level = 1;
+
+	if (unlikely(!debug_level_enabled(adapter->dbf->scsi, level)))
+		return;
+
+	if (flag == FCP_TMF_TGT_RESET)
+		memcpy(tmp_tag, "tr_", 3);
+	else
+		memcpy(tmp_tag, "lr_", 3);
+
+	memcpy(&tmp_tag[3], tag, 4);
+	zfcp_dbf_scsi_common(tmp_tag, level, sdev, NULL, fsf_req);
+}
+
+/**
+ * zfcp_dbf_scsi_nullcmnd() - trace NULLify of SCSI command in dev/tgt-reset.
+ * @scmnd: SCSI command that was NULLified.
+ * @fsf_req: request that owned @scmnd.
+ */
+static inline void zfcp_dbf_scsi_nullcmnd(struct scsi_cmnd *scmnd,
+					  struct zfcp_fsf_req *fsf_req)
+{
+	_zfcp_dbf_scsi("scfc__1", 3, scmnd, fsf_req);
+}
+
+#endif /* ZFCP_DBF_H */
diff --git a/drivers/s390/scsi/zfcp_def.h b/drivers/s390/scsi/zfcp_def.h
new file mode 100644
index 0000000..3396a47
--- /dev/null
+++ b/drivers/s390/scsi/zfcp_def.h
@@ -0,0 +1,324 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * zfcp device driver
+ *
+ * Global definitions for the zfcp device driver.
+ *
+ * Copyright IBM Corp. 2002, 2010
+ */
+
+#ifndef ZFCP_DEF_H
+#define ZFCP_DEF_H
+
+/*************************** INCLUDES *****************************************/
+
+#include <linux/init.h>
+#include <linux/moduleparam.h>
+#include <linux/major.h>
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <linux/timer.h>
+#include <linux/slab.h>
+#include <linux/mempool.h>
+#include <linux/syscalls.h>
+#include <linux/scatterlist.h>
+#include <linux/ioctl.h>
+#include <scsi/fc/fc_fs.h>
+#include <scsi/fc/fc_gs.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_tcq.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_transport.h>
+#include <scsi/scsi_transport_fc.h>
+#include <scsi/scsi_bsg_fc.h>
+#include <asm/ccwdev.h>
+#include <asm/debug.h>
+#include <asm/ebcdic.h>
+#include <asm/sysinfo.h>
+#include "zfcp_fsf.h"
+#include "zfcp_fc.h"
+#include "zfcp_qdio.h"
+
+struct zfcp_reqlist;
+
+/********************* SCSI SPECIFIC DEFINES *********************************/
+#define ZFCP_SCSI_ER_TIMEOUT                    (10*HZ)
+
+/********************* FSF SPECIFIC DEFINES *********************************/
+
+/* ATTENTION: value must not be used by hardware */
+#define FSF_QTCB_UNSOLICITED_STATUS		0x6305
+
+/* timeout value for "default timer" for fsf requests */
+#define ZFCP_FSF_REQUEST_TIMEOUT (60*HZ)
+
+/*************** ADAPTER/PORT/UNIT AND FSF_REQ STATUS FLAGS ******************/
+
+/*
+ * Note: the leftmost status byte is common among adapter, port
+ * and unit
+ */
+#define ZFCP_COMMON_FLAGS			0xfff00000
+
+/* common status bits */
+#define ZFCP_STATUS_COMMON_RUNNING		0x40000000
+#define ZFCP_STATUS_COMMON_ERP_FAILED		0x20000000
+#define ZFCP_STATUS_COMMON_UNBLOCKED		0x10000000
+#define ZFCP_STATUS_COMMON_OPEN                 0x04000000
+#define ZFCP_STATUS_COMMON_ERP_INUSE		0x01000000
+#define ZFCP_STATUS_COMMON_ACCESS_DENIED	0x00800000
+#define ZFCP_STATUS_COMMON_ACCESS_BOXED		0x00400000
+#define ZFCP_STATUS_COMMON_NOESC		0x00200000
+
+/* adapter status */
+#define ZFCP_STATUS_ADAPTER_MB_ACT		0x00000001
+#define ZFCP_STATUS_ADAPTER_QDIOUP		0x00000002
+#define ZFCP_STATUS_ADAPTER_SIOSL_ISSUED	0x00000004
+#define ZFCP_STATUS_ADAPTER_XCONFIG_OK		0x00000008
+#define ZFCP_STATUS_ADAPTER_HOST_CON_INIT	0x00000010
+#define ZFCP_STATUS_ADAPTER_SUSPENDED		0x00000040
+#define ZFCP_STATUS_ADAPTER_ERP_PENDING		0x00000100
+#define ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED	0x00000200
+#define ZFCP_STATUS_ADAPTER_DATA_DIV_ENABLED	0x00000400
+
+/* remote port status */
+#define ZFCP_STATUS_PORT_PHYS_OPEN		0x00000001
+#define ZFCP_STATUS_PORT_LINK_TEST		0x00000002
+
+/* FSF request status (this does not have a common part) */
+#define ZFCP_STATUS_FSFREQ_ERROR		0x00000008
+#define ZFCP_STATUS_FSFREQ_CLEANUP		0x00000010
+#define ZFCP_STATUS_FSFREQ_ABORTSUCCEEDED	0x00000040
+#define ZFCP_STATUS_FSFREQ_ABORTNOTNEEDED       0x00000080
+#define ZFCP_STATUS_FSFREQ_TMFUNCFAILED         0x00000200
+#define ZFCP_STATUS_FSFREQ_DISMISSED            0x00001000
+
+/************************* STRUCTURE DEFINITIONS *****************************/
+
+struct zfcp_fsf_req;
+
+/* holds various memory pools of an adapter */
+struct zfcp_adapter_mempool {
+	mempool_t *erp_req;
+	mempool_t *gid_pn_req;
+	mempool_t *scsi_req;
+	mempool_t *scsi_abort;
+	mempool_t *status_read_req;
+	mempool_t *sr_data;
+	mempool_t *gid_pn;
+	mempool_t *qtcb_pool;
+};
+
+struct zfcp_erp_action {
+	struct list_head list;
+	int action;	              /* requested action code */
+	struct zfcp_adapter *adapter; /* device which should be recovered */
+	struct zfcp_port *port;
+	struct scsi_device *sdev;
+	u32		status;	      /* recovery status */
+	u32 step;	              /* active step of this erp action */
+	unsigned long		fsf_req_id;
+	struct timer_list timer;
+};
+
+struct fsf_latency_record {
+	u32 min;
+	u32 max;
+	u64 sum;
+};
+
+struct latency_cont {
+	struct fsf_latency_record channel;
+	struct fsf_latency_record fabric;
+	u64 counter;
+};
+
+struct zfcp_latencies {
+	struct latency_cont read;
+	struct latency_cont write;
+	struct latency_cont cmd;
+	spinlock_t lock;
+};
+
+struct zfcp_adapter {
+	struct kref		ref;
+	u64			peer_wwnn;	   /* P2P peer WWNN */
+	u64			peer_wwpn;	   /* P2P peer WWPN */
+	u32			peer_d_id;	   /* P2P peer D_ID */
+	struct ccw_device       *ccw_device;	   /* S/390 ccw device */
+	struct zfcp_qdio	*qdio;
+	u32			hydra_version;	   /* Hydra version */
+	u32			fsf_lic_version;
+	u32			adapter_features;  /* FCP channel features */
+	u32			connection_features; /* host connection features */
+        u32			hardware_version;  /* of FCP channel */
+	u16			timer_ticks;       /* time int for a tick */
+	struct Scsi_Host	*scsi_host;	   /* Pointer to mid-layer */
+	struct list_head	port_list;	   /* remote port list */
+	rwlock_t		port_list_lock;    /* port list lock */
+	unsigned long		req_no;		   /* unique FSF req number */
+	struct zfcp_reqlist	*req_list;
+	u32			fsf_req_seq_no;	   /* FSF cmnd seq number */
+	rwlock_t		abort_lock;        /* Protects against SCSI
+						      stack abort/command
+						      completion races */
+	atomic_t		stat_miss;	   /* # missing status reads*/
+	unsigned int		stat_read_buf_num;
+	struct work_struct	stat_work;
+	atomic_t		status;	           /* status of this adapter */
+	struct list_head	erp_ready_head;	   /* error recovery for this
+						      adapter/devices */
+	wait_queue_head_t	erp_ready_wq;
+	struct list_head	erp_running_head;
+	rwlock_t		erp_lock;
+	wait_queue_head_t	erp_done_wqh;
+	struct zfcp_erp_action	erp_action;	   /* pending error recovery */
+        atomic_t                erp_counter;
+	u32			erp_total_count;   /* total nr of enqueued erp
+						      actions */
+	u32			erp_low_mem_count; /* nr of erp actions waiting
+						      for memory */
+	struct task_struct	*erp_thread;
+	struct zfcp_fc_wka_ports *gs;		   /* generic services */
+	struct zfcp_dbf		*dbf;		   /* debug traces */
+	struct zfcp_adapter_mempool	pool;      /* Adapter memory pools */
+	struct fc_host_statistics *fc_stats;
+	struct fsf_qtcb_bottom_port *stats_reset_data;
+	unsigned long		stats_reset;
+	struct delayed_work	scan_work;
+	struct work_struct	ns_up_work;
+	struct service_level	service_level;
+	struct workqueue_struct	*work_queue;
+	struct device_dma_parameters dma_parms;
+	struct zfcp_fc_events events;
+	unsigned long		next_port_scan;
+};
+
+struct zfcp_port {
+	struct device          dev;
+	struct fc_rport        *rport;         /* rport of fc transport class */
+	struct list_head       list;	       /* list of remote ports */
+	struct zfcp_adapter    *adapter;       /* adapter used to access port */
+	struct list_head	unit_list;	/* head of logical unit list */
+	rwlock_t		unit_list_lock; /* unit list lock */
+	atomic_t		units;	       /* zfcp_unit count */
+	atomic_t	       status;	       /* status of this remote port */
+	u64		       wwnn;	       /* WWNN if known */
+	u64		       wwpn;	       /* WWPN */
+	u32		       d_id;	       /* D_ID */
+	u32		       handle;	       /* handle assigned by FSF */
+	struct zfcp_erp_action erp_action;     /* pending error recovery */
+        atomic_t               erp_counter;
+	u32                    maxframe_size;
+	u32                    supported_classes;
+	struct work_struct     gid_pn_work;
+	struct work_struct     test_link_work;
+	struct work_struct     rport_work;
+	enum { RPORT_NONE, RPORT_ADD, RPORT_DEL }  rport_task;
+	unsigned int		starget_id;
+};
+
+/**
+ * struct zfcp_unit - LUN configured via zfcp sysfs
+ * @dev: struct device for sysfs representation and reference counting
+ * @list: entry in LUN/unit list per zfcp_port
+ * @port: reference to zfcp_port where this LUN is configured
+ * @fcp_lun: 64 bit LUN value
+ * @scsi_work: for running scsi_scan_target
+ *
+ * This is the representation of a LUN that has been configured for
+ * use. The main data here is the 64 bit LUN value; data for running
+ * I/O and recovery is in struct zfcp_scsi_dev.
+ */
+struct zfcp_unit {
+	struct device		dev;
+	struct list_head	list;
+	struct zfcp_port	*port;
+	u64			fcp_lun;
+	struct work_struct	scsi_work;
+};
+
+/**
+ * struct zfcp_scsi_dev - zfcp data per SCSI device
+ * @status: zfcp internal status flags
+ * @lun_handle: handle from "open lun" for issuing FSF requests
+ * @erp_action: zfcp erp data for opening and recovering this LUN
+ * @erp_counter: zfcp erp counter for this LUN
+ * @latencies: FSF channel and fabric latencies
+ * @port: zfcp_port this LUN belongs to
+ */
+struct zfcp_scsi_dev {
+	atomic_t		status;
+	u32			lun_handle;
+	struct zfcp_erp_action	erp_action;
+	atomic_t		erp_counter;
+	struct zfcp_latencies	latencies;
+	struct zfcp_port	*port;
+};
+
+/**
+ * sdev_to_zfcp - Access zfcp LUN data for SCSI device
+ * @sdev: scsi_device where to get the zfcp_scsi_dev pointer
+ */
+static inline struct zfcp_scsi_dev *sdev_to_zfcp(struct scsi_device *sdev)
+{
+	return scsi_transport_device_data(sdev);
+}
+
+/**
+ * zfcp_scsi_dev_lun - Return SCSI device LUN as 64 bit FCP LUN
+ * @sdev: SCSI device where to get the LUN from
+ */
+static inline u64 zfcp_scsi_dev_lun(struct scsi_device *sdev)
+{
+	u64 fcp_lun;
+
+	int_to_scsilun(sdev->lun, (struct scsi_lun *)&fcp_lun);
+	return fcp_lun;
+}
+
+/**
+ * struct zfcp_fsf_req - basic FSF request structure
+ * @list: list of FSF requests
+ * @req_id: unique request ID
+ * @adapter: adapter this request belongs to
+ * @qdio_req: qdio queue related values
+ * @completion: used to signal the completion of the request
+ * @status: status of the request
+ * @fsf_command: FSF command issued
+ * @qtcb: associated QTCB
+ * @seq_no: sequence number of this request
+ * @data: private data
+ * @timer: timer data of this request
+ * @erp_action: reference to erp action if request issued on behalf of ERP
+ * @pool: reference to memory pool if used for this request
+ * @issued: time when request was sent (STCK)
+ * @handler: handler which should be called to process response
+ */
+struct zfcp_fsf_req {
+	struct list_head	list;
+	unsigned long		req_id;
+	struct zfcp_adapter	*adapter;
+	struct zfcp_qdio_req	qdio_req;
+	struct completion	completion;
+	u32			status;
+	u32			fsf_command;
+	struct fsf_qtcb		*qtcb;
+	u32			seq_no;
+	void			*data;
+	struct timer_list	timer;
+	struct zfcp_erp_action	*erp_action;
+	mempool_t		*pool;
+	unsigned long long	issued;
+	void			(*handler)(struct zfcp_fsf_req *);
+};
+
+static inline
+int zfcp_adapter_multi_buffer_active(struct zfcp_adapter *adapter)
+{
+	return atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_MB_ACT;
+}
+
+#endif /* ZFCP_DEF_H */
diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c
new file mode 100644
index 0000000..e7e6b63
--- /dev/null
+++ b/drivers/s390/scsi/zfcp_erp.c
@@ -0,0 +1,1704 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * zfcp device driver
+ *
+ * Error Recovery Procedures (ERP).
+ *
+ * Copyright IBM Corp. 2002, 2016
+ */
+
+#define KMSG_COMPONENT "zfcp"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <linux/kthread.h>
+#include "zfcp_ext.h"
+#include "zfcp_reqlist.h"
+
+#define ZFCP_MAX_ERPS                   3
+
+enum zfcp_erp_act_flags {
+	ZFCP_STATUS_ERP_TIMEDOUT	= 0x10000000,
+	ZFCP_STATUS_ERP_CLOSE_ONLY	= 0x01000000,
+	ZFCP_STATUS_ERP_DISMISSED	= 0x00200000,
+	ZFCP_STATUS_ERP_LOWMEM		= 0x00400000,
+	ZFCP_STATUS_ERP_NO_REF		= 0x00800000,
+};
+
+enum zfcp_erp_steps {
+	ZFCP_ERP_STEP_UNINITIALIZED	= 0x0000,
+	ZFCP_ERP_STEP_PHYS_PORT_CLOSING	= 0x0010,
+	ZFCP_ERP_STEP_PORT_CLOSING	= 0x0100,
+	ZFCP_ERP_STEP_PORT_OPENING	= 0x0800,
+	ZFCP_ERP_STEP_LUN_CLOSING	= 0x1000,
+	ZFCP_ERP_STEP_LUN_OPENING	= 0x2000,
+};
+
+/**
+ * enum zfcp_erp_act_type - Type of ERP action object.
+ * @ZFCP_ERP_ACTION_REOPEN_LUN: LUN recovery.
+ * @ZFCP_ERP_ACTION_REOPEN_PORT: Port recovery.
+ * @ZFCP_ERP_ACTION_REOPEN_PORT_FORCED: Forced port recovery.
+ * @ZFCP_ERP_ACTION_REOPEN_ADAPTER: Adapter recovery.
+ * @ZFCP_ERP_ACTION_NONE: Eyecatcher pseudo flag to bitwise or-combine with
+ *			  either of the first four enum values.
+ *			  Used to indicate that an ERP action could not be
+ *			  set up despite a detected need for some recovery.
+ * @ZFCP_ERP_ACTION_FAILED: Eyecatcher pseudo flag to bitwise or-combine with
+ *			    either of the first four enum values.
+ *			    Used to indicate that ERP not needed because
+ *			    the object has ZFCP_STATUS_COMMON_ERP_FAILED.
+ */
+enum zfcp_erp_act_type {
+	ZFCP_ERP_ACTION_REOPEN_LUN         = 1,
+	ZFCP_ERP_ACTION_REOPEN_PORT	   = 2,
+	ZFCP_ERP_ACTION_REOPEN_PORT_FORCED = 3,
+	ZFCP_ERP_ACTION_REOPEN_ADAPTER     = 4,
+	ZFCP_ERP_ACTION_NONE		   = 0xc0,
+	ZFCP_ERP_ACTION_FAILED		   = 0xe0,
+};
+
+enum zfcp_erp_act_result {
+	ZFCP_ERP_SUCCEEDED = 0,
+	ZFCP_ERP_FAILED    = 1,
+	ZFCP_ERP_CONTINUES = 2,
+	ZFCP_ERP_EXIT      = 3,
+	ZFCP_ERP_DISMISSED = 4,
+	ZFCP_ERP_NOMEM     = 5,
+};
+
+static void zfcp_erp_adapter_block(struct zfcp_adapter *adapter, int mask)
+{
+	zfcp_erp_clear_adapter_status(adapter,
+				       ZFCP_STATUS_COMMON_UNBLOCKED | mask);
+}
+
+static bool zfcp_erp_action_is_running(struct zfcp_erp_action *act)
+{
+	struct zfcp_erp_action *curr_act;
+
+	list_for_each_entry(curr_act, &act->adapter->erp_running_head, list)
+		if (act == curr_act)
+			return true;
+	return false;
+}
+
+static void zfcp_erp_action_ready(struct zfcp_erp_action *act)
+{
+	struct zfcp_adapter *adapter = act->adapter;
+
+	list_move(&act->list, &act->adapter->erp_ready_head);
+	zfcp_dbf_rec_run("erardy1", act);
+	wake_up(&adapter->erp_ready_wq);
+	zfcp_dbf_rec_run("erardy2", act);
+}
+
+static void zfcp_erp_action_dismiss(struct zfcp_erp_action *act)
+{
+	act->status |= ZFCP_STATUS_ERP_DISMISSED;
+	if (zfcp_erp_action_is_running(act))
+		zfcp_erp_action_ready(act);
+}
+
+static void zfcp_erp_action_dismiss_lun(struct scsi_device *sdev)
+{
+	struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
+
+	if (atomic_read(&zfcp_sdev->status) & ZFCP_STATUS_COMMON_ERP_INUSE)
+		zfcp_erp_action_dismiss(&zfcp_sdev->erp_action);
+}
+
+static void zfcp_erp_action_dismiss_port(struct zfcp_port *port)
+{
+	struct scsi_device *sdev;
+
+	if (atomic_read(&port->status) & ZFCP_STATUS_COMMON_ERP_INUSE)
+		zfcp_erp_action_dismiss(&port->erp_action);
+	else {
+		spin_lock(port->adapter->scsi_host->host_lock);
+		__shost_for_each_device(sdev, port->adapter->scsi_host)
+			if (sdev_to_zfcp(sdev)->port == port)
+				zfcp_erp_action_dismiss_lun(sdev);
+		spin_unlock(port->adapter->scsi_host->host_lock);
+	}
+}
+
+static void zfcp_erp_action_dismiss_adapter(struct zfcp_adapter *adapter)
+{
+	struct zfcp_port *port;
+
+	if (atomic_read(&adapter->status) & ZFCP_STATUS_COMMON_ERP_INUSE)
+		zfcp_erp_action_dismiss(&adapter->erp_action);
+	else {
+		read_lock(&adapter->port_list_lock);
+		list_for_each_entry(port, &adapter->port_list, list)
+		    zfcp_erp_action_dismiss_port(port);
+		read_unlock(&adapter->port_list_lock);
+	}
+}
+
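+/* Suppress recovery for objects already marked ZFCP_STATUS_COMMON_ERP_FAILED;
+ * for port and adapter reopen, re-assert the failed status so that newly
+ * attached child devices inherit it as well.
+ */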
+static int zfcp_erp_handle_failed(int want, struct zfcp_adapter *adapter,
+				  struct zfcp_port *port,
+				  struct scsi_device *sdev)
+{
+	int need = want;
+	struct zfcp_scsi_dev *zsdev;
+
+	switch (want) {
+	case ZFCP_ERP_ACTION_REOPEN_LUN:
+		zsdev = sdev_to_zfcp(sdev);
+		if (atomic_read(&zsdev->status) & ZFCP_STATUS_COMMON_ERP_FAILED)
+			need = 0;
+		break;
+	case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED:
+		if (atomic_read(&port->status) & ZFCP_STATUS_COMMON_ERP_FAILED)
+			need = 0;
+		break;
+	case ZFCP_ERP_ACTION_REOPEN_PORT:
+		if (atomic_read(&port->status) &
+		    ZFCP_STATUS_COMMON_ERP_FAILED) {
+			need = 0;
+			/* ensure propagation of failed status to new devices */
+			zfcp_erp_set_port_status(
+				port, ZFCP_STATUS_COMMON_ERP_FAILED);
+		}
+		break;
+	case ZFCP_ERP_ACTION_REOPEN_ADAPTER:
+		if (atomic_read(&adapter->status) &
+		    ZFCP_STATUS_COMMON_ERP_FAILED) {
+			need = 0;
+			/* ensure propagation of failed status to new devices */
+			zfcp_erp_set_adapter_status(
+				adapter, ZFCP_STATUS_COMMON_ERP_FAILED);
+		}
+		break;
+	default:
+		need = 0;
+		break;
+	}
+
+	return need;
+}
+
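+/* Determine the ERP action actually required for the wanted one. The
+ * switch deliberately falls through to escalate: a LUN reopen becomes a
+ * port reopen while the port is blocked, and a port reopen becomes an
+ * adapter reopen while the adapter is blocked. Returns 0 if the target
+ * object is already in recovery or recovery cannot succeed right now.
+ */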
+static int zfcp_erp_required_act(int want, struct zfcp_adapter *adapter,
+				 struct zfcp_port *port,
+				 struct scsi_device *sdev)
+{
+	int need = want;
+	int l_status, p_status, a_status;
+	struct zfcp_scsi_dev *zfcp_sdev;
+
+	switch (want) {
+	case ZFCP_ERP_ACTION_REOPEN_LUN:
+		zfcp_sdev = sdev_to_zfcp(sdev);
+		l_status = atomic_read(&zfcp_sdev->status);
+		if (l_status & ZFCP_STATUS_COMMON_ERP_INUSE)
+			return 0;
+		p_status = atomic_read(&port->status);
+		if (!(p_status & ZFCP_STATUS_COMMON_RUNNING) ||
+		      p_status & ZFCP_STATUS_COMMON_ERP_FAILED)
+			return 0;
+		if (!(p_status & ZFCP_STATUS_COMMON_UNBLOCKED))
+			need = ZFCP_ERP_ACTION_REOPEN_PORT;
+		/* fall through */
+	case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED:
+		p_status = atomic_read(&port->status);
+		if (!(p_status & ZFCP_STATUS_COMMON_OPEN))
+			need = ZFCP_ERP_ACTION_REOPEN_PORT;
+		/* fall through */
+	case ZFCP_ERP_ACTION_REOPEN_PORT:
+		p_status = atomic_read(&port->status);
+		if (p_status & ZFCP_STATUS_COMMON_ERP_INUSE)
+			return 0;
+		a_status = atomic_read(&adapter->status);
+		if (!(a_status & ZFCP_STATUS_COMMON_RUNNING) ||
+		      a_status & ZFCP_STATUS_COMMON_ERP_FAILED)
+			return 0;
+		if (p_status & ZFCP_STATUS_COMMON_NOESC)
+			return need;
+		if (!(a_status & ZFCP_STATUS_COMMON_UNBLOCKED))
+			need = ZFCP_ERP_ACTION_REOPEN_ADAPTER;
+		/* fall through */
+	case ZFCP_ERP_ACTION_REOPEN_ADAPTER:
+		a_status = atomic_read(&adapter->status);
+		if (a_status & ZFCP_STATUS_COMMON_ERP_INUSE)
+			return 0;
+		if (!(a_status & ZFCP_STATUS_COMMON_RUNNING) &&
+		    !(a_status & ZFCP_STATUS_COMMON_OPEN))
+			return 0; /* shutdown requested for closed adapter */
+	}
+
+	return need;
+}
+
+static struct zfcp_erp_action *zfcp_erp_setup_act(int need, u32 act_status,
+						  struct zfcp_adapter *adapter,
+						  struct zfcp_port *port,
+						  struct scsi_device *sdev)
+{
+	struct zfcp_erp_action *erp_action;
+	struct zfcp_scsi_dev *zfcp_sdev;
+
+	switch (need) {
+	case ZFCP_ERP_ACTION_REOPEN_LUN:
+		zfcp_sdev = sdev_to_zfcp(sdev);
+		if (!(act_status & ZFCP_STATUS_ERP_NO_REF))
+			if (scsi_device_get(sdev))
+				return NULL;
+		atomic_or(ZFCP_STATUS_COMMON_ERP_INUSE,
+				&zfcp_sdev->status);
+		erp_action = &zfcp_sdev->erp_action;
+		WARN_ON_ONCE(erp_action->port != port);
+		WARN_ON_ONCE(erp_action->sdev != sdev);
+		if (!(atomic_read(&zfcp_sdev->status) &
+		      ZFCP_STATUS_COMMON_RUNNING))
+			act_status |= ZFCP_STATUS_ERP_CLOSE_ONLY;
+		break;
+
+	case ZFCP_ERP_ACTION_REOPEN_PORT:
+	case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED:
+		if (!get_device(&port->dev))
+			return NULL;
+		zfcp_erp_action_dismiss_port(port);
+		atomic_or(ZFCP_STATUS_COMMON_ERP_INUSE, &port->status);
+		erp_action = &port->erp_action;
+		WARN_ON_ONCE(erp_action->port != port);
+		WARN_ON_ONCE(erp_action->sdev != NULL);
+		if (!(atomic_read(&port->status) & ZFCP_STATUS_COMMON_RUNNING))
+			act_status |= ZFCP_STATUS_ERP_CLOSE_ONLY;
+		break;
+
+	case ZFCP_ERP_ACTION_REOPEN_ADAPTER:
+		kref_get(&adapter->ref);
+		zfcp_erp_action_dismiss_adapter(adapter);
+		atomic_or(ZFCP_STATUS_COMMON_ERP_INUSE, &adapter->status);
+		erp_action = &adapter->erp_action;
+		WARN_ON_ONCE(erp_action->port != NULL);
+		WARN_ON_ONCE(erp_action->sdev != NULL);
+		if (!(atomic_read(&adapter->status) &
+		      ZFCP_STATUS_COMMON_RUNNING))
+			act_status |= ZFCP_STATUS_ERP_CLOSE_ONLY;
+		break;
+
+	default:
+		return NULL;
+	}
+
+	WARN_ON_ONCE(erp_action->adapter != adapter);
+	memset(&erp_action->list, 0, sizeof(erp_action->list));
+	memset(&erp_action->timer, 0, sizeof(erp_action->timer));
+	erp_action->step = ZFCP_ERP_STEP_UNINITIALIZED;
+	erp_action->fsf_req_id = 0;
+	erp_action->action = need;
+	erp_action->status = act_status;
+
+	return erp_action;
+}
+
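+/* Enqueue the required ERP action on the ready list and wake the ERP
+ * thread. Must be called with adapter->erp_lock held. If no action is
+ * needed or none could be set up, only a trace record is written, with
+ * the outcome encoded in the "need" marker.
+ */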
+static void zfcp_erp_action_enqueue(int want, struct zfcp_adapter *adapter,
+				    struct zfcp_port *port,
+				    struct scsi_device *sdev,
+				    char *id, u32 act_status)
+{
+	int need;
+	struct zfcp_erp_action *act;
+
+	need = zfcp_erp_handle_failed(want, adapter, port, sdev);
+	if (!need) {
+		need = ZFCP_ERP_ACTION_FAILED; /* marker for trace */
+		goto out;
+	}
+
+	if (!adapter->erp_thread) {
+		need = ZFCP_ERP_ACTION_NONE; /* marker for trace */
+		goto out;
+	}
+
+	need = zfcp_erp_required_act(want, adapter, port, sdev);
+	if (!need)
+		goto out;
+
+	act = zfcp_erp_setup_act(need, act_status, adapter, port, sdev);
+	if (!act) {
+		need |= ZFCP_ERP_ACTION_NONE; /* marker for trace */
+		goto out;
+	}
+	atomic_or(ZFCP_STATUS_ADAPTER_ERP_PENDING, &adapter->status);
+	++adapter->erp_total_count;
+	list_add_tail(&act->list, &adapter->erp_ready_head);
+	wake_up(&adapter->erp_ready_wq);
+ out:
+	zfcp_dbf_rec_trig(id, adapter, port, sdev, want, need);
+}
+
+void zfcp_erp_port_forced_no_port_dbf(char *id, struct zfcp_adapter *adapter,
+				      u64 port_name, u32 port_id)
+{
+	unsigned long flags;
+	static /* don't waste stack */ struct zfcp_port tmpport;
+
+	write_lock_irqsave(&adapter->erp_lock, flags);
+	/* Stand-in zfcp port with fields just good enough for
+	 * zfcp_dbf_rec_trig() and zfcp_dbf_set_common().
+	 * Under lock because tmpport is static.
+	 */
+	atomic_set(&tmpport.status, -1); /* unknown */
+	tmpport.wwpn = port_name;
+	tmpport.d_id = port_id;
+	zfcp_dbf_rec_trig(id, adapter, &tmpport, NULL,
+			  ZFCP_ERP_ACTION_REOPEN_PORT_FORCED,
+			  ZFCP_ERP_ACTION_NONE);
+	write_unlock_irqrestore(&adapter->erp_lock, flags);
+}
+
+static void _zfcp_erp_adapter_reopen(struct zfcp_adapter *adapter,
+				    int clear_mask, char *id)
+{
+	zfcp_erp_adapter_block(adapter, clear_mask);
+	zfcp_scsi_schedule_rports_block(adapter);
+
+	zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_ADAPTER,
+				adapter, NULL, NULL, id, 0);
+}
+
+/**
+ * zfcp_erp_adapter_reopen - Reopen adapter.
+ * @adapter: Adapter to reopen.
+ * @clear: Status flags to clear.
+ * @id: Id for debug trace event.
+ */
+void zfcp_erp_adapter_reopen(struct zfcp_adapter *adapter, int clear, char *id)
+{
+	unsigned long flags;
+
+	zfcp_erp_adapter_block(adapter, clear);
+	zfcp_scsi_schedule_rports_block(adapter);
+
+	write_lock_irqsave(&adapter->erp_lock, flags);
+	zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_ADAPTER, adapter,
+				NULL, NULL, id, 0);
+	write_unlock_irqrestore(&adapter->erp_lock, flags);
+}
+
+/**
+ * zfcp_erp_adapter_shutdown - Shutdown adapter.
+ * @adapter: Adapter to shut down.
+ * @clear: Status flags to clear.
+ * @id: Id for debug trace event.
+ */
+void zfcp_erp_adapter_shutdown(struct zfcp_adapter *adapter, int clear,
+			       char *id)
+{
+	int flags = ZFCP_STATUS_COMMON_RUNNING | ZFCP_STATUS_COMMON_ERP_FAILED;
+	zfcp_erp_adapter_reopen(adapter, clear | flags, id);
+}
+
+/**
+ * zfcp_erp_port_shutdown - Shutdown port
+ * @port: Port to shut down.
+ * @clear: Status flags to clear.
+ * @id: Id for debug trace event.
+ */
+void zfcp_erp_port_shutdown(struct zfcp_port *port, int clear, char *id)
+{
+	int flags = ZFCP_STATUS_COMMON_RUNNING | ZFCP_STATUS_COMMON_ERP_FAILED;
+	zfcp_erp_port_reopen(port, clear | flags, id);
+}
+
+static void zfcp_erp_port_block(struct zfcp_port *port, int clear)
+{
+	zfcp_erp_clear_port_status(port,
+				    ZFCP_STATUS_COMMON_UNBLOCKED | clear);
+}
+
+static void _zfcp_erp_port_forced_reopen(struct zfcp_port *port, int clear,
+					 char *id)
+{
+	zfcp_erp_port_block(port, clear);
+	zfcp_scsi_schedule_rport_block(port);
+
+	zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_PORT_FORCED,
+				port->adapter, port, NULL, id, 0);
+}
+
+/**
+ * zfcp_erp_port_forced_reopen - Forced close of port and open again
+ * @port: Port to force close and to reopen.
+ * @clear: Status flags to clear.
+ * @id: Id for debug trace event.
+ */
+void zfcp_erp_port_forced_reopen(struct zfcp_port *port, int clear, char *id)
+{
+	unsigned long flags;
+	struct zfcp_adapter *adapter = port->adapter;
+
+	write_lock_irqsave(&adapter->erp_lock, flags);
+	_zfcp_erp_port_forced_reopen(port, clear, id);
+	write_unlock_irqrestore(&adapter->erp_lock, flags);
+}
+
+static void _zfcp_erp_port_reopen(struct zfcp_port *port, int clear, char *id)
+{
+	zfcp_erp_port_block(port, clear);
+	zfcp_scsi_schedule_rport_block(port);
+
+	zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_PORT,
+				port->adapter, port, NULL, id, 0);
+}
+
+/**
+ * zfcp_erp_port_reopen - trigger remote port recovery
+ * @port: port to recover
+ * @clear: flags in port status to be cleared
+ * @id: Id for debug trace event.
+ */
+void zfcp_erp_port_reopen(struct zfcp_port *port, int clear, char *id)
+{
+	unsigned long flags;
+	struct zfcp_adapter *adapter = port->adapter;
+
+	write_lock_irqsave(&adapter->erp_lock, flags);
+	_zfcp_erp_port_reopen(port, clear, id);
+	write_unlock_irqrestore(&adapter->erp_lock, flags);
+}
+
+static void zfcp_erp_lun_block(struct scsi_device *sdev, int clear_mask)
+{
+	zfcp_erp_clear_lun_status(sdev,
+				  ZFCP_STATUS_COMMON_UNBLOCKED | clear_mask);
+}
+
+static void _zfcp_erp_lun_reopen(struct scsi_device *sdev, int clear, char *id,
+				 u32 act_status)
+{
+	struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
+	struct zfcp_adapter *adapter = zfcp_sdev->port->adapter;
+
+	zfcp_erp_lun_block(sdev, clear);
+
+	zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_LUN, adapter,
+				zfcp_sdev->port, sdev, id, act_status);
+}
+
+/**
+ * zfcp_erp_lun_reopen - initiate reopen of a LUN
+ * @sdev: SCSI device / LUN to be reopened
+ * @clear: specifies flags in LUN status to be cleared
+ * @id: Id for debug trace event.
+ */
+void zfcp_erp_lun_reopen(struct scsi_device *sdev, int clear, char *id)
+{
+	unsigned long flags;
+	struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
+	struct zfcp_port *port = zfcp_sdev->port;
+	struct zfcp_adapter *adapter = port->adapter;
+
+	write_lock_irqsave(&adapter->erp_lock, flags);
+	_zfcp_erp_lun_reopen(sdev, clear, id, 0);
+	write_unlock_irqrestore(&adapter->erp_lock, flags);
+}
+
+/**
+ * zfcp_erp_lun_shutdown - Shutdown LUN
+ * @sdev: SCSI device / LUN to shut down.
+ * @clear: Status flags to clear.
+ * @id: Id for debug trace event.
+ */
+void zfcp_erp_lun_shutdown(struct scsi_device *sdev, int clear, char *id)
+{
+	int flags = ZFCP_STATUS_COMMON_RUNNING | ZFCP_STATUS_COMMON_ERP_FAILED;
+	zfcp_erp_lun_reopen(sdev, clear | flags, id);
+}
+
+/**
+ * zfcp_erp_lun_shutdown_wait - Shutdown LUN and wait for erp completion
+ * @sdev: SCSI device / LUN to shut down.
+ * @id: Id for debug trace event.
+ *
+ * Do not acquire a reference for the LUN when creating the ERP
+ * action. This is safe because this function waits for the ERP to
+ * complete first. It allows shutting down the LUN even when the SCSI
+ * device is in the SDEV_DEL state, where scsi_device_get would fail.
+ */
+void zfcp_erp_lun_shutdown_wait(struct scsi_device *sdev, char *id)
+{
+	unsigned long flags;
+	struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
+	struct zfcp_port *port = zfcp_sdev->port;
+	struct zfcp_adapter *adapter = port->adapter;
+	int clear = ZFCP_STATUS_COMMON_RUNNING | ZFCP_STATUS_COMMON_ERP_FAILED;
+
+	write_lock_irqsave(&adapter->erp_lock, flags);
+	_zfcp_erp_lun_reopen(sdev, clear, id, ZFCP_STATUS_ERP_NO_REF);
+	write_unlock_irqrestore(&adapter->erp_lock, flags);
+
+	zfcp_erp_wait(adapter);
+}
+
+static int zfcp_erp_status_change_set(unsigned long mask, atomic_t *status)
+{
+	return (atomic_read(status) ^ mask) & mask;
+}
+
+static void zfcp_erp_adapter_unblock(struct zfcp_adapter *adapter)
+{
+	if (zfcp_erp_status_change_set(ZFCP_STATUS_COMMON_UNBLOCKED,
+				       &adapter->status))
+		zfcp_dbf_rec_run("eraubl1", &adapter->erp_action);
+	atomic_or(ZFCP_STATUS_COMMON_UNBLOCKED, &adapter->status);
+}
+
+static void zfcp_erp_port_unblock(struct zfcp_port *port)
+{
+	if (zfcp_erp_status_change_set(ZFCP_STATUS_COMMON_UNBLOCKED,
+				       &port->status))
+		zfcp_dbf_rec_run("erpubl1", &port->erp_action);
+	atomic_or(ZFCP_STATUS_COMMON_UNBLOCKED, &port->status);
+}
+
+static void zfcp_erp_lun_unblock(struct scsi_device *sdev)
+{
+	struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
+
+	if (zfcp_erp_status_change_set(ZFCP_STATUS_COMMON_UNBLOCKED,
+				       &zfcp_sdev->status))
+		zfcp_dbf_rec_run("erlubl1", &sdev_to_zfcp(sdev)->erp_action);
+	atomic_or(ZFCP_STATUS_COMMON_UNBLOCKED, &zfcp_sdev->status);
+}
+
+static void zfcp_erp_action_to_running(struct zfcp_erp_action *erp_action)
+{
+	list_move(&erp_action->list, &erp_action->adapter->erp_running_head);
+	zfcp_dbf_rec_run("erator1", erp_action);
+}
+
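+/* Disassociate a dismissed or timed-out ERP action from its outstanding
+ * FSF request, if any, so that the late completion of that request can
+ * no longer modify this ERP action.
+ */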
+static void zfcp_erp_strategy_check_fsfreq(struct zfcp_erp_action *act)
+{
+	struct zfcp_adapter *adapter = act->adapter;
+	struct zfcp_fsf_req *req;
+
+	if (!act->fsf_req_id)
+		return;
+
+	spin_lock(&adapter->req_list->lock);
+	req = _zfcp_reqlist_find(adapter->req_list, act->fsf_req_id);
+	if (req && req->erp_action == act) {
+		if (act->status & (ZFCP_STATUS_ERP_DISMISSED |
+				   ZFCP_STATUS_ERP_TIMEDOUT)) {
+			req->status |= ZFCP_STATUS_FSFREQ_DISMISSED;
+			zfcp_dbf_rec_run("erscf_1", act);
+			req->erp_action = NULL;
+		}
+		if (act->status & ZFCP_STATUS_ERP_TIMEDOUT)
+			zfcp_dbf_rec_run("erscf_2", act);
+		if (req->status & ZFCP_STATUS_FSFREQ_DISMISSED)
+			act->fsf_req_id = 0;
+	} else
+		act->fsf_req_id = 0;
+	spin_unlock(&adapter->req_list->lock);
+}
+
+/**
+ * zfcp_erp_notify - Trigger ERP action.
+ * @erp_action: ERP action to continue.
+ * @set_mask: ERP action status flags to set.
+ */
+void zfcp_erp_notify(struct zfcp_erp_action *erp_action, unsigned long set_mask)
+{
+	struct zfcp_adapter *adapter = erp_action->adapter;
+	unsigned long flags;
+
+	write_lock_irqsave(&adapter->erp_lock, flags);
+	if (zfcp_erp_action_is_running(erp_action)) {
+		erp_action->status |= set_mask;
+		zfcp_erp_action_ready(erp_action);
+	}
+	write_unlock_irqrestore(&adapter->erp_lock, flags);
+}
+
+/**
+ * zfcp_erp_timeout_handler - Trigger ERP action from timed out ERP request
+ * @data: ERP action (from timer data)
+ */
+void zfcp_erp_timeout_handler(struct timer_list *t)
+{
+	struct zfcp_fsf_req *fsf_req = from_timer(fsf_req, t, timer);
+	struct zfcp_erp_action *act = fsf_req->erp_action;
+
+	zfcp_erp_notify(act, ZFCP_STATUS_ERP_TIMEDOUT);
+}
+
+static void zfcp_erp_memwait_handler(struct timer_list *t)
+{
+	struct zfcp_erp_action *act = from_timer(act, t, timer);
+
+	zfcp_erp_notify(act, 0);
+}
+
+static void zfcp_erp_strategy_memwait(struct zfcp_erp_action *erp_action)
+{
+	timer_setup(&erp_action->timer, zfcp_erp_memwait_handler, 0);
+	erp_action->timer.expires = jiffies + HZ;
+	add_timer(&erp_action->timer);
+}
+
+static void _zfcp_erp_port_reopen_all(struct zfcp_adapter *adapter,
+				      int clear, char *id)
+{
+	struct zfcp_port *port;
+
+	read_lock(&adapter->port_list_lock);
+	list_for_each_entry(port, &adapter->port_list, list)
+		_zfcp_erp_port_reopen(port, clear, id);
+	read_unlock(&adapter->port_list_lock);
+}
+
+static void _zfcp_erp_lun_reopen_all(struct zfcp_port *port, int clear,
+				     char *id)
+{
+	struct scsi_device *sdev;
+
+	spin_lock(port->adapter->scsi_host->host_lock);
+	__shost_for_each_device(sdev, port->adapter->scsi_host)
+		if (sdev_to_zfcp(sdev)->port == port)
+			_zfcp_erp_lun_reopen(sdev, clear, id, 0);
+	spin_unlock(port->adapter->scsi_host->host_lock);
+}
+
+static void zfcp_erp_strategy_followup_failed(struct zfcp_erp_action *act)
+{
+	switch (act->action) {
+	case ZFCP_ERP_ACTION_REOPEN_ADAPTER:
+		_zfcp_erp_adapter_reopen(act->adapter, 0, "ersff_1");
+		break;
+	case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED:
+		_zfcp_erp_port_forced_reopen(act->port, 0, "ersff_2");
+		break;
+	case ZFCP_ERP_ACTION_REOPEN_PORT:
+		_zfcp_erp_port_reopen(act->port, 0, "ersff_3");
+		break;
+	case ZFCP_ERP_ACTION_REOPEN_LUN:
+		_zfcp_erp_lun_reopen(act->sdev, 0, "ersff_4", 0);
+		break;
+	}
+}
+
+static void zfcp_erp_strategy_followup_success(struct zfcp_erp_action *act)
+{
+	switch (act->action) {
+	case ZFCP_ERP_ACTION_REOPEN_ADAPTER:
+		_zfcp_erp_port_reopen_all(act->adapter, 0, "ersfs_1");
+		break;
+	case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED:
+		_zfcp_erp_port_reopen(act->port, 0, "ersfs_2");
+		break;
+	case ZFCP_ERP_ACTION_REOPEN_PORT:
+		_zfcp_erp_lun_reopen_all(act->port, 0, "ersfs_3");
+		break;
+	}
+}
+
+static void zfcp_erp_wakeup(struct zfcp_adapter *adapter)
+{
+	unsigned long flags;
+
+	read_lock_irqsave(&adapter->erp_lock, flags);
+	if (list_empty(&adapter->erp_ready_head) &&
+	    list_empty(&adapter->erp_running_head)) {
+		atomic_andnot(ZFCP_STATUS_ADAPTER_ERP_PENDING,
+			      &adapter->status);
+		wake_up(&adapter->erp_done_wqh);
+	}
+	read_unlock_irqrestore(&adapter->erp_lock, flags);
+}
+
+static void zfcp_erp_enqueue_ptp_port(struct zfcp_adapter *adapter)
+{
+	struct zfcp_port *port;
+
+	port = zfcp_port_enqueue(adapter, adapter->peer_wwpn, 0,
+				 adapter->peer_d_id);
+	if (IS_ERR(port)) /* error or port already attached */
+		return;
+	_zfcp_erp_port_reopen(port, 0, "ereptp1");
+}
+
+static int zfcp_erp_adapter_strat_fsf_xconf(struct zfcp_erp_action *erp_action)
+{
+	int retries;
+	int sleep = 1;
+	struct zfcp_adapter *adapter = erp_action->adapter;
+
+	atomic_andnot(ZFCP_STATUS_ADAPTER_XCONFIG_OK, &adapter->status);
+
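+	/* issue exchange config data up to 7 times; while the channel
+	 * still signals HOST_CON_INIT, back off exponentially
+	 * (1, 2, 4, ... seconds) between attempts
+	 */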
+	for (retries = 7; retries; retries--) {
+		atomic_andnot(ZFCP_STATUS_ADAPTER_HOST_CON_INIT,
+				  &adapter->status);
+		write_lock_irq(&adapter->erp_lock);
+		zfcp_erp_action_to_running(erp_action);
+		write_unlock_irq(&adapter->erp_lock);
+		if (zfcp_fsf_exchange_config_data(erp_action)) {
+			atomic_andnot(ZFCP_STATUS_ADAPTER_HOST_CON_INIT,
+					  &adapter->status);
+			return ZFCP_ERP_FAILED;
+		}
+
+		wait_event(adapter->erp_ready_wq,
+			   !list_empty(&adapter->erp_ready_head));
+		if (erp_action->status & ZFCP_STATUS_ERP_TIMEDOUT)
+			break;
+
+		if (!(atomic_read(&adapter->status) &
+		      ZFCP_STATUS_ADAPTER_HOST_CON_INIT))
+			break;
+
+		ssleep(sleep);
+		sleep *= 2;
+	}
+
+	atomic_andnot(ZFCP_STATUS_ADAPTER_HOST_CON_INIT,
+			  &adapter->status);
+
+	if (!(atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_XCONFIG_OK))
+		return ZFCP_ERP_FAILED;
+
+	if (fc_host_port_type(adapter->scsi_host) == FC_PORTTYPE_PTP)
+		zfcp_erp_enqueue_ptp_port(adapter);
+
+	return ZFCP_ERP_SUCCEEDED;
+}
+
+static int zfcp_erp_adapter_strategy_open_fsf_xport(struct zfcp_erp_action *act)
+{
+	int ret;
+	struct zfcp_adapter *adapter = act->adapter;
+
+	write_lock_irq(&adapter->erp_lock);
+	zfcp_erp_action_to_running(act);
+	write_unlock_irq(&adapter->erp_lock);
+
+	ret = zfcp_fsf_exchange_port_data(act);
+	if (ret == -EOPNOTSUPP)
+		return ZFCP_ERP_SUCCEEDED;
+	if (ret)
+		return ZFCP_ERP_FAILED;
+
+	zfcp_dbf_rec_run("erasox1", act);
+	wait_event(adapter->erp_ready_wq,
+		   !list_empty(&adapter->erp_ready_head));
+	zfcp_dbf_rec_run("erasox2", act);
+	if (act->status & ZFCP_STATUS_ERP_TIMEDOUT)
+		return ZFCP_ERP_FAILED;
+
+	return ZFCP_ERP_SUCCEEDED;
+}
+
+static int zfcp_erp_adapter_strategy_open_fsf(struct zfcp_erp_action *act)
+{
+	if (zfcp_erp_adapter_strat_fsf_xconf(act) == ZFCP_ERP_FAILED)
+		return ZFCP_ERP_FAILED;
+
+	if (zfcp_erp_adapter_strategy_open_fsf_xport(act) == ZFCP_ERP_FAILED)
+		return ZFCP_ERP_FAILED;
+
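+	/* resize both status read mempools to the buffer count the
+	 * adapter reported during exchange config
+	 */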
+	if (mempool_resize(act->adapter->pool.sr_data,
+			   act->adapter->stat_read_buf_num))
+		return ZFCP_ERP_FAILED;
+
+	if (mempool_resize(act->adapter->pool.status_read_req,
+			   act->adapter->stat_read_buf_num))
+		return ZFCP_ERP_FAILED;
+
+	atomic_set(&act->adapter->stat_miss, act->adapter->stat_read_buf_num);
+	if (zfcp_status_read_refill(act->adapter))
+		return ZFCP_ERP_FAILED;
+
+	return ZFCP_ERP_SUCCEEDED;
+}
+
+static void zfcp_erp_adapter_strategy_close(struct zfcp_erp_action *act)
+{
+	struct zfcp_adapter *adapter = act->adapter;
+
+	/* close queues to ensure that buffers are not accessed by adapter */
+	zfcp_qdio_close(adapter->qdio);
+	zfcp_fsf_req_dismiss_all(adapter);
+	adapter->fsf_req_seq_no = 0;
+	zfcp_fc_wka_ports_force_offline(adapter->gs);
+	/* all ports and LUNs are closed */
+	zfcp_erp_clear_adapter_status(adapter, ZFCP_STATUS_COMMON_OPEN);
+
+	atomic_andnot(ZFCP_STATUS_ADAPTER_XCONFIG_OK |
+			  ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED, &adapter->status);
+}
+
+static int zfcp_erp_adapter_strategy_open(struct zfcp_erp_action *act)
+{
+	struct zfcp_adapter *adapter = act->adapter;
+
+	if (zfcp_qdio_open(adapter->qdio)) {
+		atomic_andnot(ZFCP_STATUS_ADAPTER_XCONFIG_OK |
+				  ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED,
+				  &adapter->status);
+		return ZFCP_ERP_FAILED;
+	}
+
+	if (zfcp_erp_adapter_strategy_open_fsf(act)) {
+		zfcp_erp_adapter_strategy_close(act);
+		return ZFCP_ERP_FAILED;
+	}
+
+	atomic_or(ZFCP_STATUS_COMMON_OPEN, &adapter->status);
+
+	return ZFCP_ERP_SUCCEEDED;
+}
+
+static int zfcp_erp_adapter_strategy(struct zfcp_erp_action *act)
+{
+	struct zfcp_adapter *adapter = act->adapter;
+
+	if (atomic_read(&adapter->status) & ZFCP_STATUS_COMMON_OPEN) {
+		zfcp_erp_adapter_strategy_close(act);
+		if (act->status & ZFCP_STATUS_ERP_CLOSE_ONLY)
+			return ZFCP_ERP_EXIT;
+	}
+
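+	/* on failure, throttle the follow-up adapter reopen */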
+	if (zfcp_erp_adapter_strategy_open(act)) {
+		ssleep(8);
+		return ZFCP_ERP_FAILED;
+	}
+
+	return ZFCP_ERP_SUCCEEDED;
+}
+
+static int zfcp_erp_port_forced_strategy_close(struct zfcp_erp_action *act)
+{
+	int retval;
+
+	retval = zfcp_fsf_close_physical_port(act);
+	if (retval == -ENOMEM)
+		return ZFCP_ERP_NOMEM;
+	act->step = ZFCP_ERP_STEP_PHYS_PORT_CLOSING;
+	if (retval)
+		return ZFCP_ERP_FAILED;
+
+	return ZFCP_ERP_CONTINUES;
+}
+
+static int zfcp_erp_port_forced_strategy(struct zfcp_erp_action *erp_action)
+{
+	struct zfcp_port *port = erp_action->port;
+	int status = atomic_read(&port->status);
+
+	switch (erp_action->step) {
+	case ZFCP_ERP_STEP_UNINITIALIZED:
+		if ((status & ZFCP_STATUS_PORT_PHYS_OPEN) &&
+		    (status & ZFCP_STATUS_COMMON_OPEN))
+			return zfcp_erp_port_forced_strategy_close(erp_action);
+		else
+			return ZFCP_ERP_FAILED;
+
+	case ZFCP_ERP_STEP_PHYS_PORT_CLOSING:
+		if (!(status & ZFCP_STATUS_PORT_PHYS_OPEN))
+			return ZFCP_ERP_SUCCEEDED;
+	}
+	return ZFCP_ERP_FAILED;
+}
+
+static int zfcp_erp_port_strategy_close(struct zfcp_erp_action *erp_action)
+{
+	int retval;
+
+	retval = zfcp_fsf_close_port(erp_action);
+	if (retval == -ENOMEM)
+		return ZFCP_ERP_NOMEM;
+	erp_action->step = ZFCP_ERP_STEP_PORT_CLOSING;
+	if (retval)
+		return ZFCP_ERP_FAILED;
+	return ZFCP_ERP_CONTINUES;
+}
+
+static int zfcp_erp_port_strategy_open_port(struct zfcp_erp_action *erp_action)
+{
+	int retval;
+
+	retval = zfcp_fsf_open_port(erp_action);
+	if (retval == -ENOMEM)
+		return ZFCP_ERP_NOMEM;
+	erp_action->step = ZFCP_ERP_STEP_PORT_OPENING;
+	if (retval)
+		return ZFCP_ERP_FAILED;
+	return ZFCP_ERP_CONTINUES;
+}
+
+static int zfcp_erp_open_ptp_port(struct zfcp_erp_action *act)
+{
+	struct zfcp_adapter *adapter = act->adapter;
+	struct zfcp_port *port = act->port;
+
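+	/* in a point-to-point topology only the peer remote port exists */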
+	if (port->wwpn != adapter->peer_wwpn) {
+		zfcp_erp_set_port_status(port, ZFCP_STATUS_COMMON_ERP_FAILED);
+		return ZFCP_ERP_FAILED;
+	}
+	port->d_id = adapter->peer_d_id;
+	return zfcp_erp_port_strategy_open_port(act);
+}
+
+static int zfcp_erp_port_strategy_open_common(struct zfcp_erp_action *act)
+{
+	struct zfcp_adapter *adapter = act->adapter;
+	struct zfcp_port *port = act->port;
+	int p_status = atomic_read(&port->status);
+
+	switch (act->step) {
+	case ZFCP_ERP_STEP_UNINITIALIZED:
+	case ZFCP_ERP_STEP_PHYS_PORT_CLOSING:
+	case ZFCP_ERP_STEP_PORT_CLOSING:
+		if (fc_host_port_type(adapter->scsi_host) == FC_PORTTYPE_PTP)
+			return zfcp_erp_open_ptp_port(act);
+		if (!port->d_id) {
+			zfcp_fc_trigger_did_lookup(port);
+			return ZFCP_ERP_EXIT;
+		}
+		return zfcp_erp_port_strategy_open_port(act);
+
+	case ZFCP_ERP_STEP_PORT_OPENING:
+		/* D_ID might have changed during open */
+		if (p_status & ZFCP_STATUS_COMMON_OPEN) {
+			if (!port->d_id) {
+				zfcp_fc_trigger_did_lookup(port);
+				return ZFCP_ERP_EXIT;
+			}
+			return ZFCP_ERP_SUCCEEDED;
+		}
+		if (port->d_id && !(p_status & ZFCP_STATUS_COMMON_NOESC)) {
+			port->d_id = 0;
+			return ZFCP_ERP_FAILED;
+		}
+		/* fall through otherwise */
+	}
+	return ZFCP_ERP_FAILED;
+}
+
+static int zfcp_erp_port_strategy(struct zfcp_erp_action *erp_action)
+{
+	struct zfcp_port *port = erp_action->port;
+	int p_status = atomic_read(&port->status);
+
+	if ((p_status & ZFCP_STATUS_COMMON_NOESC) &&
+	    !(p_status & ZFCP_STATUS_COMMON_OPEN))
+		goto close_init_done;
+
+	switch (erp_action->step) {
+	case ZFCP_ERP_STEP_UNINITIALIZED:
+		if (p_status & ZFCP_STATUS_COMMON_OPEN)
+			return zfcp_erp_port_strategy_close(erp_action);
+		break;
+
+	case ZFCP_ERP_STEP_PORT_CLOSING:
+		if (p_status & ZFCP_STATUS_COMMON_OPEN)
+			return ZFCP_ERP_FAILED;
+		break;
+	}
+
+close_init_done:
+	if (erp_action->status & ZFCP_STATUS_ERP_CLOSE_ONLY)
+		return ZFCP_ERP_EXIT;
+
+	return zfcp_erp_port_strategy_open_common(erp_action);
+}
+
+static void zfcp_erp_lun_strategy_clearstati(struct scsi_device *sdev)
+{
+	struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
+
+	atomic_andnot(ZFCP_STATUS_COMMON_ACCESS_DENIED,
+			  &zfcp_sdev->status);
+}
+
+static int zfcp_erp_lun_strategy_close(struct zfcp_erp_action *erp_action)
+{
+	int retval = zfcp_fsf_close_lun(erp_action);
+	if (retval == -ENOMEM)
+		return ZFCP_ERP_NOMEM;
+	erp_action->step = ZFCP_ERP_STEP_LUN_CLOSING;
+	if (retval)
+		return ZFCP_ERP_FAILED;
+	return ZFCP_ERP_CONTINUES;
+}
+
+static int zfcp_erp_lun_strategy_open(struct zfcp_erp_action *erp_action)
+{
+	int retval = zfcp_fsf_open_lun(erp_action);
+	if (retval == -ENOMEM)
+		return ZFCP_ERP_NOMEM;
+	erp_action->step = ZFCP_ERP_STEP_LUN_OPENING;
+	if (retval)
+		return ZFCP_ERP_FAILED;
+	return ZFCP_ERP_CONTINUES;
+}
+
+static int zfcp_erp_lun_strategy(struct zfcp_erp_action *erp_action)
+{
+	struct scsi_device *sdev = erp_action->sdev;
+	struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
+
+	switch (erp_action->step) {
+	case ZFCP_ERP_STEP_UNINITIALIZED:
+		zfcp_erp_lun_strategy_clearstati(sdev);
+		if (atomic_read(&zfcp_sdev->status) & ZFCP_STATUS_COMMON_OPEN)
+			return zfcp_erp_lun_strategy_close(erp_action);
+		/* already closed, fall through */
+	case ZFCP_ERP_STEP_LUN_CLOSING:
+		if (atomic_read(&zfcp_sdev->status) & ZFCP_STATUS_COMMON_OPEN)
+			return ZFCP_ERP_FAILED;
+		if (erp_action->status & ZFCP_STATUS_ERP_CLOSE_ONLY)
+			return ZFCP_ERP_EXIT;
+		return zfcp_erp_lun_strategy_open(erp_action);
+
+	case ZFCP_ERP_STEP_LUN_OPENING:
+		if (atomic_read(&zfcp_sdev->status) & ZFCP_STATUS_COMMON_OPEN)
+			return ZFCP_ERP_SUCCEEDED;
+	}
+	return ZFCP_ERP_FAILED;
+}
+
+static int zfcp_erp_strategy_check_lun(struct scsi_device *sdev, int result)
+{
+	struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
+
+	switch (result) {
+	case ZFCP_ERP_SUCCEEDED:
+		atomic_set(&zfcp_sdev->erp_counter, 0);
+		zfcp_erp_lun_unblock(sdev);
+		break;
+	case ZFCP_ERP_FAILED:
+		atomic_inc(&zfcp_sdev->erp_counter);
+		if (atomic_read(&zfcp_sdev->erp_counter) > ZFCP_MAX_ERPS) {
+			dev_err(&zfcp_sdev->port->adapter->ccw_device->dev,
+				"ERP failed for LUN 0x%016Lx on "
+				"port 0x%016Lx\n",
+				(unsigned long long)zfcp_scsi_dev_lun(sdev),
+				(unsigned long long)zfcp_sdev->port->wwpn);
+			zfcp_erp_set_lun_status(sdev,
+						ZFCP_STATUS_COMMON_ERP_FAILED);
+		}
+		break;
+	}
+
+	if (atomic_read(&zfcp_sdev->status) & ZFCP_STATUS_COMMON_ERP_FAILED) {
+		zfcp_erp_lun_block(sdev, 0);
+		result = ZFCP_ERP_EXIT;
+	}
+	return result;
+}
+
+static int zfcp_erp_strategy_check_port(struct zfcp_port *port, int result)
+{
+	switch (result) {
+	case ZFCP_ERP_SUCCEEDED:
+		atomic_set(&port->erp_counter, 0);
+		zfcp_erp_port_unblock(port);
+		break;
+
+	case ZFCP_ERP_FAILED:
+		if (atomic_read(&port->status) & ZFCP_STATUS_COMMON_NOESC) {
+			zfcp_erp_port_block(port, 0);
+			result = ZFCP_ERP_EXIT;
+		}
+		atomic_inc(&port->erp_counter);
+		if (atomic_read(&port->erp_counter) > ZFCP_MAX_ERPS) {
+			dev_err(&port->adapter->ccw_device->dev,
+				"ERP failed for remote port 0x%016Lx\n",
+				(unsigned long long)port->wwpn);
+			zfcp_erp_set_port_status(port,
+					 ZFCP_STATUS_COMMON_ERP_FAILED);
+		}
+		break;
+	}
+
+	if (atomic_read(&port->status) & ZFCP_STATUS_COMMON_ERP_FAILED) {
+		zfcp_erp_port_block(port, 0);
+		result = ZFCP_ERP_EXIT;
+	}
+	return result;
+}
+
+static int zfcp_erp_strategy_check_adapter(struct zfcp_adapter *adapter,
+					   int result)
+{
+	switch (result) {
+	case ZFCP_ERP_SUCCEEDED:
+		atomic_set(&adapter->erp_counter, 0);
+		zfcp_erp_adapter_unblock(adapter);
+		break;
+
+	case ZFCP_ERP_FAILED:
+		atomic_inc(&adapter->erp_counter);
+		if (atomic_read(&adapter->erp_counter) > ZFCP_MAX_ERPS) {
+			dev_err(&adapter->ccw_device->dev,
+				"ERP cannot recover an error "
+				"on the FCP device\n");
+			zfcp_erp_set_adapter_status(adapter,
+					    ZFCP_STATUS_COMMON_ERP_FAILED);
+		}
+		break;
+	}
+
+	if (atomic_read(&adapter->status) & ZFCP_STATUS_COMMON_ERP_FAILED) {
+		zfcp_erp_adapter_block(adapter, 0);
+		result = ZFCP_ERP_EXIT;
+	}
+	return result;
+}
+
+static int zfcp_erp_strategy_check_target(struct zfcp_erp_action *erp_action,
+					  int result)
+{
+	struct zfcp_adapter *adapter = erp_action->adapter;
+	struct zfcp_port *port = erp_action->port;
+	struct scsi_device *sdev = erp_action->sdev;
+
+	switch (erp_action->action) {
+
+	case ZFCP_ERP_ACTION_REOPEN_LUN:
+		result = zfcp_erp_strategy_check_lun(sdev, result);
+		break;
+
+	case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED:
+	case ZFCP_ERP_ACTION_REOPEN_PORT:
+		result = zfcp_erp_strategy_check_port(port, result);
+		break;
+
+	case ZFCP_ERP_ACTION_REOPEN_ADAPTER:
+		result = zfcp_erp_strategy_check_adapter(adapter, result);
+		break;
+	}
+	return result;
+}
+
+static int zfcp_erp_strat_change_det(atomic_t *target_status, u32 erp_status)
+{
+	int status = atomic_read(target_status);
+
+	if ((status & ZFCP_STATUS_COMMON_RUNNING) &&
+	    (erp_status & ZFCP_STATUS_ERP_CLOSE_ONLY))
+		return 1; /* take it online */
+
+	if (!(status & ZFCP_STATUS_COMMON_RUNNING) &&
+	    !(erp_status & ZFCP_STATUS_ERP_CLOSE_ONLY))
+		return 1; /* take it offline */
+
+	return 0;
+}
+
+static int zfcp_erp_strategy_statechange(struct zfcp_erp_action *act, int ret)
+{
+	int action = act->action;
+	struct zfcp_adapter *adapter = act->adapter;
+	struct zfcp_port *port = act->port;
+	struct scsi_device *sdev = act->sdev;
+	struct zfcp_scsi_dev *zfcp_sdev;
+	u32 erp_status = act->status;
+
+	switch (action) {
+	case ZFCP_ERP_ACTION_REOPEN_ADAPTER:
+		if (zfcp_erp_strat_change_det(&adapter->status, erp_status)) {
+			_zfcp_erp_adapter_reopen(adapter,
+						 ZFCP_STATUS_COMMON_ERP_FAILED,
+						 "ersscg1");
+			return ZFCP_ERP_EXIT;
+		}
+		break;
+
+	case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED:
+	case ZFCP_ERP_ACTION_REOPEN_PORT:
+		if (zfcp_erp_strat_change_det(&port->status, erp_status)) {
+			_zfcp_erp_port_reopen(port,
+					      ZFCP_STATUS_COMMON_ERP_FAILED,
+					      "ersscg2");
+			return ZFCP_ERP_EXIT;
+		}
+		break;
+
+	case ZFCP_ERP_ACTION_REOPEN_LUN:
+		zfcp_sdev = sdev_to_zfcp(sdev);
+		if (zfcp_erp_strat_change_det(&zfcp_sdev->status, erp_status)) {
+			_zfcp_erp_lun_reopen(sdev,
+					     ZFCP_STATUS_COMMON_ERP_FAILED,
+					     "ersscg3", 0);
+			return ZFCP_ERP_EXIT;
+		}
+		break;
+	}
+	return ret;
+}
+
+static void zfcp_erp_action_dequeue(struct zfcp_erp_action *erp_action)
+{
+	struct zfcp_adapter *adapter = erp_action->adapter;
+	struct zfcp_scsi_dev *zfcp_sdev;
+
+	adapter->erp_total_count--;
+	if (erp_action->status & ZFCP_STATUS_ERP_LOWMEM) {
+		adapter->erp_low_mem_count--;
+		erp_action->status &= ~ZFCP_STATUS_ERP_LOWMEM;
+	}
+
+	list_del(&erp_action->list);
+	zfcp_dbf_rec_run("eractd1", erp_action);
+
+	switch (erp_action->action) {
+	case ZFCP_ERP_ACTION_REOPEN_LUN:
+		zfcp_sdev = sdev_to_zfcp(erp_action->sdev);
+		atomic_andnot(ZFCP_STATUS_COMMON_ERP_INUSE,
+				  &zfcp_sdev->status);
+		break;
+
+	case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED:
+	case ZFCP_ERP_ACTION_REOPEN_PORT:
+		atomic_andnot(ZFCP_STATUS_COMMON_ERP_INUSE,
+				  &erp_action->port->status);
+		break;
+
+	case ZFCP_ERP_ACTION_REOPEN_ADAPTER:
+		atomic_andnot(ZFCP_STATUS_COMMON_ERP_INUSE,
+				  &erp_action->adapter->status);
+		break;
+	}
+}
+
+/**
+ * zfcp_erp_try_rport_unblock - unblock rport if no more/new recovery
+ * @port: zfcp_port whose fc_rport we should try to unblock
+ */
+static void zfcp_erp_try_rport_unblock(struct zfcp_port *port)
+{
+	unsigned long flags;
+	struct zfcp_adapter *adapter = port->adapter;
+	int port_status;
+	struct Scsi_Host *shost = adapter->scsi_host;
+	struct scsi_device *sdev;
+
+	write_lock_irqsave(&adapter->erp_lock, flags);
+	port_status = atomic_read(&port->status);
+	if ((port_status & ZFCP_STATUS_COMMON_UNBLOCKED)    == 0 ||
+	    (port_status & (ZFCP_STATUS_COMMON_ERP_INUSE |
+			    ZFCP_STATUS_COMMON_ERP_FAILED)) != 0) {
+		/* new ERP of severity >= port triggered elsewhere meanwhile or
+		 * local link down (adapter erp_failed but not clear unblock)
+		 */
+		zfcp_dbf_rec_run_lvl(4, "ertru_p", &port->erp_action);
+		write_unlock_irqrestore(&adapter->erp_lock, flags);
+		return;
+	}
+	spin_lock(shost->host_lock);
+	__shost_for_each_device(sdev, shost) {
+		struct zfcp_scsi_dev *zsdev = sdev_to_zfcp(sdev);
+		int lun_status;
+
+		if (zsdev->port != port)
+			continue;
+		/* LUN under port of interest */
+		lun_status = atomic_read(&zsdev->status);
+		if ((lun_status & ZFCP_STATUS_COMMON_ERP_FAILED) != 0)
+			continue; /* unblock rport despite failed LUNs */
+		/* LUN recovery not given up yet [maybe follow-up pending] */
+		if ((lun_status & ZFCP_STATUS_COMMON_UNBLOCKED) == 0 ||
+		    (lun_status & ZFCP_STATUS_COMMON_ERP_INUSE) != 0) {
+			/* LUN blocked:
+			 * not yet unblocked [LUN recovery pending]
+			 * or meanwhile blocked [new LUN recovery triggered]
+			 */
+			zfcp_dbf_rec_run_lvl(4, "ertru_l", &zsdev->erp_action);
+			spin_unlock(shost->host_lock);
+			write_unlock_irqrestore(&adapter->erp_lock, flags);
+			return;
+		}
+	}
+	/* now port has no child or all children have completed recovery,
+	 * and no ERP of severity >= port was meanwhile triggered elsewhere
+	 */
+	zfcp_scsi_schedule_rport_register(port);
+	spin_unlock(shost->host_lock);
+	write_unlock_irqrestore(&adapter->erp_lock, flags);
+}
+
+static void zfcp_erp_action_cleanup(struct zfcp_erp_action *act, int result)
+{
+	struct zfcp_adapter *adapter = act->adapter;
+	struct zfcp_port *port = act->port;
+	struct scsi_device *sdev = act->sdev;
+
+	switch (act->action) {
+	case ZFCP_ERP_ACTION_REOPEN_LUN:
+		if (!(act->status & ZFCP_STATUS_ERP_NO_REF))
+			scsi_device_put(sdev);
+		zfcp_erp_try_rport_unblock(port);
+		break;
+
+	case ZFCP_ERP_ACTION_REOPEN_PORT:
+		/* This switch case might also happen after a forced reopen
+		 * was successfully done and thus overwritten with a new
+		 * non-forced reopen at `ersfs_2'. In this case, we must not
+		 * do the clean-up of the non-forced version.
+		 */
+		if (act->step != ZFCP_ERP_STEP_UNINITIALIZED)
+			if (result == ZFCP_ERP_SUCCEEDED)
+				zfcp_erp_try_rport_unblock(port);
+		/* fall through */
+	case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED:
+		put_device(&port->dev);
+		break;
+
+	case ZFCP_ERP_ACTION_REOPEN_ADAPTER:
+		if (result == ZFCP_ERP_SUCCEEDED) {
+			register_service_level(&adapter->service_level);
+			zfcp_fc_conditional_port_scan(adapter);
+			queue_work(adapter->work_queue, &adapter->ns_up_work);
+		} else
+			unregister_service_level(&adapter->service_level);
+
+		kref_put(&adapter->ref, zfcp_adapter_release);
+		break;
+	}
+}
+
+static int zfcp_erp_strategy_do_action(struct zfcp_erp_action *erp_action)
+{
+	switch (erp_action->action) {
+	case ZFCP_ERP_ACTION_REOPEN_ADAPTER:
+		return zfcp_erp_adapter_strategy(erp_action);
+	case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED:
+		return zfcp_erp_port_forced_strategy(erp_action);
+	case ZFCP_ERP_ACTION_REOPEN_PORT:
+		return zfcp_erp_port_strategy(erp_action);
+	case ZFCP_ERP_ACTION_REOPEN_LUN:
+		return zfcp_erp_lun_strategy(erp_action);
+	}
+	return ZFCP_ERP_FAILED;
+}
+
+static int zfcp_erp_strategy(struct zfcp_erp_action *erp_action)
+{
+	int retval;
+	unsigned long flags;
+	struct zfcp_adapter *adapter = erp_action->adapter;
+
+	kref_get(&adapter->ref);
+
+	write_lock_irqsave(&adapter->erp_lock, flags);
+	zfcp_erp_strategy_check_fsfreq(erp_action);
+
+	if (erp_action->status & ZFCP_STATUS_ERP_DISMISSED) {
+		zfcp_erp_action_dequeue(erp_action);
+		retval = ZFCP_ERP_DISMISSED;
+		goto unlock;
+	}
+
+	if (erp_action->status & ZFCP_STATUS_ERP_TIMEDOUT) {
+		retval = ZFCP_ERP_FAILED;
+		goto check_target;
+	}
+
+	zfcp_erp_action_to_running(erp_action);
+
+	/* no lock to allow for blocking operations */
+	write_unlock_irqrestore(&adapter->erp_lock, flags);
+	retval = zfcp_erp_strategy_do_action(erp_action);
+	write_lock_irqsave(&adapter->erp_lock, flags);
+
+	if (erp_action->status & ZFCP_STATUS_ERP_DISMISSED)
+		retval = ZFCP_ERP_CONTINUES;
+
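+	/* low memory handling: if every pending ERP action is short on
+	 * memory, escalate to an adapter reopen; otherwise let this
+	 * action wait for memory via zfcp_erp_strategy_memwait()
+	 */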
+	switch (retval) {
+	case ZFCP_ERP_NOMEM:
+		if (!(erp_action->status & ZFCP_STATUS_ERP_LOWMEM)) {
+			++adapter->erp_low_mem_count;
+			erp_action->status |= ZFCP_STATUS_ERP_LOWMEM;
+		}
+		if (adapter->erp_total_count == adapter->erp_low_mem_count)
+			_zfcp_erp_adapter_reopen(adapter, 0, "erstgy1");
+		else {
+			zfcp_erp_strategy_memwait(erp_action);
+			retval = ZFCP_ERP_CONTINUES;
+		}
+		goto unlock;
+
+	case ZFCP_ERP_CONTINUES:
+		if (erp_action->status & ZFCP_STATUS_ERP_LOWMEM) {
+			--adapter->erp_low_mem_count;
+			erp_action->status &= ~ZFCP_STATUS_ERP_LOWMEM;
+		}
+		goto unlock;
+	}
+
+check_target:
+	retval = zfcp_erp_strategy_check_target(erp_action, retval);
+	zfcp_erp_action_dequeue(erp_action);
+	retval = zfcp_erp_strategy_statechange(erp_action, retval);
+	if (retval == ZFCP_ERP_EXIT)
+		goto unlock;
+	if (retval == ZFCP_ERP_SUCCEEDED)
+		zfcp_erp_strategy_followup_success(erp_action);
+	if (retval == ZFCP_ERP_FAILED)
+		zfcp_erp_strategy_followup_failed(erp_action);
+
+ unlock:
+	write_unlock_irqrestore(&adapter->erp_lock, flags);
+
+	if (retval != ZFCP_ERP_CONTINUES)
+		zfcp_erp_action_cleanup(erp_action, retval);
+
+	kref_put(&adapter->ref, zfcp_adapter_release);
+	return retval;
+}
+
+static int zfcp_erp_thread(void *data)
+{
+	struct zfcp_adapter *adapter = (struct zfcp_adapter *) data;
+	struct list_head *next;
+	struct zfcp_erp_action *act;
+	unsigned long flags;
+
+	for (;;) {
+		wait_event_interruptible(adapter->erp_ready_wq,
+			   !list_empty(&adapter->erp_ready_head) ||
+			   kthread_should_stop());
+
+		if (kthread_should_stop())
+			break;
+
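+		/* peek at the ready list head under the lock; the action
+		 * itself is moved or dequeued by zfcp_erp_strategy()
+		 */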
+		write_lock_irqsave(&adapter->erp_lock, flags);
+		next = adapter->erp_ready_head.next;
+		write_unlock_irqrestore(&adapter->erp_lock, flags);
+
+		if (next != &adapter->erp_ready_head) {
+			act = list_entry(next, struct zfcp_erp_action, list);
+
+			/* more recovery is pending after a dismissal,
+			 * so do not wake up waiters yet
+			 */
+			if (zfcp_erp_strategy(act) != ZFCP_ERP_DISMISSED)
+				zfcp_erp_wakeup(adapter);
+		}
+	}
+
+	return 0;
+}
+
+/**
+ * zfcp_erp_thread_setup - Start ERP thread for adapter
+ * @adapter: Adapter to start the ERP thread for
+ *
+ * Return: 0 on success, or error code from kthread_run()
+ */
+int zfcp_erp_thread_setup(struct zfcp_adapter *adapter)
+{
+	struct task_struct *thread;
+
+	thread = kthread_run(zfcp_erp_thread, adapter, "zfcperp%s",
+			     dev_name(&adapter->ccw_device->dev));
+	if (IS_ERR(thread)) {
+		dev_err(&adapter->ccw_device->dev,
+			"Creating an ERP thread for the FCP device failed.\n");
+		return PTR_ERR(thread);
+	}
+
+	adapter->erp_thread = thread;
+	return 0;
+}
+
+/**
+ * zfcp_erp_thread_kill - Stop ERP thread.
+ * @adapter: Adapter where the ERP thread should be stopped.
+ *
+ * The caller of this routine ensures that the specified adapter has
+ * been shut down and that this operation has been completed. Thus,
+ * there are no pending erp_actions which would need to be handled
+ * here.
+ */
+void zfcp_erp_thread_kill(struct zfcp_adapter *adapter)
+{
+	kthread_stop(adapter->erp_thread);
+	adapter->erp_thread = NULL;
+	WARN_ON(!list_empty(&adapter->erp_ready_head));
+	WARN_ON(!list_empty(&adapter->erp_running_head));
+}
+
+/**
+ * zfcp_erp_wait - wait for completion of error recovery on an adapter
+ * @adapter: adapter for which to wait for completion of its error recovery
+ */
+void zfcp_erp_wait(struct zfcp_adapter *adapter)
+{
+	wait_event(adapter->erp_done_wqh,
+		   !(atomic_read(&adapter->status) &
+			ZFCP_STATUS_ADAPTER_ERP_PENDING));
+}
+
+/**
+ * zfcp_erp_set_adapter_status - set adapter status bits
+ * @adapter: adapter to change the status
+ * @mask: status bits to change
+ *
+ * Changes in common status bits are propagated to attached ports and LUNs.
+ */
+void zfcp_erp_set_adapter_status(struct zfcp_adapter *adapter, u32 mask)
+{
+	struct zfcp_port *port;
+	struct scsi_device *sdev;
+	unsigned long flags;
+	u32 common_mask = mask & ZFCP_COMMON_FLAGS;
+
+	atomic_or(mask, &adapter->status);
+
+	if (!common_mask)
+		return;
+
+	read_lock_irqsave(&adapter->port_list_lock, flags);
+	list_for_each_entry(port, &adapter->port_list, list)
+		atomic_or(common_mask, &port->status);
+	read_unlock_irqrestore(&adapter->port_list_lock, flags);
+
+	spin_lock_irqsave(adapter->scsi_host->host_lock, flags);
+	__shost_for_each_device(sdev, adapter->scsi_host)
+		atomic_or(common_mask, &sdev_to_zfcp(sdev)->status);
+	spin_unlock_irqrestore(adapter->scsi_host->host_lock, flags);
+}
+
+/**
+ * zfcp_erp_clear_adapter_status - clear adapter status bits
+ * @adapter: adapter to change the status
+ * @mask: status bits to change
+ *
+ * Changes in common status bits are propagated to attached ports and LUNs.
+ */
+void zfcp_erp_clear_adapter_status(struct zfcp_adapter *adapter, u32 mask)
+{
+	struct zfcp_port *port;
+	struct scsi_device *sdev;
+	unsigned long flags;
+	u32 common_mask = mask & ZFCP_COMMON_FLAGS;
+	u32 clear_counter = mask & ZFCP_STATUS_COMMON_ERP_FAILED;
+
+	atomic_andnot(mask, &adapter->status);
+
+	if (!common_mask)
+		return;
+
+	if (clear_counter)
+		atomic_set(&adapter->erp_counter, 0);
+
+	read_lock_irqsave(&adapter->port_list_lock, flags);
+	list_for_each_entry(port, &adapter->port_list, list) {
+		atomic_andnot(common_mask, &port->status);
+		if (clear_counter)
+			atomic_set(&port->erp_counter, 0);
+	}
+	read_unlock_irqrestore(&adapter->port_list_lock, flags);
+
+	spin_lock_irqsave(adapter->scsi_host->host_lock, flags);
+	__shost_for_each_device(sdev, adapter->scsi_host) {
+		atomic_andnot(common_mask, &sdev_to_zfcp(sdev)->status);
+		if (clear_counter)
+			atomic_set(&sdev_to_zfcp(sdev)->erp_counter, 0);
+	}
+	spin_unlock_irqrestore(adapter->scsi_host->host_lock, flags);
+}
+
+/**
+ * zfcp_erp_set_port_status - set port status bits
+ * @port: port to change the status
+ * @mask: status bits to change
+ *
+ * Changes in common status bits are propagated to attached LUNs.
+ */
+void zfcp_erp_set_port_status(struct zfcp_port *port, u32 mask)
+{
+	struct scsi_device *sdev;
+	u32 common_mask = mask & ZFCP_COMMON_FLAGS;
+	unsigned long flags;
+
+	atomic_or(mask, &port->status);
+
+	if (!common_mask)
+		return;
+
+	spin_lock_irqsave(port->adapter->scsi_host->host_lock, flags);
+	__shost_for_each_device(sdev, port->adapter->scsi_host)
+		if (sdev_to_zfcp(sdev)->port == port)
+			atomic_or(common_mask,
+					&sdev_to_zfcp(sdev)->status);
+	spin_unlock_irqrestore(port->adapter->scsi_host->host_lock, flags);
+}
+
+/**
+ * zfcp_erp_clear_port_status - clear port status bits
+ * @port: port to change the status
+ * @mask: status bits to change
+ *
+ * Changes in common status bits are propagated to attached LUNs.
+ */
+void zfcp_erp_clear_port_status(struct zfcp_port *port, u32 mask)
+{
+	struct scsi_device *sdev;
+	u32 common_mask = mask & ZFCP_COMMON_FLAGS;
+	u32 clear_counter = mask & ZFCP_STATUS_COMMON_ERP_FAILED;
+	unsigned long flags;
+
+	atomic_andnot(mask, &port->status);
+
+	if (!common_mask)
+		return;
+
+	if (clear_counter)
+		atomic_set(&port->erp_counter, 0);
+
+	spin_lock_irqsave(port->adapter->scsi_host->host_lock, flags);
+	__shost_for_each_device(sdev, port->adapter->scsi_host)
+		if (sdev_to_zfcp(sdev)->port == port) {
+			atomic_andnot(common_mask,
+					  &sdev_to_zfcp(sdev)->status);
+			if (clear_counter)
+				atomic_set(&sdev_to_zfcp(sdev)->erp_counter, 0);
+		}
+	spin_unlock_irqrestore(port->adapter->scsi_host->host_lock, flags);
+}
+
+/**
+ * zfcp_erp_set_lun_status - set LUN status bits
+ * @sdev: SCSI device / LUN to set the status bits
+ * @mask: status bits to change
+ */
+void zfcp_erp_set_lun_status(struct scsi_device *sdev, u32 mask)
+{
+	struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
+
+	atomic_or(mask, &zfcp_sdev->status);
+}
+
+/**
+ * zfcp_erp_clear_lun_status - clear LUN status bits
+ * @sdev: SCSI device / LUN to clear the status bits
+ * @mask: status bits to change
+ */
+void zfcp_erp_clear_lun_status(struct scsi_device *sdev, u32 mask)
+{
+	struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
+
+	atomic_andnot(mask, &zfcp_sdev->status);
+
+	if (mask & ZFCP_STATUS_COMMON_ERP_FAILED)
+		atomic_set(&zfcp_sdev->erp_counter, 0);
+}
+
+/**
+ * zfcp_erp_adapter_reset_sync() - Really reopen adapter and wait.
+ * @adapter: Pointer to zfcp_adapter to reopen.
+ * @id: Trace tag string of length %ZFCP_DBF_TAG_LEN.
+ */
+void zfcp_erp_adapter_reset_sync(struct zfcp_adapter *adapter, char *id)
+{
+	zfcp_erp_set_adapter_status(adapter, ZFCP_STATUS_COMMON_RUNNING);
+	zfcp_erp_adapter_reopen(adapter, ZFCP_STATUS_COMMON_ERP_FAILED, id);
+	zfcp_erp_wait(adapter);
+}
diff --git a/drivers/s390/scsi/zfcp_ext.h b/drivers/s390/scsi/zfcp_ext.h
new file mode 100644
index 0000000..bd0c5a9
--- /dev/null
+++ b/drivers/s390/scsi/zfcp_ext.h
@@ -0,0 +1,175 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * zfcp device driver
+ *
+ * External function declarations.
+ *
+ * Copyright IBM Corp. 2002, 2018
+ */
+
+#ifndef ZFCP_EXT_H
+#define ZFCP_EXT_H
+
+#include <linux/types.h>
+#include <scsi/fc/fc_els.h>
+#include "zfcp_def.h"
+#include "zfcp_fc.h"
+
+/* zfcp_aux.c */
+extern struct zfcp_port *zfcp_get_port_by_wwpn(struct zfcp_adapter *, u64);
+extern struct zfcp_adapter *zfcp_adapter_enqueue(struct ccw_device *);
+extern struct zfcp_port *zfcp_port_enqueue(struct zfcp_adapter *, u64, u32,
+					   u32);
+extern void zfcp_sg_free_table(struct scatterlist *, int);
+extern int zfcp_sg_setup_table(struct scatterlist *, int);
+extern void zfcp_adapter_release(struct kref *);
+extern void zfcp_adapter_unregister(struct zfcp_adapter *);
+
+/* zfcp_ccw.c */
+extern struct ccw_driver zfcp_ccw_driver;
+extern struct zfcp_adapter *zfcp_ccw_adapter_by_cdev(struct ccw_device *);
+extern void zfcp_ccw_adapter_put(struct zfcp_adapter *);
+
+/* zfcp_dbf.c */
+extern int zfcp_dbf_adapter_register(struct zfcp_adapter *);
+extern void zfcp_dbf_adapter_unregister(struct zfcp_adapter *);
+extern void zfcp_dbf_rec_trig(char *, struct zfcp_adapter *,
+			      struct zfcp_port *, struct scsi_device *, u8, u8);
+extern void zfcp_dbf_rec_trig_lock(char *tag, struct zfcp_adapter *adapter,
+				   struct zfcp_port *port,
+				   struct scsi_device *sdev, u8 want, u8 need);
+extern void zfcp_dbf_rec_run(char *, struct zfcp_erp_action *);
+extern void zfcp_dbf_rec_run_lvl(int level, char *tag,
+				 struct zfcp_erp_action *erp);
+extern void zfcp_dbf_rec_run_wka(char *, struct zfcp_fc_wka_port *, u64);
+extern void zfcp_dbf_hba_fsf_uss(char *, struct zfcp_fsf_req *);
+extern void zfcp_dbf_hba_fsf_res(char *, int, struct zfcp_fsf_req *);
+extern void zfcp_dbf_hba_bit_err(char *, struct zfcp_fsf_req *);
+extern void zfcp_dbf_hba_def_err(struct zfcp_adapter *, u64, u16, void **);
+extern void zfcp_dbf_hba_basic(char *, struct zfcp_adapter *);
+extern void zfcp_dbf_san_req(char *, struct zfcp_fsf_req *, u32);
+extern void zfcp_dbf_san_res(char *, struct zfcp_fsf_req *);
+extern void zfcp_dbf_san_in_els(char *, struct zfcp_fsf_req *);
+extern void zfcp_dbf_scsi_common(char *tag, int level, struct scsi_device *sdev,
+				 struct scsi_cmnd *sc,
+				 struct zfcp_fsf_req *fsf);
+extern void zfcp_dbf_scsi_eh(char *tag, struct zfcp_adapter *adapter,
+			     unsigned int scsi_id, int ret);
+
+/* zfcp_erp.c */
+extern void zfcp_erp_set_adapter_status(struct zfcp_adapter *, u32);
+extern void zfcp_erp_clear_adapter_status(struct zfcp_adapter *, u32);
+extern void zfcp_erp_port_forced_no_port_dbf(char *id,
+					     struct zfcp_adapter *adapter,
+					     u64 port_name, u32 port_id);
+extern void zfcp_erp_adapter_reopen(struct zfcp_adapter *, int, char *);
+extern void zfcp_erp_adapter_shutdown(struct zfcp_adapter *, int, char *);
+extern void zfcp_erp_set_port_status(struct zfcp_port *, u32);
+extern void zfcp_erp_clear_port_status(struct zfcp_port *, u32);
+extern void zfcp_erp_port_reopen(struct zfcp_port *port, int clear, char *id);
+extern void zfcp_erp_port_shutdown(struct zfcp_port *, int, char *);
+extern void zfcp_erp_port_forced_reopen(struct zfcp_port *, int, char *);
+extern void zfcp_erp_set_lun_status(struct scsi_device *, u32);
+extern void zfcp_erp_clear_lun_status(struct scsi_device *, u32);
+extern void zfcp_erp_lun_reopen(struct scsi_device *, int, char *);
+extern void zfcp_erp_lun_shutdown(struct scsi_device *, int, char *);
+extern void zfcp_erp_lun_shutdown_wait(struct scsi_device *, char *);
+extern int  zfcp_erp_thread_setup(struct zfcp_adapter *);
+extern void zfcp_erp_thread_kill(struct zfcp_adapter *);
+extern void zfcp_erp_wait(struct zfcp_adapter *);
+extern void zfcp_erp_notify(struct zfcp_erp_action *, unsigned long);
+extern void zfcp_erp_timeout_handler(struct timer_list *t);
+extern void zfcp_erp_adapter_reset_sync(struct zfcp_adapter *adapter, char *id);
+
+/* zfcp_fc.c */
+extern struct kmem_cache *zfcp_fc_req_cache;
+extern void zfcp_fc_enqueue_event(struct zfcp_adapter *,
+				enum fc_host_event_code event_code, u32);
+extern void zfcp_fc_post_event(struct work_struct *);
+extern void zfcp_fc_scan_ports(struct work_struct *);
+extern void zfcp_fc_incoming_els(struct zfcp_fsf_req *);
+extern void zfcp_fc_port_did_lookup(struct work_struct *);
+extern void zfcp_fc_trigger_did_lookup(struct zfcp_port *);
+extern void zfcp_fc_plogi_evaluate(struct zfcp_port *, struct fc_els_flogi *);
+extern void zfcp_fc_test_link(struct zfcp_port *);
+extern void zfcp_fc_link_test_work(struct work_struct *);
+extern void zfcp_fc_wka_ports_force_offline(struct zfcp_fc_wka_ports *);
+extern int zfcp_fc_gs_setup(struct zfcp_adapter *);
+extern void zfcp_fc_gs_destroy(struct zfcp_adapter *);
+extern int zfcp_fc_exec_bsg_job(struct bsg_job *);
+extern int zfcp_fc_timeout_bsg_job(struct bsg_job *);
+extern void zfcp_fc_sym_name_update(struct work_struct *);
+extern unsigned int zfcp_fc_port_scan_backoff(void);
+extern void zfcp_fc_conditional_port_scan(struct zfcp_adapter *);
+extern void zfcp_fc_inverse_conditional_port_scan(struct zfcp_adapter *);
+
+/* zfcp_fsf.c */
+extern struct kmem_cache *zfcp_fsf_qtcb_cache;
+extern int zfcp_fsf_open_port(struct zfcp_erp_action *);
+extern int zfcp_fsf_open_wka_port(struct zfcp_fc_wka_port *);
+extern int zfcp_fsf_close_wka_port(struct zfcp_fc_wka_port *);
+extern int zfcp_fsf_close_port(struct zfcp_erp_action *);
+extern int zfcp_fsf_close_physical_port(struct zfcp_erp_action *);
+extern int zfcp_fsf_open_lun(struct zfcp_erp_action *);
+extern int zfcp_fsf_close_lun(struct zfcp_erp_action *);
+extern int zfcp_fsf_exchange_config_data(struct zfcp_erp_action *);
+extern int zfcp_fsf_exchange_config_data_sync(struct zfcp_qdio *,
+					      struct fsf_qtcb_bottom_config *);
+extern int zfcp_fsf_exchange_port_data(struct zfcp_erp_action *);
+extern int zfcp_fsf_exchange_port_data_sync(struct zfcp_qdio *,
+					    struct fsf_qtcb_bottom_port *);
+extern void zfcp_fsf_req_dismiss_all(struct zfcp_adapter *);
+extern int zfcp_fsf_status_read(struct zfcp_qdio *);
+extern int zfcp_status_read_refill(struct zfcp_adapter *adapter);
+extern int zfcp_fsf_send_ct(struct zfcp_fc_wka_port *, struct zfcp_fsf_ct_els *,
+			    mempool_t *, unsigned int);
+extern int zfcp_fsf_send_els(struct zfcp_adapter *, u32,
+			     struct zfcp_fsf_ct_els *, unsigned int);
+extern int zfcp_fsf_fcp_cmnd(struct scsi_cmnd *);
+extern void zfcp_fsf_req_free(struct zfcp_fsf_req *);
+extern struct zfcp_fsf_req *zfcp_fsf_fcp_task_mgmt(struct scsi_device *sdev,
+						   u8 tm_flags);
+extern struct zfcp_fsf_req *zfcp_fsf_abort_fcp_cmnd(struct scsi_cmnd *);
+extern void zfcp_fsf_reqid_check(struct zfcp_qdio *, int);
+
+/* zfcp_qdio.c */
+extern int zfcp_qdio_setup(struct zfcp_adapter *);
+extern void zfcp_qdio_destroy(struct zfcp_qdio *);
+extern int zfcp_qdio_sbal_get(struct zfcp_qdio *);
+extern int zfcp_qdio_send(struct zfcp_qdio *, struct zfcp_qdio_req *);
+extern int zfcp_qdio_sbals_from_sg(struct zfcp_qdio *, struct zfcp_qdio_req *,
+				   struct scatterlist *);
+extern int zfcp_qdio_open(struct zfcp_qdio *);
+extern void zfcp_qdio_close(struct zfcp_qdio *);
+extern void zfcp_qdio_siosl(struct zfcp_adapter *);
+
+/* zfcp_scsi.c */
+extern struct scsi_transport_template *zfcp_scsi_transport_template;
+extern int zfcp_scsi_adapter_register(struct zfcp_adapter *);
+extern void zfcp_scsi_adapter_unregister(struct zfcp_adapter *);
+extern struct fc_function_template zfcp_transport_functions;
+extern void zfcp_scsi_rport_work(struct work_struct *);
+extern void zfcp_scsi_schedule_rport_register(struct zfcp_port *);
+extern void zfcp_scsi_schedule_rport_block(struct zfcp_port *);
+extern void zfcp_scsi_schedule_rports_block(struct zfcp_adapter *);
+extern void zfcp_scsi_set_prot(struct zfcp_adapter *);
+extern void zfcp_scsi_dif_sense_error(struct scsi_cmnd *, int);
+
+/* zfcp_sysfs.c */
+extern const struct attribute_group *zfcp_unit_attr_groups[];
+extern struct attribute_group zfcp_sysfs_adapter_attrs;
+extern const struct attribute_group *zfcp_port_attr_groups[];
+extern struct mutex zfcp_sysfs_port_units_mutex;
+extern struct device_attribute *zfcp_sysfs_sdev_attrs[];
+extern struct device_attribute *zfcp_sysfs_shost_attrs[];
+
+/* zfcp_unit.c */
+extern int zfcp_unit_add(struct zfcp_port *, u64);
+extern int zfcp_unit_remove(struct zfcp_port *, u64);
+extern struct zfcp_unit *zfcp_unit_find(struct zfcp_port *, u64);
+extern struct scsi_device *zfcp_unit_sdev(struct zfcp_unit *unit);
+extern void zfcp_unit_scsi_scan(struct zfcp_unit *);
+extern void zfcp_unit_queue_scsi_scan(struct zfcp_port *);
+extern unsigned int zfcp_unit_sdev_status(struct zfcp_unit *);
+
+#endif	/* ZFCP_EXT_H */
diff --git a/drivers/s390/scsi/zfcp_fc.c b/drivers/s390/scsi/zfcp_fc.c
new file mode 100644
index 0000000..f6c415d
--- /dev/null
+++ b/drivers/s390/scsi/zfcp_fc.c
@@ -0,0 +1,1049 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * zfcp device driver
+ *
+ * Fibre Channel related functions for the zfcp device driver.
+ *
+ * Copyright IBM Corp. 2008, 2017
+ */
+
+#define KMSG_COMPONENT "zfcp"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/utsname.h>
+#include <linux/random.h>
+#include <linux/bsg-lib.h>
+#include <scsi/fc/fc_els.h>
+#include <scsi/libfc.h>
+#include "zfcp_ext.h"
+#include "zfcp_fc.h"
+
+struct kmem_cache *zfcp_fc_req_cache;
+
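+/* comparison mask per RSCN address format: affected port, area,
+ * domain, or the entire fabric
+ */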
+static u32 zfcp_fc_rscn_range_mask[] = {
+	[ELS_ADDR_FMT_PORT]		= 0xFFFFFF,
+	[ELS_ADDR_FMT_AREA]		= 0xFFFF00,
+	[ELS_ADDR_FMT_DOM]		= 0xFF0000,
+	[ELS_ADDR_FMT_FAB]		= 0x000000,
+};
+
+static bool no_auto_port_rescan;
+module_param(no_auto_port_rescan, bool, 0600);
+MODULE_PARM_DESC(no_auto_port_rescan,
+		 "no automatic port_rescan (default off)");
+
+static unsigned int port_scan_backoff = 500;
+module_param(port_scan_backoff, uint, 0600);
+MODULE_PARM_DESC(port_scan_backoff,
+	"upper limit of port scan random backoff in msecs (default 500)");
+
+static unsigned int port_scan_ratelimit = 60000;
+module_param(port_scan_ratelimit, uint, 0600);
+MODULE_PARM_DESC(port_scan_ratelimit,
+	"minimum interval between port scans in msecs (default 60000)");
+
+unsigned int zfcp_fc_port_scan_backoff(void)
+{
+	if (!port_scan_backoff)
+		return 0;
+	return get_random_int() % port_scan_backoff;
+}
+
+static void zfcp_fc_port_scan_time(struct zfcp_adapter *adapter)
+{
+	unsigned long interval = msecs_to_jiffies(port_scan_ratelimit);
+	unsigned long backoff = msecs_to_jiffies(zfcp_fc_port_scan_backoff());
+
+	adapter->next_port_scan = jiffies + interval + backoff;
+}
+
+static void zfcp_fc_port_scan(struct zfcp_adapter *adapter)
+{
+	unsigned long now = jiffies;
+	unsigned long next = adapter->next_port_scan;
+	unsigned long delay = 0, max;
+
+	/* delay only needed within waiting period */
+	if (time_before(now, next)) {
+		delay = next - now;
+		/* paranoia: never ever delay scans longer than specified */
+		max = msecs_to_jiffies(port_scan_ratelimit + port_scan_backoff);
+		delay = min(delay, max);
+	}
+
+	queue_delayed_work(adapter->work_queue, &adapter->scan_work, delay);
+}
+
+void zfcp_fc_conditional_port_scan(struct zfcp_adapter *adapter)
+{
+	if (no_auto_port_rescan)
+		return;
+
+	zfcp_fc_port_scan(adapter);
+}
+
+void zfcp_fc_inverse_conditional_port_scan(struct zfcp_adapter *adapter)
+{
+	if (!no_auto_port_rescan)
+		return;
+
+	zfcp_fc_port_scan(adapter);
+}
+
+/**
+ * zfcp_fc_post_event - post event to userspace via fc_transport
+ * @work: work struct with enqueued events
+ */
+void zfcp_fc_post_event(struct work_struct *work)
+{
+	struct zfcp_fc_event *event = NULL, *tmp = NULL;
+	LIST_HEAD(tmp_lh);
+	struct zfcp_fc_events *events = container_of(work,
+					struct zfcp_fc_events, work);
+	struct zfcp_adapter *adapter = container_of(events, struct zfcp_adapter,
+						events);
+
+	spin_lock_bh(&events->list_lock);
+	list_splice_init(&events->list, &tmp_lh);
+	spin_unlock_bh(&events->list_lock);
+
+	list_for_each_entry_safe(event, tmp, &tmp_lh, list) {
+		fc_host_post_event(adapter->scsi_host, fc_get_event_number(),
+				   event->code, event->data);
+		list_del(&event->list);
+		kfree(event);
+	}
+}
+
+/**
+ * zfcp_fc_enqueue_event - safely enqueue FC HBA API event from irq context
+ * @adapter: The adapter on which to enqueue the event
+ * @event_code: The event code (as defined in fc_host_event_code in
+ *		scsi_transport_fc.h)
+ * @event_data: The event data (e.g. n_port page in case of els)
+ */
+void zfcp_fc_enqueue_event(struct zfcp_adapter *adapter,
+			   enum fc_host_event_code event_code, u32 event_data)
+{
+	struct zfcp_fc_event *event;
+
+	event = kmalloc(sizeof(struct zfcp_fc_event), GFP_ATOMIC);
+	if (!event)
+		return;
+
+	event->code = event_code;
+	event->data = event_data;
+
+	spin_lock(&adapter->events.list_lock);
+	list_add_tail(&event->list, &adapter->events.list);
+	spin_unlock(&adapter->events.list_lock);
+
+	queue_work(adapter->work_queue, &adapter->events.work);
+}
+
+static int zfcp_fc_wka_port_get(struct zfcp_fc_wka_port *wka_port)
+{
+	if (mutex_lock_interruptible(&wka_port->mutex))
+		return -ERESTARTSYS;
+
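+	/* the first user transitions the WKA port to OPENING; all callers
+	 * then wait until the port settles ONLINE or OFFLINE
+	 */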
+	if (wka_port->status == ZFCP_FC_WKA_PORT_OFFLINE ||
+	    wka_port->status == ZFCP_FC_WKA_PORT_CLOSING) {
+		wka_port->status = ZFCP_FC_WKA_PORT_OPENING;
+		if (zfcp_fsf_open_wka_port(wka_port))
+			wka_port->status = ZFCP_FC_WKA_PORT_OFFLINE;
+	}
+
+	mutex_unlock(&wka_port->mutex);
+
+	wait_event(wka_port->completion_wq,
+		   wka_port->status == ZFCP_FC_WKA_PORT_ONLINE ||
+		   wka_port->status == ZFCP_FC_WKA_PORT_OFFLINE);
+
+	if (wka_port->status == ZFCP_FC_WKA_PORT_ONLINE) {
+		atomic_inc(&wka_port->refcount);
+		return 0;
+	}
+	return -EIO;
+}
+
+static void zfcp_fc_wka_port_offline(struct work_struct *work)
+{
+	struct delayed_work *dw = to_delayed_work(work);
+	struct zfcp_fc_wka_port *wka_port =
+			container_of(dw, struct zfcp_fc_wka_port, work);
+
+	mutex_lock(&wka_port->mutex);
+	if ((atomic_read(&wka_port->refcount) != 0) ||
+	    (wka_port->status != ZFCP_FC_WKA_PORT_ONLINE))
+		goto out;
+
+	wka_port->status = ZFCP_FC_WKA_PORT_CLOSING;
+	if (zfcp_fsf_close_wka_port(wka_port)) {
+		wka_port->status = ZFCP_FC_WKA_PORT_OFFLINE;
+		wake_up(&wka_port->completion_wq);
+	}
+out:
+	mutex_unlock(&wka_port->mutex);
+}
+
+static void zfcp_fc_wka_port_put(struct zfcp_fc_wka_port *wka_port)
+{
+	if (atomic_dec_return(&wka_port->refcount) != 0)
+		return;
+	/* wait 10 milliseconds, other reqs might pop in */
+	schedule_delayed_work(&wka_port->work, HZ / 100);
+}
+
+static void zfcp_fc_wka_port_init(struct zfcp_fc_wka_port *wka_port, u32 d_id,
+				  struct zfcp_adapter *adapter)
+{
+	init_waitqueue_head(&wka_port->completion_wq);
+
+	wka_port->adapter = adapter;
+	wka_port->d_id = d_id;
+
+	wka_port->status = ZFCP_FC_WKA_PORT_OFFLINE;
+	atomic_set(&wka_port->refcount, 0);
+	mutex_init(&wka_port->mutex);
+	INIT_DELAYED_WORK(&wka_port->work, zfcp_fc_wka_port_offline);
+}
+
+static void zfcp_fc_wka_port_force_offline(struct zfcp_fc_wka_port *wka)
+{
+	cancel_delayed_work_sync(&wka->work);
+	mutex_lock(&wka->mutex);
+	wka->status = ZFCP_FC_WKA_PORT_OFFLINE;
+	mutex_unlock(&wka->mutex);
+}
+
+void zfcp_fc_wka_ports_force_offline(struct zfcp_fc_wka_ports *gs)
+{
+	if (!gs)
+		return;
+	zfcp_fc_wka_port_force_offline(&gs->ms);
+	zfcp_fc_wka_port_force_offline(&gs->ts);
+	zfcp_fc_wka_port_force_offline(&gs->ds);
+	zfcp_fc_wka_port_force_offline(&gs->as);
+}
+
+static void _zfcp_fc_incoming_rscn(struct zfcp_fsf_req *fsf_req, u32 range,
+				   struct fc_els_rscn_page *page)
+{
+	unsigned long flags;
+	struct zfcp_adapter *adapter = fsf_req->adapter;
+	struct zfcp_port *port;
+
+	read_lock_irqsave(&adapter->port_list_lock, flags);
+	list_for_each_entry(port, &adapter->port_list, list) {
+		if ((port->d_id & range) == (ntoh24(page->rscn_fid) & range))
+			zfcp_fc_test_link(port);
+		if (!port->d_id)
+			zfcp_erp_port_reopen(port,
+					     ZFCP_STATUS_COMMON_ERP_FAILED,
+					     "fcrscn1");
+	}
+	read_unlock_irqrestore(&adapter->port_list_lock, flags);
+}
+
+static void zfcp_fc_incoming_rscn(struct zfcp_fsf_req *fsf_req)
+{
+	struct fsf_status_read_buffer *status_buffer = (void *)fsf_req->data;
+	struct fc_els_rscn *head;
+	struct fc_els_rscn_page *page;
+	u16 i;
+	u16 no_entries;
+	unsigned int afmt;
+
+	head = (struct fc_els_rscn *) status_buffer->payload.data;
+	page = (struct fc_els_rscn_page *) head;
+
+	/* see FC-FS */
+	no_entries = be16_to_cpu(head->rscn_plen) /
+		sizeof(struct fc_els_rscn_page);
+
+	for (i = 1; i < no_entries; i++) {
+		/* skip head and start with 1st element */
+		page++;
+		afmt = page->rscn_page_flags & ELS_RSCN_ADDR_FMT_MASK;
+		_zfcp_fc_incoming_rscn(fsf_req, zfcp_fc_rscn_range_mask[afmt],
+				       page);
+		zfcp_fc_enqueue_event(fsf_req->adapter, FCH_EVT_RSCN,
+				      *(u32 *)page);
+	}
+	zfcp_fc_conditional_port_scan(fsf_req->adapter);
+}
+
+static void zfcp_fc_incoming_wwpn(struct zfcp_fsf_req *req, u64 wwpn)
+{
+	unsigned long flags;
+	struct zfcp_adapter *adapter = req->adapter;
+	struct zfcp_port *port;
+
+	read_lock_irqsave(&adapter->port_list_lock, flags);
+	list_for_each_entry(port, &adapter->port_list, list)
+		if (port->wwpn == wwpn) {
+			zfcp_erp_port_forced_reopen(port, 0, "fciwwp1");
+			break;
+		}
+	read_unlock_irqrestore(&adapter->port_list_lock, flags);
+}
+
+static void zfcp_fc_incoming_plogi(struct zfcp_fsf_req *req)
+{
+	struct fsf_status_read_buffer *status_buffer;
+	struct fc_els_flogi *plogi;
+
+	status_buffer = (struct fsf_status_read_buffer *) req->data;
+	plogi = (struct fc_els_flogi *) status_buffer->payload.data;
+	zfcp_fc_incoming_wwpn(req, be64_to_cpu(plogi->fl_wwpn));
+}
+
+static void zfcp_fc_incoming_logo(struct zfcp_fsf_req *req)
+{
+	struct fsf_status_read_buffer *status_buffer =
+		(struct fsf_status_read_buffer *)req->data;
+	struct fc_els_logo *logo =
+		(struct fc_els_logo *) status_buffer->payload.data;
+
+	zfcp_fc_incoming_wwpn(req, be64_to_cpu(logo->fl_n_port_wwn));
+}
+
+/**
+ * zfcp_fc_incoming_els - handle incoming ELS
+ * @fsf_req: request which contains incoming ELS
+ */
+void zfcp_fc_incoming_els(struct zfcp_fsf_req *fsf_req)
+{
+	struct fsf_status_read_buffer *status_buffer =
+		(struct fsf_status_read_buffer *) fsf_req->data;
+	unsigned int els_type = status_buffer->payload.data[0];
+
+	zfcp_dbf_san_in_els("fciels1", fsf_req);
+	if (els_type == ELS_PLOGI)
+		zfcp_fc_incoming_plogi(fsf_req);
+	else if (els_type == ELS_LOGO)
+		zfcp_fc_incoming_logo(fsf_req);
+	else if (els_type == ELS_RSCN)
+		zfcp_fc_incoming_rscn(fsf_req);
+}
+
+static void zfcp_fc_ns_gid_pn_eval(struct zfcp_fc_req *fc_req)
+{
+	struct zfcp_fsf_ct_els *ct_els = &fc_req->ct_els;
+	struct zfcp_fc_gid_pn_rsp *gid_pn_rsp = &fc_req->u.gid_pn.rsp;
+
+	if (ct_els->status)
+		return;
+	if (gid_pn_rsp->ct_hdr.ct_cmd != cpu_to_be16(FC_FS_ACC))
+		return;
+
+	/* looks like a valid d_id */
+	ct_els->port->d_id = ntoh24(gid_pn_rsp->gid_pn.fp_fid);
+}
+
+static void zfcp_fc_complete(void *data)
+{
+	complete(data);
+}
+
+static void zfcp_fc_ct_ns_init(struct fc_ct_hdr *ct_hdr, u16 cmd, u16 mr_size)
+{
+	ct_hdr->ct_rev = FC_CT_REV;
+	ct_hdr->ct_fs_type = FC_FST_DIR;
+	ct_hdr->ct_fs_subtype = FC_NS_SUBTYPE;
+	ct_hdr->ct_cmd = cpu_to_be16(cmd);
+	ct_hdr->ct_mr_size = cpu_to_be16(mr_size / 4);
+}
+
+static int zfcp_fc_ns_gid_pn_request(struct zfcp_port *port,
+				     struct zfcp_fc_req *fc_req)
+{
+	struct zfcp_adapter *adapter = port->adapter;
+	DECLARE_COMPLETION_ONSTACK(completion);
+	struct zfcp_fc_gid_pn_req *gid_pn_req = &fc_req->u.gid_pn.req;
+	struct zfcp_fc_gid_pn_rsp *gid_pn_rsp = &fc_req->u.gid_pn.rsp;
+	int ret;
+
+	/* setup parameters for send generic command */
+	fc_req->ct_els.port = port;
+	fc_req->ct_els.handler = zfcp_fc_complete;
+	fc_req->ct_els.handler_data = &completion;
+	fc_req->ct_els.req = &fc_req->sg_req;
+	fc_req->ct_els.resp = &fc_req->sg_rsp;
+	sg_init_one(&fc_req->sg_req, gid_pn_req, sizeof(*gid_pn_req));
+	sg_init_one(&fc_req->sg_rsp, gid_pn_rsp, sizeof(*gid_pn_rsp));
+
+	zfcp_fc_ct_ns_init(&gid_pn_req->ct_hdr,
+			   FC_NS_GID_PN, ZFCP_FC_CT_SIZE_PAGE);
+	gid_pn_req->gid_pn.fn_wwpn = cpu_to_be64(port->wwpn);
+
+	ret = zfcp_fsf_send_ct(&adapter->gs->ds, &fc_req->ct_els,
+			       adapter->pool.gid_pn_req,
+			       ZFCP_FC_CTELS_TMO);
+	if (!ret) {
+		wait_for_completion(&completion);
+		zfcp_fc_ns_gid_pn_eval(fc_req);
+	}
+	return ret;
+}
+
+/**
+ * zfcp_fc_ns_gid_pn - initiate GID_PN nameserver request
+ * @port: port where GID_PN request is needed
+ * Return: -ENOMEM on error, 0 otherwise
+ */
+static int zfcp_fc_ns_gid_pn(struct zfcp_port *port)
+{
+	int ret;
+	struct zfcp_fc_req *fc_req;
+	struct zfcp_adapter *adapter = port->adapter;
+
+	fc_req = mempool_alloc(adapter->pool.gid_pn, GFP_ATOMIC);
+	if (!fc_req)
+		return -ENOMEM;
+
+	memset(fc_req, 0, sizeof(*fc_req));
+
+	ret = zfcp_fc_wka_port_get(&adapter->gs->ds);
+	if (ret)
+		goto out;
+
+	ret = zfcp_fc_ns_gid_pn_request(port, fc_req);
+
+	zfcp_fc_wka_port_put(&adapter->gs->ds);
+out:
+	mempool_free(fc_req, adapter->pool.gid_pn);
+	return ret;
+}
+
+void zfcp_fc_port_did_lookup(struct work_struct *work)
+{
+	int ret;
+	struct zfcp_port *port = container_of(work, struct zfcp_port,
+					      gid_pn_work);
+
+	set_worker_desc("zgidpn%16llx", port->wwpn); /* < WORKER_DESC_LEN=24 */
+	ret = zfcp_fc_ns_gid_pn(port);
+	if (ret) {
+		/* could not issue gid_pn for some reason */
+		zfcp_erp_adapter_reopen(port->adapter, 0, "fcgpn_1");
+		goto out;
+	}
+
+	if (!port->d_id) {
+		zfcp_erp_set_port_status(port, ZFCP_STATUS_COMMON_ERP_FAILED);
+		goto out;
+	}
+
+	zfcp_erp_port_reopen(port, 0, "fcgpn_3");
+out:
+	put_device(&port->dev);
+}
+
+/**
+ * zfcp_fc_trigger_did_lookup - trigger the d_id lookup using a GID_PN request
+ * @port: The zfcp_port to lookup the d_id for.
+ */
+void zfcp_fc_trigger_did_lookup(struct zfcp_port *port)
+{
+	get_device(&port->dev);
+	if (!queue_work(port->adapter->work_queue, &port->gid_pn_work))
+		put_device(&port->dev);
+}
+
+/**
+ * zfcp_fc_plogi_evaluate - evaluate PLOGI payload
+ * @port: zfcp_port structure
+ * @plogi: plogi payload
+ *
+ * Evaluate PLOGI payload and copy important fields into zfcp_port structure
+ */
+void zfcp_fc_plogi_evaluate(struct zfcp_port *port, struct fc_els_flogi *plogi)
+{
+	if (be64_to_cpu(plogi->fl_wwpn) != port->wwpn) {
+		port->d_id = 0;
+		dev_warn(&port->adapter->ccw_device->dev,
+			 "A port opened with WWPN 0x%016Lx returned data that "
+			 "identifies it as WWPN 0x%016Lx\n",
+			 (unsigned long long) port->wwpn,
+			 (unsigned long long) be64_to_cpu(plogi->fl_wwpn));
+		return;
+	}
+
+	port->wwnn = be64_to_cpu(plogi->fl_wwnn);
+	port->maxframe_size = be16_to_cpu(plogi->fl_csp.sp_bb_data);
+
+	if (plogi->fl_cssp[0].cp_class & cpu_to_be16(FC_CPC_VALID))
+		port->supported_classes |= FC_COS_CLASS1;
+	if (plogi->fl_cssp[1].cp_class & cpu_to_be16(FC_CPC_VALID))
+		port->supported_classes |= FC_COS_CLASS2;
+	if (plogi->fl_cssp[2].cp_class & cpu_to_be16(FC_CPC_VALID))
+		port->supported_classes |= FC_COS_CLASS3;
+	if (plogi->fl_cssp[3].cp_class & cpu_to_be16(FC_CPC_VALID))
+		port->supported_classes |= FC_COS_CLASS4;
+}
+
+static void zfcp_fc_adisc_handler(void *data)
+{
+	struct zfcp_fc_req *fc_req = data;
+	struct zfcp_port *port = fc_req->ct_els.port;
+	struct fc_els_adisc *adisc_resp = &fc_req->u.adisc.rsp;
+
+	if (fc_req->ct_els.status) {
+		/* request rejected or timed out */
+		zfcp_erp_port_forced_reopen(port, ZFCP_STATUS_COMMON_ERP_FAILED,
+					    "fcadh_1");
+		goto out;
+	}
+
+	if (!port->wwnn)
+		port->wwnn = be64_to_cpu(adisc_resp->adisc_wwnn);
+
+	if ((port->wwpn != be64_to_cpu(adisc_resp->adisc_wwpn)) ||
+	    !(atomic_read(&port->status) & ZFCP_STATUS_COMMON_OPEN)) {
+		zfcp_erp_port_reopen(port, ZFCP_STATUS_COMMON_ERP_FAILED,
+				     "fcadh_2");
+		goto out;
+	}
+
+	/* port is good, unblock rport without going through erp */
+	zfcp_scsi_schedule_rport_register(port);
+ out:
+	atomic_andnot(ZFCP_STATUS_PORT_LINK_TEST, &port->status);
+	put_device(&port->dev);
+	kmem_cache_free(zfcp_fc_req_cache, fc_req);
+}
+
+static int zfcp_fc_adisc(struct zfcp_port *port)
+{
+	struct zfcp_fc_req *fc_req;
+	struct zfcp_adapter *adapter = port->adapter;
+	struct Scsi_Host *shost = adapter->scsi_host;
+	int ret;
+
+	fc_req = kmem_cache_zalloc(zfcp_fc_req_cache, GFP_ATOMIC);
+	if (!fc_req)
+		return -ENOMEM;
+
+	fc_req->ct_els.port = port;
+	fc_req->ct_els.req = &fc_req->sg_req;
+	fc_req->ct_els.resp = &fc_req->sg_rsp;
+	sg_init_one(&fc_req->sg_req, &fc_req->u.adisc.req,
+		    sizeof(struct fc_els_adisc));
+	sg_init_one(&fc_req->sg_rsp, &fc_req->u.adisc.rsp,
+		    sizeof(struct fc_els_adisc));
+
+	fc_req->ct_els.handler = zfcp_fc_adisc_handler;
+	fc_req->ct_els.handler_data = fc_req;
+
+	/* acc. to FC-FS, hard_nport_id in ADISC should not be set for ports
+	 * without FC-AL-2 capability, so we don't set it
+	 */
+	fc_req->u.adisc.req.adisc_wwpn = cpu_to_be64(fc_host_port_name(shost));
+	fc_req->u.adisc.req.adisc_wwnn = cpu_to_be64(fc_host_node_name(shost));
+	fc_req->u.adisc.req.adisc_cmd = ELS_ADISC;
+	hton24(fc_req->u.adisc.req.adisc_port_id, fc_host_port_id(shost));
+
+	ret = zfcp_fsf_send_els(adapter, port->d_id, &fc_req->ct_els,
+				ZFCP_FC_CTELS_TMO);
+	if (ret)
+		kmem_cache_free(zfcp_fc_req_cache, fc_req);
+
+	return ret;
+}
+
+void zfcp_fc_link_test_work(struct work_struct *work)
+{
+	struct zfcp_port *port =
+		container_of(work, struct zfcp_port, test_link_work);
+	int retval;
+
+	set_worker_desc("zadisc%16llx", port->wwpn); /* < WORKER_DESC_LEN=24 */
+	get_device(&port->dev);
+	port->rport_task = RPORT_DEL;
+	zfcp_scsi_rport_work(&port->rport_work);
+
+	/* only issue one test command at one time per port */
+	if (atomic_read(&port->status) & ZFCP_STATUS_PORT_LINK_TEST)
+		goto out;
+
+	atomic_or(ZFCP_STATUS_PORT_LINK_TEST, &port->status);
+
+	retval = zfcp_fc_adisc(port);
+	if (retval == 0)
+		return;
+
+	/* send of ADISC was not possible */
+	atomic_andnot(ZFCP_STATUS_PORT_LINK_TEST, &port->status);
+	zfcp_erp_port_forced_reopen(port, 0, "fcltwk1");
+
+out:
+	put_device(&port->dev);
+}
+
+/**
+ * zfcp_fc_test_link - lightweight link test procedure
+ * @port: port to be tested
+ *
+ * Test status of a link to a remote port using the ELS command ADISC.
+ * If there is a problem with the remote port, error recovery steps
+ * will be triggered.
+ */
+void zfcp_fc_test_link(struct zfcp_port *port)
+{
+	get_device(&port->dev);
+	if (!queue_work(port->adapter->work_queue, &port->test_link_work))
+		put_device(&port->dev);
+}
+
+static struct zfcp_fc_req *zfcp_fc_alloc_sg_env(int buf_num)
+{
+	struct zfcp_fc_req *fc_req;
+
+	fc_req = kmem_cache_zalloc(zfcp_fc_req_cache, GFP_KERNEL);
+	if (!fc_req)
+		return NULL;
+
+	if (zfcp_sg_setup_table(&fc_req->sg_rsp, buf_num)) {
+		kmem_cache_free(zfcp_fc_req_cache, fc_req);
+		return NULL;
+	}
+
+	sg_init_one(&fc_req->sg_req, &fc_req->u.gpn_ft.req,
+		    sizeof(struct zfcp_fc_gpn_ft_req));
+
+	return fc_req;
+}
+
+static int zfcp_fc_send_gpn_ft(struct zfcp_fc_req *fc_req,
+			       struct zfcp_adapter *adapter, int max_bytes)
+{
+	struct zfcp_fsf_ct_els *ct_els = &fc_req->ct_els;
+	struct zfcp_fc_gpn_ft_req *req = &fc_req->u.gpn_ft.req;
+	DECLARE_COMPLETION_ONSTACK(completion);
+	int ret;
+
+	zfcp_fc_ct_ns_init(&req->ct_hdr, FC_NS_GPN_FT, max_bytes);
+	req->gpn_ft.fn_fc4_type = FC_TYPE_FCP;
+
+	ct_els->handler = zfcp_fc_complete;
+	ct_els->handler_data = &completion;
+	ct_els->req = &fc_req->sg_req;
+	ct_els->resp = &fc_req->sg_rsp;
+
+	ret = zfcp_fsf_send_ct(&adapter->gs->ds, ct_els, NULL,
+			       ZFCP_FC_CTELS_TMO);
+	if (!ret)
+		wait_for_completion(&completion);
+	return ret;
+}
+
+static void zfcp_fc_validate_port(struct zfcp_port *port, struct list_head *lh)
+{
+	if (!(atomic_read(&port->status) & ZFCP_STATUS_COMMON_NOESC))
+		return;
+
+	atomic_andnot(ZFCP_STATUS_COMMON_NOESC, &port->status);
+
+	if ((port->supported_classes != 0) ||
+	    !list_empty(&port->unit_list))
+		return;
+
+	list_move_tail(&port->list, lh);
+}
+
+static int zfcp_fc_eval_gpn_ft(struct zfcp_fc_req *fc_req,
+			       struct zfcp_adapter *adapter, int max_entries)
+{
+	struct zfcp_fsf_ct_els *ct_els = &fc_req->ct_els;
+	struct scatterlist *sg = &fc_req->sg_rsp;
+	struct fc_ct_hdr *hdr = sg_virt(sg);
+	struct fc_gpn_ft_resp *acc = sg_virt(sg);
+	struct zfcp_port *port, *tmp;
+	unsigned long flags;
+	LIST_HEAD(remove_lh);
+	u32 d_id;
+	int ret = 0, x, last = 0;
+
+	if (ct_els->status)
+		return -EIO;
+
+	if (hdr->ct_cmd != cpu_to_be16(FC_FS_ACC)) {
+		if (hdr->ct_reason == FC_FS_RJT_UNABL)
+			return -EAGAIN; /* might be a temporary condition */
+		return -EIO;
+	}
+
+	if (hdr->ct_mr_size) {
+		dev_warn(&adapter->ccw_device->dev,
+			 "The name server reported %d words residual data\n",
+			 hdr->ct_mr_size);
+		return -E2BIG;
+	}
+
+	/* first entry is the header */
+	for (x = 1; x < max_entries && !last; x++) {
+		if (x % (ZFCP_FC_GPN_FT_ENT_PAGE + 1))
+			acc++;
+		else
+			acc = sg_virt(++sg);
+
+		last = acc->fp_flags & FC_NS_FID_LAST;
+		d_id = ntoh24(acc->fp_fid);
+
+		/* don't attach ports with a well known address */
+		if (d_id >= FC_FID_WELL_KNOWN_BASE)
+			continue;
+		/* skip the adapter's port and known remote ports */
+		if (be64_to_cpu(acc->fp_wwpn) ==
+		    fc_host_port_name(adapter->scsi_host))
+			continue;
+
+		port = zfcp_port_enqueue(adapter, be64_to_cpu(acc->fp_wwpn),
+					 ZFCP_STATUS_COMMON_NOESC, d_id);
+		if (!IS_ERR(port))
+			zfcp_erp_port_reopen(port, 0, "fcegpf1");
+		else if (PTR_ERR(port) != -EEXIST)
+			ret = PTR_ERR(port);
+	}
+
+	zfcp_erp_wait(adapter);
+	write_lock_irqsave(&adapter->port_list_lock, flags);
+	list_for_each_entry_safe(port, tmp, &adapter->port_list, list)
+		zfcp_fc_validate_port(port, &remove_lh);
+	write_unlock_irqrestore(&adapter->port_list_lock, flags);
+
+	list_for_each_entry_safe(port, tmp, &remove_lh, list) {
+		zfcp_erp_port_shutdown(port, 0, "fcegpf2");
+		device_unregister(&port->dev);
+	}
+
+	return ret;
+}
+
+/**
+ * zfcp_fc_scan_ports - scan remote ports and attach new ports
+ * @work: reference to scheduled work
+ */
+void zfcp_fc_scan_ports(struct work_struct *work)
+{
+	struct delayed_work *dw = to_delayed_work(work);
+	struct zfcp_adapter *adapter = container_of(dw, struct zfcp_adapter,
+						    scan_work);
+	int ret, i;
+	struct zfcp_fc_req *fc_req;
+	int chain, max_entries, buf_num, max_bytes;
+
+	zfcp_fc_port_scan_time(adapter);
+
+	chain = adapter->adapter_features & FSF_FEATURE_ELS_CT_CHAINED_SBALS;
+	buf_num = chain ? ZFCP_FC_GPN_FT_NUM_BUFS : 1;
+	max_entries = chain ? ZFCP_FC_GPN_FT_MAX_ENT : ZFCP_FC_GPN_FT_ENT_PAGE;
+	max_bytes = chain ? ZFCP_FC_GPN_FT_MAX_SIZE : ZFCP_FC_CT_SIZE_PAGE;
+
+	if (fc_host_port_type(adapter->scsi_host) != FC_PORTTYPE_NPORT &&
+	    fc_host_port_type(adapter->scsi_host) != FC_PORTTYPE_NPIV)
+		return;
+
+	if (zfcp_fc_wka_port_get(&adapter->gs->ds))
+		return;
+
+	fc_req = zfcp_fc_alloc_sg_env(buf_num);
+	if (!fc_req)
+		goto out;
+
+	for (i = 0; i < 3; i++) {
+		ret = zfcp_fc_send_gpn_ft(fc_req, adapter, max_bytes);
+		if (!ret) {
+			ret = zfcp_fc_eval_gpn_ft(fc_req, adapter, max_entries);
+			if (ret == -EAGAIN)
+				ssleep(1);
+			else
+				break;
+		}
+	}
+	zfcp_sg_free_table(&fc_req->sg_rsp, buf_num);
+	kmem_cache_free(zfcp_fc_req_cache, fc_req);
+out:
+	zfcp_fc_wka_port_put(&adapter->gs->ds);
+}
+
+static int zfcp_fc_gspn(struct zfcp_adapter *adapter,
+			struct zfcp_fc_req *fc_req)
+{
+	DECLARE_COMPLETION_ONSTACK(completion);
+	char devno[] = "DEVNO:";
+	struct zfcp_fsf_ct_els *ct_els = &fc_req->ct_els;
+	struct zfcp_fc_gspn_req *gspn_req = &fc_req->u.gspn.req;
+	struct zfcp_fc_gspn_rsp *gspn_rsp = &fc_req->u.gspn.rsp;
+	int ret;
+
+	zfcp_fc_ct_ns_init(&gspn_req->ct_hdr, FC_NS_GSPN_ID,
+			   FC_SYMBOLIC_NAME_SIZE);
+	hton24(gspn_req->gspn.fp_fid, fc_host_port_id(adapter->scsi_host));
+
+	sg_init_one(&fc_req->sg_req, gspn_req, sizeof(*gspn_req));
+	sg_init_one(&fc_req->sg_rsp, gspn_rsp, sizeof(*gspn_rsp));
+
+	ct_els->handler = zfcp_fc_complete;
+	ct_els->handler_data = &completion;
+	ct_els->req = &fc_req->sg_req;
+	ct_els->resp = &fc_req->sg_rsp;
+
+	ret = zfcp_fsf_send_ct(&adapter->gs->ds, ct_els, NULL,
+			       ZFCP_FC_CTELS_TMO);
+	if (ret)
+		return ret;
+
+	wait_for_completion(&completion);
+	if (ct_els->status)
+		return ct_els->status;
+
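+	/* in NPIV mode, extend the fabric-reported symbolic name with the
+	 * device bus-ID and host name, unless a DEVNO: token is already
+	 * present
+	 */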
+	if (fc_host_port_type(adapter->scsi_host) == FC_PORTTYPE_NPIV &&
+	    !(strstr(gspn_rsp->gspn.fp_name, devno)))
+		snprintf(fc_host_symbolic_name(adapter->scsi_host),
+			 FC_SYMBOLIC_NAME_SIZE, "%s%s %s NAME: %s",
+			 gspn_rsp->gspn.fp_name, devno,
+			 dev_name(&adapter->ccw_device->dev),
+			 init_utsname()->nodename);
+	else
+		strlcpy(fc_host_symbolic_name(adapter->scsi_host),
+			gspn_rsp->gspn.fp_name, FC_SYMBOLIC_NAME_SIZE);
+
+	return 0;
+}
+
+static void zfcp_fc_rspn(struct zfcp_adapter *adapter,
+			 struct zfcp_fc_req *fc_req)
+{
+	DECLARE_COMPLETION_ONSTACK(completion);
+	struct Scsi_Host *shost = adapter->scsi_host;
+	struct zfcp_fsf_ct_els *ct_els = &fc_req->ct_els;
+	struct zfcp_fc_rspn_req *rspn_req = &fc_req->u.rspn.req;
+	struct fc_ct_hdr *rspn_rsp = &fc_req->u.rspn.rsp;
+	int ret, len;
+
+	zfcp_fc_ct_ns_init(&rspn_req->ct_hdr, FC_NS_RSPN_ID,
+			   FC_SYMBOLIC_NAME_SIZE);
+	hton24(rspn_req->rspn.fr_fid.fp_fid, fc_host_port_id(shost));
+	len = strlcpy(rspn_req->rspn.fr_name, fc_host_symbolic_name(shost),
+		      FC_SYMBOLIC_NAME_SIZE);
+	rspn_req->rspn.fr_name_len = len;
+
+	sg_init_one(&fc_req->sg_req, rspn_req, sizeof(*rspn_req));
+	sg_init_one(&fc_req->sg_rsp, rspn_rsp, sizeof(*rspn_rsp));
+
+	ct_els->handler = zfcp_fc_complete;
+	ct_els->handler_data = &completion;
+	ct_els->req = &fc_req->sg_req;
+	ct_els->resp = &fc_req->sg_rsp;
+
+	ret = zfcp_fsf_send_ct(&adapter->gs->ds, ct_els, NULL,
+			       ZFCP_FC_CTELS_TMO);
+	if (!ret)
+		wait_for_completion(&completion);
+}
+
+/**
+ * zfcp_fc_sym_name_update - Retrieve and update the symbolic port name
+ * @work: ns_up_work of the adapter whose symbolic port name is updated
+ *
+ * Retrieve the current symbolic port name that may have been set by
+ * the hardware using the GSPN request and update the fc_host
+ * symbolic_name sysfs attribute. When running in NPIV mode (and hence
+ * the port name is unique for this system), update the symbolic port
+ * name to add Linux specific information and update the FC nameserver
+ * using the RSPN request.
+ */
+void zfcp_fc_sym_name_update(struct work_struct *work)
+{
+	struct zfcp_adapter *adapter = container_of(work, struct zfcp_adapter,
+						    ns_up_work);
+	int ret;
+	struct zfcp_fc_req *fc_req;
+
+	if (fc_host_port_type(adapter->scsi_host) != FC_PORTTYPE_NPORT &&
+	    fc_host_port_type(adapter->scsi_host) != FC_PORTTYPE_NPIV)
+		return;
+
+	fc_req = kmem_cache_zalloc(zfcp_fc_req_cache, GFP_KERNEL);
+	if (!fc_req)
+		return;
+
+	ret = zfcp_fc_wka_port_get(&adapter->gs->ds);
+	if (ret)
+		goto out_free;
+
+	ret = zfcp_fc_gspn(adapter, fc_req);
+	if (ret || fc_host_port_type(adapter->scsi_host) != FC_PORTTYPE_NPIV)
+		goto out_ds_put;
+
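+	/* reuse the request buffer for RSPN after wiping the GSPN data */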
+	memset(fc_req, 0, sizeof(*fc_req));
+	zfcp_fc_rspn(adapter, fc_req);
+
+out_ds_put:
+	zfcp_fc_wka_port_put(&adapter->gs->ds);
+out_free:
+	kmem_cache_free(zfcp_fc_req_cache, fc_req);
+}
+
+static void zfcp_fc_ct_els_job_handler(void *data)
+{
+	struct bsg_job *job = data;
+	struct zfcp_fsf_ct_els *zfcp_ct_els = job->dd_data;
+	struct fc_bsg_reply *jr = job->reply;
+
+	jr->reply_payload_rcv_len = job->reply_payload.payload_len;
+	jr->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK;
+	jr->result = zfcp_ct_els->status ? -EIO : 0;
+	bsg_job_done(job, jr->result, jr->reply_payload_rcv_len);
+}
+
+static struct zfcp_fc_wka_port *zfcp_fc_job_wka_port(struct bsg_job *job)
+{
+	u32 preamble_word1;
+	u8 gs_type;
+	struct zfcp_adapter *adapter;
+	struct fc_bsg_request *bsg_request = job->request;
+	struct fc_rport *rport = fc_bsg_to_rport(job);
+	struct Scsi_Host *shost;
+
+	preamble_word1 = bsg_request->rqst_data.r_ct.preamble_word1;
+	gs_type = (preamble_word1 & 0xff000000) >> 24;
+
+	shost = rport ? rport_to_shost(rport) : fc_bsg_to_shost(job);
+	adapter = (struct zfcp_adapter *) shost->hostdata[0];
+
+	switch (gs_type) {
+	case FC_FST_ALIAS:
+		return &adapter->gs->as;
+	case FC_FST_MGMT:
+		return &adapter->gs->ms;
+	case FC_FST_TIME:
+		return &adapter->gs->ts;
+	case FC_FST_DIR:
+		return &adapter->gs->ds;
+	default:
+		return NULL;
+	}
+}
+
+static void zfcp_fc_ct_job_handler(void *data)
+{
+	struct bsg_job *job = data;
+	struct zfcp_fc_wka_port *wka_port;
+
+	wka_port = zfcp_fc_job_wka_port(job);
+	zfcp_fc_wka_port_put(wka_port);
+
+	zfcp_fc_ct_els_job_handler(data);
+}
+
+static int zfcp_fc_exec_els_job(struct bsg_job *job,
+				struct zfcp_adapter *adapter)
+{
+	struct zfcp_fsf_ct_els *els = job->dd_data;
+	struct fc_rport *rport = fc_bsg_to_rport(job);
+	struct fc_bsg_request *bsg_request = job->request;
+	struct zfcp_port *port;
+	u32 d_id;
+
+	if (rport) {
+		port = zfcp_get_port_by_wwpn(adapter, rport->port_name);
+		if (!port)
+			return -EINVAL;
+
+		d_id = port->d_id;
+		put_device(&port->dev);
+	} else {
+		d_id = ntoh24(bsg_request->rqst_data.h_els.port_id);
+	}
+
+	els->handler = zfcp_fc_ct_els_job_handler;
+	return zfcp_fsf_send_els(adapter, d_id, els, job->timeout / HZ);
+}
+
+static int zfcp_fc_exec_ct_job(struct bsg_job *job,
+			       struct zfcp_adapter *adapter)
+{
+	int ret;
+	struct zfcp_fsf_ct_els *ct = job->dd_data;
+	struct zfcp_fc_wka_port *wka_port;
+
+	wka_port = zfcp_fc_job_wka_port(job);
+	if (!wka_port)
+		return -EINVAL;
+
+	ret = zfcp_fc_wka_port_get(wka_port);
+	if (ret)
+		return ret;
+
+	ct->handler = zfcp_fc_ct_job_handler;
+	ret = zfcp_fsf_send_ct(wka_port, ct, NULL, job->timeout / HZ);
+	if (ret)
+		zfcp_fc_wka_port_put(wka_port);
+
+	return ret;
+}
+
+int zfcp_fc_exec_bsg_job(struct bsg_job *job)
+{
+	struct Scsi_Host *shost;
+	struct zfcp_adapter *adapter;
+	struct zfcp_fsf_ct_els *ct_els = job->dd_data;
+	struct fc_bsg_request *bsg_request = job->request;
+	struct fc_rport *rport = fc_bsg_to_rport(job);
+
+	shost = rport ? rport_to_shost(rport) : fc_bsg_to_shost(job);
+	adapter = (struct zfcp_adapter *)shost->hostdata[0];
+
+	if (!(atomic_read(&adapter->status) & ZFCP_STATUS_COMMON_OPEN))
+		return -EINVAL;
+
+	ct_els->req = job->request_payload.sg_list;
+	ct_els->resp = job->reply_payload.sg_list;
+	ct_els->handler_data = job;
+
+	switch (bsg_request->msgcode) {
+	case FC_BSG_RPT_ELS:
+	case FC_BSG_HST_ELS_NOLOGIN:
+		return zfcp_fc_exec_els_job(job, adapter);
+	case FC_BSG_RPT_CT:
+	case FC_BSG_HST_CT:
+		return zfcp_fc_exec_ct_job(job, adapter);
+	default:
+		return -EINVAL;
+	}
+}
+
+int zfcp_fc_timeout_bsg_job(struct bsg_job *job)
+{
+	/* hardware tracks timeout, reset bsg timeout to not interfere */
+	return -EAGAIN;
+}
+
+int zfcp_fc_gs_setup(struct zfcp_adapter *adapter)
+{
+	struct zfcp_fc_wka_ports *wka_ports;
+
+	wka_ports = kzalloc(sizeof(struct zfcp_fc_wka_ports), GFP_KERNEL);
+	if (!wka_ports)
+		return -ENOMEM;
+
+	adapter->gs = wka_ports;
+	zfcp_fc_wka_port_init(&wka_ports->ms, FC_FID_MGMT_SERV, adapter);
+	zfcp_fc_wka_port_init(&wka_ports->ts, FC_FID_TIME_SERV, adapter);
+	zfcp_fc_wka_port_init(&wka_ports->ds, FC_FID_DIR_SERV, adapter);
+	zfcp_fc_wka_port_init(&wka_ports->as, FC_FID_ALIASES, adapter);
+
+	return 0;
+}
+
+void zfcp_fc_gs_destroy(struct zfcp_adapter *adapter)
+{
+	kfree(adapter->gs);
+	adapter->gs = NULL;
+}
+
diff --git a/drivers/s390/scsi/zfcp_fc.h b/drivers/s390/scsi/zfcp_fc.h
new file mode 100644
index 0000000..3cd7472
--- /dev/null
+++ b/drivers/s390/scsi/zfcp_fc.h
@@ -0,0 +1,301 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * zfcp device driver
+ *
+ * Fibre Channel related definitions and inline functions for the zfcp
+ * device driver
+ *
+ * Copyright IBM Corp. 2009, 2017
+ */
+
+#ifndef ZFCP_FC_H
+#define ZFCP_FC_H
+
+#include <scsi/fc/fc_els.h>
+#include <scsi/fc/fc_fcp.h>
+#include <scsi/fc/fc_ns.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_tcq.h>
+#include "zfcp_fsf.h"
+
+#define ZFCP_FC_CT_SIZE_PAGE	  (PAGE_SIZE - sizeof(struct fc_ct_hdr))
+#define ZFCP_FC_GPN_FT_ENT_PAGE	  (ZFCP_FC_CT_SIZE_PAGE \
+					/ sizeof(struct fc_gpn_ft_resp))
+#define ZFCP_FC_GPN_FT_NUM_BUFS	  4 /* memory pages */
+
+#define ZFCP_FC_GPN_FT_MAX_SIZE	  (ZFCP_FC_GPN_FT_NUM_BUFS * PAGE_SIZE \
+					- sizeof(struct fc_ct_hdr))
+#define ZFCP_FC_GPN_FT_MAX_ENT	  (ZFCP_FC_GPN_FT_NUM_BUFS * \
+					(ZFCP_FC_GPN_FT_ENT_PAGE + 1))
+
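+/* CT/ELS hardware timeout in seconds: twice R_A_TOV (FC_DEF_R_A_TOV is in ms) */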
+#define ZFCP_FC_CTELS_TMO	(2 * FC_DEF_R_A_TOV / 1000)
+
+/**
+ * struct zfcp_fc_event - FC HBAAPI event for internal queueing from irq context
+ * @code: Event code
+ * @data: Event data
+ * @list: list_head for zfcp_fc_events list
+ */
+struct zfcp_fc_event {
+	enum fc_host_event_code code;
+	u32 data;
+	struct list_head list;
+};
+
+/**
+ * struct zfcp_fc_events - Infrastructure for posting FC events from irq context
+ * @list: List for queueing of events from irq context to workqueue
+ * @list_lock: Lock for event list
+ * @work: work_struct for forwarding events in workqueue
+ */
+struct zfcp_fc_events {
+	struct list_head list;
+	spinlock_t list_lock;
+	struct work_struct work;
+};
+
+/**
+ * struct zfcp_fc_gid_pn_req - container for ct header plus gid_pn request
+ * @ct_hdr: FC GS common transport header
+ * @gid_pn: GID_PN request
+ */
+struct zfcp_fc_gid_pn_req {
+	struct fc_ct_hdr	ct_hdr;
+	struct fc_ns_gid_pn	gid_pn;
+} __packed;
+
+/**
+ * struct zfcp_fc_gid_pn_rsp - container for ct header plus gid_pn response
+ * @ct_hdr: FC GS common transport header
+ * @gid_pn: GID_PN response
+ */
+struct zfcp_fc_gid_pn_rsp {
+	struct fc_ct_hdr	ct_hdr;
+	struct fc_gid_pn_resp	gid_pn;
+} __packed;
+
+/**
+ * struct zfcp_fc_gpn_ft_req - container for ct header plus gpn_ft request
+ * @ct_hdr: FC GS common transport header
+ * @gpn_ft: GPN_FT request
+ */
+struct zfcp_fc_gpn_ft_req {
+	struct fc_ct_hdr	ct_hdr;
+	struct fc_ns_gid_ft	gpn_ft;
+} __packed;
+
+/**
+ * struct zfcp_fc_gspn_req - container for ct header plus GSPN_ID request
+ * @ct_hdr: FC GS common transport header
+ * @gspn: GSPN_ID request
+ */
+struct zfcp_fc_gspn_req {
+	struct fc_ct_hdr	ct_hdr;
+	struct fc_gid_pn_resp	gspn;
+} __packed;
+
+/**
+ * struct zfcp_fc_gspn_rsp - container for ct header plus GSPN_ID response
+ * @ct_hdr: FC GS common transport header
+ * @gspn: GSPN_ID response
+ * @name: The name string of the GSPN_ID response
+ */
+struct zfcp_fc_gspn_rsp {
+	struct fc_ct_hdr	ct_hdr;
+	struct fc_gspn_resp	gspn;
+	char			name[FC_SYMBOLIC_NAME_SIZE];
+} __packed;
+
+/**
+ * struct zfcp_fc_rspn_req - container for ct header plus RSPN_ID request
+ * @ct_hdr: FC GS common transport header
+ * @rspn: RSPN_ID request
+ * @name: The name string of the RSPN_ID request
+ */
+struct zfcp_fc_rspn_req {
+	struct fc_ct_hdr	ct_hdr;
+	struct fc_ns_rspn	rspn;
+	char			name[FC_SYMBOLIC_NAME_SIZE];
+} __packed;
+
+/**
+ * struct zfcp_fc_req - Container for FC ELS and CT requests sent from zfcp
+ * @ct_els: data required for issuing fsf command
+ * @sg_req: scatterlist entry for request data
+ * @sg_rsp: scatterlist entry for response data
+ * @u: request specific data
+ */
+struct zfcp_fc_req {
+	struct zfcp_fsf_ct_els				ct_els;
+	struct scatterlist				sg_req;
+	struct scatterlist				sg_rsp;
+	union {
+		struct {
+			struct fc_els_adisc		req;
+			struct fc_els_adisc		rsp;
+		} adisc;
+		struct {
+			struct zfcp_fc_gid_pn_req	req;
+			struct zfcp_fc_gid_pn_rsp	rsp;
+		} gid_pn;
+		struct {
+			struct scatterlist sg_rsp2[ZFCP_FC_GPN_FT_NUM_BUFS - 1];
+			struct zfcp_fc_gpn_ft_req	req;
+		} gpn_ft;
+		struct {
+			struct zfcp_fc_gspn_req		req;
+			struct zfcp_fc_gspn_rsp		rsp;
+		} gspn;
+		struct {
+			struct zfcp_fc_rspn_req		req;
+			struct fc_ct_hdr		rsp;
+		} rspn;
+	} u;
+};
+
+/**
+ * enum zfcp_fc_wka_status - FC WKA port status in zfcp
+ * @ZFCP_FC_WKA_PORT_OFFLINE: Port is closed and not in use
+ * @ZFCP_FC_WKA_PORT_CLOSING: The FSF "close port" request is pending
+ * @ZFCP_FC_WKA_PORT_OPENING: The FSF "open port" request is pending
+ * @ZFCP_FC_WKA_PORT_ONLINE: The port is open and the port handle is valid
+ */
+enum zfcp_fc_wka_status {
+	ZFCP_FC_WKA_PORT_OFFLINE,
+	ZFCP_FC_WKA_PORT_CLOSING,
+	ZFCP_FC_WKA_PORT_OPENING,
+	ZFCP_FC_WKA_PORT_ONLINE,
+};
+
+/**
+ * struct zfcp_fc_wka_port - representation of well-known-address (WKA) FC port
+ * @adapter: Pointer to adapter structure this WKA port belongs to
+ * @completion_wq: Wait for completion of open/close command
+ * @status: Current status of WKA port
+ * @refcount: Reference count to keep port open as long as it is in use
+ * @d_id: FC destination id or well-known-address
+ * @handle: FSF handle for the open WKA port
+ * @mutex: Mutex used during opening/closing state changes
+ * @work: For delaying the closing of the WKA port
+ */
+struct zfcp_fc_wka_port {
+	struct zfcp_adapter	*adapter;
+	wait_queue_head_t	completion_wq;
+	enum zfcp_fc_wka_status	status;
+	atomic_t		refcount;
+	u32			d_id;
+	u32			handle;
+	struct mutex		mutex;
+	struct delayed_work	work;
+};
+
+/**
+ * struct zfcp_fc_wka_ports - Data structures for FC generic services
+ * @ms: FC Management service
+ * @ts: FC time service
+ * @ds: FC directory service
+ * @as: FC alias service
+ */
+struct zfcp_fc_wka_ports {
+	struct zfcp_fc_wka_port ms;
+	struct zfcp_fc_wka_port ts;
+	struct zfcp_fc_wka_port ds;
+	struct zfcp_fc_wka_port as;
+};
+
+/**
+ * zfcp_fc_scsi_to_fcp - setup FCP command with data from scsi_cmnd
+ * @fcp: fcp_cmnd to setup
+ * @scsi: scsi_cmnd supplying LUN, task attributes/flags and CDB
+ */
+static inline
+void zfcp_fc_scsi_to_fcp(struct fcp_cmnd *fcp, struct scsi_cmnd *scsi)
+{
+	u32 datalen;
+
+	int_to_scsilun(scsi->device->lun, (struct scsi_lun *) &fcp->fc_lun);
+
+	fcp->fc_pri_ta = FCP_PTA_SIMPLE;
+
+	if (scsi->sc_data_direction == DMA_FROM_DEVICE)
+		fcp->fc_flags |= FCP_CFL_RDDATA;
+	if (scsi->sc_data_direction == DMA_TO_DEVICE)
+		fcp->fc_flags |= FCP_CFL_WRDATA;
+
+	memcpy(fcp->fc_cdb, scsi->cmnd, scsi->cmd_len);
+
+	datalen = scsi_bufflen(scsi);
+	fcp->fc_dl = cpu_to_be32(datalen);
+
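+	/* with T10 DIF type 1, FCP_DL must also cover the 8-byte
+	 * protection information tuple appended to each sector
+	 */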
+	if (scsi_get_prot_type(scsi) == SCSI_PROT_DIF_TYPE1) {
+		datalen += datalen / scsi->device->sector_size * 8;
+		fcp->fc_dl = cpu_to_be32(datalen);
+	}
+}
+
+/**
+ * zfcp_fc_fcp_tm() - Setup FCP command as task management command.
+ * @fcp: Pointer to FCP_CMND IU to set up.
+ * @dev: Pointer to the scsi_device to send the task management command to.
+ * @tm_flags: Task management flags to setup tm command.
+ */
+static inline
+void zfcp_fc_fcp_tm(struct fcp_cmnd *fcp, struct scsi_device *dev, u8 tm_flags)
+{
+	int_to_scsilun(dev->lun, (struct scsi_lun *) &fcp->fc_lun);
+	fcp->fc_tm_flags = tm_flags;
+}
+
+/**
+ * zfcp_fc_eval_fcp_rsp - evaluate FCP RSP IU and update scsi_cmnd accordingly
+ * @fcp_rsp: FCP RSP IU to evaluate
+ * @scsi: SCSI command to update with status and sense data
+ */
+static inline
+void zfcp_fc_eval_fcp_rsp(struct fcp_resp_with_ext *fcp_rsp,
+			  struct scsi_cmnd *scsi)
+{
+	struct fcp_resp_rsp_info *rsp_info;
+	char *sense;
+	u32 sense_len, resid;
+	u8 rsp_flags;
+
+	set_msg_byte(scsi, COMMAND_COMPLETE);
+	scsi->result |= fcp_rsp->resp.fr_status;
+
+	rsp_flags = fcp_rsp->resp.fr_flags;
+
+	if (unlikely(rsp_flags & FCP_RSP_LEN_VAL)) {
+		rsp_info = (struct fcp_resp_rsp_info *) &fcp_rsp[1];
+		if (rsp_info->rsp_code == FCP_TMF_CMPL)
+			set_host_byte(scsi, DID_OK);
+		else {
+			set_host_byte(scsi, DID_ERROR);
+			return;
+		}
+	}
+
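+	/* sense data follows the FCP_RSP IU, after any response-info field */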
+	if (unlikely(rsp_flags & FCP_SNS_LEN_VAL)) {
+		sense = (char *) &fcp_rsp[1];
+		if (rsp_flags & FCP_RSP_LEN_VAL)
+			sense += be32_to_cpu(fcp_rsp->ext.fr_rsp_len);
+		sense_len = min_t(u32, be32_to_cpu(fcp_rsp->ext.fr_sns_len),
+				  SCSI_SENSE_BUFFERSIZE);
+		memcpy(scsi->sense_buffer, sense, sense_len);
+	}
+
+	if (unlikely(rsp_flags & FCP_RESID_UNDER)) {
+		resid = be32_to_cpu(fcp_rsp->ext.fr_resid);
+		scsi_set_resid(scsi, resid);
+		if (scsi_bufflen(scsi) - resid < scsi->underflow &&
+		     !(rsp_flags & FCP_SNS_LEN_VAL) &&
+		     fcp_rsp->resp.fr_status == SAM_STAT_GOOD)
+			set_host_byte(scsi, DID_ERROR);
+	} else if (unlikely(rsp_flags & FCP_RESID_OVER)) {
+		/* FCP_DL was not sufficient for SCSI data length */
+		if (fcp_rsp->resp.fr_status == SAM_STAT_GOOD)
+			set_host_byte(scsi, DID_ERROR);
+	}
+}
+
+#endif
diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c
new file mode 100644
index 0000000..3c86e27
--- /dev/null
+++ b/drivers/s390/scsi/zfcp_fsf.c
@@ -0,0 +1,2418 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * zfcp device driver
+ *
+ * Implementation of FSF commands.
+ *
+ * Copyright IBM Corp. 2002, 2018
+ */
+
+#define KMSG_COMPONENT "zfcp"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <linux/blktrace_api.h>
+#include <linux/slab.h>
+#include <scsi/fc/fc_els.h>
+#include "zfcp_ext.h"
+#include "zfcp_fc.h"
+#include "zfcp_dbf.h"
+#include "zfcp_qdio.h"
+#include "zfcp_reqlist.h"
+
+struct kmem_cache *zfcp_fsf_qtcb_cache;
+
+static void zfcp_fsf_request_timeout_handler(struct timer_list *t)
+{
+	struct zfcp_fsf_req *fsf_req = from_timer(fsf_req, t, timer);
+	struct zfcp_adapter *adapter = fsf_req->adapter;
+
+	zfcp_qdio_siosl(adapter);
+	zfcp_erp_adapter_reopen(adapter, ZFCP_STATUS_COMMON_ERP_FAILED,
+				"fsrth_1");
+}
+
+static void zfcp_fsf_start_timer(struct zfcp_fsf_req *fsf_req,
+				 unsigned long timeout)
+{
+	fsf_req->timer.function = zfcp_fsf_request_timeout_handler;
+	fsf_req->timer.expires = jiffies + timeout;
+	add_timer(&fsf_req->timer);
+}
+
+static void zfcp_fsf_start_erp_timer(struct zfcp_fsf_req *fsf_req)
+{
+	BUG_ON(!fsf_req->erp_action);
+	fsf_req->timer.function = zfcp_erp_timeout_handler;
+	fsf_req->timer.expires = jiffies + 30 * HZ;
+	add_timer(&fsf_req->timer);
+}
+
+/* association between FSF command and FSF QTCB type */
+static u32 fsf_qtcb_type[] = {
+	[FSF_QTCB_FCP_CMND] =             FSF_IO_COMMAND,
+	[FSF_QTCB_ABORT_FCP_CMND] =       FSF_SUPPORT_COMMAND,
+	[FSF_QTCB_OPEN_PORT_WITH_DID] =   FSF_SUPPORT_COMMAND,
+	[FSF_QTCB_OPEN_LUN] =             FSF_SUPPORT_COMMAND,
+	[FSF_QTCB_CLOSE_LUN] =            FSF_SUPPORT_COMMAND,
+	[FSF_QTCB_CLOSE_PORT] =           FSF_SUPPORT_COMMAND,
+	[FSF_QTCB_CLOSE_PHYSICAL_PORT] =  FSF_SUPPORT_COMMAND,
+	[FSF_QTCB_SEND_ELS] =             FSF_SUPPORT_COMMAND,
+	[FSF_QTCB_SEND_GENERIC] =         FSF_SUPPORT_COMMAND,
+	[FSF_QTCB_EXCHANGE_CONFIG_DATA] = FSF_CONFIG_COMMAND,
+	[FSF_QTCB_EXCHANGE_PORT_DATA] =   FSF_PORT_COMMAND,
+	[FSF_QTCB_DOWNLOAD_CONTROL_FILE] = FSF_SUPPORT_COMMAND,
+	[FSF_QTCB_UPLOAD_CONTROL_FILE] =  FSF_SUPPORT_COMMAND
+};
+
+static void zfcp_fsf_class_not_supp(struct zfcp_fsf_req *req)
+{
+	dev_err(&req->adapter->ccw_device->dev, "FCP device not "
+		"operational because of an unsupported FC class\n");
+	zfcp_erp_adapter_shutdown(req->adapter, 0, "fscns_1");
+	req->status |= ZFCP_STATUS_FSFREQ_ERROR;
+}
+
+/**
+ * zfcp_fsf_req_free - free memory used by fsf request
+ * @req: pointer to struct zfcp_fsf_req
+ */
+void zfcp_fsf_req_free(struct zfcp_fsf_req *req)
+{
+	if (likely(req->pool)) {
+		if (likely(req->qtcb))
+			mempool_free(req->qtcb, req->adapter->pool.qtcb_pool);
+		mempool_free(req, req->pool);
+		return;
+	}
+
+	if (likely(req->qtcb))
+		kmem_cache_free(zfcp_fsf_qtcb_cache, req->qtcb);
+	kfree(req);
+}
+
+static void zfcp_fsf_status_read_port_closed(struct zfcp_fsf_req *req)
+{
+	unsigned long flags;
+	struct fsf_status_read_buffer *sr_buf = req->data;
+	struct zfcp_adapter *adapter = req->adapter;
+	struct zfcp_port *port;
+	int d_id = ntoh24(sr_buf->d_id);
+
+	read_lock_irqsave(&adapter->port_list_lock, flags);
+	list_for_each_entry(port, &adapter->port_list, list)
+		if (port->d_id == d_id) {
+			zfcp_erp_port_reopen(port, 0, "fssrpc1");
+			break;
+		}
+	read_unlock_irqrestore(&adapter->port_list_lock, flags);
+}
+
+static void zfcp_fsf_link_down_info_eval(struct zfcp_fsf_req *req,
+					 struct fsf_link_down_info *link_down)
+{
+	struct zfcp_adapter *adapter = req->adapter;
+
+	if (atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED)
+		return;
+
+	atomic_or(ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED, &adapter->status);
+
+	zfcp_scsi_schedule_rports_block(adapter);
+
+	if (!link_down)
+		goto out;
+
+	switch (link_down->error_code) {
+	case FSF_PSQ_LINK_NO_LIGHT:
+		dev_warn(&req->adapter->ccw_device->dev,
+			 "There is no light signal from the local "
+			 "fibre channel cable\n");
+		break;
+	case FSF_PSQ_LINK_WRAP_PLUG:
+		dev_warn(&req->adapter->ccw_device->dev,
+			 "There is a wrap plug instead of a fibre "
+			 "channel cable\n");
+		break;
+	case FSF_PSQ_LINK_NO_FCP:
+		dev_warn(&req->adapter->ccw_device->dev,
+			 "The adjacent fibre channel node does not "
+			 "support FCP\n");
+		break;
+	case FSF_PSQ_LINK_FIRMWARE_UPDATE:
+		dev_warn(&req->adapter->ccw_device->dev,
+			 "The FCP device is suspended because of a "
+			 "firmware update\n");
+		break;
+	case FSF_PSQ_LINK_INVALID_WWPN:
+		dev_warn(&req->adapter->ccw_device->dev,
+			 "The FCP device detected a WWPN that is "
+			 "duplicate or not valid\n");
+		break;
+	case FSF_PSQ_LINK_NO_NPIV_SUPPORT:
+		dev_warn(&req->adapter->ccw_device->dev,
+			 "The fibre channel fabric does not support NPIV\n");
+		break;
+	case FSF_PSQ_LINK_NO_FCP_RESOURCES:
+		dev_warn(&req->adapter->ccw_device->dev,
+			 "The FCP adapter cannot support more NPIV ports\n");
+		break;
+	case FSF_PSQ_LINK_NO_FABRIC_RESOURCES:
+		dev_warn(&req->adapter->ccw_device->dev,
+			 "The adjacent switch cannot support "
+			 "more NPIV ports\n");
+		break;
+	case FSF_PSQ_LINK_FABRIC_LOGIN_UNABLE:
+		dev_warn(&req->adapter->ccw_device->dev,
+			 "The FCP adapter could not log in to the "
+			 "fibre channel fabric\n");
+		break;
+	case FSF_PSQ_LINK_WWPN_ASSIGNMENT_CORRUPTED:
+		dev_warn(&req->adapter->ccw_device->dev,
+			 "The WWPN assignment file on the FCP adapter "
+			 "has been damaged\n");
+		break;
+	case FSF_PSQ_LINK_MODE_TABLE_CURRUPTED:
+		dev_warn(&req->adapter->ccw_device->dev,
+			 "The mode table on the FCP adapter "
+			 "has been damaged\n");
+		break;
+	case FSF_PSQ_LINK_NO_WWPN_ASSIGNMENT:
+		dev_warn(&req->adapter->ccw_device->dev,
+			 "All NPIV ports on the FCP adapter have "
+			 "been assigned\n");
+		break;
+	default:
+		dev_warn(&req->adapter->ccw_device->dev,
+			 "The link between the FCP adapter and "
+			 "the FC fabric is down\n");
+	}
+out:
+	zfcp_erp_set_adapter_status(adapter, ZFCP_STATUS_COMMON_ERP_FAILED);
+}
+
+static void zfcp_fsf_status_read_link_down(struct zfcp_fsf_req *req)
+{
+	struct fsf_status_read_buffer *sr_buf = req->data;
+	struct fsf_link_down_info *ldi =
+		(struct fsf_link_down_info *) &sr_buf->payload;
+
+	switch (sr_buf->status_subtype) {
+	case FSF_STATUS_READ_SUB_NO_PHYSICAL_LINK:
+	case FSF_STATUS_READ_SUB_FDISC_FAILED:
+		zfcp_fsf_link_down_info_eval(req, ldi);
+		break;
+	case FSF_STATUS_READ_SUB_FIRMWARE_UPDATE:
+		zfcp_fsf_link_down_info_eval(req, NULL);
+	}
+}
+
+static void zfcp_fsf_status_read_handler(struct zfcp_fsf_req *req)
+{
+	struct zfcp_adapter *adapter = req->adapter;
+	struct fsf_status_read_buffer *sr_buf = req->data;
+
+	if (req->status & ZFCP_STATUS_FSFREQ_DISMISSED) {
+		zfcp_dbf_hba_fsf_uss("fssrh_1", req);
+		mempool_free(virt_to_page(sr_buf), adapter->pool.sr_data);
+		zfcp_fsf_req_free(req);
+		return;
+	}
+
+	zfcp_dbf_hba_fsf_uss("fssrh_4", req);
+
+	switch (sr_buf->status_type) {
+	case FSF_STATUS_READ_PORT_CLOSED:
+		zfcp_fsf_status_read_port_closed(req);
+		break;
+	case FSF_STATUS_READ_INCOMING_ELS:
+		zfcp_fc_incoming_els(req);
+		break;
+	case FSF_STATUS_READ_SENSE_DATA_AVAIL:
+		break;
+	case FSF_STATUS_READ_BIT_ERROR_THRESHOLD:
+		dev_warn(&adapter->ccw_device->dev,
+			 "The error threshold for checksum statistics "
+			 "has been exceeded\n");
+		zfcp_dbf_hba_bit_err("fssrh_3", req);
+		break;
+	case FSF_STATUS_READ_LINK_DOWN:
+		zfcp_fsf_status_read_link_down(req);
+		zfcp_fc_enqueue_event(adapter, FCH_EVT_LINKDOWN, 0);
+		break;
+	case FSF_STATUS_READ_LINK_UP:
+		dev_info(&adapter->ccw_device->dev,
+			 "The local link has been restored\n");
+		/* All ports should be marked as ready to run again */
+		zfcp_erp_set_adapter_status(adapter,
+					    ZFCP_STATUS_COMMON_RUNNING);
+		zfcp_erp_adapter_reopen(adapter,
+					ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED |
+					ZFCP_STATUS_COMMON_ERP_FAILED,
+					"fssrh_2");
+		zfcp_fc_enqueue_event(adapter, FCH_EVT_LINKUP, 0);
+		break;
+	case FSF_STATUS_READ_NOTIFICATION_LOST:
+		if (sr_buf->status_subtype & FSF_STATUS_READ_SUB_INCOMING_ELS)
+			zfcp_fc_conditional_port_scan(adapter);
+		break;
+	case FSF_STATUS_READ_FEATURE_UPDATE_ALERT:
+		adapter->adapter_features = sr_buf->payload.word[0];
+		break;
+	}
+
+	mempool_free(virt_to_page(sr_buf), adapter->pool.sr_data);
+	zfcp_fsf_req_free(req);
+
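+	/* one status read buffer was consumed; let stat_work replenish it */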
+	atomic_inc(&adapter->stat_miss);
+	queue_work(adapter->work_queue, &adapter->stat_work);
+}
+
+static void zfcp_fsf_fsfstatus_qual_eval(struct zfcp_fsf_req *req)
+{
+	switch (req->qtcb->header.fsf_status_qual.word[0]) {
+	case FSF_SQ_FCP_RSP_AVAILABLE:
+	case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
+	case FSF_SQ_NO_RETRY_POSSIBLE:
+	case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
+		return;
+	case FSF_SQ_COMMAND_ABORTED:
+		break;
+	case FSF_SQ_NO_RECOM:
+		dev_err(&req->adapter->ccw_device->dev,
+			"The FCP adapter reported a problem "
+			"that cannot be recovered\n");
+		zfcp_qdio_siosl(req->adapter);
+		zfcp_erp_adapter_shutdown(req->adapter, 0, "fsfsqe1");
+		break;
+	}
+	/* all non-return statuses set FSFREQ_ERROR */
+	req->status |= ZFCP_STATUS_FSFREQ_ERROR;
+}
+
+static void zfcp_fsf_fsfstatus_eval(struct zfcp_fsf_req *req)
+{
+	if (unlikely(req->status & ZFCP_STATUS_FSFREQ_ERROR))
+		return;
+
+	switch (req->qtcb->header.fsf_status) {
+	case FSF_UNKNOWN_COMMAND:
+		dev_err(&req->adapter->ccw_device->dev,
+			"The FCP adapter does not recognize the command 0x%x\n",
+			req->qtcb->header.fsf_command);
+		zfcp_erp_adapter_shutdown(req->adapter, 0, "fsfse_1");
+		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
+		break;
+	case FSF_ADAPTER_STATUS_AVAILABLE:
+		zfcp_fsf_fsfstatus_qual_eval(req);
+		break;
+	}
+}
+
+static void zfcp_fsf_protstatus_eval(struct zfcp_fsf_req *req)
+{
+	struct zfcp_adapter *adapter = req->adapter;
+	struct fsf_qtcb *qtcb = req->qtcb;
+	union fsf_prot_status_qual *psq = &qtcb->prefix.prot_status_qual;
+
+	zfcp_dbf_hba_fsf_response(req);
+
+	if (req->status & ZFCP_STATUS_FSFREQ_DISMISSED) {
+		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
+		return;
+	}
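+	/* the CT header fills one entry-sized slot of the first page, so
+	 * each page provides ZFCP_FC_GPN_FT_ENT_PAGE + 1 slots; acc steps
+	 * to the next scatterlist page once a page's slots are consumed
+	 */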
+
+	switch (qtcb->prefix.prot_status) {
+	case FSF_PROT_GOOD:
+	case FSF_PROT_FSF_STATUS_PRESENTED:
+		return;
+	case FSF_PROT_QTCB_VERSION_ERROR:
+		dev_err(&adapter->ccw_device->dev,
+			"QTCB version 0x%x not supported by FCP adapter "
+			"(0x%x to 0x%x)\n", FSF_QTCB_CURRENT_VERSION,
+			psq->word[0], psq->word[1]);
+		zfcp_erp_adapter_shutdown(adapter, 0, "fspse_1");
+		break;
+	case FSF_PROT_ERROR_STATE:
+	case FSF_PROT_SEQ_NUMB_ERROR:
+		zfcp_erp_adapter_reopen(adapter, 0, "fspse_2");
+		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
+		break;
+	case FSF_PROT_UNSUPP_QTCB_TYPE:
+		dev_err(&adapter->ccw_device->dev,
+			"The QTCB type is not supported by the FCP adapter\n");
+		zfcp_erp_adapter_shutdown(adapter, 0, "fspse_3");
+		break;
+	case FSF_PROT_HOST_CONNECTION_INITIALIZING:
+		atomic_or(ZFCP_STATUS_ADAPTER_HOST_CON_INIT,
+				&adapter->status);
+		break;
+	case FSF_PROT_DUPLICATE_REQUEST_ID:
+		dev_err(&adapter->ccw_device->dev,
+			"0x%Lx is an ambiguous request identifier\n",
+			(unsigned long long)qtcb->bottom.support.req_handle);
+		zfcp_erp_adapter_shutdown(adapter, 0, "fspse_4");
+		break;
+	case FSF_PROT_LINK_DOWN:
+		zfcp_fsf_link_down_info_eval(req, &psq->link_down_info);
+		/* go through reopen to flush pending requests */
+		zfcp_erp_adapter_reopen(adapter, 0, "fspse_6");
+		break;
+	case FSF_PROT_REEST_QUEUE:
+		/* All ports should be marked as ready to run again */
+		zfcp_erp_set_adapter_status(adapter,
+					    ZFCP_STATUS_COMMON_RUNNING);
+		zfcp_erp_adapter_reopen(adapter,
+					ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED |
+					ZFCP_STATUS_COMMON_ERP_FAILED,
+					"fspse_8");
+		break;
+	default:
+		dev_err(&adapter->ccw_device->dev,
+			"0x%x is not a valid transfer protocol status\n",
+			qtcb->prefix.prot_status);
+		zfcp_qdio_siosl(adapter);
+		zfcp_erp_adapter_shutdown(adapter, 0, "fspse_9");
+	}
+	req->status |= ZFCP_STATUS_FSFREQ_ERROR;
+}
+
+/**
+ * zfcp_fsf_req_complete - process completion of a FSF request
+ * @req: The FSF request that has been completed.
+ *
+ * When a request has been completed either from the FCP adapter,
+ * or it has been dismissed due to a queue shutdown, this function
+ * is called to process the completion status and trigger further
+ * events related to the FSF request.
+ */
+static void zfcp_fsf_req_complete(struct zfcp_fsf_req *req)
+{
+	if (unlikely(req->fsf_command == FSF_QTCB_UNSOLICITED_STATUS)) {
+		zfcp_fsf_status_read_handler(req);
+		return;
+	}
+
+	del_timer(&req->timer);
+	zfcp_fsf_protstatus_eval(req);
+	zfcp_fsf_fsfstatus_eval(req);
+	req->handler(req);
+
+	if (req->erp_action)
+		zfcp_erp_notify(req->erp_action, 0);
+
+	if (likely(req->status & ZFCP_STATUS_FSFREQ_CLEANUP))
+		zfcp_fsf_req_free(req);
+	else
+		complete(&req->completion);
+}
+
+/**
+ * zfcp_fsf_req_dismiss_all - dismiss all fsf requests
+ * @adapter: pointer to struct zfcp_adapter
+ *
+ * Never ever call this without shutting down the adapter first.
+ * Otherwise the adapter would continue using and corrupting s390 storage.
+ * Included BUG_ON() call to ensure this is done.
+ * ERP is supposed to be the only user of this function.
+ */
+void zfcp_fsf_req_dismiss_all(struct zfcp_adapter *adapter)
+{
+	struct zfcp_fsf_req *req, *tmp;
+	LIST_HEAD(remove_queue);
+
+	BUG_ON(atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP);
+	zfcp_reqlist_move(adapter->req_list, &remove_queue);
+
+	list_for_each_entry_safe(req, tmp, &remove_queue, list) {
+		list_del(&req->list);
+		req->status |= ZFCP_STATUS_FSFREQ_DISMISSED;
+		zfcp_fsf_req_complete(req);
+	}
+}
+
+#define ZFCP_FSF_PORTSPEED_1GBIT	(1 <<  0)
+#define ZFCP_FSF_PORTSPEED_2GBIT	(1 <<  1)
+#define ZFCP_FSF_PORTSPEED_4GBIT	(1 <<  2)
+#define ZFCP_FSF_PORTSPEED_10GBIT	(1 <<  3)
+#define ZFCP_FSF_PORTSPEED_8GBIT	(1 <<  4)
+#define ZFCP_FSF_PORTSPEED_16GBIT	(1 <<  5)
+#define ZFCP_FSF_PORTSPEED_32GBIT	(1 <<  6)
+#define ZFCP_FSF_PORTSPEED_64GBIT	(1 <<  7)
+#define ZFCP_FSF_PORTSPEED_128GBIT	(1 <<  8)
+#define ZFCP_FSF_PORTSPEED_NOT_NEGOTIATED (1 << 15)
+
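+/* map FSF portspeed bits to the FC transport's FC_PORTSPEED_* bits */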
+static u32 zfcp_fsf_convert_portspeed(u32 fsf_speed)
+{
+	u32 fdmi_speed = 0;
+	if (fsf_speed & ZFCP_FSF_PORTSPEED_1GBIT)
+		fdmi_speed |= FC_PORTSPEED_1GBIT;
+	if (fsf_speed & ZFCP_FSF_PORTSPEED_2GBIT)
+		fdmi_speed |= FC_PORTSPEED_2GBIT;
+	if (fsf_speed & ZFCP_FSF_PORTSPEED_4GBIT)
+		fdmi_speed |= FC_PORTSPEED_4GBIT;
+	if (fsf_speed & ZFCP_FSF_PORTSPEED_10GBIT)
+		fdmi_speed |= FC_PORTSPEED_10GBIT;
+	if (fsf_speed & ZFCP_FSF_PORTSPEED_8GBIT)
+		fdmi_speed |= FC_PORTSPEED_8GBIT;
+	if (fsf_speed & ZFCP_FSF_PORTSPEED_16GBIT)
+		fdmi_speed |= FC_PORTSPEED_16GBIT;
+	if (fsf_speed & ZFCP_FSF_PORTSPEED_32GBIT)
+		fdmi_speed |= FC_PORTSPEED_32GBIT;
+	if (fsf_speed & ZFCP_FSF_PORTSPEED_64GBIT)
+		fdmi_speed |= FC_PORTSPEED_64GBIT;
+	if (fsf_speed & ZFCP_FSF_PORTSPEED_128GBIT)
+		fdmi_speed |= FC_PORTSPEED_128GBIT;
+	if (fsf_speed & ZFCP_FSF_PORTSPEED_NOT_NEGOTIATED)
+		fdmi_speed |= FC_PORTSPEED_NOT_NEGOTIATED;
+	return fdmi_speed;
+}
+
+static int zfcp_fsf_exchange_config_evaluate(struct zfcp_fsf_req *req)
+{
+	struct fsf_qtcb_bottom_config *bottom = &req->qtcb->bottom.config;
+	struct zfcp_adapter *adapter = req->adapter;
+	struct Scsi_Host *shost = adapter->scsi_host;
+	struct fc_els_flogi *nsp, *plogi;
+
+	/* adjust pointers for missing command code: the QTCB payloads omit
+	 * the leading 4-byte command word of struct fc_els_flogi
+	 */
+	nsp = (struct fc_els_flogi *) ((u8 *)&bottom->nport_serv_param
+					- sizeof(u32));
+	plogi = (struct fc_els_flogi *) ((u8 *)&bottom->plogi_payload
+					- sizeof(u32));
+
+	if (req->data)
+		memcpy(req->data, bottom, sizeof(*bottom));
+
+	fc_host_port_name(shost) = be64_to_cpu(nsp->fl_wwpn);
+	fc_host_node_name(shost) = be64_to_cpu(nsp->fl_wwnn);
+	fc_host_supported_classes(shost) = FC_COS_CLASS2 | FC_COS_CLASS3;
+
+	adapter->timer_ticks = bottom->timer_interval & ZFCP_FSF_TIMER_INT_MASK;
+	adapter->stat_read_buf_num = max(bottom->status_read_buf_num,
+					 (u16)FSF_STATUS_READS_RECOM);
+
+	if (fc_host_permanent_port_name(shost) == -1)
+		fc_host_permanent_port_name(shost) = fc_host_port_name(shost);
+
+	zfcp_scsi_set_prot(adapter);
+
+	/* no error return above here, otherwise must fix call chains */
+	/* do not evaluate invalid fields */
+	if (req->qtcb->header.fsf_status == FSF_EXCHANGE_CONFIG_DATA_INCOMPLETE)
+		return 0;
+
+	fc_host_port_id(shost) = ntoh24(bottom->s_id);
+	fc_host_speed(shost) =
+		zfcp_fsf_convert_portspeed(bottom->fc_link_speed);
+
+	adapter->hydra_version = bottom->adapter_type;
+
+	switch (bottom->fc_topology) {
+	case FSF_TOPO_P2P:
+		adapter->peer_d_id = ntoh24(bottom->peer_d_id);
+		adapter->peer_wwpn = be64_to_cpu(plogi->fl_wwpn);
+		adapter->peer_wwnn = be64_to_cpu(plogi->fl_wwnn);
+		fc_host_port_type(shost) = FC_PORTTYPE_PTP;
+		break;
+	case FSF_TOPO_FABRIC:
+		if (bottom->connection_features & FSF_FEATURE_NPIV_MODE)
+			fc_host_port_type(shost) = FC_PORTTYPE_NPIV;
+		else
+			fc_host_port_type(shost) = FC_PORTTYPE_NPORT;
+		break;
+	case FSF_TOPO_AL:
+		fc_host_port_type(shost) = FC_PORTTYPE_NLPORT;
+		/* fall through */
+	default:
+		dev_err(&adapter->ccw_device->dev,
+			"Unknown or unsupported arbitrated loop "
+			"fibre channel topology detected\n");
+		zfcp_erp_adapter_shutdown(adapter, 0, "fsece_1");
+		return -EIO;
+	}
+
+	return 0;
+}
+
+static void zfcp_fsf_exchange_config_data_handler(struct zfcp_fsf_req *req)
+{
+	struct zfcp_adapter *adapter = req->adapter;
+	struct fsf_qtcb *qtcb = req->qtcb;
+	struct fsf_qtcb_bottom_config *bottom = &qtcb->bottom.config;
+	struct Scsi_Host *shost = adapter->scsi_host;
+
+	if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
+		return;
+
+	adapter->fsf_lic_version = bottom->lic_version;
+	adapter->adapter_features = bottom->adapter_features;
+	adapter->connection_features = bottom->connection_features;
+	adapter->peer_wwpn = 0;
+	adapter->peer_wwnn = 0;
+	adapter->peer_d_id = 0;
+
+	switch (qtcb->header.fsf_status) {
+	case FSF_GOOD:
+		if (zfcp_fsf_exchange_config_evaluate(req))
+			return;
+
+		if (bottom->max_qtcb_size < sizeof(struct fsf_qtcb)) {
+			dev_err(&adapter->ccw_device->dev,
+				"FCP adapter maximum QTCB size (%d bytes) "
+				"is too small\n",
+				bottom->max_qtcb_size);
+			zfcp_erp_adapter_shutdown(adapter, 0, "fsecdh1");
+			return;
+		}
+		atomic_or(ZFCP_STATUS_ADAPTER_XCONFIG_OK,
+				&adapter->status);
+		break;
+	case FSF_EXCHANGE_CONFIG_DATA_INCOMPLETE:
+		fc_host_node_name(shost) = 0;
+		fc_host_port_name(shost) = 0;
+		fc_host_port_id(shost) = 0;
+		fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN;
+		fc_host_port_type(shost) = FC_PORTTYPE_UNKNOWN;
+		adapter->hydra_version = 0;
+
+		/* avoids adapter shutdown to be able to recognize
+		 * events such as LINK UP */
+		atomic_or(ZFCP_STATUS_ADAPTER_XCONFIG_OK,
+				&adapter->status);
+		zfcp_fsf_link_down_info_eval(req,
+			&qtcb->header.fsf_status_qual.link_down_info);
+		if (zfcp_fsf_exchange_config_evaluate(req))
+			return;
+		break;
+	default:
+		zfcp_erp_adapter_shutdown(adapter, 0, "fsecdh3");
+		return;
+	}
+
+	if (adapter->adapter_features & FSF_FEATURE_HBAAPI_MANAGEMENT) {
+		adapter->hardware_version = bottom->hardware_version;
+		memcpy(fc_host_serial_number(shost), bottom->serial_number,
+		       min(FC_SERIAL_NUMBER_SIZE, 17));
+		EBCASC(fc_host_serial_number(shost),
+		       min(FC_SERIAL_NUMBER_SIZE, 17));
+	}
+
+	if (FSF_QTCB_CURRENT_VERSION < bottom->low_qtcb_version) {
+		dev_err(&adapter->ccw_device->dev,
+			"The FCP adapter only supports newer "
+			"control block versions\n");
+		zfcp_erp_adapter_shutdown(adapter, 0, "fsecdh4");
+		return;
+	}
+	if (FSF_QTCB_CURRENT_VERSION > bottom->high_qtcb_version) {
+		dev_err(&adapter->ccw_device->dev,
+			"The FCP adapter only supports older "
+			"control block versions\n");
+		zfcp_erp_adapter_shutdown(adapter, 0, "fsecdh5");
+	}
+}
+
+static void zfcp_fsf_exchange_port_evaluate(struct zfcp_fsf_req *req)
+{
+	struct zfcp_adapter *adapter = req->adapter;
+	struct fsf_qtcb_bottom_port *bottom = &req->qtcb->bottom.port;
+	struct Scsi_Host *shost = adapter->scsi_host;
+
+	if (req->data)
+		memcpy(req->data, bottom, sizeof(*bottom));
+
+	if (adapter->connection_features & FSF_FEATURE_NPIV_MODE)
+		fc_host_permanent_port_name(shost) = bottom->wwpn;
+	else
+		fc_host_permanent_port_name(shost) = fc_host_port_name(shost);
+	fc_host_maxframe_size(shost) = bottom->maximum_frame_size;
+	fc_host_supported_speeds(shost) =
+		zfcp_fsf_convert_portspeed(bottom->supported_speed);
+	memcpy(fc_host_supported_fc4s(shost), bottom->supported_fc4_types,
+	       FC_FC4_LIST_SIZE);
+	memcpy(fc_host_active_fc4s(shost), bottom->active_fc4_types,
+	       FC_FC4_LIST_SIZE);
+}
+
+static void zfcp_fsf_exchange_port_data_handler(struct zfcp_fsf_req *req)
+{
+	struct fsf_qtcb *qtcb = req->qtcb;
+
+	if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
+		return;
+
+	switch (qtcb->header.fsf_status) {
+	case FSF_GOOD:
+		zfcp_fsf_exchange_port_evaluate(req);
+		break;
+	case FSF_EXCHANGE_CONFIG_DATA_INCOMPLETE:
+		zfcp_fsf_exchange_port_evaluate(req);
+		zfcp_fsf_link_down_info_eval(req,
+			&qtcb->header.fsf_status_qual.link_down_info);
+		break;
+	}
+}
+
+static struct zfcp_fsf_req *zfcp_fsf_alloc(mempool_t *pool)
+{
+	struct zfcp_fsf_req *req;
+
+	if (likely(pool))
+		req = mempool_alloc(pool, GFP_ATOMIC);
+	else
+		req = kmalloc(sizeof(*req), GFP_ATOMIC);
+
+	if (unlikely(!req))
+		return NULL;
+
+	memset(req, 0, sizeof(*req));
+	req->pool = pool;
+	return req;
+}
+
+static struct fsf_qtcb *zfcp_fsf_qtcb_alloc(mempool_t *pool)
+{
+	struct fsf_qtcb *qtcb;
+
+	if (likely(pool))
+		qtcb = mempool_alloc(pool, GFP_ATOMIC);
+	else
+		qtcb = kmem_cache_alloc(zfcp_fsf_qtcb_cache, GFP_ATOMIC);
+
+	if (unlikely(!qtcb))
+		return NULL;
+
+	memset(qtcb, 0, sizeof(*qtcb));
+	return qtcb;
+}
+
+static struct zfcp_fsf_req *zfcp_fsf_req_create(struct zfcp_qdio *qdio,
+						u32 fsf_cmd, u8 sbtype,
+						mempool_t *pool)
+{
+	struct zfcp_adapter *adapter = qdio->adapter;
+	struct zfcp_fsf_req *req = zfcp_fsf_alloc(pool);
+
+	if (unlikely(!req))
+		return ERR_PTR(-ENOMEM);
+
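+	/* never hand out request id 0 */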
+	if (adapter->req_no == 0)
+		adapter->req_no++;
+
+	INIT_LIST_HEAD(&req->list);
+	timer_setup(&req->timer, NULL, 0);
+	init_completion(&req->completion);
+
+	req->adapter = adapter;
+	req->fsf_command = fsf_cmd;
+	req->req_id = adapter->req_no;
+
+	if (likely(fsf_cmd != FSF_QTCB_UNSOLICITED_STATUS)) {
+		if (likely(pool))
+			req->qtcb = zfcp_fsf_qtcb_alloc(
+				adapter->pool.qtcb_pool);
+		else
+			req->qtcb = zfcp_fsf_qtcb_alloc(NULL);
+
+		if (unlikely(!req->qtcb)) {
+			zfcp_fsf_req_free(req);
+			return ERR_PTR(-ENOMEM);
+		}
+
+		req->seq_no = adapter->fsf_req_seq_no;
+		req->qtcb->prefix.req_seq_no = adapter->fsf_req_seq_no;
+		req->qtcb->prefix.req_id = req->req_id;
+		req->qtcb->prefix.ulp_info = 26;
+		req->qtcb->prefix.qtcb_type = fsf_qtcb_type[req->fsf_command];
+		req->qtcb->prefix.qtcb_version = FSF_QTCB_CURRENT_VERSION;
+		req->qtcb->header.req_handle = req->req_id;
+		req->qtcb->header.fsf_command = req->fsf_command;
+	}
+
+	zfcp_qdio_req_init(adapter->qdio, &req->qdio_req, req->req_id, sbtype,
+			   req->qtcb, sizeof(struct fsf_qtcb));
+
+	return req;
+}
+
+static int zfcp_fsf_req_send(struct zfcp_fsf_req *req)
+{
+	struct zfcp_adapter *adapter = req->adapter;
+	struct zfcp_qdio *qdio = adapter->qdio;
+	int with_qtcb = (req->qtcb != NULL);
+	int req_id = req->req_id;
+
+	zfcp_reqlist_add(adapter->req_list, req);
+
+	req->qdio_req.qdio_outb_usage = atomic_read(&qdio->req_q_free);
+	req->issued = get_tod_clock();
+	if (zfcp_qdio_send(qdio, &req->qdio_req)) {
+		del_timer(&req->timer);
+		/* lookup request again, list might have changed */
+		zfcp_reqlist_find_rm(adapter->req_list, req_id);
+		zfcp_erp_adapter_reopen(adapter, 0, "fsrs__1");
+		return -EIO;
+	}
+
+	/* Don't increase for unsolicited status */
+	if (with_qtcb)
+		adapter->fsf_req_seq_no++;
+	adapter->req_no++;
+
+	return 0;
+}
+
+/**
+ * zfcp_fsf_status_read - send status read request
+ * @qdio: pointer to struct zfcp_qdio
+ * Returns: 0 on success, error otherwise
+ */
+int zfcp_fsf_status_read(struct zfcp_qdio *qdio)
+{
+	struct zfcp_adapter *adapter = qdio->adapter;
+	struct zfcp_fsf_req *req;
+	struct fsf_status_read_buffer *sr_buf;
+	struct page *page;
+	int retval = -EIO;
+
+	spin_lock_irq(&qdio->req_q_lock);
+	if (zfcp_qdio_sbal_get(qdio))
+		goto out;
+
+	req = zfcp_fsf_req_create(qdio, FSF_QTCB_UNSOLICITED_STATUS,
+				  SBAL_SFLAGS0_TYPE_STATUS,
+				  adapter->pool.status_read_req);
+	if (IS_ERR(req)) {
+		retval = PTR_ERR(req);
+		goto out;
+	}
+
+	page = mempool_alloc(adapter->pool.sr_data, GFP_ATOMIC);
+	if (!page) {
+		retval = -ENOMEM;
+		goto failed_buf;
+	}
+	sr_buf = page_address(page);
+	memset(sr_buf, 0, sizeof(*sr_buf));
+	req->data = sr_buf;
+
+	zfcp_qdio_fill_next(qdio, &req->qdio_req, sr_buf, sizeof(*sr_buf));
+	zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
+
+	retval = zfcp_fsf_req_send(req);
+	if (retval)
+		goto failed_req_send;
+
+	goto out;
+
+failed_req_send:
+	req->data = NULL;
+	mempool_free(virt_to_page(sr_buf), adapter->pool.sr_data);
+failed_buf:
+	zfcp_dbf_hba_fsf_uss("fssr__1", req);
+	zfcp_fsf_req_free(req);
+out:
+	spin_unlock_irq(&qdio->req_q_lock);
+	return retval;
+}
+
+static void zfcp_fsf_abort_fcp_command_handler(struct zfcp_fsf_req *req)
+{
+	struct scsi_device *sdev = req->data;
+	struct zfcp_scsi_dev *zfcp_sdev;
+	union fsf_status_qual *fsq = &req->qtcb->header.fsf_status_qual;
+
+	if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
+		return;
+
+	zfcp_sdev = sdev_to_zfcp(sdev);
+
+	switch (req->qtcb->header.fsf_status) {
+	case FSF_PORT_HANDLE_NOT_VALID:
+		if (fsq->word[0] == fsq->word[1]) {
+			zfcp_erp_adapter_reopen(zfcp_sdev->port->adapter, 0,
+						"fsafch1");
+			req->status |= ZFCP_STATUS_FSFREQ_ERROR;
+		}
+		break;
+	case FSF_LUN_HANDLE_NOT_VALID:
+		if (fsq->word[0] == fsq->word[1]) {
+			zfcp_erp_port_reopen(zfcp_sdev->port, 0, "fsafch2");
+			req->status |= ZFCP_STATUS_FSFREQ_ERROR;
+		}
+		break;
+	case FSF_FCP_COMMAND_DOES_NOT_EXIST:
+		req->status |= ZFCP_STATUS_FSFREQ_ABORTNOTNEEDED;
+		break;
+	case FSF_PORT_BOXED:
+		zfcp_erp_set_port_status(zfcp_sdev->port,
+					 ZFCP_STATUS_COMMON_ACCESS_BOXED);
+		zfcp_erp_port_reopen(zfcp_sdev->port,
+				     ZFCP_STATUS_COMMON_ERP_FAILED, "fsafch3");
+		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
+		break;
+	case FSF_LUN_BOXED:
+		zfcp_erp_set_lun_status(sdev, ZFCP_STATUS_COMMON_ACCESS_BOXED);
+		zfcp_erp_lun_reopen(sdev, ZFCP_STATUS_COMMON_ERP_FAILED,
+				    "fsafch4");
+		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
+		break;
+	case FSF_ADAPTER_STATUS_AVAILABLE:
+		switch (fsq->word[0]) {
+		case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
+			zfcp_fc_test_link(zfcp_sdev->port);
+			/* fall through */
+		case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
+			req->status |= ZFCP_STATUS_FSFREQ_ERROR;
+			break;
+		}
+		break;
+	case FSF_GOOD:
+		req->status |= ZFCP_STATUS_FSFREQ_ABORTSUCCEEDED;
+		break;
+	}
+}
+
+/**
+ * zfcp_fsf_abort_fcp_cmnd - abort running SCSI command
+ * @scmnd: The SCSI command to abort
+ * Returns: pointer to struct zfcp_fsf_req
+ */
+struct zfcp_fsf_req *zfcp_fsf_abort_fcp_cmnd(struct scsi_cmnd *scmnd)
+{
+	struct zfcp_fsf_req *req = NULL;
+	struct scsi_device *sdev = scmnd->device;
+	struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
+	struct zfcp_qdio *qdio = zfcp_sdev->port->adapter->qdio;
+	unsigned long old_req_id = (unsigned long) scmnd->host_scribble;
+
+	spin_lock_irq(&qdio->req_q_lock);
+	if (zfcp_qdio_sbal_get(qdio))
+		goto out;
+	req = zfcp_fsf_req_create(qdio, FSF_QTCB_ABORT_FCP_CMND,
+				  SBAL_SFLAGS0_TYPE_READ,
+				  qdio->adapter->pool.scsi_abort);
+	if (IS_ERR(req)) {
+		req = NULL;
+		goto out;
+	}
+
+	if (unlikely(!(atomic_read(&zfcp_sdev->status) &
+		       ZFCP_STATUS_COMMON_UNBLOCKED)))
+		goto out_error_free;
+
+	zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
+
+	req->data = sdev;
+	req->handler = zfcp_fsf_abort_fcp_command_handler;
+	req->qtcb->header.lun_handle = zfcp_sdev->lun_handle;
+	req->qtcb->header.port_handle = zfcp_sdev->port->handle;
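+	/* old_req_id is the request id that was stashed in host_scribble
+	 * when the command to abort was originally sent
+	 */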
+	req->qtcb->bottom.support.req_handle = (u64) old_req_id;
+
+	zfcp_fsf_start_timer(req, ZFCP_SCSI_ER_TIMEOUT);
+	if (!zfcp_fsf_req_send(req))
+		goto out;
+
+out_error_free:
+	zfcp_fsf_req_free(req);
+	req = NULL;
+out:
+	spin_unlock_irq(&qdio->req_q_lock);
+	return req;
+}
+
+static void zfcp_fsf_send_ct_handler(struct zfcp_fsf_req *req)
+{
+	struct zfcp_adapter *adapter = req->adapter;
+	struct zfcp_fsf_ct_els *ct = req->data;
+	struct fsf_qtcb_header *header = &req->qtcb->header;
+
+	ct->status = -EINVAL;
+
+	if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
+		goto skip_fsfstatus;
+
+	switch (header->fsf_status) {
+	case FSF_GOOD:
+		ct->status = 0;
+		zfcp_dbf_san_res("fsscth2", req);
+		break;
+	case FSF_SERVICE_CLASS_NOT_SUPPORTED:
+		zfcp_fsf_class_not_supp(req);
+		break;
+	case FSF_ADAPTER_STATUS_AVAILABLE:
+		switch (header->fsf_status_qual.word[0]) {
+		case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
+		case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
+			req->status |= ZFCP_STATUS_FSFREQ_ERROR;
+			break;
+		}
+		break;
+	case FSF_PORT_BOXED:
+		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
+		break;
+	case FSF_PORT_HANDLE_NOT_VALID:
+		zfcp_erp_adapter_reopen(adapter, 0, "fsscth1");
+		/* fall through */
+	case FSF_GENERIC_COMMAND_REJECTED:
+	case FSF_PAYLOAD_SIZE_MISMATCH:
+	case FSF_REQUEST_SIZE_TOO_LARGE:
+	case FSF_RESPONSE_SIZE_TOO_LARGE:
+	case FSF_SBAL_MISMATCH:
+		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
+		break;
+	}
+
+skip_fsfstatus:
+	if (ct->handler)
+		ct->handler(ct->handler_data);
+}
+
+static void zfcp_fsf_setup_ct_els_unchained(struct zfcp_qdio *qdio,
+					    struct zfcp_qdio_req *q_req,
+					    struct scatterlist *sg_req,
+					    struct scatterlist *sg_resp)
+{
+	zfcp_qdio_fill_next(qdio, q_req, sg_virt(sg_req), sg_req->length);
+	zfcp_qdio_fill_next(qdio, q_req, sg_virt(sg_resp), sg_resp->length);
+	zfcp_qdio_set_sbale_last(qdio, q_req);
+}
+
+static int zfcp_fsf_setup_ct_els_sbals(struct zfcp_fsf_req *req,
+				       struct scatterlist *sg_req,
+				       struct scatterlist *sg_resp)
+{
+	struct zfcp_adapter *adapter = req->adapter;
+	struct zfcp_qdio *qdio = adapter->qdio;
+	struct fsf_qtcb *qtcb = req->qtcb;
+	u32 feat = adapter->adapter_features;
+
+	if (zfcp_adapter_multi_buffer_active(adapter)) {
+		if (zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req, sg_req))
+			return -EIO;
+		qtcb->bottom.support.req_buf_length =
+			zfcp_qdio_real_bytes(sg_req);
+		if (zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req, sg_resp))
+			return -EIO;
+		qtcb->bottom.support.resp_buf_length =
+			zfcp_qdio_real_bytes(sg_resp);
+
+		zfcp_qdio_set_data_div(qdio, &req->qdio_req, sg_nents(sg_req));
+		zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
+		zfcp_qdio_set_scount(qdio, &req->qdio_req);
+		return 0;
+	}
+
+	/* use single, unchained SBAL if it can hold the request */
+	if (zfcp_qdio_sg_one_sbale(sg_req) && zfcp_qdio_sg_one_sbale(sg_resp)) {
+		zfcp_fsf_setup_ct_els_unchained(qdio, &req->qdio_req,
+						sg_req, sg_resp);
+		return 0;
+	}
+
+	if (!(feat & FSF_FEATURE_ELS_CT_CHAINED_SBALS))
+		return -EOPNOTSUPP;
+
+	if (zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req, sg_req))
+		return -EIO;
+
+	qtcb->bottom.support.req_buf_length = zfcp_qdio_real_bytes(sg_req);
+
+	zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
+	zfcp_qdio_skip_to_last_sbale(qdio, &req->qdio_req);
+
+	if (zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req, sg_resp))
+		return -EIO;
+
+	qtcb->bottom.support.resp_buf_length = zfcp_qdio_real_bytes(sg_resp);
+
+	zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
+
+	return 0;
+}
+
+static int zfcp_fsf_setup_ct_els(struct zfcp_fsf_req *req,
+				 struct scatterlist *sg_req,
+				 struct scatterlist *sg_resp,
+				 unsigned int timeout)
+{
+	int ret;
+
+	ret = zfcp_fsf_setup_ct_els_sbals(req, sg_req, sg_resp);
+	if (ret)
+		return ret;
+
+	/* common settings for ct/gs and els requests */
+	if (timeout > 255)
+		timeout = 255; /* max value accepted by hardware */
+	req->qtcb->bottom.support.service_class = FSF_CLASS_3;
+	req->qtcb->bottom.support.timeout = timeout;
+	zfcp_fsf_start_timer(req, (timeout + 10) * HZ);
+
+	return 0;
+}
+
+/**
+ * zfcp_fsf_send_ct - initiate a Generic Service request (FC-GS)
+ * @wka_port: pointer to the WKA port the request is sent to
+ * @ct: pointer to struct zfcp_fsf_ct_els with data for the request
+ * @pool: if non-null this mempool is used to allocate struct zfcp_fsf_req
+ * @timeout: timeout in seconds the FCP channel should use for this request
+ */
+int zfcp_fsf_send_ct(struct zfcp_fc_wka_port *wka_port,
+		     struct zfcp_fsf_ct_els *ct, mempool_t *pool,
+		     unsigned int timeout)
+{
+	struct zfcp_qdio *qdio = wka_port->adapter->qdio;
+	struct zfcp_fsf_req *req;
+	int ret = -EIO;
+
+	spin_lock_irq(&qdio->req_q_lock);
+	if (zfcp_qdio_sbal_get(qdio))
+		goto out;
+
+	req = zfcp_fsf_req_create(qdio, FSF_QTCB_SEND_GENERIC,
+				  SBAL_SFLAGS0_TYPE_WRITE_READ, pool);
+
+	if (IS_ERR(req)) {
+		ret = PTR_ERR(req);
+		goto out;
+	}
+
+	req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
+	ret = zfcp_fsf_setup_ct_els(req, ct->req, ct->resp, timeout);
+	if (ret)
+		goto failed_send;
+
+	req->handler = zfcp_fsf_send_ct_handler;
+	req->qtcb->header.port_handle = wka_port->handle;
+	ct->d_id = wka_port->d_id;
+	req->data = ct;
+
+	zfcp_dbf_san_req("fssct_1", req, wka_port->d_id);
+
+	ret = zfcp_fsf_req_send(req);
+	if (ret)
+		goto failed_send;
+
+	goto out;
+
+failed_send:
+	zfcp_fsf_req_free(req);
+out:
+	spin_unlock_irq(&qdio->req_q_lock);
+	return ret;
+}
+
+static void zfcp_fsf_send_els_handler(struct zfcp_fsf_req *req)
+{
+	struct zfcp_fsf_ct_els *send_els = req->data;
+	struct fsf_qtcb_header *header = &req->qtcb->header;
+
+	send_els->status = -EINVAL;
+
+	if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
+		goto skip_fsfstatus;
+
+	switch (header->fsf_status) {
+	case FSF_GOOD:
+		send_els->status = 0;
+		zfcp_dbf_san_res("fsselh1", req);
+		break;
+	case FSF_SERVICE_CLASS_NOT_SUPPORTED:
+		zfcp_fsf_class_not_supp(req);
+		break;
+	case FSF_ADAPTER_STATUS_AVAILABLE:
+		switch (header->fsf_status_qual.word[0]) {
+		case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
+		case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
+		case FSF_SQ_RETRY_IF_POSSIBLE:
+			req->status |= ZFCP_STATUS_FSFREQ_ERROR;
+			break;
+		}
+		break;
+	case FSF_ELS_COMMAND_REJECTED:
+	case FSF_PAYLOAD_SIZE_MISMATCH:
+	case FSF_REQUEST_SIZE_TOO_LARGE:
+	case FSF_RESPONSE_SIZE_TOO_LARGE:
+		break;
+	case FSF_SBAL_MISMATCH:
+		/* should never occur, avoided in zfcp_fsf_send_els */
+		/* fall through */
+	default:
+		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
+		break;
+	}
+skip_fsfstatus:
+	if (send_els->handler)
+		send_els->handler(send_els->handler_data);
+}
+
+/**
+ * zfcp_fsf_send_els - initiate an ELS command (FC-FS)
+ * @adapter: pointer to zfcp adapter
+ * @d_id: N_Port_ID of the destination port
+ * @els: pointer to struct zfcp_fsf_ct_els with data for the command
+ * @timeout: timeout in seconds the FCP channel should use for this request
+ */
+int zfcp_fsf_send_els(struct zfcp_adapter *adapter, u32 d_id,
+		      struct zfcp_fsf_ct_els *els, unsigned int timeout)
+{
+	struct zfcp_fsf_req *req;
+	struct zfcp_qdio *qdio = adapter->qdio;
+	int ret = -EIO;
+
+	spin_lock_irq(&qdio->req_q_lock);
+	if (zfcp_qdio_sbal_get(qdio))
+		goto out;
+
+	req = zfcp_fsf_req_create(qdio, FSF_QTCB_SEND_ELS,
+				  SBAL_SFLAGS0_TYPE_WRITE_READ, NULL);
+
+	if (IS_ERR(req)) {
+		ret = PTR_ERR(req);
+		goto out;
+	}
+
+	req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
+
+	if (!zfcp_adapter_multi_buffer_active(adapter))
+		zfcp_qdio_sbal_limit(qdio, &req->qdio_req, 2);
+
+	ret = zfcp_fsf_setup_ct_els(req, els->req, els->resp, timeout);
+
+	if (ret)
+		goto failed_send;
+
+	hton24(req->qtcb->bottom.support.d_id, d_id);
+	req->handler = zfcp_fsf_send_els_handler;
+	els->d_id = d_id;
+	req->data = els;
+
+	zfcp_dbf_san_req("fssels1", req, d_id);
+
+	ret = zfcp_fsf_req_send(req);
+	if (ret)
+		goto failed_send;
+
+	goto out;
+
+failed_send:
+	zfcp_fsf_req_free(req);
+out:
+	spin_unlock_irq(&qdio->req_q_lock);
+	return ret;
+}
+
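+/**
+ * zfcp_fsf_exchange_config_data - request configuration data from the channel
+ * @erp_action: ERP action for the adapter whose configuration is requested
+ * Returns: 0 on success, error otherwise
+ */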
+int zfcp_fsf_exchange_config_data(struct zfcp_erp_action *erp_action)
+{
+	struct zfcp_fsf_req *req;
+	struct zfcp_qdio *qdio = erp_action->adapter->qdio;
+	int retval = -EIO;
+
+	spin_lock_irq(&qdio->req_q_lock);
+	if (zfcp_qdio_sbal_get(qdio))
+		goto out;
+
+	req = zfcp_fsf_req_create(qdio, FSF_QTCB_EXCHANGE_CONFIG_DATA,
+				  SBAL_SFLAGS0_TYPE_READ,
+				  qdio->adapter->pool.erp_req);
+
+	if (IS_ERR(req)) {
+		retval = PTR_ERR(req);
+		goto out;
+	}
+
+	req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
+	zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
+
+	req->qtcb->bottom.config.feature_selection =
+			FSF_FEATURE_NOTIFICATION_LOST |
+			FSF_FEATURE_UPDATE_ALERT;
+	req->erp_action = erp_action;
+	req->handler = zfcp_fsf_exchange_config_data_handler;
+	erp_action->fsf_req_id = req->req_id;
+
+	zfcp_fsf_start_erp_timer(req);
+	retval = zfcp_fsf_req_send(req);
+	if (retval) {
+		zfcp_fsf_req_free(req);
+		erp_action->fsf_req_id = 0;
+	}
+out:
+	spin_unlock_irq(&qdio->req_q_lock);
+	return retval;
+}
+
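+/**
+ * zfcp_fsf_exchange_config_data_sync - get configuration data, wait for answer
+ * @qdio: pointer to struct zfcp_qdio
+ * @data: pointer to struct fsf_qtcb_bottom_config
+ * Returns: 0 on success, error otherwise
+ */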
+int zfcp_fsf_exchange_config_data_sync(struct zfcp_qdio *qdio,
+				       struct fsf_qtcb_bottom_config *data)
+{
+	struct zfcp_fsf_req *req = NULL;
+	int retval = -EIO;
+
+	spin_lock_irq(&qdio->req_q_lock);
+	if (zfcp_qdio_sbal_get(qdio))
+		goto out_unlock;
+
+	req = zfcp_fsf_req_create(qdio, FSF_QTCB_EXCHANGE_CONFIG_DATA,
+				  SBAL_SFLAGS0_TYPE_READ, NULL);
+
+	if (IS_ERR(req)) {
+		retval = PTR_ERR(req);
+		goto out_unlock;
+	}
+
+	zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
+	req->handler = zfcp_fsf_exchange_config_data_handler;
+
+	req->qtcb->bottom.config.feature_selection =
+			FSF_FEATURE_NOTIFICATION_LOST |
+			FSF_FEATURE_UPDATE_ALERT;
+
+	if (data)
+		req->data = data;
+
+	zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT);
+	retval = zfcp_fsf_req_send(req);
+	spin_unlock_irq(&qdio->req_q_lock);
+	if (!retval)
+		wait_for_completion(&req->completion);
+
+	zfcp_fsf_req_free(req);
+	return retval;
+
+out_unlock:
+	spin_unlock_irq(&qdio->req_q_lock);
+	return retval;
+}
+
+/**
+ * zfcp_fsf_exchange_port_data - request information about local port
+ * @erp_action: ERP action for the adapter for which port data is requested
+ * Returns: 0 on success, error otherwise
+ */
+int zfcp_fsf_exchange_port_data(struct zfcp_erp_action *erp_action)
+{
+	struct zfcp_qdio *qdio = erp_action->adapter->qdio;
+	struct zfcp_fsf_req *req;
+	int retval = -EIO;
+
+	if (!(qdio->adapter->adapter_features & FSF_FEATURE_HBAAPI_MANAGEMENT))
+		return -EOPNOTSUPP;
+
+	spin_lock_irq(&qdio->req_q_lock);
+	if (zfcp_qdio_sbal_get(qdio))
+		goto out;
+
+	req = zfcp_fsf_req_create(qdio, FSF_QTCB_EXCHANGE_PORT_DATA,
+				  SBAL_SFLAGS0_TYPE_READ,
+				  qdio->adapter->pool.erp_req);
+
+	if (IS_ERR(req)) {
+		retval = PTR_ERR(req);
+		goto out;
+	}
+
+	req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
+	zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
+
+	req->handler = zfcp_fsf_exchange_port_data_handler;
+	req->erp_action = erp_action;
+	erp_action->fsf_req_id = req->req_id;
+
+	zfcp_fsf_start_erp_timer(req);
+	retval = zfcp_fsf_req_send(req);
+	if (retval) {
+		zfcp_fsf_req_free(req);
+		erp_action->fsf_req_id = 0;
+	}
+out:
+	spin_unlock_irq(&qdio->req_q_lock);
+	return retval;
+}
+
+/**
+ * zfcp_fsf_exchange_port_data_sync - request information about local port
+ * @qdio: pointer to struct zfcp_qdio
+ * @data: pointer to struct fsf_qtcb_bottom_port
+ * Returns: 0 on success, error otherwise
+ */
+int zfcp_fsf_exchange_port_data_sync(struct zfcp_qdio *qdio,
+				     struct fsf_qtcb_bottom_port *data)
+{
+	struct zfcp_fsf_req *req = NULL;
+	int retval = -EIO;
+
+	if (!(qdio->adapter->adapter_features & FSF_FEATURE_HBAAPI_MANAGEMENT))
+		return -EOPNOTSUPP;
+
+	spin_lock_irq(&qdio->req_q_lock);
+	if (zfcp_qdio_sbal_get(qdio))
+		goto out_unlock;
+
+	req = zfcp_fsf_req_create(qdio, FSF_QTCB_EXCHANGE_PORT_DATA,
+				  SBAL_SFLAGS0_TYPE_READ, NULL);
+
+	if (IS_ERR(req)) {
+		retval = PTR_ERR(req);
+		goto out_unlock;
+	}
+
+	if (data)
+		req->data = data;
+
+	zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
+
+	req->handler = zfcp_fsf_exchange_port_data_handler;
+	zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT);
+	retval = zfcp_fsf_req_send(req);
+	spin_unlock_irq(&qdio->req_q_lock);
+
+	if (!retval)
+		wait_for_completion(&req->completion);
+
+	zfcp_fsf_req_free(req);
+
+	return retval;
+
+out_unlock:
+	spin_unlock_irq(&qdio->req_q_lock);
+	return retval;
+}
+
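+/* evaluate a completed open port response and update the port state */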
+static void zfcp_fsf_open_port_handler(struct zfcp_fsf_req *req)
+{
+	struct zfcp_port *port = req->data;
+	struct fsf_qtcb_header *header = &req->qtcb->header;
+	struct fc_els_flogi *plogi;
+
+	if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
+		goto out;
+
+	switch (header->fsf_status) {
+	case FSF_PORT_ALREADY_OPEN:
+		break;
+	case FSF_MAXIMUM_NUMBER_OF_PORTS_EXCEEDED:
+		dev_warn(&req->adapter->ccw_device->dev,
+			 "Not enough FCP adapter resources to open "
+			 "remote port 0x%016Lx\n",
+			 (unsigned long long)port->wwpn);
+		zfcp_erp_set_port_status(port,
+					 ZFCP_STATUS_COMMON_ERP_FAILED);
+		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
+		break;
+	case FSF_ADAPTER_STATUS_AVAILABLE:
+		switch (header->fsf_status_qual.word[0]) {
+		case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
+			/* no zfcp_fc_test_link() with failed open port */
+			/* fall through */
+		case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
+		case FSF_SQ_NO_RETRY_POSSIBLE:
+			req->status |= ZFCP_STATUS_FSFREQ_ERROR;
+			break;
+		}
+		break;
+	case FSF_GOOD:
+		port->handle = header->port_handle;
+		atomic_or(ZFCP_STATUS_COMMON_OPEN |
+			  ZFCP_STATUS_PORT_PHYS_OPEN, &port->status);
+		atomic_andnot(ZFCP_STATUS_COMMON_ACCESS_BOXED,
+			      &port->status);
+		/* check whether D_ID has changed during open */
+		/*
+		 * FIXME: This check is not airtight, as the FCP channel does
+		 * not monitor closures of target port connections caused on
+		 * the remote side. Thus, it might miss out on invalidating
+		 * locally cached WWPNs (and other N_Port parameters) of gone
+		 * target ports. So, our attempt to make things safe could be
+		 * undermined by 'open port' response data tagged with
+		 * obsolete WWPNs. This is another reason to monitor potential
+		 * connection closures ourselves (by interpreting incoming
+		 * ELS' and unsolicited status). One should also be able to
+		 * cross-check with another GID_PN straight after a port has
+		 * been opened; alternatively, an ADISC/PDISC ELS should
+		 * suffice.
+		 */
+		plogi = (struct fc_els_flogi *) req->qtcb->bottom.support.els;
+		if (req->qtcb->bottom.support.els1_length >=
+		    FSF_PLOGI_MIN_LEN)
+			zfcp_fc_plogi_evaluate(port, plogi);
+		break;
+	case FSF_UNKNOWN_OP_SUBTYPE:
+		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
+		break;
+	}
+
+out:
+	put_device(&port->dev);
+}
+
+/**
+ * zfcp_fsf_open_port - create and send open port request
+ * @erp_action: pointer to struct zfcp_erp_action
+ * Returns: 0 on success, error otherwise
+ */
+int zfcp_fsf_open_port(struct zfcp_erp_action *erp_action)
+{
+	struct zfcp_qdio *qdio = erp_action->adapter->qdio;
+	struct zfcp_port *port = erp_action->port;
+	struct zfcp_fsf_req *req;
+	int retval = -EIO;
+
+	spin_lock_irq(&qdio->req_q_lock);
+	if (zfcp_qdio_sbal_get(qdio))
+		goto out;
+
+	req = zfcp_fsf_req_create(qdio, FSF_QTCB_OPEN_PORT_WITH_DID,
+				  SBAL_SFLAGS0_TYPE_READ,
+				  qdio->adapter->pool.erp_req);
+
+	if (IS_ERR(req)) {
+		retval = PTR_ERR(req);
+		goto out;
+	}
+
+	req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
+	zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
+
+	req->handler = zfcp_fsf_open_port_handler;
+	hton24(req->qtcb->bottom.support.d_id, port->d_id);
+	req->data = port;
+	req->erp_action = erp_action;
+	erp_action->fsf_req_id = req->req_id;
+	get_device(&port->dev);
+
+	zfcp_fsf_start_erp_timer(req);
+	retval = zfcp_fsf_req_send(req);
+	if (retval) {
+		zfcp_fsf_req_free(req);
+		erp_action->fsf_req_id = 0;
+		put_device(&port->dev);
+	}
+out:
+	spin_unlock_irq(&qdio->req_q_lock);
+	return retval;
+}
+
+static void zfcp_fsf_close_port_handler(struct zfcp_fsf_req *req)
+{
+	struct zfcp_port *port = req->data;
+
+	if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
+		return;
+
+	switch (req->qtcb->header.fsf_status) {
+	case FSF_PORT_HANDLE_NOT_VALID:
+		zfcp_erp_adapter_reopen(port->adapter, 0, "fscph_1");
+		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
+		break;
+	case FSF_ADAPTER_STATUS_AVAILABLE:
+		break;
+	case FSF_GOOD:
+		zfcp_erp_clear_port_status(port, ZFCP_STATUS_COMMON_OPEN);
+		break;
+	}
+}
+
+/**
+ * zfcp_fsf_close_port - create and send close port request
+ * @erp_action: pointer to struct zfcp_erp_action
+ * Returns: 0 on success, error otherwise
+ */
+int zfcp_fsf_close_port(struct zfcp_erp_action *erp_action)
+{
+	struct zfcp_qdio *qdio = erp_action->adapter->qdio;
+	struct zfcp_fsf_req *req;
+	int retval = -EIO;
+
+	spin_lock_irq(&qdio->req_q_lock);
+	if (zfcp_qdio_sbal_get(qdio))
+		goto out;
+
+	req = zfcp_fsf_req_create(qdio, FSF_QTCB_CLOSE_PORT,
+				  SBAL_SFLAGS0_TYPE_READ,
+				  qdio->adapter->pool.erp_req);
+
+	if (IS_ERR(req)) {
+		retval = PTR_ERR(req);
+		goto out;
+	}
+
+	req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
+	zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
+
+	req->handler = zfcp_fsf_close_port_handler;
+	req->data = erp_action->port;
+	req->erp_action = erp_action;
+	req->qtcb->header.port_handle = erp_action->port->handle;
+	erp_action->fsf_req_id = req->req_id;
+
+	zfcp_fsf_start_erp_timer(req);
+	retval = zfcp_fsf_req_send(req);
+	if (retval) {
+		zfcp_fsf_req_free(req);
+		erp_action->fsf_req_id = 0;
+	}
+out:
+	spin_unlock_irq(&qdio->req_q_lock);
+	return retval;
+}
+
+static void zfcp_fsf_open_wka_port_handler(struct zfcp_fsf_req *req)
+{
+	struct zfcp_fc_wka_port *wka_port = req->data;
+	struct fsf_qtcb_header *header = &req->qtcb->header;
+
+	if (req->status & ZFCP_STATUS_FSFREQ_ERROR) {
+		wka_port->status = ZFCP_FC_WKA_PORT_OFFLINE;
+		goto out;
+	}
+
+	switch (header->fsf_status) {
+	case FSF_MAXIMUM_NUMBER_OF_PORTS_EXCEEDED:
+		dev_warn(&req->adapter->ccw_device->dev,
+			 "Opening WKA port 0x%x failed\n", wka_port->d_id);
+		/* fall through */
+	case FSF_ADAPTER_STATUS_AVAILABLE:
+		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
+		wka_port->status = ZFCP_FC_WKA_PORT_OFFLINE;
+		break;
+	case FSF_GOOD:
+		wka_port->handle = header->port_handle;
+		/* fall through */
+	case FSF_PORT_ALREADY_OPEN:
+		wka_port->status = ZFCP_FC_WKA_PORT_ONLINE;
+	}
+out:
+	wake_up(&wka_port->completion_wq);
+}
+
+/**
+ * zfcp_fsf_open_wka_port - create and send open wka-port request
+ * @wka_port: pointer to struct zfcp_fc_wka_port
+ * Returns: 0 on success, error otherwise
+ */
+int zfcp_fsf_open_wka_port(struct zfcp_fc_wka_port *wka_port)
+{
+	struct zfcp_qdio *qdio = wka_port->adapter->qdio;
+	struct zfcp_fsf_req *req;
+	int retval = -EIO;
+
+	spin_lock_irq(&qdio->req_q_lock);
+	if (zfcp_qdio_sbal_get(qdio))
+		goto out;
+
+	req = zfcp_fsf_req_create(qdio, FSF_QTCB_OPEN_PORT_WITH_DID,
+				  SBAL_SFLAGS0_TYPE_READ,
+				  qdio->adapter->pool.erp_req);
+
+	if (IS_ERR(req)) {
+		retval = PTR_ERR(req);
+		goto out;
+	}
+
+	req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
+	zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
+
+	req->handler = zfcp_fsf_open_wka_port_handler;
+	hton24(req->qtcb->bottom.support.d_id, wka_port->d_id);
+	req->data = wka_port;
+
+	zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT);
+	retval = zfcp_fsf_req_send(req);
+	if (retval)
+		zfcp_fsf_req_free(req);
+out:
+	spin_unlock_irq(&qdio->req_q_lock);
+	if (!retval)
+		zfcp_dbf_rec_run_wka("fsowp_1", wka_port, req->req_id);
+	return retval;
+}
+
+static void zfcp_fsf_close_wka_port_handler(struct zfcp_fsf_req *req)
+{
+	struct zfcp_fc_wka_port *wka_port = req->data;
+
+	if (req->qtcb->header.fsf_status == FSF_PORT_HANDLE_NOT_VALID) {
+		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
+		zfcp_erp_adapter_reopen(wka_port->adapter, 0, "fscwph1");
+	}
+
+	wka_port->status = ZFCP_FC_WKA_PORT_OFFLINE;
+	wake_up(&wka_port->completion_wq);
+}
+
+/**
+ * zfcp_fsf_close_wka_port - create and send close wka port request
+ * @wka_port: WKA port to close
+ * Returns: 0 on success, error otherwise
+ */
+int zfcp_fsf_close_wka_port(struct zfcp_fc_wka_port *wka_port)
+{
+	struct zfcp_qdio *qdio = wka_port->adapter->qdio;
+	struct zfcp_fsf_req *req;
+	int retval = -EIO;
+
+	spin_lock_irq(&qdio->req_q_lock);
+	if (zfcp_qdio_sbal_get(qdio))
+		goto out;
+
+	req = zfcp_fsf_req_create(qdio, FSF_QTCB_CLOSE_PORT,
+				  SBAL_SFLAGS0_TYPE_READ,
+				  qdio->adapter->pool.erp_req);
+
+	if (IS_ERR(req)) {
+		retval = PTR_ERR(req);
+		goto out;
+	}
+
+	req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
+	zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
+
+	req->handler = zfcp_fsf_close_wka_port_handler;
+	req->data = wka_port;
+	req->qtcb->header.port_handle = wka_port->handle;
+
+	zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT);
+	retval = zfcp_fsf_req_send(req);
+	if (retval)
+		zfcp_fsf_req_free(req);
+out:
+	spin_unlock_irq(&qdio->req_q_lock);
+	if (!retval)
+		zfcp_dbf_rec_run_wka("fscwp_1", wka_port, req->req_id);
+	return retval;
+}
+
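+/*
+ * Evaluate a completed close physical port response; on success and on
+ * FSF_PORT_BOXED the port and all of its LUNs are marked as physically
+ * closed.
+ */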
+static void zfcp_fsf_close_physical_port_handler(struct zfcp_fsf_req *req)
+{
+	struct zfcp_port *port = req->data;
+	struct fsf_qtcb_header *header = &req->qtcb->header;
+	struct scsi_device *sdev;
+
+	if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
+		return;
+
+	switch (header->fsf_status) {
+	case FSF_PORT_HANDLE_NOT_VALID:
+		zfcp_erp_adapter_reopen(port->adapter, 0, "fscpph1");
+		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
+		break;
+	case FSF_PORT_BOXED:
+		/* can't use generic zfcp_erp_modify_port_status because
+		 * ZFCP_STATUS_COMMON_OPEN must not be reset for the port */
+		atomic_andnot(ZFCP_STATUS_PORT_PHYS_OPEN, &port->status);
+		shost_for_each_device(sdev, port->adapter->scsi_host)
+			if (sdev_to_zfcp(sdev)->port == port)
+				atomic_andnot(ZFCP_STATUS_COMMON_OPEN,
+						  &sdev_to_zfcp(sdev)->status);
+		zfcp_erp_set_port_status(port, ZFCP_STATUS_COMMON_ACCESS_BOXED);
+		zfcp_erp_port_reopen(port, ZFCP_STATUS_COMMON_ERP_FAILED,
+				     "fscpph2");
+		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
+		break;
+	case FSF_ADAPTER_STATUS_AVAILABLE:
+		switch (header->fsf_status_qual.word[0]) {
+		case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
+			/* fall through */
+		case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
+			req->status |= ZFCP_STATUS_FSFREQ_ERROR;
+			break;
+		}
+		break;
+	case FSF_GOOD:
+		/* can't use generic zfcp_erp_modify_port_status because
+		 * ZFCP_STATUS_COMMON_OPEN must not be reset for the port
+		 */
+		atomic_andnot(ZFCP_STATUS_PORT_PHYS_OPEN, &port->status);
+		shost_for_each_device(sdev, port->adapter->scsi_host)
+			if (sdev_to_zfcp(sdev)->port == port)
+				atomic_andnot(ZFCP_STATUS_COMMON_OPEN,
+						  &sdev_to_zfcp(sdev)->status);
+		break;
+	}
+}
+
+/**
+ * zfcp_fsf_close_physical_port - close physical port
+ * @erp_action: pointer to struct zfcp_erp_action
+ * Returns: 0 on success, error otherwise
+ */
+int zfcp_fsf_close_physical_port(struct zfcp_erp_action *erp_action)
+{
+	struct zfcp_qdio *qdio = erp_action->adapter->qdio;
+	struct zfcp_fsf_req *req;
+	int retval = -EIO;
+
+	spin_lock_irq(&qdio->req_q_lock);
+	if (zfcp_qdio_sbal_get(qdio))
+		goto out;
+
+	req = zfcp_fsf_req_create(qdio, FSF_QTCB_CLOSE_PHYSICAL_PORT,
+				  SBAL_SFLAGS0_TYPE_READ,
+				  qdio->adapter->pool.erp_req);
+
+	if (IS_ERR(req)) {
+		retval = PTR_ERR(req);
+		goto out;
+	}
+
+	req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
+	zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
+
+	req->data = erp_action->port;
+	req->qtcb->header.port_handle = erp_action->port->handle;
+	req->erp_action = erp_action;
+	req->handler = zfcp_fsf_close_physical_port_handler;
+	erp_action->fsf_req_id = req->req_id;
+
+	zfcp_fsf_start_erp_timer(req);
+	retval = zfcp_fsf_req_send(req);
+	if (retval) {
+		zfcp_fsf_req_free(req);
+		erp_action->fsf_req_id = 0;
+	}
+out:
+	spin_unlock_irq(&qdio->req_q_lock);
+	return retval;
+}
+
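+/* evaluate a completed open LUN response and record the LUN handle */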
+static void zfcp_fsf_open_lun_handler(struct zfcp_fsf_req *req)
+{
+	struct zfcp_adapter *adapter = req->adapter;
+	struct scsi_device *sdev = req->data;
+	struct zfcp_scsi_dev *zfcp_sdev;
+	struct fsf_qtcb_header *header = &req->qtcb->header;
+	union fsf_status_qual *qual = &header->fsf_status_qual;
+
+	if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
+		return;
+
+	zfcp_sdev = sdev_to_zfcp(sdev);
+
+	atomic_andnot(ZFCP_STATUS_COMMON_ACCESS_DENIED |
+			  ZFCP_STATUS_COMMON_ACCESS_BOXED,
+			  &zfcp_sdev->status);
+
+	switch (header->fsf_status) {
+
+	case FSF_PORT_HANDLE_NOT_VALID:
+		zfcp_erp_adapter_reopen(adapter, 0, "fsouh_1");
+		/* fall through */
+	case FSF_LUN_ALREADY_OPEN:
+		break;
+	case FSF_PORT_BOXED:
+		zfcp_erp_set_port_status(zfcp_sdev->port,
+					 ZFCP_STATUS_COMMON_ACCESS_BOXED);
+		zfcp_erp_port_reopen(zfcp_sdev->port,
+				     ZFCP_STATUS_COMMON_ERP_FAILED, "fsouh_2");
+		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
+		break;
+	case FSF_LUN_SHARING_VIOLATION:
+		if (qual->word[0])
+			dev_warn(&zfcp_sdev->port->adapter->ccw_device->dev,
+				 "LUN 0x%Lx on port 0x%Lx is already in "
+				 "use by CSS%d, MIF Image ID %x\n",
+				 zfcp_scsi_dev_lun(sdev),
+				 (unsigned long long)zfcp_sdev->port->wwpn,
+				 qual->fsf_queue_designator.cssid,
+				 qual->fsf_queue_designator.hla);
+		zfcp_erp_set_lun_status(sdev,
+					ZFCP_STATUS_COMMON_ERP_FAILED |
+					ZFCP_STATUS_COMMON_ACCESS_DENIED);
+		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
+		break;
+	case FSF_MAXIMUM_NUMBER_OF_LUNS_EXCEEDED:
+		dev_warn(&adapter->ccw_device->dev,
+			 "No handle is available for LUN "
+			 "0x%016Lx on port 0x%016Lx\n",
+			 (unsigned long long)zfcp_scsi_dev_lun(sdev),
+			 (unsigned long long)zfcp_sdev->port->wwpn);
+		zfcp_erp_set_lun_status(sdev, ZFCP_STATUS_COMMON_ERP_FAILED);
+		/* fall through */
+	case FSF_INVALID_COMMAND_OPTION:
+		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
+		break;
+	case FSF_ADAPTER_STATUS_AVAILABLE:
+		switch (header->fsf_status_qual.word[0]) {
+		case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
+			zfcp_fc_test_link(zfcp_sdev->port);
+			/* fall through */
+		case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
+			req->status |= ZFCP_STATUS_FSFREQ_ERROR;
+			break;
+		}
+		break;
+
+	case FSF_GOOD:
+		zfcp_sdev->lun_handle = header->lun_handle;
+		atomic_or(ZFCP_STATUS_COMMON_OPEN, &zfcp_sdev->status);
+		break;
+	}
+}
+
+/**
+ * zfcp_fsf_open_lun - open LUN
+ * @erp_action: pointer to struct zfcp_erp_action
+ * Returns: 0 on success, error otherwise
+ */
+int zfcp_fsf_open_lun(struct zfcp_erp_action *erp_action)
+{
+	struct zfcp_adapter *adapter = erp_action->adapter;
+	struct zfcp_qdio *qdio = adapter->qdio;
+	struct zfcp_fsf_req *req;
+	int retval = -EIO;
+
+	spin_lock_irq(&qdio->req_q_lock);
+	if (zfcp_qdio_sbal_get(qdio))
+		goto out;
+
+	req = zfcp_fsf_req_create(qdio, FSF_QTCB_OPEN_LUN,
+				  SBAL_SFLAGS0_TYPE_READ,
+				  adapter->pool.erp_req);
+
+	if (IS_ERR(req)) {
+		retval = PTR_ERR(req);
+		goto out;
+	}
+
+	req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
+	zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
+
+	req->qtcb->header.port_handle = erp_action->port->handle;
+	req->qtcb->bottom.support.fcp_lun = zfcp_scsi_dev_lun(erp_action->sdev);
+	req->handler = zfcp_fsf_open_lun_handler;
+	req->data = erp_action->sdev;
+	req->erp_action = erp_action;
+	erp_action->fsf_req_id = req->req_id;
+
+	if (!(adapter->connection_features & FSF_FEATURE_NPIV_MODE))
+		req->qtcb->bottom.support.option = FSF_OPEN_LUN_SUPPRESS_BOXING;
+
+	zfcp_fsf_start_erp_timer(req);
+	retval = zfcp_fsf_req_send(req);
+	if (retval) {
+		zfcp_fsf_req_free(req);
+		erp_action->fsf_req_id = 0;
+	}
+out:
+	spin_unlock_irq(&qdio->req_q_lock);
+	return retval;
+}
+
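+/* evaluate a completed close LUN response and clear the LUN open status */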
+static void zfcp_fsf_close_lun_handler(struct zfcp_fsf_req *req)
+{
+	struct scsi_device *sdev = req->data;
+	struct zfcp_scsi_dev *zfcp_sdev;
+
+	if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
+		return;
+
+	zfcp_sdev = sdev_to_zfcp(sdev);
+
+	switch (req->qtcb->header.fsf_status) {
+	case FSF_PORT_HANDLE_NOT_VALID:
+		zfcp_erp_adapter_reopen(zfcp_sdev->port->adapter, 0, "fscuh_1");
+		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
+		break;
+	case FSF_LUN_HANDLE_NOT_VALID:
+		zfcp_erp_port_reopen(zfcp_sdev->port, 0, "fscuh_2");
+		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
+		break;
+	case FSF_PORT_BOXED:
+		zfcp_erp_set_port_status(zfcp_sdev->port,
+					 ZFCP_STATUS_COMMON_ACCESS_BOXED);
+		zfcp_erp_port_reopen(zfcp_sdev->port,
+				     ZFCP_STATUS_COMMON_ERP_FAILED, "fscuh_3");
+		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
+		break;
+	case FSF_ADAPTER_STATUS_AVAILABLE:
+		switch (req->qtcb->header.fsf_status_qual.word[0]) {
+		case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
+			zfcp_fc_test_link(zfcp_sdev->port);
+			/* fall through */
+		case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
+			req->status |= ZFCP_STATUS_FSFREQ_ERROR;
+			break;
+		}
+		break;
+	case FSF_GOOD:
+		atomic_andnot(ZFCP_STATUS_COMMON_OPEN, &zfcp_sdev->status);
+		break;
+	}
+}
+
+/**
+ * zfcp_fsf_close_lun - close LUN
+ * @erp_action: pointer to erp_action triggering the "close LUN"
+ * Returns: 0 on success, error otherwise
+ */
+int zfcp_fsf_close_lun(struct zfcp_erp_action *erp_action)
+{
+	struct zfcp_qdio *qdio = erp_action->adapter->qdio;
+	struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(erp_action->sdev);
+	struct zfcp_fsf_req *req;
+	int retval = -EIO;
+
+	spin_lock_irq(&qdio->req_q_lock);
+	if (zfcp_qdio_sbal_get(qdio))
+		goto out;
+
+	req = zfcp_fsf_req_create(qdio, FSF_QTCB_CLOSE_LUN,
+				  SBAL_SFLAGS0_TYPE_READ,
+				  qdio->adapter->pool.erp_req);
+
+	if (IS_ERR(req)) {
+		retval = PTR_ERR(req);
+		goto out;
+	}
+
+	req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
+	zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
+
+	req->qtcb->header.port_handle = erp_action->port->handle;
+	req->qtcb->header.lun_handle = zfcp_sdev->lun_handle;
+	req->handler = zfcp_fsf_close_lun_handler;
+	req->data = erp_action->sdev;
+	req->erp_action = erp_action;
+	erp_action->fsf_req_id = req->req_id;
+
+	zfcp_fsf_start_erp_timer(req);
+	retval = zfcp_fsf_req_send(req);
+	if (retval) {
+		zfcp_fsf_req_free(req);
+		erp_action->fsf_req_id = 0;
+	}
+out:
+	spin_unlock_irq(&qdio->req_q_lock);
+	return retval;
+}
+
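+/* fold one latency sample into the sum/min/max of a latency record */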
+static void zfcp_fsf_update_lat(struct fsf_latency_record *lat_rec, u32 lat)
+{
+	lat_rec->sum += lat;
+	lat_rec->min = min(lat_rec->min, lat);
+	lat_rec->max = max(lat_rec->max, lat);
+}
+
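+/*
+ * Pass per-request latency and queue usage data to the block layer
+ * (blktrace) and, if the adapter provides measurement data, account
+ * channel and fabric latencies per SCSI device.
+ */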
+static void zfcp_fsf_req_trace(struct zfcp_fsf_req *req, struct scsi_cmnd *scsi)
+{
+	struct fsf_qual_latency_info *lat_in;
+	struct latency_cont *lat = NULL;
+	struct zfcp_scsi_dev *zfcp_sdev;
+	struct zfcp_blk_drv_data blktrc;
+	int ticks = req->adapter->timer_ticks;
+
+	lat_in = &req->qtcb->prefix.prot_status_qual.latency_info;
+
+	blktrc.flags = 0;
+	blktrc.magic = ZFCP_BLK_DRV_DATA_MAGIC;
+	if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
+		blktrc.flags |= ZFCP_BLK_REQ_ERROR;
+	blktrc.inb_usage = 0;
+	blktrc.outb_usage = req->qdio_req.qdio_outb_usage;
+
+	if (req->adapter->adapter_features & FSF_FEATURE_MEASUREMENT_DATA &&
+	    !(req->status & ZFCP_STATUS_FSFREQ_ERROR)) {
+		zfcp_sdev = sdev_to_zfcp(scsi->device);
+		blktrc.flags |= ZFCP_BLK_LAT_VALID;
+		blktrc.channel_lat = lat_in->channel_lat * ticks;
+		blktrc.fabric_lat = lat_in->fabric_lat * ticks;
+
+		switch (req->qtcb->bottom.io.data_direction) {
+		case FSF_DATADIR_DIF_READ_STRIP:
+		case FSF_DATADIR_DIF_READ_CONVERT:
+		case FSF_DATADIR_READ:
+			lat = &zfcp_sdev->latencies.read;
+			break;
+		case FSF_DATADIR_DIF_WRITE_INSERT:
+		case FSF_DATADIR_DIF_WRITE_CONVERT:
+		case FSF_DATADIR_WRITE:
+			lat = &zfcp_sdev->latencies.write;
+			break;
+		case FSF_DATADIR_CMND:
+			lat = &zfcp_sdev->latencies.cmd;
+			break;
+		}
+
+		if (lat) {
+			spin_lock(&zfcp_sdev->latencies.lock);
+			zfcp_fsf_update_lat(&lat->channel, lat_in->channel_lat);
+			zfcp_fsf_update_lat(&lat->fabric, lat_in->fabric_lat);
+			lat->counter++;
+			spin_unlock(&zfcp_sdev->latencies.lock);
+		}
+	}
+
+	blk_add_driver_data(scsi->request->q, scsi->request, &blktrc,
+			    sizeof(blktrc));
+}
+
+/**
+ * zfcp_fsf_fcp_handler_common() - FCP response handler common to I/O and TMF.
+ * @req: Pointer to FSF request.
+ * @sdev: Pointer to SCSI device as request context.
+ */
+static void zfcp_fsf_fcp_handler_common(struct zfcp_fsf_req *req,
+					struct scsi_device *sdev)
+{
+	struct zfcp_scsi_dev *zfcp_sdev;
+	struct fsf_qtcb_header *header = &req->qtcb->header;
+
+	if (unlikely(req->status & ZFCP_STATUS_FSFREQ_ERROR))
+		return;
+
+	zfcp_sdev = sdev_to_zfcp(sdev);
+
+	switch (header->fsf_status) {
+	case FSF_HANDLE_MISMATCH:
+	case FSF_PORT_HANDLE_NOT_VALID:
+		zfcp_erp_adapter_reopen(req->adapter, 0, "fssfch1");
+		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
+		break;
+	case FSF_FCPLUN_NOT_VALID:
+	case FSF_LUN_HANDLE_NOT_VALID:
+		zfcp_erp_port_reopen(zfcp_sdev->port, 0, "fssfch2");
+		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
+		break;
+	case FSF_SERVICE_CLASS_NOT_SUPPORTED:
+		zfcp_fsf_class_not_supp(req);
+		break;
+	case FSF_DIRECTION_INDICATOR_NOT_VALID:
+		dev_err(&req->adapter->ccw_device->dev,
+			"Incorrect direction %d, LUN 0x%016Lx on port "
+			"0x%016Lx closed\n",
+			req->qtcb->bottom.io.data_direction,
+			(unsigned long long)zfcp_scsi_dev_lun(sdev),
+			(unsigned long long)zfcp_sdev->port->wwpn);
+		zfcp_erp_adapter_shutdown(req->adapter, 0, "fssfch3");
+		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
+		break;
+	case FSF_CMND_LENGTH_NOT_VALID:
+		dev_err(&req->adapter->ccw_device->dev,
+			"Incorrect CDB length %d, LUN 0x%016Lx on "
+			"port 0x%016Lx closed\n",
+			req->qtcb->bottom.io.fcp_cmnd_length,
+			(unsigned long long)zfcp_scsi_dev_lun(sdev),
+			(unsigned long long)zfcp_sdev->port->wwpn);
+		zfcp_erp_adapter_shutdown(req->adapter, 0, "fssfch4");
+		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
+		break;
+	case FSF_PORT_BOXED:
+		zfcp_erp_set_port_status(zfcp_sdev->port,
+					 ZFCP_STATUS_COMMON_ACCESS_BOXED);
+		zfcp_erp_port_reopen(zfcp_sdev->port,
+				     ZFCP_STATUS_COMMON_ERP_FAILED, "fssfch5");
+		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
+		break;
+	case FSF_LUN_BOXED:
+		zfcp_erp_set_lun_status(sdev, ZFCP_STATUS_COMMON_ACCESS_BOXED);
+		zfcp_erp_lun_reopen(sdev, ZFCP_STATUS_COMMON_ERP_FAILED,
+				    "fssfch6");
+		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
+		break;
+	case FSF_ADAPTER_STATUS_AVAILABLE:
+		if (header->fsf_status_qual.word[0] ==
+		    FSF_SQ_INVOKE_LINK_TEST_PROCEDURE)
+			zfcp_fc_test_link(zfcp_sdev->port);
+		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
+		break;
+	}
+}
+
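+/*
+ * Complete an FCP command: evaluate FSF and DIF protection status,
+ * translate the FCP response IU for the SCSI midlayer, and call
+ * scsi_done under the abort_lock so aborts cannot race with completion.
+ */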
+static void zfcp_fsf_fcp_cmnd_handler(struct zfcp_fsf_req *req)
+{
+	struct scsi_cmnd *scpnt;
+	struct fcp_resp_with_ext *fcp_rsp;
+	unsigned long flags;
+
+	read_lock_irqsave(&req->adapter->abort_lock, flags);
+
+	scpnt = req->data;
+	if (unlikely(!scpnt)) {
+		read_unlock_irqrestore(&req->adapter->abort_lock, flags);
+		return;
+	}
+
+	zfcp_fsf_fcp_handler_common(req, scpnt->device);
+
+	if (unlikely(req->status & ZFCP_STATUS_FSFREQ_ERROR)) {
+		set_host_byte(scpnt, DID_TRANSPORT_DISRUPTED);
+		goto skip_fsfstatus;
+	}
+
+	switch (req->qtcb->header.fsf_status) {
+	case FSF_INCONSISTENT_PROT_DATA:
+	case FSF_INVALID_PROT_PARM:
+		set_host_byte(scpnt, DID_ERROR);
+		goto skip_fsfstatus;
+	case FSF_BLOCK_GUARD_CHECK_FAILURE:
+		zfcp_scsi_dif_sense_error(scpnt, 0x1);
+		goto skip_fsfstatus;
+	case FSF_APP_TAG_CHECK_FAILURE:
+		zfcp_scsi_dif_sense_error(scpnt, 0x2);
+		goto skip_fsfstatus;
+	case FSF_REF_TAG_CHECK_FAILURE:
+		zfcp_scsi_dif_sense_error(scpnt, 0x3);
+		goto skip_fsfstatus;
+	}
+	BUILD_BUG_ON(sizeof(struct fcp_resp_with_ext) > FSF_FCP_RSP_SIZE);
+	fcp_rsp = &req->qtcb->bottom.io.fcp_rsp.iu;
+	zfcp_fc_eval_fcp_rsp(fcp_rsp, scpnt);
+
+skip_fsfstatus:
+	zfcp_fsf_req_trace(req, scpnt);
+	zfcp_dbf_scsi_result(scpnt, req);
+
+	scpnt->host_scribble = NULL;
+	scpnt->scsi_done(scpnt);
+	/*
+	 * We must hold this lock until scsi_done has been called.
+	 * Otherwise we may call scsi_done after an abort of this command
+	 * has completed.
+	 * Note: scsi_done must not block!
+	 */
+	read_unlock_irqrestore(&req->adapter->abort_lock, flags);
+}
+
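+/* derive the FSF data direction from the SCSI protection op and direction */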
+static int zfcp_fsf_set_data_dir(struct scsi_cmnd *scsi_cmnd, u32 *data_dir)
+{
+	switch (scsi_get_prot_op(scsi_cmnd)) {
+	case SCSI_PROT_NORMAL:
+		switch (scsi_cmnd->sc_data_direction) {
+		case DMA_NONE:
+			*data_dir = FSF_DATADIR_CMND;
+			break;
+		case DMA_FROM_DEVICE:
+			*data_dir = FSF_DATADIR_READ;
+			break;
+		case DMA_TO_DEVICE:
+			*data_dir = FSF_DATADIR_WRITE;
+			break;
+		case DMA_BIDIRECTIONAL:
+			return -EINVAL;
+		}
+		break;
+
+	case SCSI_PROT_READ_STRIP:
+		*data_dir = FSF_DATADIR_DIF_READ_STRIP;
+		break;
+	case SCSI_PROT_WRITE_INSERT:
+		*data_dir = FSF_DATADIR_DIF_WRITE_INSERT;
+		break;
+	case SCSI_PROT_READ_PASS:
+		*data_dir = FSF_DATADIR_DIF_READ_CONVERT;
+		break;
+	case SCSI_PROT_WRITE_PASS:
+		*data_dir = FSF_DATADIR_DIF_WRITE_CONVERT;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/**
+ * zfcp_fsf_fcp_cmnd - initiate an FCP command (for a SCSI command)
+ * @scsi_cmnd: scsi command to be sent
+ * Returns: 0 on success, error otherwise
+ */
+int zfcp_fsf_fcp_cmnd(struct scsi_cmnd *scsi_cmnd)
+{
+	struct zfcp_fsf_req *req;
+	struct fcp_cmnd *fcp_cmnd;
+	u8 sbtype = SBAL_SFLAGS0_TYPE_READ;
+	int retval = -EIO;
+	struct scsi_device *sdev = scsi_cmnd->device;
+	struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
+	struct zfcp_adapter *adapter = zfcp_sdev->port->adapter;
+	struct zfcp_qdio *qdio = adapter->qdio;
+	struct fsf_qtcb_bottom_io *io;
+	unsigned long flags;
+
+	if (unlikely(!(atomic_read(&zfcp_sdev->status) &
+		       ZFCP_STATUS_COMMON_UNBLOCKED)))
+		return -EBUSY;
+
+	spin_lock_irqsave(&qdio->req_q_lock, flags);
+	if (atomic_read(&qdio->req_q_free) <= 0) {
+		atomic_inc(&qdio->req_q_full);
+		goto out;
+	}
+
+	if (scsi_cmnd->sc_data_direction == DMA_TO_DEVICE)
+		sbtype = SBAL_SFLAGS0_TYPE_WRITE;
+
+	req = zfcp_fsf_req_create(qdio, FSF_QTCB_FCP_CMND,
+				  sbtype, adapter->pool.scsi_req);
+
+	if (IS_ERR(req)) {
+		retval = PTR_ERR(req);
+		goto out;
+	}
+
+	scsi_cmnd->host_scribble = (unsigned char *) req->req_id;
+
+	io = &req->qtcb->bottom.io;
+	req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
+	req->data = scsi_cmnd;
+	req->handler = zfcp_fsf_fcp_cmnd_handler;
+	req->qtcb->header.lun_handle = zfcp_sdev->lun_handle;
+	req->qtcb->header.port_handle = zfcp_sdev->port->handle;
+	io->service_class = FSF_CLASS_3;
+	io->fcp_cmnd_length = FCP_CMND_LEN;
+
+	if (scsi_get_prot_op(scsi_cmnd) != SCSI_PROT_NORMAL) {
+		io->data_block_length = scsi_cmnd->device->sector_size;
+		io->ref_tag_value = scsi_get_lba(scsi_cmnd) & 0xFFFFFFFF;
+	}
+
+	if (zfcp_fsf_set_data_dir(scsi_cmnd, &io->data_direction))
+		goto failed_scsi_cmnd;
+
+	BUILD_BUG_ON(sizeof(struct fcp_cmnd) > FSF_FCP_CMND_SIZE);
+	fcp_cmnd = &req->qtcb->bottom.io.fcp_cmnd.iu;
+	zfcp_fc_scsi_to_fcp(fcp_cmnd, scsi_cmnd);
+
+	if ((scsi_get_prot_op(scsi_cmnd) != SCSI_PROT_NORMAL) &&
+	    scsi_prot_sg_count(scsi_cmnd)) {
+		zfcp_qdio_set_data_div(qdio, &req->qdio_req,
+				       scsi_prot_sg_count(scsi_cmnd));
+		retval = zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req,
+						 scsi_prot_sglist(scsi_cmnd));
+		if (retval)
+			goto failed_scsi_cmnd;
+		io->prot_data_length = zfcp_qdio_real_bytes(
+						scsi_prot_sglist(scsi_cmnd));
+	}
+
+	retval = zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req,
+					 scsi_sglist(scsi_cmnd));
+	if (unlikely(retval))
+		goto failed_scsi_cmnd;
+
+	zfcp_qdio_set_sbale_last(adapter->qdio, &req->qdio_req);
+	if (zfcp_adapter_multi_buffer_active(adapter))
+		zfcp_qdio_set_scount(qdio, &req->qdio_req);
+
+	retval = zfcp_fsf_req_send(req);
+	if (unlikely(retval))
+		goto failed_scsi_cmnd;
+
+	goto out;
+
+failed_scsi_cmnd:
+	zfcp_fsf_req_free(req);
+	scsi_cmnd->host_scribble = NULL;
+out:
+	spin_unlock_irqrestore(&qdio->req_q_lock, flags);
+	return retval;
+}
+
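+/* evaluate the FCP response of a completed task management function */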
+static void zfcp_fsf_fcp_task_mgmt_handler(struct zfcp_fsf_req *req)
+{
+	struct scsi_device *sdev = req->data;
+	struct fcp_resp_with_ext *fcp_rsp;
+	struct fcp_resp_rsp_info *rsp_info;
+
+	zfcp_fsf_fcp_handler_common(req, sdev);
+
+	fcp_rsp = &req->qtcb->bottom.io.fcp_rsp.iu;
+	rsp_info = (struct fcp_resp_rsp_info *) &fcp_rsp[1];
+
+	if ((rsp_info->rsp_code != FCP_TMF_CMPL) ||
+	     (req->status & ZFCP_STATUS_FSFREQ_ERROR))
+		req->status |= ZFCP_STATUS_FSFREQ_TMFUNCFAILED;
+}
+
+/**
+ * zfcp_fsf_fcp_task_mgmt() - Send SCSI task management command (TMF).
+ * @sdev: Pointer to SCSI device to send the task management command to.
+ * @tm_flags: Unsigned byte for task management flags.
+ *
+ * Return: On success pointer to struct zfcp_fsf_req, %NULL otherwise.
+ */
+struct zfcp_fsf_req *zfcp_fsf_fcp_task_mgmt(struct scsi_device *sdev,
+					    u8 tm_flags)
+{
+	struct zfcp_fsf_req *req = NULL;
+	struct fcp_cmnd *fcp_cmnd;
+	struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
+	struct zfcp_qdio *qdio = zfcp_sdev->port->adapter->qdio;
+
+	if (unlikely(!(atomic_read(&zfcp_sdev->status) &
+		       ZFCP_STATUS_COMMON_UNBLOCKED)))
+		return NULL;
+
+	spin_lock_irq(&qdio->req_q_lock);
+	if (zfcp_qdio_sbal_get(qdio))
+		goto out;
+
+	req = zfcp_fsf_req_create(qdio, FSF_QTCB_FCP_CMND,
+				  SBAL_SFLAGS0_TYPE_WRITE,
+				  qdio->adapter->pool.scsi_req);
+
+	if (IS_ERR(req)) {
+		req = NULL;
+		goto out;
+	}
+
+	req->data = sdev;
+
+	req->handler = zfcp_fsf_fcp_task_mgmt_handler;
+	req->qtcb->header.lun_handle = zfcp_sdev->lun_handle;
+	req->qtcb->header.port_handle = zfcp_sdev->port->handle;
+	req->qtcb->bottom.io.data_direction = FSF_DATADIR_CMND;
+	req->qtcb->bottom.io.service_class = FSF_CLASS_3;
+	req->qtcb->bottom.io.fcp_cmnd_length = FCP_CMND_LEN;
+
+	zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
+
+	fcp_cmnd = &req->qtcb->bottom.io.fcp_cmnd.iu;
+	zfcp_fc_fcp_tm(fcp_cmnd, sdev, tm_flags);
+
+	zfcp_fsf_start_timer(req, ZFCP_SCSI_ER_TIMEOUT);
+	if (!zfcp_fsf_req_send(req))
+		goto out;
+
+	zfcp_fsf_req_free(req);
+	req = NULL;
+out:
+	spin_unlock_irq(&qdio->req_q_lock);
+	return req;
+}
+
+/**
+ * zfcp_fsf_reqid_check - validate req_id contained in SBAL returned by QDIO
+ * @qdio: pointer to struct zfcp_qdio
+ * @sbal_idx: response queue index of SBAL to be processed
+ */
+void zfcp_fsf_reqid_check(struct zfcp_qdio *qdio, int sbal_idx)
+{
+	struct zfcp_adapter *adapter = qdio->adapter;
+	struct qdio_buffer *sbal = qdio->res_q[sbal_idx];
+	struct qdio_buffer_element *sbale;
+	struct zfcp_fsf_req *fsf_req;
+	unsigned long req_id;
+	int idx;
+
+	for (idx = 0; idx < QDIO_MAX_ELEMENTS_PER_BUFFER; idx++) {
+
+		sbale = &sbal->element[idx];
+		req_id = (unsigned long) sbale->addr;
+		fsf_req = zfcp_reqlist_find_rm(adapter->req_list, req_id);
+
+		if (!fsf_req) {
+			/*
+			 * An unknown request means that we have potential
+			 * memory corruption and must stop the machine
+			 * immediately.
+			 */
+			zfcp_qdio_siosl(adapter);
+			panic("error: unknown req_id (%lx) on adapter %s.\n",
+			      req_id, dev_name(&adapter->ccw_device->dev));
+		}
+
+		zfcp_fsf_req_complete(fsf_req);
+
+		if (likely(sbale->eflags & SBAL_EFLAGS_LAST_ENTRY))
+			break;
+	}
+}
diff --git a/drivers/s390/scsi/zfcp_fsf.h b/drivers/s390/scsi/zfcp_fsf.h
new file mode 100644
index 0000000..535628b
--- /dev/null
+++ b/drivers/s390/scsi/zfcp_fsf.h
@@ -0,0 +1,459 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * zfcp device driver
+ *
+ * Interface to the FSF support functions.
+ *
+ * Copyright IBM Corp. 2002, 2018
+ */
+
+#ifndef FSF_H
+#define FSF_H
+
+#include <linux/pfn.h>
+#include <linux/scatterlist.h>
+#include <scsi/libfc.h>
+
+#define FSF_QTCB_CURRENT_VERSION		0x00000001
+
+/* FSF commands */
+#define	FSF_QTCB_FCP_CMND			0x00000001
+#define	FSF_QTCB_ABORT_FCP_CMND			0x00000002
+#define	FSF_QTCB_OPEN_PORT_WITH_DID		0x00000005
+#define	FSF_QTCB_OPEN_LUN			0x00000006
+#define	FSF_QTCB_CLOSE_LUN			0x00000007
+#define	FSF_QTCB_CLOSE_PORT			0x00000008
+#define	FSF_QTCB_CLOSE_PHYSICAL_PORT		0x00000009
+#define	FSF_QTCB_SEND_ELS			0x0000000B
+#define	FSF_QTCB_SEND_GENERIC			0x0000000C
+#define	FSF_QTCB_EXCHANGE_CONFIG_DATA		0x0000000D
+#define	FSF_QTCB_EXCHANGE_PORT_DATA		0x0000000E
+#define FSF_QTCB_DOWNLOAD_CONTROL_FILE		0x00000012
+#define FSF_QTCB_UPLOAD_CONTROL_FILE		0x00000013
+
+/* FSF QTCB types */
+#define FSF_IO_COMMAND				0x00000001
+#define FSF_SUPPORT_COMMAND			0x00000002
+#define FSF_CONFIG_COMMAND			0x00000003
+#define FSF_PORT_COMMAND			0x00000004
+
+/* FSF protocol states */
+#define FSF_PROT_GOOD				0x00000001
+#define FSF_PROT_QTCB_VERSION_ERROR		0x00000010
+#define FSF_PROT_SEQ_NUMB_ERROR			0x00000020
+#define FSF_PROT_UNSUPP_QTCB_TYPE		0x00000040
+#define FSF_PROT_HOST_CONNECTION_INITIALIZING	0x00000080
+#define FSF_PROT_FSF_STATUS_PRESENTED		0x00000100
+#define FSF_PROT_DUPLICATE_REQUEST_ID		0x00000200
+#define FSF_PROT_LINK_DOWN                      0x00000400
+#define FSF_PROT_REEST_QUEUE                    0x00000800
+#define FSF_PROT_ERROR_STATE			0x01000000
+
+/* FSF states */
+#define FSF_GOOD				0x00000000
+#define FSF_PORT_ALREADY_OPEN			0x00000001
+#define FSF_LUN_ALREADY_OPEN			0x00000002
+#define FSF_PORT_HANDLE_NOT_VALID		0x00000003
+#define FSF_LUN_HANDLE_NOT_VALID		0x00000004
+#define FSF_HANDLE_MISMATCH			0x00000005
+#define FSF_SERVICE_CLASS_NOT_SUPPORTED		0x00000006
+#define FSF_FCPLUN_NOT_VALID			0x00000009
+#define FSF_LUN_SHARING_VIOLATION               0x00000012
+#define FSF_FCP_COMMAND_DOES_NOT_EXIST		0x00000022
+#define FSF_DIRECTION_INDICATOR_NOT_VALID	0x00000030
+#define FSF_CMND_LENGTH_NOT_VALID		0x00000033
+#define FSF_MAXIMUM_NUMBER_OF_PORTS_EXCEEDED	0x00000040
+#define FSF_MAXIMUM_NUMBER_OF_LUNS_EXCEEDED	0x00000041
+#define FSF_ELS_COMMAND_REJECTED		0x00000050
+#define FSF_GENERIC_COMMAND_REJECTED		0x00000051
+#define FSF_PORT_BOXED				0x00000059
+#define FSF_LUN_BOXED				0x0000005A
+#define FSF_EXCHANGE_CONFIG_DATA_INCOMPLETE	0x0000005B
+#define FSF_PAYLOAD_SIZE_MISMATCH		0x00000060
+#define FSF_REQUEST_SIZE_TOO_LARGE		0x00000061
+#define FSF_RESPONSE_SIZE_TOO_LARGE		0x00000062
+#define FSF_SBAL_MISMATCH			0x00000063
+#define FSF_INCONSISTENT_PROT_DATA		0x00000070
+#define FSF_INVALID_PROT_PARM			0x00000071
+#define FSF_BLOCK_GUARD_CHECK_FAILURE		0x00000081
+#define FSF_APP_TAG_CHECK_FAILURE		0x00000082
+#define FSF_REF_TAG_CHECK_FAILURE		0x00000083
+#define FSF_ADAPTER_STATUS_AVAILABLE		0x000000AD
+#define FSF_FCP_RSP_AVAILABLE			0x000000AF
+#define FSF_UNKNOWN_COMMAND			0x000000E2
+#define FSF_UNKNOWN_OP_SUBTYPE                  0x000000E3
+#define FSF_INVALID_COMMAND_OPTION              0x000000E5
+
+#define FSF_PROT_STATUS_QUAL_SIZE		16
+#define FSF_STATUS_QUALIFIER_SIZE		16
+
+/* FSF status qualifier, recommendations */
+#define FSF_SQ_NO_RECOM				0x00
+#define FSF_SQ_FCP_RSP_AVAILABLE		0x01
+#define FSF_SQ_RETRY_IF_POSSIBLE		0x02
+#define FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED	0x03
+#define FSF_SQ_INVOKE_LINK_TEST_PROCEDURE	0x04
+#define FSF_SQ_COMMAND_ABORTED			0x06
+#define FSF_SQ_NO_RETRY_POSSIBLE		0x07
+
+/* FSF status qualifier (most significant 4 bytes), local link down */
+#define FSF_PSQ_LINK_NO_LIGHT			0x00000004
+#define FSF_PSQ_LINK_WRAP_PLUG			0x00000008
+#define FSF_PSQ_LINK_NO_FCP			0x00000010
+#define FSF_PSQ_LINK_FIRMWARE_UPDATE		0x00000020
+#define FSF_PSQ_LINK_INVALID_WWPN		0x00000100
+#define FSF_PSQ_LINK_NO_NPIV_SUPPORT		0x00000200
+#define FSF_PSQ_LINK_NO_FCP_RESOURCES		0x00000400
+#define FSF_PSQ_LINK_NO_FABRIC_RESOURCES	0x00000800
+#define FSF_PSQ_LINK_FABRIC_LOGIN_UNABLE	0x00001000
+#define FSF_PSQ_LINK_WWPN_ASSIGNMENT_CORRUPTED	0x00002000
+#define FSF_PSQ_LINK_MODE_TABLE_CURRUPTED	0x00004000
+#define FSF_PSQ_LINK_NO_WWPN_ASSIGNMENT		0x00008000
+
+/* payload size in status read buffer */
+#define FSF_STATUS_READ_PAYLOAD_SIZE		4032
+
+/* number of status read buffers that should be sent by ULP */
+#define FSF_STATUS_READS_RECOM			16
+
+/* status types in status read buffer */
+#define FSF_STATUS_READ_PORT_CLOSED		0x00000001
+#define FSF_STATUS_READ_INCOMING_ELS		0x00000002
+#define FSF_STATUS_READ_SENSE_DATA_AVAIL        0x00000003
+#define FSF_STATUS_READ_BIT_ERROR_THRESHOLD	0x00000004
+#define FSF_STATUS_READ_LINK_DOWN		0x00000005
+#define FSF_STATUS_READ_LINK_UP		0x00000006
+#define FSF_STATUS_READ_NOTIFICATION_LOST	0x00000009
+#define FSF_STATUS_READ_FEATURE_UPDATE_ALERT	0x0000000C
+
+/* status subtypes for link down */
+#define FSF_STATUS_READ_SUB_NO_PHYSICAL_LINK	0x00000000
+#define FSF_STATUS_READ_SUB_FDISC_FAILED	0x00000001
+#define FSF_STATUS_READ_SUB_FIRMWARE_UPDATE	0x00000002
+
+/* status subtypes for unsolicited status notification lost */
+#define FSF_STATUS_READ_SUB_INCOMING_ELS	0x00000001
+
+/* topology that is detected by the adapter */
+#define FSF_TOPO_P2P				0x00000001
+#define FSF_TOPO_FABRIC				0x00000002
+#define FSF_TOPO_AL				0x00000003
+
+/* data direction for FCP commands */
+#define FSF_DATADIR_WRITE			0x00000001
+#define FSF_DATADIR_READ			0x00000002
+#define FSF_DATADIR_CMND			0x00000004
+#define FSF_DATADIR_DIF_WRITE_INSERT		0x00000009
+#define FSF_DATADIR_DIF_READ_STRIP		0x0000000a
+#define FSF_DATADIR_DIF_WRITE_CONVERT		0x0000000b
+#define FSF_DATADIR_DIF_READ_CONVERT		0x0000000c
+
+/* data protection control flags */
+#define FSF_APP_TAG_CHECK_ENABLE		0x10
+
+/* fc service class */
+#define FSF_CLASS_3				0x00000003
+
+/* logging space behind QTCB */
+#define FSF_QTCB_LOG_SIZE			1024
+
+/* channel features */
+#define FSF_FEATURE_NOTIFICATION_LOST		0x00000008
+#define FSF_FEATURE_HBAAPI_MANAGEMENT           0x00000010
+#define FSF_FEATURE_ELS_CT_CHAINED_SBALS	0x00000020
+#define FSF_FEATURE_UPDATE_ALERT		0x00000100
+#define FSF_FEATURE_MEASUREMENT_DATA		0x00000200
+#define FSF_FEATURE_DIF_PROT_TYPE1		0x00010000
+#define FSF_FEATURE_DIX_PROT_TCPIP		0x00020000
+
+/* host connection features */
+#define FSF_FEATURE_NPIV_MODE			0x00000001
+
+/* option */
+#define FSF_OPEN_LUN_SUPPRESS_BOXING		0x00000001
+
+struct fsf_queue_designator {
+	u8  cssid;
+	u8  chpid;
+	u8  hla;
+	u8  ua;
+	u32 res1;
+} __attribute__ ((packed));
+
+struct fsf_bit_error_payload {
+	u32 res1;
+	u32 link_failure_error_count;
+	u32 loss_of_sync_error_count;
+	u32 loss_of_signal_error_count;
+	u32 primitive_sequence_error_count;
+	u32 invalid_transmission_word_error_count;
+	u32 crc_error_count;
+	u32 primitive_sequence_event_timeout_count;
+	u32 elastic_buffer_overrun_error_count;
+	u32 fcal_arbitration_timeout_count;
+	u32 advertised_receive_b2b_credit;
+	u32 current_receive_b2b_credit;
+	u32 advertised_transmit_b2b_credit;
+	u32 current_transmit_b2b_credit;
+} __attribute__ ((packed));
+
+struct fsf_link_down_info {
+	u32 error_code;
+	u32 res1;
+	u8 res2[2];
+	u8 primary_status;
+	u8 ioerr_code;
+	u8 action_code;
+	u8 reason_code;
+	u8 explanation_code;
+	u8 vendor_specific_code;
+} __attribute__ ((packed));
+
+struct fsf_status_read_buffer {
+	u32 status_type;
+	u32 status_subtype;
+	u32 length;
+	u32 res1;
+	struct fsf_queue_designator queue_designator;
+	u8 res2;
+	u8 d_id[3];
+	u32 class;
+	u64 fcp_lun;
+	u8  res3[24];
+	union {
+		u8  data[FSF_STATUS_READ_PAYLOAD_SIZE];
+		u32 word[FSF_STATUS_READ_PAYLOAD_SIZE/sizeof(u32)];
+		struct fsf_link_down_info link_down_info;
+		struct fsf_bit_error_payload bit_error;
+	} payload;
+} __attribute__ ((packed));
+
+struct fsf_qual_version_error {
+	u32 fsf_version;
+	u32 res1[3];
+} __attribute__ ((packed));
+
+struct fsf_qual_sequence_error {
+	u32 exp_req_seq_no;
+	u32 res1[3];
+} __attribute__ ((packed));
+
+struct fsf_qual_latency_info {
+	u32 channel_lat;
+	u32 fabric_lat;
+	u8 res1[8];
+} __attribute__ ((packed));
+
+union fsf_prot_status_qual {
+	u32 word[FSF_PROT_STATUS_QUAL_SIZE / sizeof(u32)];
+	u64 doubleword[FSF_PROT_STATUS_QUAL_SIZE / sizeof(u64)];
+	struct fsf_qual_version_error   version_error;
+	struct fsf_qual_sequence_error  sequence_error;
+	struct fsf_link_down_info link_down_info;
+	struct fsf_qual_latency_info latency_info;
+} __attribute__ ((packed));
+
+struct fsf_qtcb_prefix {
+	u64 req_id;
+	u32 qtcb_version;
+	u32 ulp_info;
+	u32 qtcb_type;
+	u32 req_seq_no;
+	u32 prot_status;
+	union fsf_prot_status_qual prot_status_qual;
+	u8  res1[20];
+} __attribute__ ((packed));
+
+struct fsf_statistics_info {
+	u64 input_req;
+	u64 output_req;
+	u64 control_req;
+	u64 input_mb;
+	u64 output_mb;
+	u64 seconds_act;
+} __attribute__ ((packed));
+
+union fsf_status_qual {
+	u8  byte[FSF_STATUS_QUALIFIER_SIZE];
+	u16 halfword[FSF_STATUS_QUALIFIER_SIZE / sizeof (u16)];
+	u32 word[FSF_STATUS_QUALIFIER_SIZE / sizeof (u32)];
+	u64 doubleword[FSF_STATUS_QUALIFIER_SIZE / sizeof(u64)];
+	struct fsf_queue_designator fsf_queue_designator;
+	struct fsf_link_down_info link_down_info;
+} __attribute__ ((packed));
+
+struct fsf_qtcb_header {
+	u64 req_handle;
+	u32 fsf_command;
+	u32 res1;
+	u32 port_handle;
+	u32 lun_handle;
+	u32 res2;
+	u32 fsf_status;
+	union fsf_status_qual fsf_status_qual;
+	u8  res3[28];
+	u16 log_start;
+	u16 log_length;
+	u8  res4[16];
+} __attribute__ ((packed));
+
+#define FSF_PLOGI_MIN_LEN	112
+
+#define FSF_FCP_CMND_SIZE	288
+#define FSF_FCP_RSP_SIZE	128
+
+struct fsf_qtcb_bottom_io {
+	u32 data_direction;
+	u32 service_class;
+	u8  res1;
+	u8  data_prot_flags;
+	u16 app_tag_value;
+	u32 ref_tag_value;
+	u32 fcp_cmnd_length;
+	u32 data_block_length;
+	u32 prot_data_length;
+	u8  res2[4];
+	union {
+		u8		byte[FSF_FCP_CMND_SIZE];
+		struct fcp_cmnd iu;
+	}   fcp_cmnd;
+	union {
+		u8			 byte[FSF_FCP_RSP_SIZE];
+		struct fcp_resp_with_ext iu;
+	}   fcp_rsp;
+	u8  res3[64];
+} __attribute__ ((packed));
+
+struct fsf_qtcb_bottom_support {
+	u32 operation_subtype;
+	u8  res1[13];
+	u8 d_id[3];
+	u32 option;
+	u64 fcp_lun;
+	u64 res2;
+	u64 req_handle;
+	u32 service_class;
+	u8  res3[3];
+	u8  timeout;
+	u32 lun_access_info;
+	u8  res4[180];
+	u32 els1_length;
+	u32 els2_length;
+	u32 req_buf_length;
+	u32 resp_buf_length;
+	u8  els[256];
+} __attribute__ ((packed));
+
+#define ZFCP_FSF_TIMER_INT_MASK	0x3FFF
+
+struct fsf_qtcb_bottom_config {
+	u32 lic_version;
+	u32 feature_selection;
+	u32 high_qtcb_version;
+	u32 low_qtcb_version;
+	u32 max_qtcb_size;
+	u32 max_data_transfer_size;
+	u32 adapter_features;
+	u32 connection_features;
+	u32 fc_topology;
+	u32 fc_link_speed;	/* one of ZFCP_FSF_PORTSPEED_* */
+	u32 adapter_type;
+	u8 res0;
+	u8 peer_d_id[3];
+	u16 status_read_buf_num;
+	u16 timer_interval;
+	u8 res2[9];
+	u8 s_id[3];
+	u8 nport_serv_param[128];
+	u8 res3[8];
+	u32 adapter_ports;
+	u32 hardware_version;
+	u8 serial_number[32];
+	u8 plogi_payload[112];
+	struct fsf_statistics_info stat_info;
+	u8 res4[112];
+} __attribute__ ((packed));
+
+struct fsf_qtcb_bottom_port {
+	u64 wwpn;
+	u32 fc_port_id;
+	u32 port_type;
+	u32 port_state;
+	u32 class_of_service;	/* should be 0x00000006 for class 2 and 3 */
+	u8 supported_fc4_types[32]; /* should be 0x00000100 for scsi fcp */
+	u8 active_fc4_types[32];
+	u32 supported_speed;	/* any combination of ZFCP_FSF_PORTSPEED_* */
+	u32 maximum_frame_size;	/* fixed value of 2112 */
+	u64 seconds_since_last_reset;
+	u64 tx_frames;
+	u64 tx_words;
+	u64 rx_frames;
+	u64 rx_words;
+	u64 lip;		/* 0 */
+	u64 nos;		/* currently 0 */
+	u64 error_frames;	/* currently 0 */
+	u64 dumped_frames;	/* currently 0 */
+	u64 link_failure;
+	u64 loss_of_sync;
+	u64 loss_of_signal;
+	u64 psp_error_counts;
+	u64 invalid_tx_words;
+	u64 invalid_crcs;
+	u64 input_requests;
+	u64 output_requests;
+	u64 control_requests;
+	u64 input_mb;		/* where 1 MByte == 1,000,000 Bytes */
+	u64 output_mb;		/* where 1 MByte == 1,000,000 Bytes */
+	u8 cp_util;
+	u8 cb_util;
+	u8 a_util;
+	u8 res2[253];
+} __attribute__ ((packed));
+
+union fsf_qtcb_bottom {
+	struct fsf_qtcb_bottom_io      io;
+	struct fsf_qtcb_bottom_support support;
+	struct fsf_qtcb_bottom_config  config;
+	struct fsf_qtcb_bottom_port port;
+};
+
+struct fsf_qtcb {
+	struct fsf_qtcb_prefix prefix;
+	struct fsf_qtcb_header header;
+	union  fsf_qtcb_bottom bottom;
+	u8 log[FSF_QTCB_LOG_SIZE];
+} __attribute__ ((packed));
+
+struct zfcp_blk_drv_data {
+#define ZFCP_BLK_DRV_DATA_MAGIC			0x1
+	u32 magic;
+#define ZFCP_BLK_LAT_VALID			0x1
+#define ZFCP_BLK_REQ_ERROR			0x2
+	u16 flags;
+	u8 inb_usage;
+	u8 outb_usage;
+	u64 channel_lat;
+	u64 fabric_lat;
+} __attribute__ ((packed));
+
+/**
+ * struct zfcp_fsf_ct_els - zfcp data for ct or els request
+ * @req: scatter-gather list for request
+ * @resp: scatter-gather list for response
+ * @handler: handler function (called for response to the request)
+ * @handler_data: data passed to handler function
+ * @port: Optional pointer to port for zfcp internal ELS (only test link ADISC)
+ * @status: used to pass error status to calling function
+ * @d_id: Destination ID of the open WKA port for CT, or the D_ID for ELS
+ */
+struct zfcp_fsf_ct_els {
+	struct scatterlist *req;
+	struct scatterlist *resp;
+	void (*handler)(void *);
+	void *handler_data;
+	struct zfcp_port *port;
+	int status;
+	u32 d_id;
+};
+
+#endif				/* FSF_H */
diff --git a/drivers/s390/scsi/zfcp_qdio.c b/drivers/s390/scsi/zfcp_qdio.c
new file mode 100644
index 0000000..4ab02e8
--- /dev/null
+++ b/drivers/s390/scsi/zfcp_qdio.c
@@ -0,0 +1,505 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * zfcp device driver
+ *
+ * Setup and helper functions to access QDIO.
+ *
+ * Copyright IBM Corp. 2002, 2010
+ */
+
+#define KMSG_COMPONENT "zfcp"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <linux/slab.h>
+#include <linux/module.h>
+#include "zfcp_ext.h"
+#include "zfcp_qdio.h"
+
+static bool enable_multibuffer = true;
+module_param_named(datarouter, enable_multibuffer, bool, 0400);
+MODULE_PARM_DESC(datarouter, "Enable hardware data router support (default on)");
+
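+/* shut down or reopen the adapter after QDIO reported a queue error */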
+static void zfcp_qdio_handler_error(struct zfcp_qdio *qdio, char *id,
+				    unsigned int qdio_err)
+{
+	struct zfcp_adapter *adapter = qdio->adapter;
+
+	dev_warn(&adapter->ccw_device->dev, "A QDIO problem occurred\n");
+
+	if (qdio_err & QDIO_ERROR_SLSB_STATE) {
+		zfcp_qdio_siosl(adapter);
+		zfcp_erp_adapter_shutdown(adapter, 0, id);
+		return;
+	}
+	zfcp_erp_adapter_reopen(adapter,
+				ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED |
+				ZFCP_STATUS_COMMON_ERP_FAILED, id);
+}
+
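+/* clear a (possibly wrapping) range of SBALs so they can be reused */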
+static void zfcp_qdio_zero_sbals(struct qdio_buffer *sbal[], int first, int cnt)
+{
+	int i, sbal_idx;
+
+	for (i = first; i < first + cnt; i++) {
+		sbal_idx = i % QDIO_MAX_BUFFERS_PER_Q;
+		memset(sbal[sbal_idx], 0, sizeof(struct qdio_buffer));
+	}
+}
+
+/* this needs to be called prior to updating the queue fill level */
+static inline void zfcp_qdio_account(struct zfcp_qdio *qdio)
+{
+	unsigned long long now, span;
+	int used;
+
+	now = get_tod_clock_monotonic();
+	span = (now - qdio->req_q_time) >> 12;
+	used = QDIO_MAX_BUFFERS_PER_Q - atomic_read(&qdio->req_q_free);
+	qdio->req_q_util += used * span;
+	qdio->req_q_time = now;
+}
+
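+/* QDIO completion handler for the outbound (request) queue */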
+static void zfcp_qdio_int_req(struct ccw_device *cdev, unsigned int qdio_err,
+			      int queue_no, int idx, int count,
+			      unsigned long parm)
+{
+	struct zfcp_qdio *qdio = (struct zfcp_qdio *) parm;
+
+	if (unlikely(qdio_err)) {
+		zfcp_qdio_handler_error(qdio, "qdireq1", qdio_err);
+		return;
+	}
+
+	/* cleanup all SBALs being program-owned now */
+	zfcp_qdio_zero_sbals(qdio->req_q, idx, count);
+
+	spin_lock_irq(&qdio->stat_lock);
+	zfcp_qdio_account(qdio);
+	spin_unlock_irq(&qdio->stat_lock);
+	atomic_add(count, &qdio->req_q_free);
+	wake_up(&qdio->req_q_wq);
+}
+
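+/* QDIO completion handler for the inbound (response) queue */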
+static void zfcp_qdio_int_resp(struct ccw_device *cdev, unsigned int qdio_err,
+			       int queue_no, int idx, int count,
+			       unsigned long parm)
+{
+	struct zfcp_qdio *qdio = (struct zfcp_qdio *) parm;
+	struct zfcp_adapter *adapter = qdio->adapter;
+	int sbal_no, sbal_idx;
+
+	if (unlikely(qdio_err)) {
+		if (zfcp_adapter_multi_buffer_active(adapter)) {
+			void *pl[ZFCP_QDIO_MAX_SBALS_PER_REQ + 1];
+			struct qdio_buffer_element *sbale;
+			u64 req_id;
+			u8 scount;
+
+			memset(pl, 0, sizeof(pl));
+			sbale = qdio->res_q[idx]->element;
+			req_id = (u64) sbale->addr;
+			scount = min(sbale->scount + 1,
+				     ZFCP_QDIO_MAX_SBALS_PER_REQ + 1);
+				     /* incl. signaling SBAL */
+
+			for (sbal_no = 0; sbal_no < scount; sbal_no++) {
+				sbal_idx = (idx + sbal_no) %
+					QDIO_MAX_BUFFERS_PER_Q;
+				pl[sbal_no] = qdio->res_q[sbal_idx];
+			}
+			zfcp_dbf_hba_def_err(adapter, req_id, scount, pl);
+		}
+		zfcp_qdio_handler_error(qdio, "qdires1", qdio_err);
+		return;
+	}
+
+	/*
+	 * go through all SBALs from input queue currently
+	 * returned by QDIO layer
+	 */
+	for (sbal_no = 0; sbal_no < count; sbal_no++) {
+		sbal_idx = (idx + sbal_no) % QDIO_MAX_BUFFERS_PER_Q;
+		/* go through all SBALEs of SBAL */
+		zfcp_fsf_reqid_check(qdio, sbal_idx);
+	}
+
+	/*
+	 * put SBALs back to response queue
+	 */
+	if (do_QDIO(cdev, QDIO_FLAG_SYNC_INPUT, 0, idx, count))
+		zfcp_erp_adapter_reopen(qdio->adapter, 0, "qdires2");
+}
+
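+/*
+ * Close the current SBAL and chain to the next SBAL of the request,
+ * unless the request already occupies its last allowed SBAL.
+ */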
+static struct qdio_buffer_element *
+zfcp_qdio_sbal_chain(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req)
+{
+	struct qdio_buffer_element *sbale;
+
+	/* set last entry flag in current SBALE of current SBAL */
+	sbale = zfcp_qdio_sbale_curr(qdio, q_req);
+	sbale->eflags |= SBAL_EFLAGS_LAST_ENTRY;
+
+	/* don't exceed last allowed SBAL */
+	if (q_req->sbal_last == q_req->sbal_limit)
+		return NULL;
+
+	/* set chaining flag in first SBALE of current SBAL */
+	sbale = zfcp_qdio_sbale_req(qdio, q_req);
+	sbale->sflags |= SBAL_SFLAGS0_MORE_SBALS;
+
+	/* calculate index of next SBAL */
+	q_req->sbal_last++;
+	q_req->sbal_last %= QDIO_MAX_BUFFERS_PER_Q;
+
+	/* keep this requests number of SBALs up-to-date */
+	q_req->sbal_number++;
+	BUG_ON(q_req->sbal_number > ZFCP_QDIO_MAX_SBALS_PER_REQ);
+
+	/* start at first SBALE of new SBAL */
+	q_req->sbale_curr = 0;
+
+	/* set storage-block type for new SBAL */
+	sbale = zfcp_qdio_sbale_curr(qdio, q_req);
+	sbale->sflags |= q_req->sbtype;
+
+	return sbale;
+}
+
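+/* advance to the next SBALE, chaining into a new SBAL if necessary */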
+static struct qdio_buffer_element *
+zfcp_qdio_sbale_next(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req)
+{
+	if (q_req->sbale_curr == qdio->max_sbale_per_sbal - 1)
+		return zfcp_qdio_sbal_chain(qdio, q_req);
+	q_req->sbale_curr++;
+	return zfcp_qdio_sbale_curr(qdio, q_req);
+}
+
+/**
+ * zfcp_qdio_sbals_from_sg - fill SBALs from scatter-gather list
+ * @qdio: pointer to struct zfcp_qdio
+ * @q_req: pointer to struct zfcp_qdio_req
+ * @sg: scatter-gather list
+ * Returns: zero or -EINVAL on error
+ */
+int zfcp_qdio_sbals_from_sg(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req,
+			    struct scatterlist *sg)
+{
+	struct qdio_buffer_element *sbale;
+
+	/* set storage-block type for this request */
+	sbale = zfcp_qdio_sbale_req(qdio, q_req);
+	sbale->sflags |= q_req->sbtype;
+
+	for (; sg; sg = sg_next(sg)) {
+		sbale = zfcp_qdio_sbale_next(qdio, q_req);
+		if (!sbale) {
+			atomic_inc(&qdio->req_q_full);
+			zfcp_qdio_zero_sbals(qdio->req_q, q_req->sbal_first,
+					     q_req->sbal_number);
+			return -EINVAL;
+		}
+		sbale->addr = sg_virt(sg);
+		sbale->length = sg->length;
+	}
+	return 0;
+}
+
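+/* wait condition for zfcp_qdio_sbal_get: free SBAL available or QDIO down */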
+static int zfcp_qdio_sbal_check(struct zfcp_qdio *qdio)
+{
+	if (atomic_read(&qdio->req_q_free) ||
+	    !(atomic_read(&qdio->adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP))
+		return 1;
+	return 0;
+}
+
+/**
+ * zfcp_qdio_sbal_get - get free sbal in request queue, wait if necessary
+ * @qdio: pointer to struct zfcp_qdio
+ *
+ * The req_q_lock must be held by the caller of this function, and
+ * this function may only be called from process context; it will
+ * sleep when waiting for a free sbal.
+ *
+ * Returns: 0 on success, -EIO if there is no free sbal after waiting.
+ */
+int zfcp_qdio_sbal_get(struct zfcp_qdio *qdio)
+{
+	long ret;
+
+	ret = wait_event_interruptible_lock_irq_timeout(qdio->req_q_wq,
+		       zfcp_qdio_sbal_check(qdio), qdio->req_q_lock, 5 * HZ);
+
+	if (!(atomic_read(&qdio->adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP))
+		return -EIO;
+
+	if (ret > 0)
+		return 0;
+
+	if (!ret) {
+		atomic_inc(&qdio->req_q_full);
+		/* assume hanging outbound queue, try queue recovery */
+		zfcp_erp_adapter_reopen(qdio->adapter, 0, "qdsbg_1");
+	}
+
+	return -EIO;
+}
+
+/**
+ * zfcp_qdio_send - set PCI flag in first SBALE and send req to QDIO
+ * @qdio: pointer to struct zfcp_qdio
+ * @q_req: pointer to struct zfcp_qdio_req
+ * Returns: 0 on success, error otherwise
+ */
+int zfcp_qdio_send(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req)
+{
+	int retval;
+	u8 sbal_number = q_req->sbal_number;
+
+	spin_lock(&qdio->stat_lock);
+	zfcp_qdio_account(qdio);
+	spin_unlock(&qdio->stat_lock);
+
+	retval = do_QDIO(qdio->adapter->ccw_device, QDIO_FLAG_SYNC_OUTPUT, 0,
+			 q_req->sbal_first, sbal_number);
+
+	if (unlikely(retval)) {
+		zfcp_qdio_zero_sbals(qdio->req_q, q_req->sbal_first,
+				     sbal_number);
+		return retval;
+	}
+
+	/* account for transferred buffers */
+	atomic_sub(sbal_number, &qdio->req_q_free);
+	qdio->req_q_idx += sbal_number;
+	qdio->req_q_idx %= QDIO_MAX_BUFFERS_PER_Q;
+
+	return 0;
+}
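+
+/*
+ * Illustrative sketch (not part of the driver): the canonical request
+ * submission sequence built from the helpers above. my_req_id and my_sg
+ * are placeholders and error handling is trimmed; the req_q_lock must be
+ * held from zfcp_qdio_req_init() until zfcp_qdio_send():
+ *
+ *	spin_lock_irq(&qdio->req_q_lock);
+ *	if (zfcp_qdio_sbal_get(qdio))
+ *		goto out;			(no free SBAL)
+ *	zfcp_qdio_req_init(qdio, &q_req, my_req_id,
+ *			   SBAL_SFLAGS0_TYPE_WRITE, NULL, 0);
+ *	if (zfcp_qdio_sbals_from_sg(qdio, &q_req, my_sg))
+ *		goto out;			(request exceeds sbal_limit)
+ *	zfcp_qdio_set_sbale_last(qdio, &q_req);
+ *	retval = zfcp_qdio_send(qdio, &q_req);
+ * out:
+ *	spin_unlock_irq(&qdio->req_q_lock);
+ */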
+
+static void zfcp_qdio_setup_init_data(struct qdio_initialize *id,
+				      struct zfcp_qdio *qdio)
+{
+	memset(id, 0, sizeof(*id));
+	id->cdev = qdio->adapter->ccw_device;
+	id->q_format = QDIO_ZFCP_QFMT;
+	memcpy(id->adapter_name, dev_name(&id->cdev->dev), 8);
+	ASCEBC(id->adapter_name, 8);
+	id->qib_rflags = QIB_RFLAGS_ENABLE_DATA_DIV;
+	if (enable_multibuffer)
+		id->qdr_ac |= QDR_AC_MULTI_BUFFER_ENABLE;
+	id->no_input_qs = 1;
+	id->no_output_qs = 1;
+	id->input_handler = zfcp_qdio_int_resp;
+	id->output_handler = zfcp_qdio_int_req;
+	id->int_parm = (unsigned long) qdio;
+	id->input_sbal_addr_array = (void **) (qdio->res_q);
+	id->output_sbal_addr_array = (void **) (qdio->req_q);
+	id->scan_threshold =
+		QDIO_MAX_BUFFERS_PER_Q - ZFCP_QDIO_MAX_SBALS_PER_REQ * 2;
+}
+
+/**
+ * zfcp_qdio_allocate - allocate queue memory and initialize QDIO data
+ * @qdio: pointer to struct zfcp_qdio
+ * Returns: -ENOMEM on memory allocation error or return value from
+ *          qdio_allocate
+ */
+static int zfcp_qdio_allocate(struct zfcp_qdio *qdio)
+{
+	struct qdio_initialize init_data;
+	int ret;
+
+	ret = qdio_alloc_buffers(qdio->req_q, QDIO_MAX_BUFFERS_PER_Q);
+	if (ret)
+		return -ENOMEM;
+
+	ret = qdio_alloc_buffers(qdio->res_q, QDIO_MAX_BUFFERS_PER_Q);
+	if (ret)
+		goto free_req_q;
+
+	zfcp_qdio_setup_init_data(&init_data, qdio);
+	init_waitqueue_head(&qdio->req_q_wq);
+
+	ret = qdio_allocate(&init_data);
+	if (ret)
+		goto free_res_q;
+
+	return 0;
+
+free_res_q:
+	qdio_free_buffers(qdio->res_q, QDIO_MAX_BUFFERS_PER_Q);
+free_req_q:
+	qdio_free_buffers(qdio->req_q, QDIO_MAX_BUFFERS_PER_Q);
+	return ret;
+}
+
+/**
+ * zfcp_qdio_close - close qdio queues for an adapter
+ * @qdio: pointer to structure zfcp_qdio
+ */
+void zfcp_qdio_close(struct zfcp_qdio *qdio)
+{
+	struct zfcp_adapter *adapter = qdio->adapter;
+	int idx, count;
+
+	if (!(atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP))
+		return;
+
+	/* clear QDIOUP flag, thus do_QDIO is not called during qdio_shutdown */
+	spin_lock_irq(&qdio->req_q_lock);
+	atomic_andnot(ZFCP_STATUS_ADAPTER_QDIOUP, &adapter->status);
+	spin_unlock_irq(&qdio->req_q_lock);
+
+	wake_up(&qdio->req_q_wq);
+
+	qdio_shutdown(adapter->ccw_device, QDIO_FLAG_CLEANUP_USING_CLEAR);
+
+	/* cleanup used outbound sbals */
+	count = atomic_read(&qdio->req_q_free);
+	if (count < QDIO_MAX_BUFFERS_PER_Q) {
+		idx = (qdio->req_q_idx + count) % QDIO_MAX_BUFFERS_PER_Q;
+		count = QDIO_MAX_BUFFERS_PER_Q - count;
+		zfcp_qdio_zero_sbals(qdio->req_q, idx, count);
+	}
+	qdio->req_q_idx = 0;
+	atomic_set(&qdio->req_q_free, 0);
+}
+
+/**
+ * zfcp_qdio_open - prepare and initialize response queue
+ * @qdio: pointer to struct zfcp_qdio
+ * Returns: 0 on success, otherwise -EIO
+ */
+int zfcp_qdio_open(struct zfcp_qdio *qdio)
+{
+	struct qdio_buffer_element *sbale;
+	struct qdio_initialize init_data;
+	struct zfcp_adapter *adapter = qdio->adapter;
+	struct ccw_device *cdev = adapter->ccw_device;
+	struct qdio_ssqd_desc ssqd;
+	int cc;
+
+	if (atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP)
+		return -EIO;
+
+	atomic_andnot(ZFCP_STATUS_ADAPTER_SIOSL_ISSUED,
+			  &qdio->adapter->status);
+
+	zfcp_qdio_setup_init_data(&init_data, qdio);
+
+	if (qdio_establish(&init_data))
+		goto failed_establish;
+
+	if (qdio_get_ssqd_desc(init_data.cdev, &ssqd))
+		goto failed_qdio;
+
+	if (ssqd.qdioac2 & CHSC_AC2_DATA_DIV_ENABLED)
+		atomic_or(ZFCP_STATUS_ADAPTER_DATA_DIV_ENABLED,
+				&qdio->adapter->status);
+
+	if (ssqd.qdioac2 & CHSC_AC2_MULTI_BUFFER_ENABLED) {
+		atomic_or(ZFCP_STATUS_ADAPTER_MB_ACT, &adapter->status);
+		qdio->max_sbale_per_sbal = QDIO_MAX_ELEMENTS_PER_BUFFER;
+	} else {
+		atomic_andnot(ZFCP_STATUS_ADAPTER_MB_ACT, &adapter->status);
+		qdio->max_sbale_per_sbal = QDIO_MAX_ELEMENTS_PER_BUFFER - 1;
+	}
+
+	qdio->max_sbale_per_req =
+		ZFCP_QDIO_MAX_SBALS_PER_REQ * qdio->max_sbale_per_sbal
+		- 2;
+	if (qdio_activate(cdev))
+		goto failed_qdio;
+
+	for (cc = 0; cc < QDIO_MAX_BUFFERS_PER_Q; cc++) {
+		sbale = &(qdio->res_q[cc]->element[0]);
+		sbale->length = 0;
+		sbale->eflags = SBAL_EFLAGS_LAST_ENTRY;
+		sbale->sflags = 0;
+		sbale->addr = NULL;
+	}
+
+	if (do_QDIO(cdev, QDIO_FLAG_SYNC_INPUT, 0, 0, QDIO_MAX_BUFFERS_PER_Q))
+		goto failed_qdio;
+
+	/* set index of first available SBAL / number of available SBALs */
+	qdio->req_q_idx = 0;
+	atomic_set(&qdio->req_q_free, QDIO_MAX_BUFFERS_PER_Q);
+	atomic_or(ZFCP_STATUS_ADAPTER_QDIOUP, &qdio->adapter->status);
+
+	if (adapter->scsi_host) {
+		adapter->scsi_host->sg_tablesize = qdio->max_sbale_per_req;
+		adapter->scsi_host->max_sectors = qdio->max_sbale_per_req * 8;
+	}
+
+	return 0;
+
+failed_qdio:
+	qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
+failed_establish:
+	dev_err(&cdev->dev,
+		"Setting up the QDIO connection to the FCP adapter failed\n");
+	return -EIO;
+}
+
+void zfcp_qdio_destroy(struct zfcp_qdio *qdio)
+{
+	if (!qdio)
+		return;
+
+	if (qdio->adapter->ccw_device)
+		qdio_free(qdio->adapter->ccw_device);
+
+	qdio_free_buffers(qdio->req_q, QDIO_MAX_BUFFERS_PER_Q);
+	qdio_free_buffers(qdio->res_q, QDIO_MAX_BUFFERS_PER_Q);
+	kfree(qdio);
+}
+
+int zfcp_qdio_setup(struct zfcp_adapter *adapter)
+{
+	struct zfcp_qdio *qdio;
+
+	qdio = kzalloc(sizeof(struct zfcp_qdio), GFP_KERNEL);
+	if (!qdio)
+		return -ENOMEM;
+
+	qdio->adapter = adapter;
+
+	if (zfcp_qdio_allocate(qdio)) {
+		kfree(qdio);
+		return -ENOMEM;
+	}
+
+	spin_lock_init(&qdio->req_q_lock);
+	spin_lock_init(&qdio->stat_lock);
+
+	adapter->qdio = qdio;
+	return 0;
+}
+
+/**
+ * zfcp_qdio_siosl - Trigger logging in FCP channel
+ * @adapter: The zfcp_adapter where to trigger logging
+ *
+ * Call the cio siosl function to trigger hardware logging.  This
+ * wrapper function sets a flag to ensure hardware logging is only
+ * triggered once before going through qdio shutdown.
+ *
+ * The triggers are always run from qdio tasklet context, so no
+ * additional synchronization is necessary.
+ */
+void zfcp_qdio_siosl(struct zfcp_adapter *adapter)
+{
+	int rc;
+
+	if (atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_SIOSL_ISSUED)
+		return;
+
+	rc = ccw_device_siosl(adapter->ccw_device);
+	if (!rc)
+		atomic_or(ZFCP_STATUS_ADAPTER_SIOSL_ISSUED,
+				&adapter->status);
+}
diff --git a/drivers/s390/scsi/zfcp_qdio.h b/drivers/s390/scsi/zfcp_qdio.h
new file mode 100644
index 0000000..886c662
--- /dev/null
+++ b/drivers/s390/scsi/zfcp_qdio.h
@@ -0,0 +1,255 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * zfcp device driver
+ *
+ * Header file for zfcp qdio interface
+ *
+ * Copyright IBM Corp. 2010
+ */
+
+#ifndef ZFCP_QDIO_H
+#define ZFCP_QDIO_H
+
+#include <asm/qdio.h>
+
+#define ZFCP_QDIO_SBALE_LEN	PAGE_SIZE
+
+/* Max SBALs for chaining */
+#define ZFCP_QDIO_MAX_SBALS_PER_REQ	36
+
+/**
+ * struct zfcp_qdio - basic qdio data structure
+ * @res_q: response queue
+ * @req_q: request queue
+ * @req_q_idx: index of next free buffer
+ * @req_q_free: number of free buffers in queue
+ * @stat_lock: lock to protect req_q_util and req_q_time
+ * @req_q_lock: lock to serialize access to request queue
+ * @req_q_time: time of last fill level change
+ * @req_q_util: used for accounting
+ * @req_q_full: queue full incidents
+ * @req_q_wq: used to wait for SBAL availability
+ * @adapter: adapter used in conjunction with this qdio structure
+ * @max_sbale_per_sbal: qdio limit per sbal
+ * @max_sbale_per_req: qdio limit per request
+ */
+struct zfcp_qdio {
+	struct qdio_buffer	*res_q[QDIO_MAX_BUFFERS_PER_Q];
+	struct qdio_buffer	*req_q[QDIO_MAX_BUFFERS_PER_Q];
+	u8			req_q_idx;
+	atomic_t		req_q_free;
+	spinlock_t		stat_lock;
+	spinlock_t		req_q_lock;
+	unsigned long long	req_q_time;
+	u64			req_q_util;
+	atomic_t		req_q_full;
+	wait_queue_head_t	req_q_wq;
+	struct zfcp_adapter	*adapter;
+	u16			max_sbale_per_sbal;
+	u16			max_sbale_per_req;
+};
+
+/**
+ * struct zfcp_qdio_req - qdio queue related values for a request
+ * @sbtype: sbal type flags for sbale 0
+ * @sbal_number: number of sbals used by this request
+ * @sbal_first: first sbal for this request
+ * @sbal_last: last sbal for this request
+ * @sbal_limit: last possible sbal for this request
+ * @sbale_curr: index of the current sbale within the current sbal
+ * @qdio_outb_usage: usage of outbound queue
+ */
+struct zfcp_qdio_req {
+	u8	sbtype;
+	u8	sbal_number;
+	u8	sbal_first;
+	u8	sbal_last;
+	u8	sbal_limit;
+	u8	sbale_curr;
+	u16	qdio_outb_usage;
+};
+
+/**
+ * zfcp_qdio_sbale_req - return pointer to sbale on req_q for a request
+ * @qdio: pointer to struct zfcp_qdio
+ * @q_req: pointer to struct zfcp_qdio_req
+ * Returns: pointer to qdio_buffer_element (sbale) structure
+ */
+static inline struct qdio_buffer_element *
+zfcp_qdio_sbale_req(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req)
+{
+	return &qdio->req_q[q_req->sbal_last]->element[0];
+}
+
+/**
+ * zfcp_qdio_sbale_curr - return current sbale on req_q for a request
+ * @qdio: pointer to struct zfcp_qdio
+ * @q_req: pointer to struct zfcp_qdio_req
+ * Returns: pointer to qdio_buffer_element (sbale) structure
+ */
+static inline struct qdio_buffer_element *
+zfcp_qdio_sbale_curr(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req)
+{
+	return &qdio->req_q[q_req->sbal_last]->element[q_req->sbale_curr];
+}
+
+/**
+ * zfcp_qdio_req_init - initialize qdio request
+ * @qdio: request queue where to start putting the request
+ * @q_req: the qdio request to start
+ * @req_id: The request id
+ * @sbtype: type flags to set for all sbals
+ * @data: First data block
+ * @len: Length of first data block
+ *
+ * This is the start of putting the request into the queue; the last
+ * step is passing the request to zfcp_qdio_send. The request queue
+ * lock must be held during the whole process from init to send.
+ */
+static inline
+void zfcp_qdio_req_init(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req,
+			unsigned long req_id, u8 sbtype, void *data, u32 len)
+{
+	struct qdio_buffer_element *sbale;
+	int count = min(atomic_read(&qdio->req_q_free),
+			ZFCP_QDIO_MAX_SBALS_PER_REQ);
+
+	q_req->sbal_first = q_req->sbal_last = qdio->req_q_idx;
+	q_req->sbal_number = 1;
+	q_req->sbtype = sbtype;
+	q_req->sbale_curr = 1;
+	q_req->sbal_limit = (q_req->sbal_first + count - 1)
+					% QDIO_MAX_BUFFERS_PER_Q;
+
+	sbale = zfcp_qdio_sbale_req(qdio, q_req);
+	sbale->addr = (void *) req_id;
+	sbale->eflags = 0;
+	sbale->sflags = SBAL_SFLAGS0_COMMAND | sbtype;
+
+	if (unlikely(!data))
+		return;
+	sbale++;
+	sbale->addr = data;
+	sbale->length = len;
+}
+
+/**
+ * zfcp_qdio_fill_next - Fill next sbale, only for single sbal requests
+ * @qdio: pointer to struct zfcp_qdio
+ * @q_req: pointer to struct zfcp_qdio_req
+ * @data: pointer to the data block to fill in
+ * @len: length of the data block
+ *
+ * This is only required for single sbal requests; calling it when
+ * wrapping around to the next sbal is a bug.
+ */
+static inline
+void zfcp_qdio_fill_next(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req,
+			 void *data, u32 len)
+{
+	struct qdio_buffer_element *sbale;
+
+	BUG_ON(q_req->sbale_curr == qdio->max_sbale_per_sbal - 1);
+	q_req->sbale_curr++;
+	sbale = zfcp_qdio_sbale_curr(qdio, q_req);
+	sbale->addr = data;
+	sbale->length = len;
+}
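+
+/*
+ * Illustrative sketch (not compiled): a request that fits into a single
+ * sbal, with cmd/cmd_len and data/data_len as placeholders. The caller
+ * holds the req_q_lock as required by zfcp_qdio_req_init:
+ *
+ *	zfcp_qdio_req_init(qdio, &q_req, req_id, SBAL_SFLAGS0_TYPE_WRITE,
+ *			   cmd, cmd_len);
+ *	zfcp_qdio_fill_next(qdio, &q_req, data, data_len);
+ *	zfcp_qdio_set_sbale_last(qdio, &q_req);
+ */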
+
+/**
+ * zfcp_qdio_set_sbale_last - set last entry flag in current sbale
+ * @qdio: pointer to struct zfcp_qdio
+ * @q_req: pointer to struct zfcp_qdio_req
+ */
+static inline
+void zfcp_qdio_set_sbale_last(struct zfcp_qdio *qdio,
+			      struct zfcp_qdio_req *q_req)
+{
+	struct qdio_buffer_element *sbale;
+
+	sbale = zfcp_qdio_sbale_curr(qdio, q_req);
+	sbale->eflags |= SBAL_EFLAGS_LAST_ENTRY;
+}
+
+/**
+ * zfcp_qdio_sg_one_sbal - check if one sbale is enough for sg data
+ * @sg: The scatterlist where to check the data size
+ *
+ * Returns: 1 when one sbale is enough for the data in the scatterlist,
+ *	    0 if not.
+ */
+static inline
+int zfcp_qdio_sg_one_sbale(struct scatterlist *sg)
+{
+	return sg_is_last(sg) && sg->length <= ZFCP_QDIO_SBALE_LEN;
+}
+
+/**
+ * zfcp_qdio_skip_to_last_sbale - skip to last sbale in sbal
+ * @qdio: pointer to struct zfcp_qdio
+ * @q_req: The current zfcp_qdio_req
+ */
+static inline
+void zfcp_qdio_skip_to_last_sbale(struct zfcp_qdio *qdio,
+				  struct zfcp_qdio_req *q_req)
+{
+	q_req->sbale_curr = qdio->max_sbale_per_sbal - 1;
+}
+
+/**
+ * zfcp_qdio_sbal_limit - set the sbal limit for a request in q_req
+ * @qdio: pointer to struct zfcp_qdio
+ * @q_req: The current zfcp_qdio_req
+ * @max_sbals: maximum number of SBALs allowed
+ */
+static inline
+void zfcp_qdio_sbal_limit(struct zfcp_qdio *qdio,
+			  struct zfcp_qdio_req *q_req, int max_sbals)
+{
+	int count = min(atomic_read(&qdio->req_q_free), max_sbals);
+
+	q_req->sbal_limit = (q_req->sbal_first + count - 1) %
+				QDIO_MAX_BUFFERS_PER_Q;
+}
+
+/**
+ * zfcp_qdio_set_data_div - set data division count
+ * @qdio: pointer to struct zfcp_qdio
+ * @q_req: The current zfcp_qdio_req
+ * @count: The data division count
+ */
+static inline
+void zfcp_qdio_set_data_div(struct zfcp_qdio *qdio,
+			    struct zfcp_qdio_req *q_req, u32 count)
+{
+	struct qdio_buffer_element *sbale;
+
+	sbale = qdio->req_q[q_req->sbal_first]->element;
+	sbale->length = count;
+}
+
+/**
+ * zfcp_qdio_real_bytes - count bytes used
+ * @sg: pointer to struct scatterlist
+ * Returns: accumulated number of bytes in all scatterlist segments
+ */
+static inline
+unsigned int zfcp_qdio_real_bytes(struct scatterlist *sg)
+{
+	unsigned int real_bytes = 0;
+
+	for (; sg; sg = sg_next(sg))
+		real_bytes += sg->length;
+
+	return real_bytes;
+}
+
+/**
+ * zfcp_qdio_set_scount - set SBAL count value
+ * @qdio: pointer to struct zfcp_qdio
+ * @q_req: The current zfcp_qdio_req
+ */
+static inline
+void zfcp_qdio_set_scount(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req)
+{
+	struct qdio_buffer_element *sbale;
+
+	sbale = qdio->req_q[q_req->sbal_first]->element;
+	sbale->scount = q_req->sbal_number - 1;
+}
+
+#endif /* ZFCP_QDIO_H */
diff --git a/drivers/s390/scsi/zfcp_reqlist.h b/drivers/s390/scsi/zfcp_reqlist.h
new file mode 100644
index 0000000..59a943c
--- /dev/null
+++ b/drivers/s390/scsi/zfcp_reqlist.h
@@ -0,0 +1,212 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * zfcp device driver
+ *
+ * Data structure and helper functions for tracking pending FSF
+ * requests.
+ *
+ * Copyright IBM Corp. 2009, 2016
+ */
+
+#ifndef ZFCP_REQLIST_H
+#define ZFCP_REQLIST_H
+
+/* number of hash buckets */
+#define ZFCP_REQ_LIST_BUCKETS 128
+
+/**
+ * struct zfcp_reqlist - Container for request list (reqlist)
+ * @lock: Spinlock for protecting the hash list
+ * @buckets: Array of hashbuckets, each is a list of requests in this bucket
+ */
+struct zfcp_reqlist {
+	spinlock_t lock;
+	struct list_head buckets[ZFCP_REQ_LIST_BUCKETS];
+};
+
+static inline int zfcp_reqlist_hash(unsigned long req_id)
+{
+	return req_id % ZFCP_REQ_LIST_BUCKETS;
+}
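+
+/* example: with 128 buckets, request id 0x182 hashes to bucket 0x182 % 128 == 2 */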
+
+/**
+ * zfcp_reqlist_alloc - Allocate and initialize reqlist
+ *
+ * Returns pointer to allocated reqlist on success, or NULL on
+ * allocation failure.
+ */
+static inline struct zfcp_reqlist *zfcp_reqlist_alloc(void)
+{
+	unsigned int i;
+	struct zfcp_reqlist *rl;
+
+	rl = kzalloc(sizeof(struct zfcp_reqlist), GFP_KERNEL);
+	if (!rl)
+		return NULL;
+
+	spin_lock_init(&rl->lock);
+
+	for (i = 0; i < ZFCP_REQ_LIST_BUCKETS; i++)
+		INIT_LIST_HEAD(&rl->buckets[i]);
+
+	return rl;
+}
+
+/**
+ * zfcp_reqlist_isempty - Check whether the request list is empty
+ * @rl: pointer to reqlist
+ *
+ * Returns: 1 if list is empty, 0 if not
+ */
+static inline int zfcp_reqlist_isempty(struct zfcp_reqlist *rl)
+{
+	unsigned int i;
+
+	for (i = 0; i < ZFCP_REQ_LIST_BUCKETS; i++)
+		if (!list_empty(&rl->buckets[i]))
+			return 0;
+	return 1;
+}
+
+/**
+ * zfcp_reqlist_free - Free allocated memory for reqlist
+ * @rl: The reqlist where to free memory
+ */
+static inline void zfcp_reqlist_free(struct zfcp_reqlist *rl)
+{
+	/* sanity check */
+	BUG_ON(!zfcp_reqlist_isempty(rl));
+
+	kfree(rl);
+}
+
+static inline struct zfcp_fsf_req *
+_zfcp_reqlist_find(struct zfcp_reqlist *rl, unsigned long req_id)
+{
+	struct zfcp_fsf_req *req;
+	unsigned int i;
+
+	i = zfcp_reqlist_hash(req_id);
+	list_for_each_entry(req, &rl->buckets[i], list)
+		if (req->req_id == req_id)
+			return req;
+	return NULL;
+}
+
+/**
+ * zfcp_reqlist_find - Lookup FSF request by its request id
+ * @rl: The reqlist where to lookup the FSF request
+ * @req_id: The request id to look for
+ *
+ * Returns a pointer to the FSF request with the specified request id
+ * or NULL if there is no known FSF request with this id.
+ */
+static inline struct zfcp_fsf_req *
+zfcp_reqlist_find(struct zfcp_reqlist *rl, unsigned long req_id)
+{
+	unsigned long flags;
+	struct zfcp_fsf_req *req;
+
+	spin_lock_irqsave(&rl->lock, flags);
+	req = _zfcp_reqlist_find(rl, req_id);
+	spin_unlock_irqrestore(&rl->lock, flags);
+
+	return req;
+}
+
+/**
+ * zfcp_reqlist_find_rm - Lookup request by id and remove it from reqlist
+ * @rl: reqlist where to search and remove entry
+ * @req_id: The request id of the request to look for
+ *
+ * This function tries to find the FSF request with the specified
+ * id and then removes it from the reqlist. The reqlist lock is held
+ * during both steps of the operation.
+ *
+ * Returns: Pointer to the FSF request if the request has been found,
+ * NULL if it has not been found.
+ */
+static inline struct zfcp_fsf_req *
+zfcp_reqlist_find_rm(struct zfcp_reqlist *rl, unsigned long req_id)
+{
+	unsigned long flags;
+	struct zfcp_fsf_req *req;
+
+	spin_lock_irqsave(&rl->lock, flags);
+	req = _zfcp_reqlist_find(rl, req_id);
+	if (req)
+		list_del(&req->list);
+	spin_unlock_irqrestore(&rl->lock, flags);
+
+	return req;
+}
+
+/**
+ * zfcp_reqlist_add - Add entry to reqlist
+ * @rl: reqlist where to add the entry
+ * @req: The entry to add
+ *
+ * The request id always increases. As an optimization, new requests
+ * are added here with list_add_tail at the end of the bucket lists
+ * while old requests are looked up starting at the beginning of the
+ * lists.
+ */
+static inline void zfcp_reqlist_add(struct zfcp_reqlist *rl,
+				    struct zfcp_fsf_req *req)
+{
+	unsigned int i;
+	unsigned long flags;
+
+	i = zfcp_reqlist_hash(req->req_id);
+
+	spin_lock_irqsave(&rl->lock, flags);
+	list_add_tail(&req->list, &rl->buckets[i]);
+	spin_unlock_irqrestore(&rl->lock, flags);
+}
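+
+/*
+ * Illustrative sketch (not compiled): typical life cycle of a reqlist.
+ * req and req_id are placeholders; in this driver the list hangs off
+ * struct zfcp_adapter and requests are removed on completion.
+ *
+ *	struct zfcp_reqlist *rl = zfcp_reqlist_alloc();
+ *
+ *	zfcp_reqlist_add(rl, req);			(request issued)
+ *	...
+ *	req = zfcp_reqlist_find_rm(rl, req_id);		(request completed)
+ *	...
+ *	zfcp_reqlist_free(rl);				(list must be empty)
+ */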
+
+/**
+ * zfcp_reqlist_move - Move all entries from reqlist to simple list
+ * @rl: The zfcp_reqlist where to remove all entries
+ * @list: The list where to move all entries
+ */
+static inline void zfcp_reqlist_move(struct zfcp_reqlist *rl,
+				     struct list_head *list)
+{
+	unsigned int i;
+	unsigned long flags;
+
+	spin_lock_irqsave(&rl->lock, flags);
+	for (i = 0; i < ZFCP_REQ_LIST_BUCKETS; i++)
+		list_splice_init(&rl->buckets[i], list);
+	spin_unlock_irqrestore(&rl->lock, flags);
+}
+
+/**
+ * zfcp_reqlist_apply_for_all() - apply a function to every request.
+ * @rl: the requestlist that contains the target requests.
+ * @f: the function to apply to each request; the first parameter of the
+ *     function will be the target-request; the second parameter is the same
+ *     pointer as given with the argument @data.
+ * @data: freely chosen argument; passed through to @f as second parameter.
+ *
+ * Uses :c:macro:`list_for_each_entry` to iterate over the lists in the hash-
+ * table (not a 'safe' variant, so don't modify the list).
+ *
+ * Holds @rl->lock over the entire request-iteration.
+ */
+static inline void
+zfcp_reqlist_apply_for_all(struct zfcp_reqlist *rl,
+			   void (*f)(struct zfcp_fsf_req *, void *), void *data)
+{
+	struct zfcp_fsf_req *req;
+	unsigned long flags;
+	unsigned int i;
+
+	spin_lock_irqsave(&rl->lock, flags);
+	for (i = 0; i < ZFCP_REQ_LIST_BUCKETS; i++)
+		list_for_each_entry(req, &rl->buckets[i], list)
+			f(req, data);
+	spin_unlock_irqrestore(&rl->lock, flags);
+}
+
+#endif /* ZFCP_REQLIST_H */
diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c
new file mode 100644
index 0000000..a8efcb3
--- /dev/null
+++ b/drivers/s390/scsi/zfcp_scsi.c
@@ -0,0 +1,855 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * zfcp device driver
+ *
+ * Interface to Linux SCSI midlayer.
+ *
+ * Copyright IBM Corp. 2002, 2018
+ */
+
+#define KMSG_COMPONENT "zfcp"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <scsi/fc/fc_fcp.h>
+#include <scsi/scsi_eh.h>
+#include <linux/atomic.h>
+#include "zfcp_ext.h"
+#include "zfcp_dbf.h"
+#include "zfcp_fc.h"
+#include "zfcp_reqlist.h"
+
+static unsigned int default_depth = 32;
+module_param_named(queue_depth, default_depth, uint, 0600);
+MODULE_PARM_DESC(queue_depth, "Default queue depth for new SCSI devices");
+
+static bool enable_dif;
+module_param_named(dif, enable_dif, bool, 0400);
+MODULE_PARM_DESC(dif, "Enable DIF/DIX data integrity support");
+
+static bool allow_lun_scan = true;
+module_param(allow_lun_scan, bool, 0600);
+MODULE_PARM_DESC(allow_lun_scan, "For NPIV, scan and attach all storage LUNs");
+
+static void zfcp_scsi_slave_destroy(struct scsi_device *sdev)
+{
+	struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
+
+	/* if previous slave_alloc returned early, there is nothing to do */
+	if (!zfcp_sdev->port)
+		return;
+
+	zfcp_erp_lun_shutdown_wait(sdev, "scssd_1");
+	put_device(&zfcp_sdev->port->dev);
+}
+
+static int zfcp_scsi_slave_configure(struct scsi_device *sdp)
+{
+	if (sdp->tagged_supported)
+		scsi_change_queue_depth(sdp, default_depth);
+	return 0;
+}
+
+static void zfcp_scsi_command_fail(struct scsi_cmnd *scpnt, int result)
+{
+	set_host_byte(scpnt, result);
+	zfcp_dbf_scsi_fail_send(scpnt);
+	scpnt->scsi_done(scpnt);
+}
+
+static
+int zfcp_scsi_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scpnt)
+{
+	struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(scpnt->device);
+	struct fc_rport *rport = starget_to_rport(scsi_target(scpnt->device));
+	int    status, scsi_result, ret;
+
+	/* reset the status for this request */
+	scpnt->result = 0;
+	scpnt->host_scribble = NULL;
+
+	scsi_result = fc_remote_port_chkready(rport);
+	if (unlikely(scsi_result)) {
+		scpnt->result = scsi_result;
+		zfcp_dbf_scsi_fail_send(scpnt);
+		scpnt->scsi_done(scpnt);
+		return 0;
+	}
+
+	status = atomic_read(&zfcp_sdev->status);
+	if (unlikely(status & ZFCP_STATUS_COMMON_ERP_FAILED) &&
+		     !(atomic_read(&zfcp_sdev->port->status) &
+		       ZFCP_STATUS_COMMON_ERP_FAILED)) {
+		/* only the LUN access is denied while the port is good;
+		 * this case is not covered by the FC transport, so we
+		 * have to fail the command here */
+		zfcp_scsi_command_fail(scpnt, DID_ERROR);
+		return 0;
+	}
+
+	if (unlikely(!(status & ZFCP_STATUS_COMMON_UNBLOCKED))) {
+		/* This could be a pending call to rport_delete:
+		 * mimic the retry from fc_remote_port_chkready
+		 * until the rport is BLOCKED
+		 */
+		zfcp_scsi_command_fail(scpnt, DID_IMM_RETRY);
+		return 0;
+	}
+
+	ret = zfcp_fsf_fcp_cmnd(scpnt);
+	if (unlikely(ret == -EBUSY))
+		return SCSI_MLQUEUE_DEVICE_BUSY;
+	else if (unlikely(ret < 0))
+		return SCSI_MLQUEUE_HOST_BUSY;
+
+	return ret;
+}
+
+static int zfcp_scsi_slave_alloc(struct scsi_device *sdev)
+{
+	struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
+	struct zfcp_adapter *adapter =
+		(struct zfcp_adapter *) sdev->host->hostdata[0];
+	struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
+	struct zfcp_port *port;
+	struct zfcp_unit *unit;
+	int npiv = adapter->connection_features & FSF_FEATURE_NPIV_MODE;
+
+	zfcp_sdev->erp_action.adapter = adapter;
+	zfcp_sdev->erp_action.sdev = sdev;
+
+	port = zfcp_get_port_by_wwpn(adapter, rport->port_name);
+	if (!port)
+		return -ENXIO;
+
+	zfcp_sdev->erp_action.port = port;
+
+	unit = zfcp_unit_find(port, zfcp_scsi_dev_lun(sdev));
+	if (unit)
+		put_device(&unit->dev);
+
+	if (!unit && !(allow_lun_scan && npiv)) {
+		put_device(&port->dev);
+		return -ENXIO;
+	}
+
+	zfcp_sdev->port = port;
+	zfcp_sdev->latencies.write.channel.min = 0xFFFFFFFF;
+	zfcp_sdev->latencies.write.fabric.min = 0xFFFFFFFF;
+	zfcp_sdev->latencies.read.channel.min = 0xFFFFFFFF;
+	zfcp_sdev->latencies.read.fabric.min = 0xFFFFFFFF;
+	zfcp_sdev->latencies.cmd.channel.min = 0xFFFFFFFF;
+	zfcp_sdev->latencies.cmd.fabric.min = 0xFFFFFFFF;
+	spin_lock_init(&zfcp_sdev->latencies.lock);
+
+	zfcp_erp_set_lun_status(sdev, ZFCP_STATUS_COMMON_RUNNING);
+	zfcp_erp_lun_reopen(sdev, 0, "scsla_1");
+	zfcp_erp_wait(port->adapter);
+
+	return 0;
+}
+
+static int zfcp_scsi_eh_abort_handler(struct scsi_cmnd *scpnt)
+{
+	struct Scsi_Host *scsi_host = scpnt->device->host;
+	struct zfcp_adapter *adapter =
+		(struct zfcp_adapter *) scsi_host->hostdata[0];
+	struct zfcp_fsf_req *old_req, *abrt_req;
+	unsigned long flags;
+	unsigned long old_reqid = (unsigned long) scpnt->host_scribble;
+	int retval = SUCCESS, ret;
+	int retry = 3;
+	char *dbf_tag;
+
+	/* avoid race condition between late normal completion and abort */
+	write_lock_irqsave(&adapter->abort_lock, flags);
+
+	old_req = zfcp_reqlist_find(adapter->req_list, old_reqid);
+	if (!old_req) {
+		write_unlock_irqrestore(&adapter->abort_lock, flags);
+		zfcp_dbf_scsi_abort("abrt_or", scpnt, NULL);
+		return FAILED; /* completion could be in progress */
+	}
+	old_req->data = NULL;
+
+	/* don't access old fsf_req after releasing the abort_lock */
+	write_unlock_irqrestore(&adapter->abort_lock, flags);
+
+	while (retry--) {
+		abrt_req = zfcp_fsf_abort_fcp_cmnd(scpnt);
+		if (abrt_req)
+			break;
+
+		zfcp_dbf_scsi_abort("abrt_wt", scpnt, NULL);
+		zfcp_erp_wait(adapter);
+		ret = fc_block_scsi_eh(scpnt);
+		if (ret) {
+			zfcp_dbf_scsi_abort("abrt_bl", scpnt, NULL);
+			return ret;
+		}
+		if (!(atomic_read(&adapter->status) &
+		      ZFCP_STATUS_COMMON_RUNNING)) {
+			zfcp_dbf_scsi_abort("abrt_ru", scpnt, NULL);
+			return SUCCESS;
+		}
+	}
+	if (!abrt_req) {
+		zfcp_dbf_scsi_abort("abrt_ar", scpnt, NULL);
+		return FAILED;
+	}
+
+	wait_for_completion(&abrt_req->completion);
+
+	if (abrt_req->status & ZFCP_STATUS_FSFREQ_ABORTSUCCEEDED)
+		dbf_tag = "abrt_ok";
+	else if (abrt_req->status & ZFCP_STATUS_FSFREQ_ABORTNOTNEEDED)
+		dbf_tag = "abrt_nn";
+	else {
+		dbf_tag = "abrt_fa";
+		retval = FAILED;
+	}
+	zfcp_dbf_scsi_abort(dbf_tag, scpnt, abrt_req);
+	zfcp_fsf_req_free(abrt_req);
+	return retval;
+}
+
+struct zfcp_scsi_req_filter {
+	u8 tmf_scope;
+	u32 lun_handle;
+	u32 port_handle;
+};
+
+static void zfcp_scsi_forget_cmnd(struct zfcp_fsf_req *old_req, void *data)
+{
+	struct zfcp_scsi_req_filter *filter =
+		(struct zfcp_scsi_req_filter *)data;
+
+	/* already aborted - prevent side-effects - or not a SCSI command */
+	if (old_req->data == NULL || old_req->fsf_command != FSF_QTCB_FCP_CMND)
+		return;
+
+	/* (tmf_scope == FCP_TMF_TGT_RESET || tmf_scope == FCP_TMF_LUN_RESET) */
+	if (old_req->qtcb->header.port_handle != filter->port_handle)
+		return;
+
+	if (filter->tmf_scope == FCP_TMF_LUN_RESET &&
+	    old_req->qtcb->header.lun_handle != filter->lun_handle)
+		return;
+
+	zfcp_dbf_scsi_nullcmnd((struct scsi_cmnd *)old_req->data, old_req);
+	old_req->data = NULL;
+}
+
+static void zfcp_scsi_forget_cmnds(struct zfcp_scsi_dev *zsdev, u8 tm_flags)
+{
+	struct zfcp_adapter *adapter = zsdev->port->adapter;
+	struct zfcp_scsi_req_filter filter = {
+		.tmf_scope = FCP_TMF_TGT_RESET,
+		.port_handle = zsdev->port->handle,
+	};
+	unsigned long flags;
+
+	if (tm_flags == FCP_TMF_LUN_RESET) {
+		filter.tmf_scope = FCP_TMF_LUN_RESET;
+		filter.lun_handle = zsdev->lun_handle;
+	}
+
+	/*
+	 * abort_lock secures (struct zfcp_fsf_req *)->data against
+	 * concurrent processing by the abort handler and the normal
+	 * command handler
+	 */
+	write_lock_irqsave(&adapter->abort_lock, flags);
+	zfcp_reqlist_apply_for_all(adapter->req_list, zfcp_scsi_forget_cmnd,
+				   &filter);
+	write_unlock_irqrestore(&adapter->abort_lock, flags);
+}
+
+/**
+ * zfcp_scsi_task_mgmt_function() - Send a task management function (sync).
+ * @sdev: Pointer to SCSI device to send the task management command to.
+ * @tm_flags: Task management flags,
+ *	      here we only handle %FCP_TMF_TGT_RESET or %FCP_TMF_LUN_RESET.
+ */
+static int zfcp_scsi_task_mgmt_function(struct scsi_device *sdev, u8 tm_flags)
+{
+	struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
+	struct zfcp_adapter *adapter = zfcp_sdev->port->adapter;
+	struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
+	struct zfcp_fsf_req *fsf_req = NULL;
+	int retval = SUCCESS, ret;
+	int retry = 3;
+
+	while (retry--) {
+		fsf_req = zfcp_fsf_fcp_task_mgmt(sdev, tm_flags);
+		if (fsf_req)
+			break;
+
+		zfcp_dbf_scsi_devreset("wait", sdev, tm_flags, NULL);
+		zfcp_erp_wait(adapter);
+		ret = fc_block_rport(rport);
+		if (ret) {
+			zfcp_dbf_scsi_devreset("fiof", sdev, tm_flags, NULL);
+			return ret;
+		}
+
+		if (!(atomic_read(&adapter->status) &
+		      ZFCP_STATUS_COMMON_RUNNING)) {
+			zfcp_dbf_scsi_devreset("nres", sdev, tm_flags, NULL);
+			return SUCCESS;
+		}
+	}
+	if (!fsf_req) {
+		zfcp_dbf_scsi_devreset("reqf", sdev, tm_flags, NULL);
+		return FAILED;
+	}
+
+	wait_for_completion(&fsf_req->completion);
+
+	if (fsf_req->status & ZFCP_STATUS_FSFREQ_TMFUNCFAILED) {
+		zfcp_dbf_scsi_devreset("fail", sdev, tm_flags, fsf_req);
+		retval = FAILED;
+	} else {
+		zfcp_dbf_scsi_devreset("okay", sdev, tm_flags, fsf_req);
+		zfcp_scsi_forget_cmnds(zfcp_sdev, tm_flags);
+	}
+
+	zfcp_fsf_req_free(fsf_req);
+	return retval;
+}
+
+static int zfcp_scsi_eh_device_reset_handler(struct scsi_cmnd *scpnt)
+{
+	struct scsi_device *sdev = scpnt->device;
+
+	return zfcp_scsi_task_mgmt_function(sdev, FCP_TMF_LUN_RESET);
+}
+
+static int zfcp_scsi_eh_target_reset_handler(struct scsi_cmnd *scpnt)
+{
+	struct scsi_target *starget = scsi_target(scpnt->device);
+	struct fc_rport *rport = starget_to_rport(starget);
+	struct Scsi_Host *shost = rport_to_shost(rport);
+	struct scsi_device *sdev = NULL, *tmp_sdev;
+	struct zfcp_adapter *adapter =
+		(struct zfcp_adapter *)shost->hostdata[0];
+	int ret;
+
+	shost_for_each_device(tmp_sdev, shost) {
+		if (tmp_sdev->id == starget->id) {
+			sdev = tmp_sdev;
+			break;
+		}
+	}
+	if (!sdev) {
+		ret = FAILED;
+		zfcp_dbf_scsi_eh("tr_nosd", adapter, starget->id, ret);
+		return ret;
+	}
+
+	ret = zfcp_scsi_task_mgmt_function(sdev, FCP_TMF_TGT_RESET);
+
+	/* release reference from above shost_for_each_device */
+	if (sdev)
+		scsi_device_put(tmp_sdev);
+
+	return ret;
+}
+
+static int zfcp_scsi_eh_host_reset_handler(struct scsi_cmnd *scpnt)
+{
+	struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(scpnt->device);
+	struct zfcp_adapter *adapter = zfcp_sdev->port->adapter;
+	int ret = SUCCESS, fc_ret;
+
+	zfcp_erp_adapter_reopen(adapter, 0, "schrh_1");
+	zfcp_erp_wait(adapter);
+	fc_ret = fc_block_scsi_eh(scpnt);
+	if (fc_ret)
+		ret = fc_ret;
+
+	zfcp_dbf_scsi_eh("schrh_r", adapter, ~0, ret);
+	return ret;
+}
+
+/**
+ * zfcp_scsi_sysfs_host_reset() - Support scsi_host sysfs attribute host_reset.
+ * @shost: Pointer to Scsi_Host to perform action on.
+ * @reset_type: We support %SCSI_ADAPTER_RESET but not %SCSI_FIRMWARE_RESET.
+ *
+ * Return: 0 on %SCSI_ADAPTER_RESET, -%EOPNOTSUPP otherwise.
+ *
+ * This is similar to zfcp_sysfs_adapter_failed_store().
+ */
+static int zfcp_scsi_sysfs_host_reset(struct Scsi_Host *shost, int reset_type)
+{
+	struct zfcp_adapter *adapter =
+		(struct zfcp_adapter *)shost->hostdata[0];
+	int ret = 0;
+
+	if (reset_type != SCSI_ADAPTER_RESET) {
+		ret = -EOPNOTSUPP;
+		zfcp_dbf_scsi_eh("scshr_n", adapter, ~0, ret);
+		return ret;
+	}
+
+	zfcp_erp_adapter_reset_sync(adapter, "scshr_y");
+	return ret;
+}
+
+struct scsi_transport_template *zfcp_scsi_transport_template;
+
+static struct scsi_host_template zfcp_scsi_host_template = {
+	.module			 = THIS_MODULE,
+	.name			 = "zfcp",
+	.queuecommand		 = zfcp_scsi_queuecommand,
+	.eh_timed_out		 = fc_eh_timed_out,
+	.eh_abort_handler	 = zfcp_scsi_eh_abort_handler,
+	.eh_device_reset_handler = zfcp_scsi_eh_device_reset_handler,
+	.eh_target_reset_handler = zfcp_scsi_eh_target_reset_handler,
+	.eh_host_reset_handler	 = zfcp_scsi_eh_host_reset_handler,
+	.slave_alloc		 = zfcp_scsi_slave_alloc,
+	.slave_configure	 = zfcp_scsi_slave_configure,
+	.slave_destroy		 = zfcp_scsi_slave_destroy,
+	.change_queue_depth	 = scsi_change_queue_depth,
+	.host_reset		 = zfcp_scsi_sysfs_host_reset,
+	.proc_name		 = "zfcp",
+	.can_queue		 = 4096,
+	.this_id		 = -1,
+	.sg_tablesize		 = (((QDIO_MAX_ELEMENTS_PER_BUFFER - 1)
+				     * ZFCP_QDIO_MAX_SBALS_PER_REQ) - 2),
+				   /* GCD, adjusted later */
+	.max_sectors		 = (((QDIO_MAX_ELEMENTS_PER_BUFFER - 1)
+				     * ZFCP_QDIO_MAX_SBALS_PER_REQ) - 2) * 8,
+				   /* GCD, adjusted later */
+	.dma_boundary		 = ZFCP_QDIO_SBALE_LEN - 1,
+	.use_clustering		 = 1,
+	.shost_attrs		 = zfcp_sysfs_shost_attrs,
+	.sdev_attrs		 = zfcp_sysfs_sdev_attrs,
+	.track_queue_depth	 = 1,
+	.supported_mode		 = MODE_INITIATOR,
+};
+
+/**
+ * zfcp_scsi_adapter_register - Register SCSI and FC host with SCSI midlayer
+ * @adapter: The zfcp adapter to register with the SCSI midlayer
+ */
+int zfcp_scsi_adapter_register(struct zfcp_adapter *adapter)
+{
+	struct ccw_dev_id dev_id;
+
+	if (adapter->scsi_host)
+		return 0;
+
+	ccw_device_get_id(adapter->ccw_device, &dev_id);
+	/* register adapter as SCSI host with mid layer of SCSI stack */
+	adapter->scsi_host = scsi_host_alloc(&zfcp_scsi_host_template,
+					     sizeof (struct zfcp_adapter *));
+	if (!adapter->scsi_host) {
+		dev_err(&adapter->ccw_device->dev,
+			"Registering the FCP device with the "
+			"SCSI stack failed\n");
+		return -EIO;
+	}
+
+	/* tell the SCSI stack some characteristics of this adapter */
+	adapter->scsi_host->max_id = 511;
+	adapter->scsi_host->max_lun = 0xFFFFFFFF;
+	adapter->scsi_host->max_channel = 0;
+	adapter->scsi_host->unique_id = dev_id.devno;
+	adapter->scsi_host->max_cmd_len = 16; /* in struct fcp_cmnd */
+	adapter->scsi_host->transportt = zfcp_scsi_transport_template;
+
+	adapter->scsi_host->hostdata[0] = (unsigned long) adapter;
+
+	if (scsi_add_host(adapter->scsi_host, &adapter->ccw_device->dev)) {
+		scsi_host_put(adapter->scsi_host);
+		return -EIO;
+	}
+
+	return 0;
+}
+
+/**
+ * zfcp_scsi_adapter_unregister - Unregister SCSI and FC host from SCSI midlayer
+ * @adapter: The zfcp adapter to unregister.
+ */
+void zfcp_scsi_adapter_unregister(struct zfcp_adapter *adapter)
+{
+	struct Scsi_Host *shost;
+	struct zfcp_port *port;
+
+	shost = adapter->scsi_host;
+	if (!shost)
+		return;
+
+	read_lock_irq(&adapter->port_list_lock);
+	list_for_each_entry(port, &adapter->port_list, list)
+		port->rport = NULL;
+	read_unlock_irq(&adapter->port_list_lock);
+
+	fc_remove_host(shost);
+	scsi_remove_host(shost);
+	scsi_host_put(shost);
+	adapter->scsi_host = NULL;
+}
+
+static struct fc_host_statistics*
+zfcp_scsi_init_fc_host_stats(struct zfcp_adapter *adapter)
+{
+	struct fc_host_statistics *fc_stats;
+
+	if (!adapter->fc_stats) {
+		fc_stats = kmalloc(sizeof(*fc_stats), GFP_KERNEL);
+		if (!fc_stats)
+			return NULL;
+		adapter->fc_stats = fc_stats; /* freed in adapter_release */
+	}
+	memset(adapter->fc_stats, 0, sizeof(*adapter->fc_stats));
+	return adapter->fc_stats;
+}
+
+static void zfcp_scsi_adjust_fc_host_stats(struct fc_host_statistics *fc_stats,
+					   struct fsf_qtcb_bottom_port *data,
+					   struct fsf_qtcb_bottom_port *old)
+{
+	fc_stats->seconds_since_last_reset =
+		data->seconds_since_last_reset - old->seconds_since_last_reset;
+	fc_stats->tx_frames = data->tx_frames - old->tx_frames;
+	fc_stats->tx_words = data->tx_words - old->tx_words;
+	fc_stats->rx_frames = data->rx_frames - old->rx_frames;
+	fc_stats->rx_words = data->rx_words - old->rx_words;
+	fc_stats->lip_count = data->lip - old->lip;
+	fc_stats->nos_count = data->nos - old->nos;
+	fc_stats->error_frames = data->error_frames - old->error_frames;
+	fc_stats->dumped_frames = data->dumped_frames - old->dumped_frames;
+	fc_stats->link_failure_count = data->link_failure - old->link_failure;
+	fc_stats->loss_of_sync_count = data->loss_of_sync - old->loss_of_sync;
+	fc_stats->loss_of_signal_count =
+		data->loss_of_signal - old->loss_of_signal;
+	fc_stats->prim_seq_protocol_err_count =
+		data->psp_error_counts - old->psp_error_counts;
+	fc_stats->invalid_tx_word_count =
+		data->invalid_tx_words - old->invalid_tx_words;
+	fc_stats->invalid_crc_count = data->invalid_crcs - old->invalid_crcs;
+	fc_stats->fcp_input_requests =
+		data->input_requests - old->input_requests;
+	fc_stats->fcp_output_requests =
+		data->output_requests - old->output_requests;
+	fc_stats->fcp_control_requests =
+		data->control_requests - old->control_requests;
+	fc_stats->fcp_input_megabytes = data->input_mb - old->input_mb;
+	fc_stats->fcp_output_megabytes = data->output_mb - old->output_mb;
+}
+
+static void zfcp_scsi_set_fc_host_stats(struct fc_host_statistics *fc_stats,
+					struct fsf_qtcb_bottom_port *data)
+{
+	fc_stats->seconds_since_last_reset = data->seconds_since_last_reset;
+	fc_stats->tx_frames = data->tx_frames;
+	fc_stats->tx_words = data->tx_words;
+	fc_stats->rx_frames = data->rx_frames;
+	fc_stats->rx_words = data->rx_words;
+	fc_stats->lip_count = data->lip;
+	fc_stats->nos_count = data->nos;
+	fc_stats->error_frames = data->error_frames;
+	fc_stats->dumped_frames = data->dumped_frames;
+	fc_stats->link_failure_count = data->link_failure;
+	fc_stats->loss_of_sync_count = data->loss_of_sync;
+	fc_stats->loss_of_signal_count = data->loss_of_signal;
+	fc_stats->prim_seq_protocol_err_count = data->psp_error_counts;
+	fc_stats->invalid_tx_word_count = data->invalid_tx_words;
+	fc_stats->invalid_crc_count = data->invalid_crcs;
+	fc_stats->fcp_input_requests = data->input_requests;
+	fc_stats->fcp_output_requests = data->output_requests;
+	fc_stats->fcp_control_requests = data->control_requests;
+	fc_stats->fcp_input_megabytes = data->input_mb;
+	fc_stats->fcp_output_megabytes = data->output_mb;
+}
+
+static struct fc_host_statistics *
+zfcp_scsi_get_fc_host_stats(struct Scsi_Host *host)
+{
+	struct zfcp_adapter *adapter;
+	struct fc_host_statistics *fc_stats;
+	struct fsf_qtcb_bottom_port *data;
+	int ret;
+
+	adapter = (struct zfcp_adapter *)host->hostdata[0];
+	fc_stats = zfcp_scsi_init_fc_host_stats(adapter);
+	if (!fc_stats)
+		return NULL;
+
+	data = kzalloc(sizeof(*data), GFP_KERNEL);
+	if (!data)
+		return NULL;
+
+	ret = zfcp_fsf_exchange_port_data_sync(adapter->qdio, data);
+	if (ret) {
+		kfree(data);
+		return NULL;
+	}
+
+	if (adapter->stats_reset &&
+	    ((jiffies/HZ - adapter->stats_reset) <
+	     data->seconds_since_last_reset))
+		zfcp_scsi_adjust_fc_host_stats(fc_stats, data,
+					       adapter->stats_reset_data);
+	else
+		zfcp_scsi_set_fc_host_stats(fc_stats, data);
+
+	kfree(data);
+	return fc_stats;
+}
+
+static void zfcp_scsi_reset_fc_host_stats(struct Scsi_Host *shost)
+{
+	struct zfcp_adapter *adapter;
+	struct fsf_qtcb_bottom_port *data;
+	int ret;
+
+	adapter = (struct zfcp_adapter *)shost->hostdata[0];
+	data = kzalloc(sizeof(*data), GFP_KERNEL);
+	if (!data)
+		return;
+
+	ret = zfcp_fsf_exchange_port_data_sync(adapter->qdio, data);
+	if (ret)
+		kfree(data);
+	else {
+		adapter->stats_reset = jiffies/HZ;
+		kfree(adapter->stats_reset_data);
+		adapter->stats_reset_data = data; /* finally freed in
+						     adapter_release */
+	}
+}
+
+static void zfcp_scsi_get_host_port_state(struct Scsi_Host *shost)
+{
+	struct zfcp_adapter *adapter =
+		(struct zfcp_adapter *)shost->hostdata[0];
+	int status = atomic_read(&adapter->status);
+
+	if ((status & ZFCP_STATUS_COMMON_RUNNING) &&
+	    !(status & ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED))
+		fc_host_port_state(shost) = FC_PORTSTATE_ONLINE;
+	else if (status & ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED)
+		fc_host_port_state(shost) = FC_PORTSTATE_LINKDOWN;
+	else if (status & ZFCP_STATUS_COMMON_ERP_FAILED)
+		fc_host_port_state(shost) = FC_PORTSTATE_ERROR;
+	else
+		fc_host_port_state(shost) = FC_PORTSTATE_UNKNOWN;
+}
+
+static void zfcp_scsi_set_rport_dev_loss_tmo(struct fc_rport *rport,
+					     u32 timeout)
+{
+	rport->dev_loss_tmo = timeout;
+}
+
+/**
+ * zfcp_scsi_terminate_rport_io - Terminate all I/O on a rport
+ * @rport: The FC rport where to terminate I/O
+ *
+ * Abort all pending SCSI commands for a port by closing the
+ * port. Using a reopen avoids a conflict with a shutdown
+ * overwriting a reopen. The "forced" ensures that a disappeared port
+ * is not opened again as valid due to the cached plogi data in
+ * non-NPIV mode.
+ */
+static void zfcp_scsi_terminate_rport_io(struct fc_rport *rport)
+{
+	struct zfcp_port *port;
+	struct Scsi_Host *shost = rport_to_shost(rport);
+	struct zfcp_adapter *adapter =
+		(struct zfcp_adapter *)shost->hostdata[0];
+
+	port = zfcp_get_port_by_wwpn(adapter, rport->port_name);
+
+	if (port) {
+		zfcp_erp_port_forced_reopen(port, 0, "sctrpi1");
+		put_device(&port->dev);
+	} else {
+		zfcp_erp_port_forced_no_port_dbf(
+			"sctrpin", adapter,
+			rport->port_name /* zfcp_scsi_rport_register */,
+			rport->port_id /* zfcp_scsi_rport_register */);
+	}
+}
+
+static void zfcp_scsi_rport_register(struct zfcp_port *port)
+{
+	struct fc_rport_identifiers ids;
+	struct fc_rport *rport;
+
+	if (port->rport)
+		return;
+
+	ids.node_name = port->wwnn;
+	ids.port_name = port->wwpn;
+	ids.port_id = port->d_id;
+	ids.roles = FC_RPORT_ROLE_FCP_TARGET;
+
+	zfcp_dbf_rec_trig_lock("scpaddy", port->adapter, port, NULL,
+			       ZFCP_PSEUDO_ERP_ACTION_RPORT_ADD,
+			       ZFCP_PSEUDO_ERP_ACTION_RPORT_ADD);
+	rport = fc_remote_port_add(port->adapter->scsi_host, 0, &ids);
+	if (!rport) {
+		dev_err(&port->adapter->ccw_device->dev,
+			"Registering port 0x%016Lx failed\n",
+			(unsigned long long)port->wwpn);
+		return;
+	}
+
+	rport->maxframe_size = port->maxframe_size;
+	rport->supported_classes = port->supported_classes;
+	port->rport = rport;
+	port->starget_id = rport->scsi_target_id;
+
+	zfcp_unit_queue_scsi_scan(port);
+}
+
+static void zfcp_scsi_rport_block(struct zfcp_port *port)
+{
+	struct fc_rport *rport = port->rport;
+
+	if (rport) {
+		zfcp_dbf_rec_trig_lock("scpdely", port->adapter, port, NULL,
+				       ZFCP_PSEUDO_ERP_ACTION_RPORT_DEL,
+				       ZFCP_PSEUDO_ERP_ACTION_RPORT_DEL);
+		fc_remote_port_delete(rport);
+		port->rport = NULL;
+	}
+}
+
+void zfcp_scsi_schedule_rport_register(struct zfcp_port *port)
+{
+	get_device(&port->dev);
+	port->rport_task = RPORT_ADD;
+
+	if (!queue_work(port->adapter->work_queue, &port->rport_work))
+		put_device(&port->dev);
+}
+
+void zfcp_scsi_schedule_rport_block(struct zfcp_port *port)
+{
+	get_device(&port->dev);
+	port->rport_task = RPORT_DEL;
+
+	if (port->rport && queue_work(port->adapter->work_queue,
+				      &port->rport_work))
+		return;
+
+	put_device(&port->dev);
+}
+
+void zfcp_scsi_schedule_rports_block(struct zfcp_adapter *adapter)
+{
+	unsigned long flags;
+	struct zfcp_port *port;
+
+	read_lock_irqsave(&adapter->port_list_lock, flags);
+	list_for_each_entry(port, &adapter->port_list, list)
+		zfcp_scsi_schedule_rport_block(port);
+	read_unlock_irqrestore(&adapter->port_list_lock, flags);
+}
+
+void zfcp_scsi_rport_work(struct work_struct *work)
+{
+	struct zfcp_port *port = container_of(work, struct zfcp_port,
+					      rport_work);
+
+	set_worker_desc("zrp%c-%16llx",
+			(port->rport_task == RPORT_ADD) ? 'a' : 'd',
+			port->wwpn); /* < WORKER_DESC_LEN=24 */
+	while (port->rport_task) {
+		if (port->rport_task == RPORT_ADD) {
+			port->rport_task = RPORT_NONE;
+			zfcp_scsi_rport_register(port);
+		} else {
+			port->rport_task = RPORT_NONE;
+			zfcp_scsi_rport_block(port);
+		}
+	}
+
+	put_device(&port->dev);
+}
+
+/**
+ * zfcp_scsi_set_prot - Configure DIF/DIX support in scsi_host
+ * @adapter: The adapter where to configure DIF/DIX for the SCSI host
+ */
+void zfcp_scsi_set_prot(struct zfcp_adapter *adapter)
+{
+	unsigned int mask = 0;
+	unsigned int data_div;
+	struct Scsi_Host *shost = adapter->scsi_host;
+
+	data_div = atomic_read(&adapter->status) &
+		   ZFCP_STATUS_ADAPTER_DATA_DIV_ENABLED;
+
+	if (enable_dif &&
+	    adapter->adapter_features & FSF_FEATURE_DIF_PROT_TYPE1)
+		mask |= SHOST_DIF_TYPE1_PROTECTION;
+
+	if (enable_dif && data_div &&
+	    adapter->adapter_features & FSF_FEATURE_DIX_PROT_TCPIP) {
+		mask |= SHOST_DIX_TYPE1_PROTECTION;
+		scsi_host_set_guard(shost, SHOST_DIX_GUARD_IP);
+		shost->sg_prot_tablesize = adapter->qdio->max_sbale_per_req / 2;
+		shost->sg_tablesize = adapter->qdio->max_sbale_per_req / 2;
+		shost->max_sectors = shost->sg_tablesize * 8;
+	}
+
+	scsi_host_set_prot(shost, mask);
+}
+
+/**
+ * zfcp_scsi_dif_sense_error - Report DIF/DIX error as driver sense error
+ * @scmd: The SCSI command to report the error for
+ * @ascq: The ASCQ to put in the sense buffer
+ *
+ * See the error handling in sd_done for the sense codes used here.
+ * Sets DID_SOFT_ERROR so that the request is retried, if possible.
+ */
+void zfcp_scsi_dif_sense_error(struct scsi_cmnd *scmd, int ascq)
+{
+	scsi_build_sense_buffer(1, scmd->sense_buffer,
+				ILLEGAL_REQUEST, 0x10, ascq);
+	set_driver_byte(scmd, DRIVER_SENSE);
+	scmd->result |= SAM_STAT_CHECK_CONDITION;
+	set_host_byte(scmd, DID_SOFT_ERROR);
+}
+
+struct fc_function_template zfcp_transport_functions = {
+	.show_starget_port_id = 1,
+	.show_starget_port_name = 1,
+	.show_starget_node_name = 1,
+	.show_rport_supported_classes = 1,
+	.show_rport_maxframe_size = 1,
+	.show_rport_dev_loss_tmo = 1,
+	.show_host_node_name = 1,
+	.show_host_port_name = 1,
+	.show_host_permanent_port_name = 1,
+	.show_host_supported_classes = 1,
+	.show_host_supported_fc4s = 1,
+	.show_host_supported_speeds = 1,
+	.show_host_maxframe_size = 1,
+	.show_host_serial_number = 1,
+	.get_fc_host_stats = zfcp_scsi_get_fc_host_stats,
+	.reset_fc_host_stats = zfcp_scsi_reset_fc_host_stats,
+	.set_rport_dev_loss_tmo = zfcp_scsi_set_rport_dev_loss_tmo,
+	.get_host_port_state = zfcp_scsi_get_host_port_state,
+	.terminate_rport_io = zfcp_scsi_terminate_rport_io,
+	.show_host_port_state = 1,
+	.show_host_active_fc4s = 1,
+	.bsg_request = zfcp_fc_exec_bsg_job,
+	.bsg_timeout = zfcp_fc_timeout_bsg_job,
+	/* no functions registered for the following dynamic attributes;
+	 * they are set directly by the LLDD */
+	.show_host_port_type = 1,
+	.show_host_symbolic_name = 1,
+	.show_host_speed = 1,
+	.show_host_port_id = 1,
+	.dd_bsg_size = sizeof(struct zfcp_fsf_ct_els),
+};
diff --git a/drivers/s390/scsi/zfcp_sysfs.c b/drivers/s390/scsi/zfcp_sysfs.c
new file mode 100644
index 0000000..b277be6
--- /dev/null
+++ b/drivers/s390/scsi/zfcp_sysfs.c
@@ -0,0 +1,623 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * zfcp device driver
+ *
+ * sysfs attributes.
+ *
+ * Copyright IBM Corp. 2008, 2010
+ */
+
+#define KMSG_COMPONENT "zfcp"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <linux/slab.h>
+#include "zfcp_ext.h"
+
+#define ZFCP_DEV_ATTR(_feat, _name, _mode, _show, _store) \
+struct device_attribute dev_attr_##_feat##_##_name = __ATTR(_name, _mode,\
+							    _show, _store)
+#define ZFCP_DEFINE_ATTR(_feat_def, _feat, _name, _format, _value)	       \
+static ssize_t zfcp_sysfs_##_feat##_##_name##_show(struct device *dev,	       \
+						   struct device_attribute *at,\
+						   char *buf)		       \
+{									       \
+	struct _feat_def *_feat = container_of(dev, struct _feat_def, dev);    \
+									       \
+	return sprintf(buf, _format, _value);				       \
+}									       \
+static ZFCP_DEV_ATTR(_feat, _name, S_IRUGO,				       \
+		     zfcp_sysfs_##_feat##_##_name##_show, NULL);
+
+#define ZFCP_DEFINE_ATTR_CONST(_feat, _name, _format, _value)		       \
+static ssize_t zfcp_sysfs_##_feat##_##_name##_show(struct device *dev,	       \
+						   struct device_attribute *at,\
+						   char *buf)		       \
+{									       \
+	return sprintf(buf, _format, _value);				       \
+}									       \
+static ZFCP_DEV_ATTR(_feat, _name, S_IRUGO,				       \
+		     zfcp_sysfs_##_feat##_##_name##_show, NULL);
+
+#define ZFCP_DEFINE_A_ATTR(_name, _format, _value)			     \
+static ssize_t zfcp_sysfs_adapter_##_name##_show(struct device *dev,	     \
+						 struct device_attribute *at,\
+						 char *buf)		     \
+{									     \
+	struct ccw_device *cdev = to_ccwdev(dev);			     \
+	struct zfcp_adapter *adapter = zfcp_ccw_adapter_by_cdev(cdev);	     \
+	int i;								     \
+									     \
+	if (!adapter)							     \
+		return -ENODEV;						     \
+									     \
+	i = sprintf(buf, _format, _value);				     \
+	zfcp_ccw_adapter_put(adapter);					     \
+	return i;							     \
+}									     \
+static ZFCP_DEV_ATTR(adapter, _name, S_IRUGO,				     \
+		     zfcp_sysfs_adapter_##_name##_show, NULL);
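+
+/*
+ * For illustration: the invocation ZFCP_DEFINE_A_ATTR(status, ...) below
+ * expands to a show function zfcp_sysfs_adapter_status_show() and a
+ * read-only device attribute dev_attr_adapter_status, visible as the
+ * sysfs file "status" in the adapter's ccw device directory.
+ */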
+
+ZFCP_DEFINE_A_ATTR(status, "0x%08x\n", atomic_read(&adapter->status));
+ZFCP_DEFINE_A_ATTR(peer_wwnn, "0x%016llx\n",
+		   (unsigned long long) adapter->peer_wwnn);
+ZFCP_DEFINE_A_ATTR(peer_wwpn, "0x%016llx\n",
+		   (unsigned long long) adapter->peer_wwpn);
+ZFCP_DEFINE_A_ATTR(peer_d_id, "0x%06x\n", adapter->peer_d_id);
+ZFCP_DEFINE_A_ATTR(card_version, "0x%04x\n", adapter->hydra_version);
+ZFCP_DEFINE_A_ATTR(lic_version, "0x%08x\n", adapter->fsf_lic_version);
+ZFCP_DEFINE_A_ATTR(hardware_version, "0x%08x\n", adapter->hardware_version);
+ZFCP_DEFINE_A_ATTR(in_recovery, "%d\n", (atomic_read(&adapter->status) &
+					 ZFCP_STATUS_COMMON_ERP_INUSE) != 0);
+
+ZFCP_DEFINE_ATTR(zfcp_port, port, status, "0x%08x\n",
+		 atomic_read(&port->status));
+ZFCP_DEFINE_ATTR(zfcp_port, port, in_recovery, "%d\n",
+		 (atomic_read(&port->status) &
+		  ZFCP_STATUS_COMMON_ERP_INUSE) != 0);
+ZFCP_DEFINE_ATTR_CONST(port, access_denied, "%d\n", 0);
+
+ZFCP_DEFINE_ATTR(zfcp_unit, unit, status, "0x%08x\n",
+		 zfcp_unit_sdev_status(unit));
+ZFCP_DEFINE_ATTR(zfcp_unit, unit, in_recovery, "%d\n",
+		 (zfcp_unit_sdev_status(unit) &
+		  ZFCP_STATUS_COMMON_ERP_INUSE) != 0);
+ZFCP_DEFINE_ATTR(zfcp_unit, unit, access_denied, "%d\n",
+		 (zfcp_unit_sdev_status(unit) &
+		  ZFCP_STATUS_COMMON_ACCESS_DENIED) != 0);
+ZFCP_DEFINE_ATTR_CONST(unit, access_shared, "%d\n", 0);
+ZFCP_DEFINE_ATTR_CONST(unit, access_readonly, "%d\n", 0);
+
+static ssize_t zfcp_sysfs_port_failed_show(struct device *dev,
+					   struct device_attribute *attr,
+					   char *buf)
+{
+	struct zfcp_port *port = container_of(dev, struct zfcp_port, dev);
+
+	if (atomic_read(&port->status) & ZFCP_STATUS_COMMON_ERP_FAILED)
+		return sprintf(buf, "1\n");
+
+	return sprintf(buf, "0\n");
+}
+
+static ssize_t zfcp_sysfs_port_failed_store(struct device *dev,
+					    struct device_attribute *attr,
+					    const char *buf, size_t count)
+{
+	struct zfcp_port *port = container_of(dev, struct zfcp_port, dev);
+	unsigned long val;
+
+	if (kstrtoul(buf, 0, &val) || val != 0)
+		return -EINVAL;
+
+	zfcp_erp_set_port_status(port, ZFCP_STATUS_COMMON_RUNNING);
+	zfcp_erp_port_reopen(port, ZFCP_STATUS_COMMON_ERP_FAILED, "sypfai2");
+	zfcp_erp_wait(port->adapter);
+
+	return count;
+}
+static ZFCP_DEV_ATTR(port, failed, S_IWUSR | S_IRUGO,
+		     zfcp_sysfs_port_failed_show,
+		     zfcp_sysfs_port_failed_store);
+
+static ssize_t zfcp_sysfs_unit_failed_show(struct device *dev,
+					   struct device_attribute *attr,
+					   char *buf)
+{
+	struct zfcp_unit *unit = container_of(dev, struct zfcp_unit, dev);
+	struct scsi_device *sdev;
+	unsigned int status, failed = 1;
+
+	sdev = zfcp_unit_sdev(unit);
+	if (sdev) {
+		status = atomic_read(&sdev_to_zfcp(sdev)->status);
+		failed = status & ZFCP_STATUS_COMMON_ERP_FAILED ? 1 : 0;
+		scsi_device_put(sdev);
+	}
+
+	return sprintf(buf, "%d\n", failed);
+}
+
+static ssize_t zfcp_sysfs_unit_failed_store(struct device *dev,
+					    struct device_attribute *attr,
+					    const char *buf, size_t count)
+{
+	struct zfcp_unit *unit = container_of(dev, struct zfcp_unit, dev);
+	unsigned long val;
+	struct scsi_device *sdev;
+
+	if (kstrtoul(buf, 0, &val) || val != 0)
+		return -EINVAL;
+
+	sdev = zfcp_unit_sdev(unit);
+	if (sdev) {
+		zfcp_erp_set_lun_status(sdev, ZFCP_STATUS_COMMON_RUNNING);
+		zfcp_erp_lun_reopen(sdev, ZFCP_STATUS_COMMON_ERP_FAILED,
+				    "syufai2");
+		zfcp_erp_wait(unit->port->adapter);
+	} else
+		zfcp_unit_scsi_scan(unit);
+
+	return count;
+}
+static ZFCP_DEV_ATTR(unit, failed, S_IWUSR | S_IRUGO,
+		     zfcp_sysfs_unit_failed_show,
+		     zfcp_sysfs_unit_failed_store);
+
+static ssize_t zfcp_sysfs_adapter_failed_show(struct device *dev,
+					      struct device_attribute *attr,
+					      char *buf)
+{
+	struct ccw_device *cdev = to_ccwdev(dev);
+	struct zfcp_adapter *adapter = zfcp_ccw_adapter_by_cdev(cdev);
+	int i;
+
+	if (!adapter)
+		return -ENODEV;
+
+	if (atomic_read(&adapter->status) & ZFCP_STATUS_COMMON_ERP_FAILED)
+		i = sprintf(buf, "1\n");
+	else
+		i = sprintf(buf, "0\n");
+
+	zfcp_ccw_adapter_put(adapter);
+	return i;
+}
+
+static ssize_t zfcp_sysfs_adapter_failed_store(struct device *dev,
+					       struct device_attribute *attr,
+					       const char *buf, size_t count)
+{
+	struct ccw_device *cdev = to_ccwdev(dev);
+	struct zfcp_adapter *adapter = zfcp_ccw_adapter_by_cdev(cdev);
+	unsigned long val;
+	int retval = 0;
+
+	if (!adapter)
+		return -ENODEV;
+
+	if (kstrtoul(buf, 0, &val) || val != 0) {
+		retval = -EINVAL;
+		goto out;
+	}
+
+	zfcp_erp_adapter_reset_sync(adapter, "syafai2");
+out:
+	zfcp_ccw_adapter_put(adapter);
+	return retval ? retval : (ssize_t) count;
+}
+static ZFCP_DEV_ATTR(adapter, failed, S_IWUSR | S_IRUGO,
+		     zfcp_sysfs_adapter_failed_show,
+		     zfcp_sysfs_adapter_failed_store);
+
+static ssize_t zfcp_sysfs_port_rescan_store(struct device *dev,
+					    struct device_attribute *attr,
+					    const char *buf, size_t count)
+{
+	struct ccw_device *cdev = to_ccwdev(dev);
+	struct zfcp_adapter *adapter = zfcp_ccw_adapter_by_cdev(cdev);
+
+	if (!adapter)
+		return -ENODEV;
+
+	/*
+	 * The user's wish is our command: immediately schedule and flush a
+	 * worker to conduct a synchronous port scan, that is, neither
+	 * a random delay nor a rate limit is applied here.
+	 */
+	queue_delayed_work(adapter->work_queue, &adapter->scan_work, 0);
+	flush_delayed_work(&adapter->scan_work);
+	zfcp_ccw_adapter_put(adapter);
+
+	return (ssize_t) count;
+}
+static ZFCP_DEV_ATTR(adapter, port_rescan, S_IWUSR, NULL,
+		     zfcp_sysfs_port_rescan_store);
+
+DEFINE_MUTEX(zfcp_sysfs_port_units_mutex);
+
+static ssize_t zfcp_sysfs_port_remove_store(struct device *dev,
+					    struct device_attribute *attr,
+					    const char *buf, size_t count)
+{
+	struct ccw_device *cdev = to_ccwdev(dev);
+	struct zfcp_adapter *adapter = zfcp_ccw_adapter_by_cdev(cdev);
+	struct zfcp_port *port;
+	u64 wwpn;
+	int retval = -EINVAL;
+
+	if (!adapter)
+		return -ENODEV;
+
+	if (kstrtoull(buf, 0, (unsigned long long *) &wwpn))
+		goto out;
+
+	port = zfcp_get_port_by_wwpn(adapter, wwpn);
+	if (!port)
+		goto out;
+	else
+		retval = 0;
+
+	mutex_lock(&zfcp_sysfs_port_units_mutex);
+	if (atomic_read(&port->units) > 0) {
+		retval = -EBUSY;
+		mutex_unlock(&zfcp_sysfs_port_units_mutex);
+		goto out;
+	}
+	/* port is about to be removed, so no more unit_add */
+	atomic_set(&port->units, -1);
+	mutex_unlock(&zfcp_sysfs_port_units_mutex);
+
+	write_lock_irq(&adapter->port_list_lock);
+	list_del(&port->list);
+	write_unlock_irq(&adapter->port_list_lock);
+
+	put_device(&port->dev);
+
+	zfcp_erp_port_shutdown(port, 0, "syprs_1");
+	device_unregister(&port->dev);
+ out:
+	zfcp_ccw_adapter_put(adapter);
+	return retval ? retval : (ssize_t) count;
+}
+static ZFCP_DEV_ATTR(adapter, port_remove, S_IWUSR, NULL,
+		     zfcp_sysfs_port_remove_store);
+
+static struct attribute *zfcp_adapter_attrs[] = {
+	&dev_attr_adapter_failed.attr,
+	&dev_attr_adapter_in_recovery.attr,
+	&dev_attr_adapter_port_remove.attr,
+	&dev_attr_adapter_port_rescan.attr,
+	&dev_attr_adapter_peer_wwnn.attr,
+	&dev_attr_adapter_peer_wwpn.attr,
+	&dev_attr_adapter_peer_d_id.attr,
+	&dev_attr_adapter_card_version.attr,
+	&dev_attr_adapter_lic_version.attr,
+	&dev_attr_adapter_status.attr,
+	&dev_attr_adapter_hardware_version.attr,
+	NULL
+};
+
+struct attribute_group zfcp_sysfs_adapter_attrs = {
+	.attrs = zfcp_adapter_attrs,
+};
+
+static ssize_t zfcp_sysfs_unit_add_store(struct device *dev,
+					 struct device_attribute *attr,
+					 const char *buf, size_t count)
+{
+	struct zfcp_port *port = container_of(dev, struct zfcp_port, dev);
+	u64 fcp_lun;
+	int retval;
+
+	if (kstrtoull(buf, 0, (unsigned long long *) &fcp_lun))
+		return -EINVAL;
+
+	retval = zfcp_unit_add(port, fcp_lun);
+	if (retval)
+		return retval;
+
+	return count;
+}
+static DEVICE_ATTR(unit_add, S_IWUSR, NULL, zfcp_sysfs_unit_add_store);
+
+static ssize_t zfcp_sysfs_unit_remove_store(struct device *dev,
+					    struct device_attribute *attr,
+					    const char *buf, size_t count)
+{
+	struct zfcp_port *port = container_of(dev, struct zfcp_port, dev);
+	u64 fcp_lun;
+
+	if (kstrtoull(buf, 0, (unsigned long long *) &fcp_lun))
+		return -EINVAL;
+
+	if (zfcp_unit_remove(port, fcp_lun))
+		return -EINVAL;
+
+	return count;
+}
+static DEVICE_ATTR(unit_remove, S_IWUSR, NULL, zfcp_sysfs_unit_remove_store);
+
+static struct attribute *zfcp_port_attrs[] = {
+	&dev_attr_unit_add.attr,
+	&dev_attr_unit_remove.attr,
+	&dev_attr_port_failed.attr,
+	&dev_attr_port_in_recovery.attr,
+	&dev_attr_port_status.attr,
+	&dev_attr_port_access_denied.attr,
+	NULL
+};
+static struct attribute_group zfcp_port_attr_group = {
+	.attrs = zfcp_port_attrs,
+};
+const struct attribute_group *zfcp_port_attr_groups[] = {
+	&zfcp_port_attr_group,
+	NULL,
+};
+
+static struct attribute *zfcp_unit_attrs[] = {
+	&dev_attr_unit_failed.attr,
+	&dev_attr_unit_in_recovery.attr,
+	&dev_attr_unit_status.attr,
+	&dev_attr_unit_access_denied.attr,
+	&dev_attr_unit_access_shared.attr,
+	&dev_attr_unit_access_readonly.attr,
+	NULL
+};
+static struct attribute_group zfcp_unit_attr_group = {
+	.attrs = zfcp_unit_attrs,
+};
+const struct attribute_group *zfcp_unit_attr_groups[] = {
+	&zfcp_unit_attr_group,
+	NULL,
+};
+
+#define ZFCP_DEFINE_LATENCY_ATTR(_name) 				\
+static ssize_t								\
+zfcp_sysfs_unit_##_name##_latency_show(struct device *dev,		\
+				       struct device_attribute *attr,	\
+				       char *buf) {			\
+	struct scsi_device *sdev = to_scsi_device(dev);			\
+	struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);		\
+	struct zfcp_latencies *lat = &zfcp_sdev->latencies;		\
+	struct zfcp_adapter *adapter = zfcp_sdev->port->adapter;	\
+	unsigned long long fsum, fmin, fmax, csum, cmin, cmax, cc;	\
+									\
+	spin_lock_bh(&lat->lock);					\
+	fsum = lat->_name.fabric.sum * adapter->timer_ticks;		\
+	fmin = lat->_name.fabric.min * adapter->timer_ticks;		\
+	fmax = lat->_name.fabric.max * adapter->timer_ticks;		\
+	csum = lat->_name.channel.sum * adapter->timer_ticks;		\
+	cmin = lat->_name.channel.min * adapter->timer_ticks;		\
+	cmax = lat->_name.channel.max * adapter->timer_ticks;		\
+	cc  = lat->_name.counter;					\
+	spin_unlock_bh(&lat->lock);					\
+									\
+	do_div(fsum, 1000);						\
+	do_div(fmin, 1000);						\
+	do_div(fmax, 1000);						\
+	do_div(csum, 1000);						\
+	do_div(cmin, 1000);						\
+	do_div(cmax, 1000);						\
+									\
+	return sprintf(buf, "%llu %llu %llu %llu %llu %llu %llu\n",	\
+		       fmin, fmax, fsum, cmin, cmax, csum, cc); 	\
+}									\
+static ssize_t								\
+zfcp_sysfs_unit_##_name##_latency_store(struct device *dev,		\
+					struct device_attribute *attr,	\
+					const char *buf, size_t count)	\
+{									\
+	struct scsi_device *sdev = to_scsi_device(dev);			\
+	struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);		\
+	struct zfcp_latencies *lat = &zfcp_sdev->latencies;		\
+	unsigned long flags;						\
+									\
+	spin_lock_irqsave(&lat->lock, flags);				\
+	lat->_name.fabric.sum = 0;					\
+	lat->_name.fabric.min = 0xFFFFFFFF;				\
+	lat->_name.fabric.max = 0;					\
+	lat->_name.channel.sum = 0;					\
+	lat->_name.channel.min = 0xFFFFFFFF;				\
+	lat->_name.channel.max = 0;					\
+	lat->_name.counter = 0;						\
+	spin_unlock_irqrestore(&lat->lock, flags);			\
+									\
+	return (ssize_t) count;						\
+}									\
+static DEVICE_ATTR(_name##_latency, S_IWUSR | S_IRUGO,			\
+		   zfcp_sysfs_unit_##_name##_latency_show,		\
+		   zfcp_sysfs_unit_##_name##_latency_store);
+
+ZFCP_DEFINE_LATENCY_ATTR(read);
+ZFCP_DEFINE_LATENCY_ATTR(write);
+ZFCP_DEFINE_LATENCY_ATTR(cmd);
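
The show half of the macro above prints seven space-separated values: fabric min/max/sum, channel min/max/sum (each already multiplied by adapter->timer_ticks and divided by 1000 by the driver), and the command counter. For illustration, a small user-space sketch deriving per-command averages from one such line on stdin:

/* hedged sketch: average the fabric/channel latency sums */
#include <stdio.h>

int main(void)
{
	unsigned long long fmin, fmax, fsum, cmin, cmax, csum, cc;

	if (scanf("%llu %llu %llu %llu %llu %llu %llu",
		  &fmin, &fmax, &fsum, &cmin, &cmax, &csum, &cc) != 7)
		return 1;
	if (cc)	/* counter is zero right after a reset via the store half */
		printf("avg fabric %llu, avg channel %llu (per command)\n",
		       fsum / cc, csum / cc);
	return 0;
}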
+
+#define ZFCP_DEFINE_SCSI_ATTR(_name, _format, _value)			\
+static ssize_t zfcp_sysfs_scsi_##_name##_show(struct device *dev,	\
+					      struct device_attribute *attr,\
+					      char *buf)                 \
+{                                                                        \
+	struct scsi_device *sdev = to_scsi_device(dev);			 \
+	struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);		 \
+									 \
+	return sprintf(buf, _format, _value);                            \
+}                                                                        \
+static DEVICE_ATTR(_name, S_IRUGO, zfcp_sysfs_scsi_##_name##_show, NULL);
+
+ZFCP_DEFINE_SCSI_ATTR(hba_id, "%s\n",
+		      dev_name(&zfcp_sdev->port->adapter->ccw_device->dev));
+ZFCP_DEFINE_SCSI_ATTR(wwpn, "0x%016llx\n",
+		      (unsigned long long) zfcp_sdev->port->wwpn);
+
+static ssize_t zfcp_sysfs_scsi_fcp_lun_show(struct device *dev,
+					    struct device_attribute *attr,
+					    char *buf)
+{
+	struct scsi_device *sdev = to_scsi_device(dev);
+
+	return sprintf(buf, "0x%016llx\n", zfcp_scsi_dev_lun(sdev));
+}
+static DEVICE_ATTR(fcp_lun, S_IRUGO, zfcp_sysfs_scsi_fcp_lun_show, NULL);
+
+ZFCP_DEFINE_SCSI_ATTR(zfcp_access_denied, "%d\n",
+		      (atomic_read(&zfcp_sdev->status) &
+		       ZFCP_STATUS_COMMON_ACCESS_DENIED) != 0);
+
+static ssize_t zfcp_sysfs_scsi_zfcp_failed_show(struct device *dev,
+					   struct device_attribute *attr,
+					   char *buf)
+{
+	struct scsi_device *sdev = to_scsi_device(dev);
+	unsigned int status = atomic_read(&sdev_to_zfcp(sdev)->status);
+	unsigned int failed = status & ZFCP_STATUS_COMMON_ERP_FAILED ? 1 : 0;
+
+	return sprintf(buf, "%d\n", failed);
+}
+
+static ssize_t zfcp_sysfs_scsi_zfcp_failed_store(struct device *dev,
+					    struct device_attribute *attr,
+					    const char *buf, size_t count)
+{
+	struct scsi_device *sdev = to_scsi_device(dev);
+	unsigned long val;
+
+	if (kstrtoul(buf, 0, &val) || val != 0)
+		return -EINVAL;
+
+	zfcp_erp_set_lun_status(sdev, ZFCP_STATUS_COMMON_RUNNING);
+	zfcp_erp_lun_reopen(sdev, ZFCP_STATUS_COMMON_ERP_FAILED,
+			    "syufai3");
+	zfcp_erp_wait(sdev_to_zfcp(sdev)->port->adapter);
+
+	return count;
+}
+static DEVICE_ATTR(zfcp_failed, S_IWUSR | S_IRUGO,
+		   zfcp_sysfs_scsi_zfcp_failed_show,
+		   zfcp_sysfs_scsi_zfcp_failed_store);
+
+ZFCP_DEFINE_SCSI_ATTR(zfcp_in_recovery, "%d\n",
+		      (atomic_read(&zfcp_sdev->status) &
+		       ZFCP_STATUS_COMMON_ERP_INUSE) != 0);
+
+ZFCP_DEFINE_SCSI_ATTR(zfcp_status, "0x%08x\n",
+		      atomic_read(&zfcp_sdev->status));
+
+struct device_attribute *zfcp_sysfs_sdev_attrs[] = {
+	&dev_attr_fcp_lun,
+	&dev_attr_wwpn,
+	&dev_attr_hba_id,
+	&dev_attr_read_latency,
+	&dev_attr_write_latency,
+	&dev_attr_cmd_latency,
+	&dev_attr_zfcp_access_denied,
+	&dev_attr_zfcp_failed,
+	&dev_attr_zfcp_in_recovery,
+	&dev_attr_zfcp_status,
+	NULL
+};
+
+static ssize_t zfcp_sysfs_adapter_util_show(struct device *dev,
+					    struct device_attribute *attr,
+					    char *buf)
+{
+	struct Scsi_Host *scsi_host = dev_to_shost(dev);
+	struct fsf_qtcb_bottom_port *qtcb_port;
+	struct zfcp_adapter *adapter;
+	int retval;
+
+	adapter = (struct zfcp_adapter *) scsi_host->hostdata[0];
+	if (!(adapter->adapter_features & FSF_FEATURE_MEASUREMENT_DATA))
+		return -EOPNOTSUPP;
+
+	qtcb_port = kzalloc(sizeof(struct fsf_qtcb_bottom_port), GFP_KERNEL);
+	if (!qtcb_port)
+		return -ENOMEM;
+
+	retval = zfcp_fsf_exchange_port_data_sync(adapter->qdio, qtcb_port);
+	if (!retval)
+		retval = sprintf(buf, "%u %u %u\n", qtcb_port->cp_util,
+				 qtcb_port->cb_util, qtcb_port->a_util);
+	kfree(qtcb_port);
+	return retval;
+}
+static DEVICE_ATTR(utilization, S_IRUGO, zfcp_sysfs_adapter_util_show, NULL);
+
+static int zfcp_sysfs_adapter_ex_config(struct device *dev,
+					struct fsf_statistics_info *stat_inf)
+{
+	struct Scsi_Host *scsi_host = dev_to_shost(dev);
+	struct fsf_qtcb_bottom_config *qtcb_config;
+	struct zfcp_adapter *adapter;
+	int retval;
+
+	adapter = (struct zfcp_adapter *) scsi_host->hostdata[0];
+	if (!(adapter->adapter_features & FSF_FEATURE_MEASUREMENT_DATA))
+		return -EOPNOTSUPP;
+
+	qtcb_config = kzalloc(sizeof(struct fsf_qtcb_bottom_config),
+			      GFP_KERNEL);
+	if (!qtcb_config)
+		return -ENOMEM;
+
+	retval = zfcp_fsf_exchange_config_data_sync(adapter->qdio, qtcb_config);
+	if (!retval)
+		*stat_inf = qtcb_config->stat_info;
+
+	kfree(qtcb_config);
+	return retval;
+}
+
+#define ZFCP_SHOST_ATTR(_name, _format, _arg...)			\
+static ssize_t zfcp_sysfs_adapter_##_name##_show(struct device *dev,	\
+						 struct device_attribute *attr,\
+						 char *buf)		\
+{									\
+	struct fsf_statistics_info stat_info;				\
+	int retval;							\
+									\
+	retval = zfcp_sysfs_adapter_ex_config(dev, &stat_info);		\
+	if (retval)							\
+		return retval;						\
+									\
+	return sprintf(buf, _format, ## _arg);				\
+}									\
+static DEVICE_ATTR(_name, S_IRUGO, zfcp_sysfs_adapter_##_name##_show, NULL);
+
+ZFCP_SHOST_ATTR(requests, "%llu %llu %llu\n",
+		(unsigned long long) stat_info.input_req,
+		(unsigned long long) stat_info.output_req,
+		(unsigned long long) stat_info.control_req);
+
+ZFCP_SHOST_ATTR(megabytes, "%llu %llu\n",
+		(unsigned long long) stat_info.input_mb,
+		(unsigned long long) stat_info.output_mb);
+
+ZFCP_SHOST_ATTR(seconds_active, "%llu\n",
+		(unsigned long long) stat_info.seconds_act);
+
+static ssize_t zfcp_sysfs_adapter_q_full_show(struct device *dev,
+					      struct device_attribute *attr,
+					      char *buf)
+{
+	struct Scsi_Host *scsi_host = class_to_shost(dev);
+	struct zfcp_qdio *qdio =
+		((struct zfcp_adapter *) scsi_host->hostdata[0])->qdio;
+	u64 util;
+
+	spin_lock_bh(&qdio->stat_lock);
+	util = qdio->req_q_util;
+	spin_unlock_bh(&qdio->stat_lock);
+
+	return sprintf(buf, "%d %llu\n", atomic_read(&qdio->req_q_full),
+		       (unsigned long long)util);
+}
+static DEVICE_ATTR(queue_full, S_IRUGO, zfcp_sysfs_adapter_q_full_show, NULL);
+
+struct device_attribute *zfcp_sysfs_shost_attrs[] = {
+	&dev_attr_utilization,
+	&dev_attr_requests,
+	&dev_attr_megabytes,
+	&dev_attr_seconds_active,
+	&dev_attr_queue_full,
+	NULL
+};
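
These Scsi_Host attributes appear under the adapter's scsi_host class directory. For illustration, a hedged user-space sketch reading the queue_full pair printed above (count of queue-full conditions plus the accumulated request-queue utilization); the host number is a placeholder:

#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/class/scsi_host/host0/queue_full", "r");
	unsigned int full;
	unsigned long long util;

	if (!f) {
		perror("fopen");
		return 1;
	}
	if (fscanf(f, "%u %llu", &full, &util) == 2)
		printf("queue full: %u, req_q_util: %llu\n", full, util);
	fclose(f);
	return 0;
}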
diff --git a/drivers/s390/scsi/zfcp_unit.c b/drivers/s390/scsi/zfcp_unit.c
new file mode 100644
index 0000000..1bf0a09
--- /dev/null
+++ b/drivers/s390/scsi/zfcp_unit.c
@@ -0,0 +1,257 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * zfcp device driver
+ *
+ * Tracking of manually configured LUNs and helper functions to
+ * register the LUNs with the SCSI midlayer.
+ *
+ * Copyright IBM Corp. 2010
+ */
+
+#include "zfcp_def.h"
+#include "zfcp_ext.h"
+
+/**
+ * zfcp_unit_scsi_scan - Register LUN with SCSI midlayer
+ * @unit: The zfcp LUN/unit to register
+ *
+ * When the SCSI midlayer is not allowed to automatically scan and
+ * attach SCSI devices, zfcp has to register the individual devices
+ * with the SCSI midlayer.
+ */
+void zfcp_unit_scsi_scan(struct zfcp_unit *unit)
+{
+	struct fc_rport *rport = unit->port->rport;
+	u64 lun;
+
+	lun = scsilun_to_int((struct scsi_lun *) &unit->fcp_lun);
+
+	if (rport && rport->port_state == FC_PORTSTATE_ONLINE)
+		scsi_scan_target(&rport->dev, 0, rport->scsi_target_id, lun,
+				 SCSI_SCAN_MANUAL);
+}
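
zfcp_unit_scsi_scan() above converts the 64 bit FCP LUN into the Linux LUN number via the midlayer's scsilun_to_int(). A hedged user-space replica of that conversion (mirroring the loop in drivers/scsi/scsi_common.c), fed with a sample FCP LUN given as the byte sequence it occupies in memory on big-endian s390:

#include <stdio.h>
#include <stdint.h>

static uint64_t scsilun_to_int(const uint8_t lun[8])
{
	uint64_t res = 0;
	int i;

	/* two bytes per LUN level, shifted into ascending positions */
	for (i = 0; i < 8; i += 2)
		res |= (uint64_t)((lun[i] << 8) | lun[i + 1]) << (i * 8);
	return res;
}

int main(void)
{
	/* FCP LUN 0x4010403200000000 as stored on a big-endian machine */
	const uint8_t lun[8] = { 0x40, 0x10, 0x40, 0x32, 0, 0, 0, 0 };

	printf("0x%llx\n", (unsigned long long)scsilun_to_int(lun));
	/* prints 0x40324010 */
	return 0;
}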
+
+static void zfcp_unit_scsi_scan_work(struct work_struct *work)
+{
+	struct zfcp_unit *unit = container_of(work, struct zfcp_unit,
+					      scsi_work);
+
+	zfcp_unit_scsi_scan(unit);
+	put_device(&unit->dev);
+}
+
+/**
+ * zfcp_unit_queue_scsi_scan - Register configured units on port
+ * @port: The zfcp_port on which to register units
+ *
+ * After opening a port, all units configured on this port have to be
+ * registered with the SCSI midlayer. This function should be called
+ * after calling fc_remote_port_add, so that the fc_rport is already
+ * ONLINE and the call to scsi_scan_target runs the same way as the
+ * call in the FC transport class.
+ */
+void zfcp_unit_queue_scsi_scan(struct zfcp_port *port)
+{
+	struct zfcp_unit *unit;
+
+	read_lock_irq(&port->unit_list_lock);
+	list_for_each_entry(unit, &port->unit_list, list) {
+		get_device(&unit->dev);
+		if (scsi_queue_work(port->adapter->scsi_host,
+				    &unit->scsi_work) <= 0)
+			put_device(&unit->dev);
+	}
+	read_unlock_irq(&port->unit_list_lock);
+}
+
+static struct zfcp_unit *_zfcp_unit_find(struct zfcp_port *port, u64 fcp_lun)
+{
+	struct zfcp_unit *unit;
+
+	list_for_each_entry(unit, &port->unit_list, list)
+		if (unit->fcp_lun == fcp_lun) {
+			get_device(&unit->dev);
+			return unit;
+		}
+
+	return NULL;
+}
+
+/**
+ * zfcp_unit_find - Find and return zfcp_unit with specified FCP LUN
+ * @port: zfcp_port in which to look for the unit
+ * @fcp_lun: 64 Bit FCP LUN used to identify the zfcp_unit
+ *
+ * If zfcp_unit is found, a reference is acquired that has to be
+ * released later.
+ *
+ * Returns: Pointer to the zfcp_unit, or NULL if there is no zfcp_unit
+ *          with the specified FCP LUN.
+ */
+struct zfcp_unit *zfcp_unit_find(struct zfcp_port *port, u64 fcp_lun)
+{
+	struct zfcp_unit *unit;
+
+	read_lock_irq(&port->unit_list_lock);
+	unit = _zfcp_unit_find(port, fcp_lun);
+	read_unlock_irq(&port->unit_list_lock);
+	return unit;
+}
+
+/**
+ * zfcp_unit_release - Decrement the port's unit counter and free the zfcp_unit.
+ * @dev: pointer to device in zfcp_unit
+ */
+static void zfcp_unit_release(struct device *dev)
+{
+	struct zfcp_unit *unit = container_of(dev, struct zfcp_unit, dev);
+
+	atomic_dec(&unit->port->units);
+	kfree(unit);
+}
+
+/**
+ * zfcp_unit_add - Add a unit to the unit list of a port
+ * @port: pointer to the port where the unit is added
+ * @fcp_lun: FCP LUN of the unit to be added
+ * Returns: 0 on success, -ENODEV, -EEXIST or -ENOMEM otherwise
+ *
+ * Sets up some unit internal structures and creates sysfs entry.
+ */
+int zfcp_unit_add(struct zfcp_port *port, u64 fcp_lun)
+{
+	struct zfcp_unit *unit;
+	int retval = 0;
+
+	mutex_lock(&zfcp_sysfs_port_units_mutex);
+	if (atomic_read(&port->units) == -1) {
+		/* port is already gone */
+		retval = -ENODEV;
+		goto out;
+	}
+
+	unit = zfcp_unit_find(port, fcp_lun);
+	if (unit) {
+		put_device(&unit->dev);
+		retval = -EEXIST;
+		goto out;
+	}
+
+	unit = kzalloc(sizeof(struct zfcp_unit), GFP_KERNEL);
+	if (!unit) {
+		retval = -ENOMEM;
+		goto out;
+	}
+
+	unit->port = port;
+	unit->fcp_lun = fcp_lun;
+	unit->dev.parent = &port->dev;
+	unit->dev.release = zfcp_unit_release;
+	unit->dev.groups = zfcp_unit_attr_groups;
+	INIT_WORK(&unit->scsi_work, zfcp_unit_scsi_scan_work);
+
+	if (dev_set_name(&unit->dev, "0x%016llx",
+			 (unsigned long long) fcp_lun)) {
+		kfree(unit);
+		retval = -ENOMEM;
+		goto out;
+	}
+
+	if (device_register(&unit->dev)) {
+		put_device(&unit->dev);
+		retval = -ENOMEM;
+		goto out;
+	}
+
+	atomic_inc(&port->units); /* under zfcp_sysfs_port_units_mutex ! */
+
+	write_lock_irq(&port->unit_list_lock);
+	list_add_tail(&unit->list, &port->unit_list);
+	write_unlock_irq(&port->unit_list_lock);
+
+	zfcp_unit_scsi_scan(unit);
+
+out:
+	mutex_unlock(&zfcp_sysfs_port_units_mutex);
+	return retval;
+}
+
+/**
+ * zfcp_unit_sdev - Return SCSI device for zfcp_unit
+ * @unit: The zfcp_unit for which to get the SCSI device
+ *
+ * Returns: scsi_device pointer on success, NULL if there is no SCSI
+ *          device for this zfcp_unit
+ *
+ * On success, the caller also holds a reference to the SCSI device
+ * that must be released with scsi_device_put.
+ */
+struct scsi_device *zfcp_unit_sdev(struct zfcp_unit *unit)
+{
+	struct Scsi_Host *shost;
+	struct zfcp_port *port;
+	u64 lun;
+
+	lun = scsilun_to_int((struct scsi_lun *) &unit->fcp_lun);
+	port = unit->port;
+	shost = port->adapter->scsi_host;
+	return scsi_device_lookup(shost, 0, port->starget_id, lun);
+}
+
+/**
+ * zfcp_unit_sdev_status - Return zfcp LUN status for SCSI device
+ * @unit: The unit for which to look up the SCSI device
+ *
+ * Returns the zfcp LUN status field of the SCSI device if the SCSI device
+ * for the zfcp_unit exists, 0 otherwise.
+ */
+unsigned int zfcp_unit_sdev_status(struct zfcp_unit *unit)
+{
+	unsigned int status = 0;
+	struct scsi_device *sdev;
+	struct zfcp_scsi_dev *zfcp_sdev;
+
+	sdev = zfcp_unit_sdev(unit);
+	if (sdev) {
+		zfcp_sdev = sdev_to_zfcp(sdev);
+		status = atomic_read(&zfcp_sdev->status);
+		scsi_device_put(sdev);
+	}
+
+	return status;
+}
+
+/**
+ * zfcp_unit_remove - Remove entry from list of configured units
+ * @port: The port where to remove the unit from the configuration
+ * @fcp_lun: The 64 bit LUN of the unit to remove
+ *
+ * Returns: -EINVAL if a unit with the specified LUN does not exist,
+ *          0 on success.
+ */
+int zfcp_unit_remove(struct zfcp_port *port, u64 fcp_lun)
+{
+	struct zfcp_unit *unit;
+	struct scsi_device *sdev;
+
+	write_lock_irq(&port->unit_list_lock);
+	unit = _zfcp_unit_find(port, fcp_lun);
+	if (unit)
+		list_del(&unit->list);
+	write_unlock_irq(&port->unit_list_lock);
+
+	if (!unit)
+		return -EINVAL;
+
+	sdev = zfcp_unit_sdev(unit);
+	if (sdev) {
+		scsi_remove_device(sdev);
+		scsi_device_put(sdev);
+	}
+
+	/* drop the reference taken in _zfcp_unit_find() above ... */
+	put_device(&unit->dev);
+
+	/* ... and the initial one; the release callback frees the unit */
+	device_unregister(&unit->dev);
+
+	return 0;
+}
diff --git a/drivers/s390/virtio/Makefile b/drivers/s390/virtio/Makefile
new file mode 100644
index 0000000..2dc4d9a
--- /dev/null
+++ b/drivers/s390/virtio/Makefile
@@ -0,0 +1,6 @@
+# SPDX-License-Identifier: GPL-2.0
+# Makefile for kvm guest drivers on s390
+#
+# Copyright IBM Corp. 2008
+
+obj-$(CONFIG_S390_GUEST) += virtio_ccw.o
diff --git a/drivers/s390/virtio/virtio_ccw.c b/drivers/s390/virtio/virtio_ccw.c
new file mode 100644
index 0000000..b67dc49
--- /dev/null
+++ b/drivers/s390/virtio/virtio_ccw.c
@@ -0,0 +1,1462 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * ccw based virtio transport
+ *
+ * Copyright IBM Corp. 2012, 2014
+ *
+ *    Author(s): Cornelia Huck <cornelia.huck@de.ibm.com>
+ */
+
+#include <linux/kernel_stat.h>
+#include <linux/init.h>
+#include <linux/bootmem.h>
+#include <linux/err.h>
+#include <linux/virtio.h>
+#include <linux/virtio_config.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/virtio_ring.h>
+#include <linux/pfn.h>
+#include <linux/async.h>
+#include <linux/wait.h>
+#include <linux/list.h>
+#include <linux/bitops.h>
+#include <linux/moduleparam.h>
+#include <linux/io.h>
+#include <linux/kvm_para.h>
+#include <linux/notifier.h>
+#include <asm/diag.h>
+#include <asm/setup.h>
+#include <asm/irq.h>
+#include <asm/cio.h>
+#include <asm/ccwdev.h>
+#include <asm/virtio-ccw.h>
+#include <asm/isc.h>
+#include <asm/airq.h>
+
+/*
+ * virtio related functions
+ */
+
+struct vq_config_block {
+	__u16 index;
+	__u16 num;
+} __packed;
+
+#define VIRTIO_CCW_CONFIG_SIZE 0x100
+/* same as PCI config space size, should be enough for all drivers */
+
+struct virtio_ccw_device {
+	struct virtio_device vdev;
+	__u8 *status;
+	__u8 config[VIRTIO_CCW_CONFIG_SIZE];
+	struct ccw_device *cdev;
+	__u32 curr_io;
+	int err;
+	unsigned int revision; /* Transport revision */
+	wait_queue_head_t wait_q;
+	spinlock_t lock;
+	struct mutex io_lock; /* Serializes I/O requests */
+	struct list_head virtqueues;
+	unsigned long indicators;
+	unsigned long indicators2;
+	struct vq_config_block *config_block;
+	bool is_thinint;
+	bool going_away;
+	bool device_lost;
+	unsigned int config_ready;
+	void *airq_info;
+};
+
+struct vq_info_block_legacy {
+	__u64 queue;
+	__u32 align;
+	__u16 index;
+	__u16 num;
+} __packed;
+
+struct vq_info_block {
+	__u64 desc;
+	__u32 res0;
+	__u16 index;
+	__u16 num;
+	__u64 avail;
+	__u64 used;
+} __packed;
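
For orientation: vq_info_block_legacy (transport revision 0) hands the host one contiguous ring address plus an alignment, while vq_info_block (revision 1 and later) passes the descriptor, available and used areas separately; virtio_ccw_setup_vq() below fills in one or the other depending on vcdev->revision.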
+
+struct virtio_feature_desc {
+	__le32 features;
+	__u8 index;
+} __packed;
+
+struct virtio_thinint_area {
+	unsigned long summary_indicator;
+	unsigned long indicator;
+	u64 bit_nr;
+	u8 isc;
+} __packed;
+
+struct virtio_rev_info {
+	__u16 revision;
+	__u16 length;
+	__u8 data[];
+};
+
+/* the highest virtio-ccw revision we support */
+#define VIRTIO_CCW_REV_MAX 1
+
+struct virtio_ccw_vq_info {
+	struct virtqueue *vq;
+	int num;
+	void *queue;
+	union {
+		struct vq_info_block s;
+		struct vq_info_block_legacy l;
+	} *info_block;
+	int bit_nr;
+	struct list_head node;
+	long cookie;
+};
+
+#define VIRTIO_AIRQ_ISC IO_SCH_ISC /* inherit from subchannel */
+
+/* one cache line's worth of indicator bits per adapter interrupt area */
+#define VIRTIO_IV_BITS (L1_CACHE_BYTES * 8)
+#define MAX_AIRQ_AREAS 20
+
+static int virtio_ccw_use_airq = 1;
+
+struct airq_info {
+	rwlock_t lock;
+	u8 summary_indicator;
+	struct airq_struct airq;
+	struct airq_iv *aiv;
+};
+static struct airq_info *airq_areas[MAX_AIRQ_AREAS];
+
+#define CCW_CMD_SET_VQ 0x13
+#define CCW_CMD_VDEV_RESET 0x33
+#define CCW_CMD_SET_IND 0x43
+#define CCW_CMD_SET_CONF_IND 0x53
+#define CCW_CMD_READ_FEAT 0x12
+#define CCW_CMD_WRITE_FEAT 0x11
+#define CCW_CMD_READ_CONF 0x22
+#define CCW_CMD_WRITE_CONF 0x21
+#define CCW_CMD_WRITE_STATUS 0x31
+#define CCW_CMD_READ_VQ_CONF 0x32
+#define CCW_CMD_READ_STATUS 0x72
+#define CCW_CMD_SET_IND_ADAPTER 0x73
+#define CCW_CMD_SET_VIRTIO_REV 0x83
+
+#define VIRTIO_CCW_DOING_SET_VQ 0x00010000
+#define VIRTIO_CCW_DOING_RESET 0x00040000
+#define VIRTIO_CCW_DOING_READ_FEAT 0x00080000
+#define VIRTIO_CCW_DOING_WRITE_FEAT 0x00100000
+#define VIRTIO_CCW_DOING_READ_CONFIG 0x00200000
+#define VIRTIO_CCW_DOING_WRITE_CONFIG 0x00400000
+#define VIRTIO_CCW_DOING_WRITE_STATUS 0x00800000
+#define VIRTIO_CCW_DOING_SET_IND 0x01000000
+#define VIRTIO_CCW_DOING_READ_VQ_CONF 0x02000000
+#define VIRTIO_CCW_DOING_SET_CONF_IND 0x04000000
+#define VIRTIO_CCW_DOING_SET_IND_ADAPTER 0x08000000
+#define VIRTIO_CCW_DOING_SET_VIRTIO_REV 0x10000000
+#define VIRTIO_CCW_DOING_READ_STATUS 0x20000000
+#define VIRTIO_CCW_INTPARM_MASK 0xffff0000
+
+static struct virtio_ccw_device *to_vc_device(struct virtio_device *vdev)
+{
+	return container_of(vdev, struct virtio_ccw_device, vdev);
+}
+
+static void drop_airq_indicator(struct virtqueue *vq, struct airq_info *info)
+{
+	unsigned long i, flags;
+
+	write_lock_irqsave(&info->lock, flags);
+	for (i = 0; i < airq_iv_end(info->aiv); i++) {
+		if (vq == (void *)airq_iv_get_ptr(info->aiv, i)) {
+			airq_iv_free_bit(info->aiv, i);
+			airq_iv_set_ptr(info->aiv, i, 0);
+			break;
+		}
+	}
+	write_unlock_irqrestore(&info->lock, flags);
+}
+
+static void virtio_airq_handler(struct airq_struct *airq)
+{
+	struct airq_info *info = container_of(airq, struct airq_info, airq);
+	unsigned long ai;
+
+	inc_irq_stat(IRQIO_VAI);
+	read_lock(&info->lock);
+	/* Walk through indicators field, summary indicator active. */
+	for (ai = 0;;) {
+		ai = airq_iv_scan(info->aiv, ai, airq_iv_end(info->aiv));
+		if (ai == -1UL)
+			break;
+		vring_interrupt(0, (void *)airq_iv_get_ptr(info->aiv, ai));
+	}
+	info->summary_indicator = 0;
+	smp_wmb();
+	/* Walk through indicators field, summary indicator not active. */
+	for (ai = 0;;) {
+		ai = airq_iv_scan(info->aiv, ai, airq_iv_end(info->aiv));
+		if (ai == -1UL)
+			break;
+		vring_interrupt(0, (void *)airq_iv_get_ptr(info->aiv, ai));
+	}
+	read_unlock(&info->lock);
+}
+
+static struct airq_info *new_airq_info(void)
+{
+	struct airq_info *info;
+	int rc;
+
+	info = kzalloc(sizeof(*info), GFP_KERNEL);
+	if (!info)
+		return NULL;
+	rwlock_init(&info->lock);
+	info->aiv = airq_iv_create(VIRTIO_IV_BITS, AIRQ_IV_ALLOC | AIRQ_IV_PTR);
+	if (!info->aiv) {
+		kfree(info);
+		return NULL;
+	}
+	info->airq.handler = virtio_airq_handler;
+	info->airq.lsi_ptr = &info->summary_indicator;
+	info->airq.lsi_mask = 0xff;
+	info->airq.isc = VIRTIO_AIRQ_ISC;
+	rc = register_adapter_interrupt(&info->airq);
+	if (rc) {
+		airq_iv_release(info->aiv);
+		kfree(info);
+		return NULL;
+	}
+	return info;
+}
+
+static unsigned long get_airq_indicator(struct virtqueue *vqs[], int nvqs,
+					u64 *first, void **airq_info)
+{
+	int i, j;
+	struct airq_info *info;
+	unsigned long indicator_addr = 0;
+	unsigned long bit, flags;
+
+	for (i = 0; i < MAX_AIRQ_AREAS && !indicator_addr; i++) {
+		if (!airq_areas[i])
+			airq_areas[i] = new_airq_info();
+		info = airq_areas[i];
+		if (!info)
+			return 0;
+		write_lock_irqsave(&info->lock, flags);
+		bit = airq_iv_alloc(info->aiv, nvqs);
+		if (bit == -1UL) {
+			/* Not enough vacancies. */
+			write_unlock_irqrestore(&info->lock, flags);
+			continue;
+		}
+		*first = bit;
+		*airq_info = info;
+		indicator_addr = (unsigned long)info->aiv->vector;
+		for (j = 0; j < nvqs; j++) {
+			airq_iv_set_ptr(info->aiv, bit + j,
+					(unsigned long)vqs[j]);
+		}
+		write_unlock_irqrestore(&info->lock, flags);
+	}
+	return indicator_addr;
+}
+
+static void virtio_ccw_drop_indicators(struct virtio_ccw_device *vcdev)
+{
+	struct virtio_ccw_vq_info *info;
+
+	list_for_each_entry(info, &vcdev->virtqueues, node)
+		drop_airq_indicator(info->vq, vcdev->airq_info);
+}
+
+static int doing_io(struct virtio_ccw_device *vcdev, __u32 flag)
+{
+	unsigned long flags;
+	__u32 ret;
+
+	spin_lock_irqsave(get_ccwdev_lock(vcdev->cdev), flags);
+	if (vcdev->err)
+		ret = 0;
+	else
+		ret = vcdev->curr_io & flag;
+	spin_unlock_irqrestore(get_ccwdev_lock(vcdev->cdev), flags);
+	return ret;
+}
+
+/*
+ * Start a channel program on the device and wait synchronously for it
+ * to complete: the intparm flag encodes which operation is in flight,
+ * and the interrupt handler clears it and wakes wait_q. io_lock
+ * serializes channel programs per device; -EBUSY starts are retried.
+ */
+static int ccw_io_helper(struct virtio_ccw_device *vcdev,
+			 struct ccw1 *ccw, __u32 intparm)
+{
+	int ret;
+	unsigned long flags;
+	int flag = intparm & VIRTIO_CCW_INTPARM_MASK;
+
+	mutex_lock(&vcdev->io_lock);
+	do {
+		spin_lock_irqsave(get_ccwdev_lock(vcdev->cdev), flags);
+		ret = ccw_device_start(vcdev->cdev, ccw, intparm, 0, 0);
+		if (!ret) {
+			if (!vcdev->curr_io)
+				vcdev->err = 0;
+			vcdev->curr_io |= flag;
+		}
+		spin_unlock_irqrestore(get_ccwdev_lock(vcdev->cdev), flags);
+		cpu_relax();
+	} while (ret == -EBUSY);
+	wait_event(vcdev->wait_q, doing_io(vcdev, flag) == 0);
+	ret = ret ? ret : vcdev->err;
+	mutex_unlock(&vcdev->io_lock);
+	return ret;
+}
+
+static void virtio_ccw_drop_indicator(struct virtio_ccw_device *vcdev,
+				      struct ccw1 *ccw)
+{
+	int ret;
+	unsigned long *indicatorp = NULL;
+	struct virtio_thinint_area *thinint_area = NULL;
+	struct airq_info *airq_info = vcdev->airq_info;
+
+	if (vcdev->is_thinint) {
+		thinint_area = kzalloc(sizeof(*thinint_area),
+				       GFP_DMA | GFP_KERNEL);
+		if (!thinint_area)
+			return;
+		thinint_area->summary_indicator =
+			(unsigned long) &airq_info->summary_indicator;
+		thinint_area->isc = VIRTIO_AIRQ_ISC;
+		ccw->cmd_code = CCW_CMD_SET_IND_ADAPTER;
+		ccw->count = sizeof(*thinint_area);
+		ccw->cda = (__u32)(unsigned long) thinint_area;
+	} else {
+		/* payload is the address of the indicators */
+		indicatorp = kmalloc(sizeof(vcdev->indicators),
+				     GFP_DMA | GFP_KERNEL);
+		if (!indicatorp)
+			return;
+		*indicatorp = 0;
+		ccw->cmd_code = CCW_CMD_SET_IND;
+		ccw->count = sizeof(vcdev->indicators);
+		ccw->cda = (__u32)(unsigned long) indicatorp;
+	}
+	/* Deregister indicators from host. */
+	vcdev->indicators = 0;
+	ccw->flags = 0;
+	ret = ccw_io_helper(vcdev, ccw,
+			    vcdev->is_thinint ?
+			    VIRTIO_CCW_DOING_SET_IND_ADAPTER :
+			    VIRTIO_CCW_DOING_SET_IND);
+	if (ret && (ret != -ENODEV))
+		dev_info(&vcdev->cdev->dev,
+			 "Failed to deregister indicators (%d)\n", ret);
+	else if (vcdev->is_thinint)
+		virtio_ccw_drop_indicators(vcdev);
+	kfree(indicatorp);
+	kfree(thinint_area);
+}
+
+/*
+ * DIAGNOSE 0x500 is the KVM hypercall on s390: the function code
+ * (KVM_S390_VIRTIO_CCW_NOTIFY) goes in gpr 1, the subchannel id in
+ * gpr 2, the queue index in gpr 3 and the host cookie in gpr 4; the
+ * host returns its cookie in gpr 2.
+ */
+static inline long __do_kvm_notify(struct subchannel_id schid,
+				   unsigned long queue_index,
+				   long cookie)
+{
+	register unsigned long __nr asm("1") = KVM_S390_VIRTIO_CCW_NOTIFY;
+	register struct subchannel_id __schid asm("2") = schid;
+	register unsigned long __index asm("3") = queue_index;
+	register long __rc asm("2");
+	register long __cookie asm("4") = cookie;
+
+	asm volatile ("diag 2,4,0x500\n"
+		      : "=d" (__rc) : "d" (__nr), "d" (__schid), "d" (__index),
+		      "d"(__cookie)
+		      : "memory", "cc");
+	return __rc;
+}
+
+static inline long do_kvm_notify(struct subchannel_id schid,
+				 unsigned long queue_index,
+				 long cookie)
+{
+	diag_stat_inc(DIAG_STAT_X500);
+	return __do_kvm_notify(schid, queue_index, cookie);
+}
+
+static bool virtio_ccw_kvm_notify(struct virtqueue *vq)
+{
+	struct virtio_ccw_vq_info *info = vq->priv;
+	struct virtio_ccw_device *vcdev;
+	struct subchannel_id schid;
+
+	vcdev = to_vc_device(info->vq->vdev);
+	ccw_device_get_schid(vcdev->cdev, &schid);
+	info->cookie = do_kvm_notify(schid, vq->index, info->cookie);
+	if (info->cookie < 0)
+		return false;
+	return true;
+}
+
+static int virtio_ccw_read_vq_conf(struct virtio_ccw_device *vcdev,
+				   struct ccw1 *ccw, int index)
+{
+	int ret;
+
+	vcdev->config_block->index = index;
+	ccw->cmd_code = CCW_CMD_READ_VQ_CONF;
+	ccw->flags = 0;
+	ccw->count = sizeof(struct vq_config_block);
+	ccw->cda = (__u32)(unsigned long)(vcdev->config_block);
+	ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_READ_VQ_CONF);
+	if (ret)
+		return ret;
+	return vcdev->config_block->num;
+}
+
+static void virtio_ccw_del_vq(struct virtqueue *vq, struct ccw1 *ccw)
+{
+	struct virtio_ccw_device *vcdev = to_vc_device(vq->vdev);
+	struct virtio_ccw_vq_info *info = vq->priv;
+	unsigned long flags;
+	unsigned long size;
+	int ret;
+	unsigned int index = vq->index;
+
+	/* Remove from our list. */
+	spin_lock_irqsave(&vcdev->lock, flags);
+	list_del(&info->node);
+	spin_unlock_irqrestore(&vcdev->lock, flags);
+
+	/* Release from host. */
+	if (vcdev->revision == 0) {
+		info->info_block->l.queue = 0;
+		info->info_block->l.align = 0;
+		info->info_block->l.index = index;
+		info->info_block->l.num = 0;
+		ccw->count = sizeof(info->info_block->l);
+	} else {
+		info->info_block->s.desc = 0;
+		info->info_block->s.index = index;
+		info->info_block->s.num = 0;
+		info->info_block->s.avail = 0;
+		info->info_block->s.used = 0;
+		ccw->count = sizeof(info->info_block->s);
+	}
+	ccw->cmd_code = CCW_CMD_SET_VQ;
+	ccw->flags = 0;
+	ccw->cda = (__u32)(unsigned long)(info->info_block);
+	ret = ccw_io_helper(vcdev, ccw,
+			    VIRTIO_CCW_DOING_SET_VQ | index);
+	/*
+	 * -ENODEV isn't considered an error: The device is gone anyway.
+	 * This may happen on device detach.
+	 */
+	if (ret && (ret != -ENODEV))
+		dev_warn(&vq->vdev->dev, "Error %d while deleting queue %d\n",
+			 ret, index);
+
+	vring_del_virtqueue(vq);
+	size = PAGE_ALIGN(vring_size(info->num, KVM_VIRTIO_CCW_RING_ALIGN));
+	free_pages_exact(info->queue, size);
+	kfree(info->info_block);
+	kfree(info);
+}
+
+static void virtio_ccw_del_vqs(struct virtio_device *vdev)
+{
+	struct virtqueue *vq, *n;
+	struct ccw1 *ccw;
+	struct virtio_ccw_device *vcdev = to_vc_device(vdev);
+
+	ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL);
+	if (!ccw)
+		return;
+
+	virtio_ccw_drop_indicator(vcdev, ccw);
+
+	list_for_each_entry_safe(vq, n, &vdev->vqs, list)
+		virtio_ccw_del_vq(vq, ccw);
+
+	kfree(ccw);
+}
+
+static struct virtqueue *virtio_ccw_setup_vq(struct virtio_device *vdev,
+					     int i, vq_callback_t *callback,
+					     const char *name, bool ctx,
+					     struct ccw1 *ccw)
+{
+	struct virtio_ccw_device *vcdev = to_vc_device(vdev);
+	int err;
+	struct virtqueue *vq = NULL;
+	struct virtio_ccw_vq_info *info;
+	unsigned long size = 0; /* silence the compiler */
+	unsigned long flags;
+
+	/* Allocate queue. */
+	info = kzalloc(sizeof(struct virtio_ccw_vq_info), GFP_KERNEL);
+	if (!info) {
+		dev_warn(&vcdev->cdev->dev, "no info\n");
+		err = -ENOMEM;
+		goto out_err;
+	}
+	info->info_block = kzalloc(sizeof(*info->info_block),
+				   GFP_DMA | GFP_KERNEL);
+	if (!info->info_block) {
+		dev_warn(&vcdev->cdev->dev, "no info block\n");
+		err = -ENOMEM;
+		goto out_err;
+	}
+	info->num = virtio_ccw_read_vq_conf(vcdev, ccw, i);
+	if (info->num < 0) {
+		err = info->num;
+		goto out_err;
+	}
+	size = PAGE_ALIGN(vring_size(info->num, KVM_VIRTIO_CCW_RING_ALIGN));
+	info->queue = alloc_pages_exact(size, GFP_KERNEL | __GFP_ZERO);
+	if (info->queue == NULL) {
+		dev_warn(&vcdev->cdev->dev, "no queue\n");
+		err = -ENOMEM;
+		goto out_err;
+	}
+
+	vq = vring_new_virtqueue(i, info->num, KVM_VIRTIO_CCW_RING_ALIGN, vdev,
+				 true, ctx, info->queue, virtio_ccw_kvm_notify,
+				 callback, name);
+	if (!vq) {
+		/* For now, we fail if we can't get the requested size. */
+		dev_warn(&vcdev->cdev->dev, "no vq\n");
+		err = -ENOMEM;
+		goto out_err;
+	}
+
+	/* Register it with the host. */
+	if (vcdev->revision == 0) {
+		info->info_block->l.queue = (__u64)info->queue;
+		info->info_block->l.align = KVM_VIRTIO_CCW_RING_ALIGN;
+		info->info_block->l.index = i;
+		info->info_block->l.num = info->num;
+		ccw->count = sizeof(info->info_block->l);
+	} else {
+		info->info_block->s.desc = (__u64)info->queue;
+		info->info_block->s.index = i;
+		info->info_block->s.num = info->num;
+		info->info_block->s.avail = (__u64)virtqueue_get_avail(vq);
+		info->info_block->s.used = (__u64)virtqueue_get_used(vq);
+		ccw->count = sizeof(info->info_block->s);
+	}
+	ccw->cmd_code = CCW_CMD_SET_VQ;
+	ccw->flags = 0;
+	ccw->cda = (__u32)(unsigned long)(info->info_block);
+	err = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_SET_VQ | i);
+	if (err) {
+		dev_warn(&vcdev->cdev->dev, "SET_VQ failed\n");
+		goto out_err;
+	}
+
+	info->vq = vq;
+	vq->priv = info;
+
+	/* Save it to our list. */
+	spin_lock_irqsave(&vcdev->lock, flags);
+	list_add(&info->node, &vcdev->virtqueues);
+	spin_unlock_irqrestore(&vcdev->lock, flags);
+
+	return vq;
+
+out_err:
+	if (vq)
+		vring_del_virtqueue(vq);
+	if (info) {
+		if (info->queue)
+			free_pages_exact(info->queue, size);
+		kfree(info->info_block);
+	}
+	kfree(info);
+	return ERR_PTR(err);
+}
+
+static int virtio_ccw_register_adapter_ind(struct virtio_ccw_device *vcdev,
+					   struct virtqueue *vqs[], int nvqs,
+					   struct ccw1 *ccw)
+{
+	int ret;
+	struct virtio_thinint_area *thinint_area = NULL;
+	struct airq_info *info;
+
+	thinint_area = kzalloc(sizeof(*thinint_area), GFP_DMA | GFP_KERNEL);
+	if (!thinint_area) {
+		ret = -ENOMEM;
+		goto out;
+	}
+	/* Try to get an indicator. */
+	thinint_area->indicator = get_airq_indicator(vqs, nvqs,
+						     &thinint_area->bit_nr,
+						     &vcdev->airq_info);
+	if (!thinint_area->indicator) {
+		ret = -ENOSPC;
+		goto out;
+	}
+	info = vcdev->airq_info;
+	thinint_area->summary_indicator =
+		(unsigned long) &info->summary_indicator;
+	thinint_area->isc = VIRTIO_AIRQ_ISC;
+	ccw->cmd_code = CCW_CMD_SET_IND_ADAPTER;
+	ccw->flags = CCW_FLAG_SLI;
+	ccw->count = sizeof(*thinint_area);
+	ccw->cda = (__u32)(unsigned long)thinint_area;
+	ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_SET_IND_ADAPTER);
+	if (ret) {
+		if (ret == -EOPNOTSUPP) {
+			/*
+			 * The host does not support adapter interrupts
+			 * for virtio-ccw, stop trying.
+			 */
+			virtio_ccw_use_airq = 0;
+			pr_info("Adapter interrupts unsupported on host\n");
+		} else
+			dev_warn(&vcdev->cdev->dev,
+				 "enabling adapter interrupts = %d\n", ret);
+		virtio_ccw_drop_indicators(vcdev);
+	}
+out:
+	kfree(thinint_area);
+	return ret;
+}
+
+static int virtio_ccw_find_vqs(struct virtio_device *vdev, unsigned nvqs,
+			       struct virtqueue *vqs[],
+			       vq_callback_t *callbacks[],
+			       const char * const names[],
+			       const bool *ctx,
+			       struct irq_affinity *desc)
+{
+	struct virtio_ccw_device *vcdev = to_vc_device(vdev);
+	unsigned long *indicatorp = NULL;
+	int ret, i;
+	struct ccw1 *ccw;
+
+	ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL);
+	if (!ccw)
+		return -ENOMEM;
+
+	for (i = 0; i < nvqs; ++i) {
+		vqs[i] = virtio_ccw_setup_vq(vdev, i, callbacks[i], names[i],
+					     ctx ? ctx[i] : false, ccw);
+		if (IS_ERR(vqs[i])) {
+			ret = PTR_ERR(vqs[i]);
+			vqs[i] = NULL;
+			goto out;
+		}
+	}
+	ret = -ENOMEM;
+	/*
+	 * We need a data area under 2G to communicate. Our payload is
+	 * the address of the indicators.
+	 */
+	indicatorp = kmalloc(sizeof(vcdev->indicators), GFP_DMA | GFP_KERNEL);
+	if (!indicatorp)
+		goto out;
+	*indicatorp = (unsigned long) &vcdev->indicators;
+	if (vcdev->is_thinint) {
+		ret = virtio_ccw_register_adapter_ind(vcdev, vqs, nvqs, ccw);
+		if (ret)
+			/* no error, just fall back to legacy interrupts */
+			vcdev->is_thinint = false;
+	}
+	if (!vcdev->is_thinint) {
+		/* Register queue indicators with host. */
+		vcdev->indicators = 0;
+		ccw->cmd_code = CCW_CMD_SET_IND;
+		ccw->flags = 0;
+		ccw->count = sizeof(vcdev->indicators);
+		ccw->cda = (__u32)(unsigned long) indicatorp;
+		ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_SET_IND);
+		if (ret)
+			goto out;
+	}
+	/* Register indicators2 with host for config changes */
+	*indicatorp = (unsigned long) &vcdev->indicators2;
+	vcdev->indicators2 = 0;
+	ccw->cmd_code = CCW_CMD_SET_CONF_IND;
+	ccw->flags = 0;
+	ccw->count = sizeof(vcdev->indicators2);
+	ccw->cda = (__u32)(unsigned long) indicatorp;
+	ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_SET_CONF_IND);
+	if (ret)
+		goto out;
+
+	kfree(indicatorp);
+	kfree(ccw);
+	return 0;
+out:
+	kfree(indicatorp);
+	kfree(ccw);
+	virtio_ccw_del_vqs(vdev);
+	return ret;
+}
+
+static void virtio_ccw_reset(struct virtio_device *vdev)
+{
+	struct virtio_ccw_device *vcdev = to_vc_device(vdev);
+	struct ccw1 *ccw;
+
+	ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL);
+	if (!ccw)
+		return;
+
+	/* Zero status bits. */
+	*vcdev->status = 0;
+
+	/* Send a reset ccw on device. */
+	ccw->cmd_code = CCW_CMD_VDEV_RESET;
+	ccw->flags = 0;
+	ccw->count = 0;
+	ccw->cda = 0;
+	ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_RESET);
+	kfree(ccw);
+}
+
+static u64 virtio_ccw_get_features(struct virtio_device *vdev)
+{
+	struct virtio_ccw_device *vcdev = to_vc_device(vdev);
+	struct virtio_feature_desc *features;
+	int ret;
+	u64 rc;
+	struct ccw1 *ccw;
+
+	ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL);
+	if (!ccw)
+		return 0;
+
+	features = kzalloc(sizeof(*features), GFP_DMA | GFP_KERNEL);
+	if (!features) {
+		rc = 0;
+		goto out_free;
+	}
+	/* Read the feature bits from the host. */
+	features->index = 0;
+	ccw->cmd_code = CCW_CMD_READ_FEAT;
+	ccw->flags = 0;
+	ccw->count = sizeof(*features);
+	ccw->cda = (__u32)(unsigned long)features;
+	ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_READ_FEAT);
+	if (ret) {
+		rc = 0;
+		goto out_free;
+	}
+
+	rc = le32_to_cpu(features->features);
+
+	if (vcdev->revision == 0)
+		goto out_free;
+
+	/* Read second half of the feature bits from the host. */
+	features->index = 1;
+	ccw->cmd_code = CCW_CMD_READ_FEAT;
+	ccw->flags = 0;
+	ccw->count = sizeof(*features);
+	ccw->cda = (__u32)(unsigned long)features;
+	ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_READ_FEAT);
+	if (ret == 0)
+		rc |= (u64)le32_to_cpu(features->features) << 32;
+
+out_free:
+	kfree(features);
+	kfree(ccw);
+	return rc;
+}
+
+static int virtio_ccw_finalize_features(struct virtio_device *vdev)
+{
+	struct virtio_ccw_device *vcdev = to_vc_device(vdev);
+	struct virtio_feature_desc *features;
+	struct ccw1 *ccw;
+	int ret;
+
+	if (vcdev->revision >= 1 &&
+	    !__virtio_test_bit(vdev, VIRTIO_F_VERSION_1)) {
+		dev_err(&vdev->dev, "virtio: device uses revision 1 "
+			"but does not have VIRTIO_F_VERSION_1\n");
+		return -EINVAL;
+	}
+
+	ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL);
+	if (!ccw)
+		return -ENOMEM;
+
+	features = kzalloc(sizeof(*features), GFP_DMA | GFP_KERNEL);
+	if (!features) {
+		ret = -ENOMEM;
+		goto out_free;
+	}
+	/* Give virtio_ring a chance to accept features. */
+	vring_transport_features(vdev);
+
+	features->index = 0;
+	features->features = cpu_to_le32((u32)vdev->features);
+	/* Write the first half of the feature bits to the host. */
+	ccw->cmd_code = CCW_CMD_WRITE_FEAT;
+	ccw->flags = 0;
+	ccw->count = sizeof(*features);
+	ccw->cda = (__u32)(unsigned long)features;
+	ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_WRITE_FEAT);
+	if (ret)
+		goto out_free;
+
+	if (vcdev->revision == 0)
+		goto out_free;
+
+	features->index = 1;
+	features->features = cpu_to_le32(vdev->features >> 32);
+	/* Write the second half of the feature bits to the host. */
+	ccw->cmd_code = CCW_CMD_WRITE_FEAT;
+	ccw->flags = 0;
+	ccw->count = sizeof(*features);
+	ccw->cda = (__u32)(unsigned long)features;
+	ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_WRITE_FEAT);
+
+out_free:
+	kfree(features);
+	kfree(ccw);
+
+	return ret;
+}
+
+static void virtio_ccw_get_config(struct virtio_device *vdev,
+				  unsigned int offset, void *buf, unsigned len)
+{
+	struct virtio_ccw_device *vcdev = to_vc_device(vdev);
+	int ret;
+	struct ccw1 *ccw;
+	void *config_area;
+	unsigned long flags;
+
+	ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL);
+	if (!ccw)
+		return;
+
+	config_area = kzalloc(VIRTIO_CCW_CONFIG_SIZE, GFP_DMA | GFP_KERNEL);
+	if (!config_area)
+		goto out_free;
+
+	/* Read the config area from the host. */
+	ccw->cmd_code = CCW_CMD_READ_CONF;
+	ccw->flags = 0;
+	ccw->count = offset + len;
+	ccw->cda = (__u32)(unsigned long)config_area;
+	ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_READ_CONFIG);
+	if (ret)
+		goto out_free;
+
+	spin_lock_irqsave(&vcdev->lock, flags);
+	memcpy(vcdev->config, config_area, offset + len);
+	if (vcdev->config_ready < offset + len)
+		vcdev->config_ready = offset + len;
+	spin_unlock_irqrestore(&vcdev->lock, flags);
+	if (buf)
+		memcpy(buf, config_area + offset, len);
+
+out_free:
+	kfree(config_area);
+	kfree(ccw);
+}
+
+static void virtio_ccw_set_config(struct virtio_device *vdev,
+				  unsigned int offset, const void *buf,
+				  unsigned len)
+{
+	struct virtio_ccw_device *vcdev = to_vc_device(vdev);
+	struct ccw1 *ccw;
+	void *config_area;
+	unsigned long flags;
+
+	ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL);
+	if (!ccw)
+		return;
+
+	config_area = kzalloc(VIRTIO_CCW_CONFIG_SIZE, GFP_DMA | GFP_KERNEL);
+	if (!config_area)
+		goto out_free;
+
+	/* Make sure we don't overwrite fields. */
+	if (vcdev->config_ready < offset)
+		virtio_ccw_get_config(vdev, 0, NULL, offset);
+	spin_lock_irqsave(&vcdev->lock, flags);
+	memcpy(&vcdev->config[offset], buf, len);
+	/* Write the config area to the host. */
+	memcpy(config_area, vcdev->config, sizeof(vcdev->config));
+	spin_unlock_irqrestore(&vcdev->lock, flags);
+	ccw->cmd_code = CCW_CMD_WRITE_CONF;
+	ccw->flags = 0;
+	ccw->count = offset + len;
+	ccw->cda = (__u32)(unsigned long)config_area;
+	ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_WRITE_CONFIG);
+
+out_free:
+	kfree(config_area);
+	kfree(ccw);
+}
+
+static u8 virtio_ccw_get_status(struct virtio_device *vdev)
+{
+	struct virtio_ccw_device *vcdev = to_vc_device(vdev);
+	u8 old_status = *vcdev->status;
+	struct ccw1 *ccw;
+
+	if (vcdev->revision < 1)
+		return *vcdev->status;
+
+	ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL);
+	if (!ccw)
+		return old_status;
+
+	ccw->cmd_code = CCW_CMD_READ_STATUS;
+	ccw->flags = 0;
+	ccw->count = sizeof(*vcdev->status);
+	ccw->cda = (__u32)(unsigned long)vcdev->status;
+	ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_READ_STATUS);
+	/*
+	 * If the channel program failed (should only happen if the device
+	 * was hotunplugged, and then we clean up via the machine check
+	 * handler anyway), vcdev->status was not overwritten and we just
+	 * return the old status, which is fine.
+	 */
+	kfree(ccw);
+
+	return *vcdev->status;
+}
+
+static void virtio_ccw_set_status(struct virtio_device *vdev, u8 status)
+{
+	struct virtio_ccw_device *vcdev = to_vc_device(vdev);
+	u8 old_status = *vcdev->status;
+	struct ccw1 *ccw;
+	int ret;
+
+	ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL);
+	if (!ccw)
+		return;
+
+	/* Write the status to the host. */
+	*vcdev->status = status;
+	ccw->cmd_code = CCW_CMD_WRITE_STATUS;
+	ccw->flags = 0;
+	ccw->count = sizeof(status);
+	ccw->cda = (__u32)(unsigned long)vcdev->status;
+	ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_WRITE_STATUS);
+	/* Write failed? We assume status is unchanged. */
+	if (ret)
+		*vcdev->status = old_status;
+	kfree(ccw);
+}
+
+static const struct virtio_config_ops virtio_ccw_config_ops = {
+	.get_features = virtio_ccw_get_features,
+	.finalize_features = virtio_ccw_finalize_features,
+	.get = virtio_ccw_get_config,
+	.set = virtio_ccw_set_config,
+	.get_status = virtio_ccw_get_status,
+	.set_status = virtio_ccw_set_status,
+	.reset = virtio_ccw_reset,
+	.find_vqs = virtio_ccw_find_vqs,
+	.del_vqs = virtio_ccw_del_vqs,
+};
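
Taken together, the ops table maps the virtio config interface onto channel commands: get_features/finalize_features use CCW_CMD_READ_FEAT/WRITE_FEAT, get/set use CCW_CMD_READ_CONF/WRITE_CONF, set_status uses CCW_CMD_WRITE_STATUS (and get_status uses CCW_CMD_READ_STATUS on revision 1 and later), reset issues CCW_CMD_VDEV_RESET, and queue setup goes through CCW_CMD_READ_VQ_CONF and CCW_CMD_SET_VQ.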
+
+
+/*
+ * ccw bus driver related functions
+ */
+
+static void virtio_ccw_release_dev(struct device *_d)
+{
+	struct virtio_device *dev = dev_to_virtio(_d);
+	struct virtio_ccw_device *vcdev = to_vc_device(dev);
+
+	kfree(vcdev->status);
+	kfree(vcdev->config_block);
+	kfree(vcdev);
+}
+
+static int irb_is_error(struct irb *irb)
+{
+	if (scsw_cstat(&irb->scsw) != 0)
+		return 1;
+	if (scsw_dstat(&irb->scsw) & ~(DEV_STAT_CHN_END | DEV_STAT_DEV_END))
+		return 1;
+	if (scsw_cc(&irb->scsw) != 0)
+		return 1;
+	return 0;
+}
+
+static struct virtqueue *virtio_ccw_vq_by_ind(struct virtio_ccw_device *vcdev,
+					      int index)
+{
+	struct virtio_ccw_vq_info *info;
+	unsigned long flags;
+	struct virtqueue *vq;
+
+	vq = NULL;
+	spin_lock_irqsave(&vcdev->lock, flags);
+	list_for_each_entry(info, &vcdev->virtqueues, node) {
+		if (info->vq->index == index) {
+			vq = info->vq;
+			break;
+		}
+	}
+	spin_unlock_irqrestore(&vcdev->lock, flags);
+	return vq;
+}
+
+static void virtio_ccw_check_activity(struct virtio_ccw_device *vcdev,
+				      __u32 activity)
+{
+	if (vcdev->curr_io & activity) {
+		switch (activity) {
+		case VIRTIO_CCW_DOING_READ_FEAT:
+		case VIRTIO_CCW_DOING_WRITE_FEAT:
+		case VIRTIO_CCW_DOING_READ_CONFIG:
+		case VIRTIO_CCW_DOING_WRITE_CONFIG:
+		case VIRTIO_CCW_DOING_WRITE_STATUS:
+		case VIRTIO_CCW_DOING_READ_STATUS:
+		case VIRTIO_CCW_DOING_SET_VQ:
+		case VIRTIO_CCW_DOING_SET_IND:
+		case VIRTIO_CCW_DOING_SET_CONF_IND:
+		case VIRTIO_CCW_DOING_RESET:
+		case VIRTIO_CCW_DOING_READ_VQ_CONF:
+		case VIRTIO_CCW_DOING_SET_IND_ADAPTER:
+		case VIRTIO_CCW_DOING_SET_VIRTIO_REV:
+			vcdev->curr_io &= ~activity;
+			wake_up(&vcdev->wait_q);
+			break;
+		default:
+			/* don't know what to do... */
+			dev_warn(&vcdev->cdev->dev,
+				 "Suspicious activity '%08x'\n", activity);
+			WARN_ON(1);
+			break;
+		}
+	}
+}
+
+static void virtio_ccw_int_handler(struct ccw_device *cdev,
+				   unsigned long intparm,
+				   struct irb *irb)
+{
+	__u32 activity = intparm & VIRTIO_CCW_INTPARM_MASK;
+	struct virtio_ccw_device *vcdev = dev_get_drvdata(&cdev->dev);
+	int i;
+	struct virtqueue *vq;
+
+	if (!vcdev)
+		return;
+	if (IS_ERR(irb)) {
+		vcdev->err = PTR_ERR(irb);
+		virtio_ccw_check_activity(vcdev, activity);
+		/* Don't poke around indicators, something's wrong. */
+		return;
+	}
+	/* Check if it's a notification from the host. */
+	if ((intparm == 0) &&
+	    (scsw_stctl(&irb->scsw) ==
+	     (SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND))) {
+		/* OK: unsolicited host notification; the indicator
+		 * bits are evaluated below, nothing else to do here. */
+	}
+	if (irb_is_error(irb)) {
+		/* Command reject? */
+		if ((scsw_dstat(&irb->scsw) & DEV_STAT_UNIT_CHECK) &&
+		    (irb->ecw[0] & SNS0_CMD_REJECT))
+			vcdev->err = -EOPNOTSUPP;
+		else
+			/* Map everything else to -EIO. */
+			vcdev->err = -EIO;
+	}
+	virtio_ccw_check_activity(vcdev, activity);
+	for_each_set_bit(i, &vcdev->indicators,
+			 sizeof(vcdev->indicators) * BITS_PER_BYTE) {
+		/* The bit clear must happen before the vring kick. */
+		clear_bit(i, &vcdev->indicators);
+		barrier();
+		vq = virtio_ccw_vq_by_ind(vcdev, i);
+		vring_interrupt(0, vq);
+	}
+	if (test_bit(0, &vcdev->indicators2)) {
+		virtio_config_changed(&vcdev->vdev);
+		clear_bit(0, &vcdev->indicators2);
+	}
+}
+
+/*
+ * We usually want to auto-online all devices, but give the admin
+ * a way to exempt devices from this.
+ */
+#define __DEV_WORDS ((__MAX_SUBCHANNEL + (8*sizeof(long) - 1)) / \
+		     (8*sizeof(long)))
+static unsigned long devs_no_auto[__MAX_SSID + 1][__DEV_WORDS];
+
+static char *no_auto = "";
+
+module_param(no_auto, charp, 0444);
+MODULE_PARM_DESC(no_auto, "list of ccw bus id ranges not to be auto-onlined");
+
+static int virtio_ccw_check_autoonline(struct ccw_device *cdev)
+{
+	struct ccw_dev_id id;
+
+	ccw_device_get_id(cdev, &id);
+	if (test_bit(id.devno, devs_no_auto[id.ssid]))
+		return 0;
+	return 1;
+}
+
+static void virtio_ccw_auto_online(void *data, async_cookie_t cookie)
+{
+	struct ccw_device *cdev = data;
+	int ret;
+
+	ret = ccw_device_set_online(cdev);
+	if (ret)
+		dev_warn(&cdev->dev, "Failed to set online: %d\n", ret);
+}
+
+static int virtio_ccw_probe(struct ccw_device *cdev)
+{
+	cdev->handler = virtio_ccw_int_handler;
+
+	if (virtio_ccw_check_autoonline(cdev))
+		async_schedule(virtio_ccw_auto_online, cdev);
+	return 0;
+}
+
+static struct virtio_ccw_device *virtio_grab_drvdata(struct ccw_device *cdev)
+{
+	unsigned long flags;
+	struct virtio_ccw_device *vcdev;
+
+	spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
+	vcdev = dev_get_drvdata(&cdev->dev);
+	if (!vcdev || vcdev->going_away) {
+		spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
+		return NULL;
+	}
+	vcdev->going_away = true;
+	spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
+	return vcdev;
+}
+
+static void virtio_ccw_remove(struct ccw_device *cdev)
+{
+	unsigned long flags;
+	struct virtio_ccw_device *vcdev = virtio_grab_drvdata(cdev);
+
+	if (vcdev && cdev->online) {
+		if (vcdev->device_lost)
+			virtio_break_device(&vcdev->vdev);
+		unregister_virtio_device(&vcdev->vdev);
+		spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
+		dev_set_drvdata(&cdev->dev, NULL);
+		spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
+	}
+	cdev->handler = NULL;
+}
+
+static int virtio_ccw_offline(struct ccw_device *cdev)
+{
+	unsigned long flags;
+	struct virtio_ccw_device *vcdev = virtio_grab_drvdata(cdev);
+
+	if (!vcdev)
+		return 0;
+	if (vcdev->device_lost)
+		virtio_break_device(&vcdev->vdev);
+	unregister_virtio_device(&vcdev->vdev);
+	spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
+	dev_set_drvdata(&cdev->dev, NULL);
+	spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
+	return 0;
+}
+
+static int virtio_ccw_set_transport_rev(struct virtio_ccw_device *vcdev)
+{
+	struct virtio_rev_info *rev;
+	struct ccw1 *ccw;
+	int ret;
+
+	ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL);
+	if (!ccw)
+		return -ENOMEM;
+	rev = kzalloc(sizeof(*rev), GFP_DMA | GFP_KERNEL);
+	if (!rev) {
+		kfree(ccw);
+		return -ENOMEM;
+	}
+
+	/* Set transport revision */
+	ccw->cmd_code = CCW_CMD_SET_VIRTIO_REV;
+	ccw->flags = 0;
+	ccw->count = sizeof(*rev);
+	ccw->cda = (__u32)(unsigned long)rev;
+
+	vcdev->revision = VIRTIO_CCW_REV_MAX;
+	do {
+		rev->revision = vcdev->revision;
+		/* none of our supported revisions carry payload */
+		rev->length = 0;
+		ret = ccw_io_helper(vcdev, ccw,
+				    VIRTIO_CCW_DOING_SET_VIRTIO_REV);
+		if (ret == -EOPNOTSUPP) {
+			if (vcdev->revision == 0)
+				/*
+				 * The host device does not support setting
+				 * the revision: let's operate it in legacy
+				 * mode.
+				 */
+				ret = 0;
+			else
+				vcdev->revision--;
+		}
+	} while (ret == -EOPNOTSUPP);
+
+	kfree(ccw);
+	kfree(rev);
+	return ret;
+}
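
For illustration, against a host that rejects SET_VIRTIO_REV entirely: the first attempt with revision 1 fails with -EOPNOTSUPP, the loop retries with revision 0, and when that is also rejected ret is forced to 0, so the function succeeds with vcdev->revision == 0 and the device is driven through the legacy vq_info_block_legacy paths above.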
+
+static int virtio_ccw_online(struct ccw_device *cdev)
+{
+	int ret;
+	struct virtio_ccw_device *vcdev;
+	unsigned long flags;
+
+	vcdev = kzalloc(sizeof(*vcdev), GFP_KERNEL);
+	if (!vcdev) {
+		dev_warn(&cdev->dev, "Could not get memory for virtio\n");
+		ret = -ENOMEM;
+		goto out_free;
+	}
+	vcdev->config_block = kzalloc(sizeof(*vcdev->config_block),
+				   GFP_DMA | GFP_KERNEL);
+	if (!vcdev->config_block) {
+		ret = -ENOMEM;
+		goto out_free;
+	}
+	vcdev->status = kzalloc(sizeof(*vcdev->status), GFP_DMA | GFP_KERNEL);
+	if (!vcdev->status) {
+		ret = -ENOMEM;
+		goto out_free;
+	}
+
+	vcdev->is_thinint = virtio_ccw_use_airq; /* at least try */
+
+	vcdev->vdev.dev.parent = &cdev->dev;
+	vcdev->vdev.dev.release = virtio_ccw_release_dev;
+	vcdev->vdev.config = &virtio_ccw_config_ops;
+	vcdev->cdev = cdev;
+	init_waitqueue_head(&vcdev->wait_q);
+	INIT_LIST_HEAD(&vcdev->virtqueues);
+	spin_lock_init(&vcdev->lock);
+	mutex_init(&vcdev->io_lock);
+
+	spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
+	dev_set_drvdata(&cdev->dev, vcdev);
+	spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
+	vcdev->vdev.id.vendor = cdev->id.cu_type;
+	vcdev->vdev.id.device = cdev->id.cu_model;
+
+	ret = virtio_ccw_set_transport_rev(vcdev);
+	if (ret)
+		goto out_free;
+
+	ret = register_virtio_device(&vcdev->vdev);
+	if (ret) {
+		dev_warn(&cdev->dev, "Failed to register virtio device: %d\n",
+			 ret);
+		goto out_put;
+	}
+	return 0;
+out_put:
+	spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
+	dev_set_drvdata(&cdev->dev, NULL);
+	spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
+	put_device(&vcdev->vdev.dev);
+	return ret;
+out_free:
+	if (vcdev) {
+		kfree(vcdev->status);
+		kfree(vcdev->config_block);
+	}
+	kfree(vcdev);
+	return ret;
+}
+
+static int virtio_ccw_cio_notify(struct ccw_device *cdev, int event)
+{
+	int rc;
+	struct virtio_ccw_device *vcdev = dev_get_drvdata(&cdev->dev);
+
+	/*
+	 * Make sure vcdev is set, i.e. the set_offline/remove callback
+	 * is not already running.
+	 */
+	if (!vcdev)
+		return NOTIFY_DONE;
+
+	switch (event) {
+	case CIO_GONE:
+		vcdev->device_lost = true;
+		rc = NOTIFY_DONE;
+		break;
+	case CIO_OPER:
+		rc = NOTIFY_OK;
+		break;
+	default:
+		rc = NOTIFY_DONE;
+		break;
+	}
+	return rc;
+}
+
+static struct ccw_device_id virtio_ids[] = {
+	{ CCW_DEVICE(0x3832, 0) },
+	{},
+};
+
+#ifdef CONFIG_PM_SLEEP
+static int virtio_ccw_freeze(struct ccw_device *cdev)
+{
+	struct virtio_ccw_device *vcdev = dev_get_drvdata(&cdev->dev);
+
+	return virtio_device_freeze(&vcdev->vdev);
+}
+
+static int virtio_ccw_restore(struct ccw_device *cdev)
+{
+	struct virtio_ccw_device *vcdev = dev_get_drvdata(&cdev->dev);
+	int ret;
+
+	ret = virtio_ccw_set_transport_rev(vcdev);
+	if (ret)
+		return ret;
+
+	return virtio_device_restore(&vcdev->vdev);
+}
+#endif
+
+static struct ccw_driver virtio_ccw_driver = {
+	.driver = {
+		.owner = THIS_MODULE,
+		.name = "virtio_ccw",
+	},
+	.ids = virtio_ids,
+	.probe = virtio_ccw_probe,
+	.remove = virtio_ccw_remove,
+	.set_offline = virtio_ccw_offline,
+	.set_online = virtio_ccw_online,
+	.notify = virtio_ccw_cio_notify,
+	.int_class = IRQIO_VIR,
+#ifdef CONFIG_PM_SLEEP
+	.freeze = virtio_ccw_freeze,
+	.thaw = virtio_ccw_restore,
+	.restore = virtio_ccw_restore,
+#endif
+};
+
+static int __init pure_hex(char **cp, unsigned int *val, int min_digit,
+			   int max_digit, int max_val)
+{
+	int diff;
+
+	diff = 0;
+	*val = 0;
+
+	while (diff <= max_digit) {
+		int value = hex_to_bin(**cp);
+
+		if (value < 0)
+			break;
+		*val = *val * 16 + value;
+		(*cp)++;
+		diff++;
+	}
+
+	if ((diff < min_digit) || (diff > max_digit) || (*val > max_val))
+		return 1;
+
+	return 0;
+}
+
+static int __init parse_busid(char *str, unsigned int *cssid,
+			      unsigned int *ssid, unsigned int *devno)
+{
+	char *str_work;
+	int rc, ret;
+
+	rc = 1;
+
+	if (*str == '\0')
+		goto out;
+
+	str_work = str;
+	ret = pure_hex(&str_work, cssid, 1, 2, __MAX_CSSID);
+	if (ret || (str_work[0] != '.'))
+		goto out;
+	str_work++;
+	ret = pure_hex(&str_work, ssid, 1, 1, __MAX_SSID);
+	if (ret || (str_work[0] != '.'))
+		goto out;
+	str_work++;
+	ret = pure_hex(&str_work, devno, 4, 4, __MAX_SUBCHANNEL);
+	if (ret || (str_work[0] != '\0'))
+		goto out;
+
+	rc = 0;
+out:
+	return rc;
+}
+
+static void __init no_auto_parse(void)
+{
+	unsigned int from_cssid, to_cssid, from_ssid, to_ssid, from, to;
+	char *parm, *str;
+	int rc;
+
+	str = no_auto;
+	while ((parm = strsep(&str, ","))) {
+		rc = parse_busid(strsep(&parm, "-"), &from_cssid,
+				 &from_ssid, &from);
+		if (rc)
+			continue;
+		if (parm != NULL) {
+			rc = parse_busid(parm, &to_cssid,
+					 &to_ssid, &to);
+			if ((from_ssid > to_ssid) ||
+			    ((from_ssid == to_ssid) && (from > to)))
+				rc = -EINVAL;
+		} else {
+			to_cssid = from_cssid;
+			to_ssid = from_ssid;
+			to = from;
+		}
+		if (rc)
+			continue;
+		while ((from_ssid < to_ssid) ||
+		       ((from_ssid == to_ssid) && (from <= to))) {
+			set_bit(from, devs_no_auto[from_ssid]);
+			from++;
+			if (from > __MAX_SUBCHANNEL) {
+				from_ssid++;
+				from = 0;
+			}
+		}
+	}
+}
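
For illustration, the parser above accepts a comma-separated list of bus ids or bus id ranges in cssid.ssid.devno notation (hex digits; the devno must be exactly four digits per pure_hex() above). With placeholder ids:

	virtio_ccw.no_auto=0.0.0042,0.1.0100-0.1.01ff	(kernel command line, driver built in)
	modprobe virtio_ccw no_auto=0.0.0042		(driver built as a module)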
+
+static int __init virtio_ccw_init(void)
+{
+	/* parse no_auto string before we do anything further */
+	no_auto_parse();
+	return ccw_driver_register(&virtio_ccw_driver);
+}
+device_initcall(virtio_ccw_init);