Update Linux to v5.10.109

Sourced from [1]

[1] https://cdn.kernel.org/pub/linux/kernel/v5.x/linux-5.10.109.tar.xz

Change-Id: I19bca9fc6762d4e63bcf3e4cba88bbe560d9c76c
Signed-off-by: Olivier Deprez <olivier.deprez@arm.com>
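---
The s390 driver changes in this range include the blk-mq completion and
submit_bio conversions for the DASD, dcssblk and xpram block drivers,
removal of the unused ccwgroup power-management callbacks, a new
sclp_ap.c providing crypto (AP) adapter (de)configuration calls, and the
rework of chsc_pnso_brinfo()/chsc_get_cssid() into chsc_pnso() and
chsc_get_cssid_iid() with matching ccw_device wrappers.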
diff --git a/drivers/s390/block/Kconfig b/drivers/s390/block/Kconfig
index a8682f6..376f1ef 100644
--- a/drivers/s390/block/Kconfig
+++ b/drivers/s390/block/Kconfig
@@ -26,7 +26,6 @@
 	def_tristate y
 	prompt "Support for DASD devices"
 	depends on CCW && BLOCK
-	select IOSCHED_DEADLINE
 	help
 	  Enable this option if you want to access DASDs directly utilizing
 	  S/390s channel subsystem commands. This is necessary for running
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index b577c8f..2adfab5 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -2802,7 +2802,7 @@
 			blk_update_request(req, BLK_STS_OK,
 					   blk_rq_bytes(req) - proc_bytes);
 			blk_mq_requeue_request(req, true);
-		} else {
+		} else if (likely(!blk_should_fake_timeout(req->q))) {
 			blk_mq_complete_request(req);
 		}
 	}
diff --git a/drivers/s390/block/dasd_3990_erp.c b/drivers/s390/block/dasd_3990_erp.c
index ee73b06..4691a3c 100644
--- a/drivers/s390/block/dasd_3990_erp.c
+++ b/drivers/s390/block/dasd_3990_erp.c
@@ -1987,7 +1987,7 @@
  * DASD_3990_ERP_COMPOUND_CONFIG
  *
  * DESCRIPTION
- *   Handles the compound ERP action for configruation
+ *   Handles the compound ERP action for configuration
  *   dependent error.
  *   Note: duplex handling is not implemented (yet).
  *
diff --git a/drivers/s390/block/dasd_diag.c b/drivers/s390/block/dasd_diag.c
index f7ae03f..1b9e144 100644
--- a/drivers/s390/block/dasd_diag.c
+++ b/drivers/s390/block/dasd_diag.c
@@ -58,7 +58,7 @@
 
 struct dasd_diag_req {
 	unsigned int block_count;
-	struct dasd_diag_bio bio[0];
+	struct dasd_diag_bio bio[];
 };
 
 static const u8 DASD_DIAG_CMS1[] = { 0xc3, 0xd4, 0xe2, 0xf1 };/* EBCDIC CMS1 */
@@ -515,7 +515,7 @@
 	struct req_iterator iter;
 	struct bio_vec bv;
 	char *dst;
-	unsigned int count, datasize;
+	unsigned int count;
 	sector_t recid, first_rec, last_rec;
 	unsigned int blksize, off;
 	unsigned char rw_cmd;
@@ -543,10 +543,8 @@
 	if (count != last_rec - first_rec + 1)
 		return ERR_PTR(-EINVAL);
 	/* Build the request */
-	datasize = sizeof(struct dasd_diag_req) +
-		count*sizeof(struct dasd_diag_bio);
-	cqr = dasd_smalloc_request(DASD_DIAG_MAGIC, 0, datasize, memdev,
-				   blk_mq_rq_to_pdu(req));
+	cqr = dasd_smalloc_request(DASD_DIAG_MAGIC, 0, struct_size(dreq, bio, count),
+				   memdev, blk_mq_rq_to_pdu(req));
 	if (IS_ERR(cqr))
 		return cqr;
 
diff --git a/drivers/s390/block/dasd_eckd.h b/drivers/s390/block/dasd_eckd.h
index 6943508..ca24a78 100644
--- a/drivers/s390/block/dasd_eckd.h
+++ b/drivers/s390/block/dasd_eckd.h
@@ -220,7 +220,7 @@
 	__u8 imbedded_count;
 	__u8 extended_operation;
 	__u16 extended_parameter_length;
-	__u8 extended_parameter[0];
+	__u8 extended_parameter[];
 } __attribute__ ((packed));
 
 /* Prefix data for format 0x00 and 0x01 */
diff --git a/drivers/s390/block/dasd_fba.h b/drivers/s390/block/dasd_fba.h
index 8f75df0..45ddabe 100644
--- a/drivers/s390/block/dasd_fba.h
+++ b/drivers/s390/block/dasd_fba.h
@@ -2,7 +2,7 @@
 /*
  * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
  * Bugreports.to..: <Linux390@de.ibm.com>
- * Coypright IBM Corp. 1999, 2000
+ * Copyright IBM Corp. 1999, 2000
  *
  */
 
diff --git a/drivers/s390/block/dasd_genhd.c b/drivers/s390/block/dasd_genhd.c
index 5542d9e..a9698fb 100644
--- a/drivers/s390/block/dasd_genhd.c
+++ b/drivers/s390/block/dasd_genhd.c
@@ -101,22 +101,17 @@
 	struct block_device *bdev;
 	int rc;
 
-	bdev = bdget_disk(block->gdp, 0);
-	if (!bdev) {
-		DBF_DEV_EVENT(DBF_ERR, block->base, "%s",
-			      "scan partitions error, bdget returned NULL");
-		return -ENODEV;
-	}
-
-	rc = blkdev_get(bdev, FMODE_READ, NULL);
-	if (rc < 0) {
+	bdev = blkdev_get_by_dev(disk_devt(block->gdp), FMODE_READ, NULL);
+	if (IS_ERR(bdev)) {
 		DBF_DEV_EVENT(DBF_ERR, block->base,
-			      "scan partitions error, blkdev_get returned %d",
-			      rc);
+			      "scan partitions error, blkdev_get returned %ld",
+			      PTR_ERR(bdev));
 		return -ENODEV;
 	}
 
-	rc = blkdev_reread_part(bdev);
+	mutex_lock(&bdev->bd_mutex);
+	rc = bdev_disk_changed(bdev, false);
+	mutex_unlock(&bdev->bd_mutex);
 	if (rc)
 		DBF_DEV_EVENT(DBF_ERR, block->base,
 				"scan partitions error, rc %d", rc);
@@ -141,9 +136,6 @@
  */
 void dasd_destroy_partitions(struct dasd_block *block)
 {
-	/* The two structs have 168/176 byte on 31/64 bit. */
-	struct blkpg_partition bpart;
-	struct blkpg_ioctl_arg barg;
 	struct block_device *bdev;
 
 	/*
@@ -153,19 +145,10 @@
 	bdev = block->bdev;
 	block->bdev = NULL;
 
-	/*
-	 * See fs/partition/check.c:delete_partition
-	 * Can't call delete_partitions directly. Use ioctl.
-	 * The ioctl also does locking and invalidation.
-	 */
-	memset(&bpart, 0, sizeof(struct blkpg_partition));
-	memset(&barg, 0, sizeof(struct blkpg_ioctl_arg));
-	barg.data = (void __force __user *) &bpart;
-	barg.op = BLKPG_DEL_PARTITION;
-	for (bpart.pno = block->gdp->minors - 1; bpart.pno > 0; bpart.pno--)
-		ioctl_by_bdev(bdev, BLKPG, (unsigned long) &barg);
+	mutex_lock(&bdev->bd_mutex);
+	blk_drop_partitions(bdev);
+	mutex_unlock(&bdev->bd_mutex);
 
-	invalidate_partition(block->gdp, 0);
 	/* Matching blkdev_put to the blkdev_get in dasd_scan_partitions. */
 	blkdev_put(bdev, FMODE_READ);
 	set_capacity(block->gdp, 0);
diff --git a/drivers/s390/block/dasd_ioctl.c b/drivers/s390/block/dasd_ioctl.c
index 9a5f3ad..cb6427f 100644
--- a/drivers/s390/block/dasd_ioctl.c
+++ b/drivers/s390/block/dasd_ioctl.c
@@ -22,6 +22,7 @@
 #include <asm/schid.h>
 #include <asm/cmb.h>
 #include <linux/uaccess.h>
+#include <linux/dasd_mod.h>
 
 /* This is ugly... */
 #define PRINTK_HEADER "dasd_ioctl:"
@@ -54,10 +55,7 @@
 
 	dasd_enable_device(base);
 	/* Formatting the dasd device can change the capacity. */
-	mutex_lock(&bdev->bd_mutex);
-	i_size_write(bdev->bd_inode,
-		     (loff_t)get_capacity(base->block->gdp) << 9);
-	mutex_unlock(&bdev->bd_mutex);
+	bd_set_nr_sectors(bdev, get_capacity(base->block->gdp));
 	dasd_put_device(base);
 	return 0;
 }
@@ -90,9 +88,7 @@
 	 * Set i_size to zero, since read, write, etc. check against this
 	 * value.
 	 */
-	mutex_lock(&bdev->bd_mutex);
-	i_size_write(bdev->bd_inode, 0);
-	mutex_unlock(&bdev->bd_mutex);
+	bd_set_nr_sectors(bdev, 0);
 	dasd_put_device(base);
 	return 0;
 }
@@ -281,7 +277,7 @@
 		dasd_put_device(base);
 		return -EFAULT;
 	}
-	if (bdev != bdev->bd_contains) {
+	if (bdev_is_partition(bdev)) {
 		pr_warn("%s: The specified DASD is a partition and cannot be formatted\n",
 			dev_name(&base->cdev->dev));
 		dasd_put_device(base);
@@ -308,7 +304,7 @@
 	base = dasd_device_from_gendisk(bdev->bd_disk);
 	if (!base)
 		return -ENODEV;
-	if (bdev != bdev->bd_contains) {
+	if (bdev_is_partition(bdev)) {
 		pr_warn("%s: The specified DASD is a partition and cannot be checked\n",
 			dev_name(&base->cdev->dev));
 		rc = -EINVAL;
@@ -366,7 +362,7 @@
 		rc = -EROFS;
 		goto out_err;
 	}
-	if (bdev != bdev->bd_contains) {
+	if (bdev_is_partition(bdev)) {
 		pr_warn("%s: The specified DASD is a partition and tracks cannot be released\n",
 			dev_name(&base->cdev->dev));
 		rc = -EINVAL;
@@ -457,10 +453,9 @@
 /*
  * Return dasd information. Used for BIODASDINFO and BIODASDINFO2.
  */
-static int dasd_ioctl_information(struct dasd_block *block,
-				  unsigned int cmd, void __user *argp)
+static int __dasd_ioctl_information(struct dasd_block *block,
+		struct dasd_information2_t *dasd_info)
 {
-	struct dasd_information2_t *dasd_info;
 	struct subchannel_id sch_id;
 	struct ccw_dev_id dev_id;
 	struct dasd_device *base;
@@ -473,15 +468,9 @@
 	if (!base->discipline || !base->discipline->fill_info)
 		return -EINVAL;
 
-	dasd_info = kzalloc(sizeof(struct dasd_information2_t), GFP_KERNEL);
-	if (dasd_info == NULL)
-		return -ENOMEM;
-
 	rc = base->discipline->fill_info(base, dasd_info);
-	if (rc) {
-		kfree(dasd_info);
+	if (rc)
 		return rc;
-	}
 
 	cdev = base->cdev;
 	ccw_device_get_id(cdev, &dev_id);
@@ -520,15 +509,24 @@
 	list_for_each(l, &base->ccw_queue)
 		dasd_info->chanq_len++;
 	spin_unlock_irqrestore(&block->queue_lock, flags);
+	return 0;
+}
 
-	rc = 0;
-	if (copy_to_user(argp, dasd_info,
-			 ((cmd == (unsigned int) BIODASDINFO2) ?
-			  sizeof(struct dasd_information2_t) :
-			  sizeof(struct dasd_information_t))))
-		rc = -EFAULT;
+static int dasd_ioctl_information(struct dasd_block *block, void __user *argp,
+		size_t copy_size)
+{
+	struct dasd_information2_t *dasd_info;
+	int error;
+
+	dasd_info = kzalloc(sizeof(*dasd_info), GFP_KERNEL);
+	if (!dasd_info)
+		return -ENOMEM;
+
+	error = __dasd_ioctl_information(block, dasd_info);
+	if (!error && copy_to_user(argp, dasd_info, copy_size))
+		error = -EFAULT;
 	kfree(dasd_info);
-	return rc;
+	return error;
 }
 
 /*
@@ -542,7 +540,7 @@
 
 	if (!capable(CAP_SYS_ADMIN))
 		return -EACCES;
-	if (bdev != bdev->bd_contains)
+	if (bdev_is_partition(bdev))
 		// ro setting is not allowed for partitions
 		return -EINVAL;
 	if (get_user(intval, (int __user *)argp))
@@ -622,10 +620,12 @@
 		rc = dasd_ioctl_check_format(bdev, argp);
 		break;
 	case BIODASDINFO:
-		rc = dasd_ioctl_information(block, cmd, argp);
+		rc = dasd_ioctl_information(block, argp,
+				sizeof(struct dasd_information_t));
 		break;
 	case BIODASDINFO2:
-		rc = dasd_ioctl_information(block, cmd, argp);
+		rc = dasd_ioctl_information(block, argp,
+				sizeof(struct dasd_information2_t));
 		break;
 	case BIODASDPRRD:
 		rc = dasd_ioctl_read_profile(block, argp);
@@ -660,3 +660,36 @@
 	dasd_put_device(base);
 	return rc;
 }
+
+
+/**
+ * dasd_biodasdinfo() - fill out the dasd information structure
+ * @disk [in]: pointer to gendisk structure that references a DASD
+ * @info [out]: pointer to the dasd_information2_t structure
+ *
+ * Provide access to DASD specific information.
+ * The gendisk structure is checked if it belongs to the DASD driver by
+ * comparing the gendisk->fops pointer.
+ * If it does not belong to the DASD driver -EINVAL is returned.
+ * Otherwise the provided dasd_information2_t structure is filled out.
+ *
+ * Returns:
+ *   %0 on success and a negative error value on failure.
+ */
+int dasd_biodasdinfo(struct gendisk *disk, struct dasd_information2_t *info)
+{
+	struct dasd_device *base;
+	int error;
+
+	if (disk->fops != &dasd_device_operations)
+		return -EINVAL;
+
+	base = dasd_device_from_gendisk(disk);
+	if (!base)
+		return -ENODEV;
+	error = __dasd_ioctl_information(base->block, info);
+	dasd_put_device(base);
+	return error;
+}
+/* export that symbol_get in partition detection is possible */
+EXPORT_SYMBOL_GPL(dasd_biodasdinfo);
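
The comment above names the intended caller: partition-detection code
that must not link against the DASD module directly and instead looks
the symbol up at runtime. A minimal sketch of that symbol_get() pattern,
assuming the new <linux/dasd_mod.h> header is in scope (the helper name
disk_is_dasd is illustrative, not from this patch):

	#include <linux/module.h>	/* symbol_get()/symbol_put() */
	#include <linux/genhd.h>	/* struct gendisk */
	#include <linux/dasd_mod.h>	/* dasd_biodasdinfo() prototype */

	static bool disk_is_dasd(struct gendisk *disk,
				 struct dasd_information2_t *info)
	{
		int (*fn)(struct gendisk *disk, struct dasd_information2_t *info);
		bool ret;

		fn = symbol_get(dasd_biodasdinfo);	/* NULL if dasd_mod is absent */
		if (!fn)
			return false;
		ret = !fn(disk, info);	/* 0 on success, per the kernel-doc above */
		symbol_put(dasd_biodasdinfo);
		return ret;
	}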
diff --git a/drivers/s390/block/dasd_proc.c b/drivers/s390/block/dasd_proc.c
index 1770b99..62a859e 100644
--- a/drivers/s390/block/dasd_proc.c
+++ b/drivers/s390/block/dasd_proc.c
@@ -5,7 +5,7 @@
  *		    Carsten Otte <Cotte@de.ibm.com>
  *		    Martin Schwidefsky <schwidefsky@de.ibm.com>
  * Bugreports.to..: <Linux390@de.ibm.com>
- * Coypright IBM Corp. 1999, 2002
+ * Copyright IBM Corp. 1999, 2002
  *
  * /proc interface for the dasd driver.
  *
@@ -320,13 +320,12 @@
 #endif				/* CONFIG_DASD_PROFILE */
 }
 
-static const struct file_operations dasd_stats_proc_fops = {
-	.owner		= THIS_MODULE,
-	.open		= dasd_stats_proc_open,
-	.read		= seq_read,
-	.llseek		= seq_lseek,
-	.release	= single_release,
-	.write		= dasd_stats_proc_write,
+static const struct proc_ops dasd_stats_proc_ops = {
+	.proc_open	= dasd_stats_proc_open,
+	.proc_read	= seq_read,
+	.proc_lseek	= seq_lseek,
+	.proc_release	= single_release,
+	.proc_write	= dasd_stats_proc_write,
 };
 
 /*
@@ -347,7 +346,7 @@
 	dasd_statistics_entry = proc_create("statistics",
 					    S_IFREG | S_IRUGO | S_IWUSR,
 					    dasd_proc_root_entry,
-					    &dasd_stats_proc_fops);
+					    &dasd_stats_proc_ops);
 	if (!dasd_statistics_entry)
 		goto out_nostatistics;
 	return 0;
diff --git a/drivers/s390/block/dcssblk.c b/drivers/s390/block/dcssblk.c
index 63502ca..299e77e 100644
--- a/drivers/s390/block/dcssblk.c
+++ b/drivers/s390/block/dcssblk.c
@@ -31,8 +31,7 @@
 
 static int dcssblk_open(struct block_device *bdev, fmode_t mode);
 static void dcssblk_release(struct gendisk *disk, fmode_t mode);
-static blk_qc_t dcssblk_make_request(struct request_queue *q,
-						struct bio *bio);
+static blk_qc_t dcssblk_submit_bio(struct bio *bio);
 static long dcssblk_dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff,
 		long nr_pages, void **kaddr, pfn_t *pfn);
 
@@ -41,6 +40,7 @@
 static int dcssblk_major;
 static const struct block_device_operations dcssblk_devops = {
 	.owner   	= THIS_MODULE,
+	.submit_bio	= dcssblk_submit_bio,
 	.open    	= dcssblk_open,
 	.release 	= dcssblk_release,
 };
@@ -57,11 +57,26 @@
 	return copy_to_iter(addr, bytes, i);
 }
 
+static int dcssblk_dax_zero_page_range(struct dax_device *dax_dev,
+				       pgoff_t pgoff, size_t nr_pages)
+{
+	long rc;
+	void *kaddr;
+
+	rc = dax_direct_access(dax_dev, pgoff, nr_pages, &kaddr, NULL);
+	if (rc < 0)
+		return rc;
+	memset(kaddr, 0, nr_pages << PAGE_SHIFT);
+	dax_flush(dax_dev, kaddr, nr_pages << PAGE_SHIFT);
+	return 0;
+}
+
 static const struct dax_operations dcssblk_dax_ops = {
 	.direct_access = dcssblk_dax_direct_access,
 	.dax_supported = generic_fsdax_supported,
 	.copy_from_iter = dcssblk_dax_copy_from_iter,
 	.copy_to_iter = dcssblk_dax_copy_to_iter,
+	.zero_page_range = dcssblk_dax_zero_page_range,
 };
 
 struct dcssblk_dev_info {
@@ -636,10 +651,9 @@
 	}
 	dev_info->gd->major = dcssblk_major;
 	dev_info->gd->fops = &dcssblk_devops;
-	dev_info->dcssblk_queue = blk_alloc_queue(GFP_KERNEL);
+	dev_info->dcssblk_queue = blk_alloc_queue(NUMA_NO_NODE);
 	dev_info->gd->queue = dev_info->dcssblk_queue;
 	dev_info->gd->private_data = dev_info;
-	blk_queue_make_request(dev_info->dcssblk_queue, dcssblk_make_request);
 	blk_queue_logical_block_size(dev_info->dcssblk_queue, 4096);
 	blk_queue_flag_set(QUEUE_FLAG_DAX, dev_info->dcssblk_queue);
 
@@ -680,8 +694,9 @@
 
 	dev_info->dax_dev = alloc_dax(dev_info, dev_info->gd->disk_name,
 			&dcssblk_dax_ops, DAXDEV_F_SYNC);
-	if (!dev_info->dax_dev) {
-		rc = -ENOMEM;
+	if (IS_ERR(dev_info->dax_dev)) {
+		rc = PTR_ERR(dev_info->dax_dev);
+		dev_info->dax_dev = NULL;
 		goto put_dev;
 	}
 
@@ -817,7 +832,6 @@
 		goto out;
 	}
 	atomic_inc(&dev_info->use_count);
-	bdev->bd_block_size = 4096;
 	rc = 0;
 out:
 	return rc;
@@ -852,7 +866,7 @@
 }
 
 static blk_qc_t
-dcssblk_make_request(struct request_queue *q, struct bio *bio)
+dcssblk_submit_bio(struct bio *bio)
 {
 	struct dcssblk_dev_info *dev_info;
 	struct bio_vec bvec;
@@ -862,7 +876,7 @@
 	unsigned long source_addr;
 	unsigned long bytes_done;
 
-	blk_queue_split(q, &bio);
+	blk_queue_split(&bio);
 
 	bytes_done = 0;
 	dev_info = bio->bi_disk->private_data;
diff --git a/drivers/s390/block/scm_blk.c b/drivers/s390/block/scm_blk.c
index e018893..a4f6f2e 100644
--- a/drivers/s390/block/scm_blk.c
+++ b/drivers/s390/block/scm_blk.c
@@ -256,7 +256,8 @@
 	for (i = 0; i < nr_requests_per_io && scmrq->request[i]; i++) {
 		error = blk_mq_rq_to_pdu(scmrq->request[i]);
 		*error = scmrq->error;
-		blk_mq_complete_request(scmrq->request[i]);
+		if (likely(!blk_should_fake_timeout(scmrq->request[i]->q)))
+			blk_mq_complete_request(scmrq->request[i]);
 	}
 
 	atomic_dec(&bdev->queued_reqs);
diff --git a/drivers/s390/block/xpram.c b/drivers/s390/block/xpram.c
index 3df5d68..c2536f7 100644
--- a/drivers/s390/block/xpram.c
+++ b/drivers/s390/block/xpram.c
@@ -182,7 +182,7 @@
 /*
  * Block device make request function.
  */
-static blk_qc_t xpram_make_request(struct request_queue *q, struct bio *bio)
+static blk_qc_t xpram_submit_bio(struct bio *bio)
 {
 	xpram_device_t *xdev = bio->bi_disk->private_data;
 	struct bio_vec bvec;
@@ -191,7 +191,7 @@
 	unsigned long page_addr;
 	unsigned long bytes;
 
-	blk_queue_split(q, &bio);
+	blk_queue_split(&bio);
 
 	if ((bio->bi_iter.bi_sector & 7) != 0 ||
 	    (bio->bi_iter.bi_size & 4095) != 0)
@@ -250,6 +250,7 @@
 static const struct block_device_operations xpram_devops =
 {
 	.owner	= THIS_MODULE,
+	.submit_bio = xpram_submit_bio,
 	.getgeo	= xpram_getgeo,
 };
 
@@ -343,14 +344,13 @@
 		xpram_disks[i] = alloc_disk(1);
 		if (!xpram_disks[i])
 			goto out;
-		xpram_queues[i] = blk_alloc_queue(GFP_KERNEL);
+		xpram_queues[i] = blk_alloc_queue(NUMA_NO_NODE);
 		if (!xpram_queues[i]) {
 			put_disk(xpram_disks[i]);
 			goto out;
 		}
 		blk_queue_flag_set(QUEUE_FLAG_NONROT, xpram_queues[i]);
 		blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, xpram_queues[i]);
-		blk_queue_make_request(xpram_queues[i], xpram_make_request);
 		blk_queue_logical_block_size(xpram_queues[i], 4096);
 	}
 
diff --git a/drivers/s390/char/Makefile b/drivers/s390/char/Makefile
index 845e12a..c6fdb81 100644
--- a/drivers/s390/char/Makefile
+++ b/drivers/s390/char/Makefile
@@ -34,6 +34,8 @@
 
 obj-$(CONFIG_PCI) += sclp_pci.o
 
+obj-$(subst m,y,$(CONFIG_ZCRYPT)) += sclp_ap.o
+
 obj-$(CONFIG_VMLOGRDR) += vmlogrdr.o
 obj-$(CONFIG_VMCP) += vmcp.o
 
diff --git a/drivers/s390/char/con3215.c b/drivers/s390/char/con3215.c
index e7cf0a1..d8acabb 100644
--- a/drivers/s390/char/con3215.c
+++ b/drivers/s390/char/con3215.c
@@ -398,7 +398,7 @@
 		}
 		if (dstat == 0x08)
 			break;
-		/* else, fall through */
+		fallthrough;
 	case 0x04:
 		/* Device end interrupt. */
 		if ((raw = req->info) == NULL)
@@ -978,7 +978,6 @@
 static int tty3215_open(struct tty_struct *tty, struct file * filp)
 {
 	struct raw3215_info *raw = tty->driver_data;
-	int retval;
 
 	tty_port_tty_set(&raw->port, tty);
 
@@ -986,11 +985,7 @@
 	/*
 	 * Start up 3215 device
 	 */
-	retval = raw3215_startup(raw);
-	if (retval)
-		return retval;
-
-	return 0;
+	return raw3215_startup(raw);
 }
 
 /*
diff --git a/drivers/s390/char/hmcdrv_ftp.c b/drivers/s390/char/hmcdrv_ftp.c
index 0e70397..37ee8f6 100644
--- a/drivers/s390/char/hmcdrv_ftp.c
+++ b/drivers/s390/char/hmcdrv_ftp.c
@@ -137,7 +137,7 @@
 			while ((*cmd != '\0') && !iscntrl(*cmd))
 				++cmd;
 			ftp->fname = start;
-			/* fall through */
+			fallthrough;
 		default:
 			*cmd = '\0';
 			break;
diff --git a/drivers/s390/char/raw3270.h b/drivers/s390/char/raw3270.h
index 3afaa35..8d979e0 100644
--- a/drivers/s390/char/raw3270.h
+++ b/drivers/s390/char/raw3270.h
@@ -110,7 +110,6 @@
 };
 
 struct raw3270_request *raw3270_request_alloc(size_t size);
-struct raw3270_request *raw3270_request_alloc_bootmem(size_t size);
 void raw3270_request_free(struct raw3270_request *);
 void raw3270_request_reset(struct raw3270_request *);
 void raw3270_request_set_cmd(struct raw3270_request *, u8 cmd);
@@ -211,7 +210,7 @@
 	struct list_head update;
 	unsigned long size;
 	unsigned long len;
-	char string[0];
+	char string[];
 } __attribute__ ((aligned(8)));
 
 static inline struct string *
diff --git a/drivers/s390/char/sclp.h b/drivers/s390/char/sclp.h
index 1963330..69d9cde 100644
--- a/drivers/s390/char/sclp.h
+++ b/drivers/s390/char/sclp.h
@@ -229,7 +229,7 @@
 #define SCLP_HAS_CPU_INFO	(sclp.facilities & 0x0800000000000000ULL)
 #define SCLP_HAS_CPU_RECONFIG	(sclp.facilities & 0x0400000000000000ULL)
 #define SCLP_HAS_PCI_RECONFIG	(sclp.facilities & 0x0000000040000000ULL)
-
+#define SCLP_HAS_AP_RECONFIG	(sclp.facilities & 0x0000000100000000ULL)
 
 struct gds_subvector {
 	u8	length;
@@ -305,9 +305,7 @@
 int sclp_reactivate(void);
 int sclp_sync_request(sclp_cmdw_t command, void *sccb);
 int sclp_sync_request_timeout(sclp_cmdw_t command, void *sccb, int timeout);
-
 int sclp_sdias_init(void);
-void sclp_sdias_exit(void);
 
 enum {
 	sclp_init_state_uninitialized,
diff --git a/drivers/s390/char/sclp_ap.c b/drivers/s390/char/sclp_ap.c
new file mode 100644
index 0000000..0dd1ca7
--- /dev/null
+++ b/drivers/s390/char/sclp_ap.c
@@ -0,0 +1,63 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * s390 crypto adapter related sclp functions.
+ *
+ * Copyright IBM Corp. 2020
+ */
+#define KMSG_COMPONENT "sclp_cmd"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <linux/export.h>
+#include <linux/slab.h>
+#include <asm/sclp.h>
+#include "sclp.h"
+
+#define SCLP_CMDW_CONFIGURE_AP			0x001f0001
+#define SCLP_CMDW_DECONFIGURE_AP		0x001e0001
+
+struct ap_cfg_sccb {
+	struct sccb_header header;
+} __packed;
+
+static int do_ap_configure(sclp_cmdw_t cmd, u32 apid)
+{
+	struct ap_cfg_sccb *sccb;
+	int rc;
+
+	if (!SCLP_HAS_AP_RECONFIG)
+		return -EOPNOTSUPP;
+
+	sccb = (struct ap_cfg_sccb *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
+	if (!sccb)
+		return -ENOMEM;
+
+	sccb->header.length = PAGE_SIZE;
+	cmd |= (apid & 0xFF) << 8;
+	rc = sclp_sync_request(cmd, sccb);
+	if (rc)
+		goto out;
+	switch (sccb->header.response_code) {
+	case 0x0020: case 0x0120: case 0x0440: case 0x0450:
+		break;
+	default:
+		pr_warn("configure AP adapter %u failed: cmd=0x%08x response=0x%04x\n",
+			apid, cmd, sccb->header.response_code);
+		rc = -EIO;
+		break;
+	}
+out:
+	free_page((unsigned long) sccb);
+	return rc;
+}
+
+int sclp_ap_configure(u32 apid)
+{
+	return do_ap_configure(SCLP_CMDW_CONFIGURE_AP, apid);
+}
+EXPORT_SYMBOL(sclp_ap_configure);
+
+int sclp_ap_deconfigure(u32 apid)
+{
+	return do_ap_configure(SCLP_CMDW_DECONFIGURE_AP, apid);
+}
+EXPORT_SYMBOL(sclp_ap_deconfigure);
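
For context, a hedged sketch of how a caller (in practice the AP bus)
might drive these exports; the helper name and the pr_warn text are
illustrative, only sclp_ap_configure() and its error codes come from the
code above:

	/* apid would come from the AP bus adapter scan */
	static int try_configure_adapter(u32 apid)
	{
		int rc = sclp_ap_configure(apid);

		if (rc == -EOPNOTSUPP)	/* SCLP_HAS_AP_RECONFIG facility absent */
			return rc;
		if (rc)
			pr_warn("AP %u: configure failed (%d)\n", apid, rc);
		return rc;
	}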
diff --git a/drivers/s390/char/sclp_cmd.c b/drivers/s390/char/sclp_cmd.c
index 37d42de..f6e97f0 100644
--- a/drivers/s390/char/sclp_cmd.c
+++ b/drivers/s390/char/sclp_cmd.c
@@ -406,7 +406,7 @@
 	if (!size)
 		goto skip_add;
 	for (addr = start; addr < start + size; addr += block_size)
-		add_memory(numa_pfn_to_nid(PFN_DOWN(addr)), addr, block_size);
+		add_memory(0, addr, block_size, MHP_NONE);
 skip_add:
 	first_rn = rn;
 	num = 1;
diff --git a/drivers/s390/char/sclp_early_core.c b/drivers/s390/char/sclp_early_core.c
index 7737470..a960afa 100644
--- a/drivers/s390/char/sclp_early_core.c
+++ b/drivers/s390/char/sclp_early_core.c
@@ -17,12 +17,12 @@
 static struct read_info_sccb __bootdata(sclp_info_sccb);
 static int __bootdata(sclp_info_sccb_valid);
 char *sclp_early_sccb = (char *) EARLY_SCCB_OFFSET;
-int sclp_init_state __section(.data) = sclp_init_state_uninitialized;
+int sclp_init_state = sclp_init_state_uninitialized;
 /*
  * Used to keep track of the size of the event masks. Qemu until version 2.11
  * only supports 4 and needs a workaround.
  */
-bool sclp_mask_compat_mode __section(.data);
+bool sclp_mask_compat_mode;
 
 void sclp_early_wait_irq(void)
 {
@@ -214,11 +214,11 @@
  * Output one or more lines of text on the SCLP console (VT220 and /
  * or line-mode).
  */
-void __sclp_early_printk(const char *str, unsigned int len, unsigned int force)
+void __sclp_early_printk(const char *str, unsigned int len)
 {
 	int have_linemode, have_vt220;
 
-	if (!force && sclp_init_state != sclp_init_state_uninitialized)
+	if (sclp_init_state != sclp_init_state_uninitialized)
 		return;
 	if (sclp_early_setup(0, &have_linemode, &have_vt220) != 0)
 		return;
@@ -231,12 +231,7 @@
 
 void sclp_early_printk(const char *str)
 {
-	__sclp_early_printk(str, strlen(str), 0);
-}
-
-void sclp_early_printk_force(const char *str)
-{
-	__sclp_early_printk(str, strlen(str), 1);
+	__sclp_early_printk(str, strlen(str));
 }
 
 int __init sclp_early_read_info(void)
diff --git a/drivers/s390/char/sclp_pci.c b/drivers/s390/char/sclp_pci.c
index 995e919..a3e5a5f 100644
--- a/drivers/s390/char/sclp_pci.c
+++ b/drivers/s390/char/sclp_pci.c
@@ -39,7 +39,7 @@
 	u8 atype;
 	u32 fh;
 	u32 fid;
-	u8 data[0];
+	u8 data[];
 } __packed;
 
 struct err_notify_sccb {
diff --git a/drivers/s390/char/sclp_rw.c b/drivers/s390/char/sclp_rw.c
index 44594a4..d6c84e3 100644
--- a/drivers/s390/char/sclp_rw.c
+++ b/drivers/s390/char/sclp_rw.c
@@ -337,24 +337,6 @@
 }
 
 /*
- * sets or provides some values that influence the drivers behaviour
- */
-void
-sclp_set_columns(struct sclp_buffer *buffer, unsigned short columns)
-{
-	buffer->columns = columns;
-	if (buffer->current_line != NULL &&
-	    buffer->current_length > buffer->columns)
-		sclp_finalize_mto(buffer);
-}
-
-void
-sclp_set_htab(struct sclp_buffer *buffer, unsigned short htab)
-{
-	buffer->htab = htab;
-}
-
-/*
  * called by sclp_console_init and/or sclp_tty_init
  */
 int
diff --git a/drivers/s390/char/sclp_rw.h b/drivers/s390/char/sclp_rw.h
index a2eb22f..93d706e 100644
--- a/drivers/s390/char/sclp_rw.h
+++ b/drivers/s390/char/sclp_rw.h
@@ -86,8 +86,6 @@
 int sclp_buffer_space(struct sclp_buffer *);
 int sclp_write(struct sclp_buffer *buffer, const unsigned char *, int);
 int sclp_emit_buffer(struct sclp_buffer *,void (*)(struct sclp_buffer *,int));
-void sclp_set_columns(struct sclp_buffer *, unsigned short);
-void sclp_set_htab(struct sclp_buffer *, unsigned short);
 int sclp_chars_in_buffer(struct sclp_buffer *);
 
 #ifdef CONFIG_SCLP_CONSOLE
diff --git a/drivers/s390/char/sclp_sdias.c b/drivers/s390/char/sclp_sdias.c
index 13f97fd..215d4b4 100644
--- a/drivers/s390/char/sclp_sdias.c
+++ b/drivers/s390/char/sclp_sdias.c
@@ -214,7 +214,7 @@
 		break;
 	case SDIAS_EVSTATE_NO_DATA:
 		TRACE("no data\n");
-		/* fall through */
+		fallthrough;
 	default:
 		pr_err("Error from SCLP while copying hsa. Event status = %x\n",
 		       sdias_evbuf.event_status);
@@ -257,7 +257,7 @@
 
 int __init sclp_sdias_init(void)
 {
-	if (ipl_info.type != IPL_TYPE_FCP_DUMP)
+	if (!is_ipl_type_dump())
 		return 0;
 	sclp_sdias_sccb = (void *) __get_free_page(GFP_KERNEL | GFP_DMA);
 	BUG_ON(!sclp_sdias_sccb);
@@ -275,9 +275,3 @@
 	TRACE("init done\n");
 	return 0;
 }
-
-void __exit sclp_sdias_exit(void)
-{
-	debug_unregister(sdias_dbf);
-	sclp_unregister(&sclp_sdias_register);
-}
diff --git a/drivers/s390/char/tape.h b/drivers/s390/char/tape.h
index 8bec5f9..e2c6047 100644
--- a/drivers/s390/char/tape.h
+++ b/drivers/s390/char/tape.h
@@ -238,7 +238,6 @@
 extern int tape_do_io_async(struct tape_device *, struct tape_request *);
 extern int tape_do_io_interruptible(struct tape_device *, struct tape_request *);
 extern int tape_cancel_io(struct tape_device *, struct tape_request *);
-void tape_hotplug_event(struct tape_device *, int major, int action);
 
 static inline int
 tape_do_io_free(struct tape_device *device, struct tape_request *request)
@@ -258,8 +257,6 @@
 	tape_do_io_async(device, request);
 }
 
-extern int tape_oper_handler(int irq, int status);
-extern void tape_noper_handler(int irq, int status);
 extern int tape_open(struct tape_device *);
 extern int tape_release(struct tape_device *);
 extern int tape_mtop(struct tape_device *, int, int);
diff --git a/drivers/s390/char/tape_char.c b/drivers/s390/char/tape_char.c
index ea42539..8abb429 100644
--- a/drivers/s390/char/tape_char.c
+++ b/drivers/s390/char/tape_char.c
@@ -341,14 +341,14 @@
  */
 static int
 __tapechar_ioctl(struct tape_device *device,
-		 unsigned int no, unsigned long data)
+		 unsigned int no, void __user *data)
 {
 	int rc;
 
 	if (no == MTIOCTOP) {
 		struct mtop op;
 
-		if (copy_from_user(&op, (char __user *) data, sizeof(op)) != 0)
+		if (copy_from_user(&op, data, sizeof(op)) != 0)
 			return -EFAULT;
 		if (op.mt_count < 0)
 			return -EINVAL;
@@ -392,9 +392,7 @@
 		if (rc < 0)
 			return rc;
 		pos.mt_blkno = rc;
-		if (copy_to_user((char __user *) data, &pos, sizeof(pos)) != 0)
-			return -EFAULT;
-		return 0;
+		return put_user_mtpos(data, &pos);
 	}
 	if (no == MTIOCGET) {
 		/* MTIOCGET: query the tape drive status. */
@@ -424,15 +422,12 @@
 			get.mt_blkno = rc;
 		}
 
-		if (copy_to_user((char __user *) data, &get, sizeof(get)) != 0)
-			return -EFAULT;
-
-		return 0;
+		return put_user_mtget(data, &get);
 	}
 	/* Try the discipline ioctl function. */
 	if (device->discipline->ioctl_fn == NULL)
 		return -EINVAL;
-	return device->discipline->ioctl_fn(device, no, data);
+	return device->discipline->ioctl_fn(device, no, (unsigned long)data);
 }
 
 static long
@@ -445,7 +440,7 @@
 
 	device = (struct tape_device *) filp->private_data;
 	mutex_lock(&device->mutex);
-	rc = __tapechar_ioctl(device, no, data);
+	rc = __tapechar_ioctl(device, no, (void __user *)data);
 	mutex_unlock(&device->mutex);
 	return rc;
 }
@@ -455,23 +450,17 @@
 tapechar_compat_ioctl(struct file *filp, unsigned int no, unsigned long data)
 {
 	struct tape_device *device = filp->private_data;
-	int rval = -ENOIOCTLCMD;
-	unsigned long argp;
+	long rc;
 
-	/* The 'arg' argument of any ioctl function may only be used for
-	 * pointers because of the compat pointer conversion.
-	 * Consider this when adding new ioctls.
-	 */
-	argp = (unsigned long) compat_ptr(data);
-	if (device->discipline->ioctl_fn) {
-		mutex_lock(&device->mutex);
-		rval = device->discipline->ioctl_fn(device, no, argp);
-		mutex_unlock(&device->mutex);
-		if (rval == -EINVAL)
-			rval = -ENOIOCTLCMD;
-	}
+	if (no == MTIOCPOS32)
+		no = MTIOCPOS;
+	else if (no == MTIOCGET32)
+		no = MTIOCGET;
 
-	return rval;
+	mutex_lock(&device->mutex);
+	rc = __tapechar_ioctl(device, no, compat_ptr(data));
+	mutex_unlock(&device->mutex);
+	return rc;
 }
 #endif /* CONFIG_COMPAT */
 
diff --git a/drivers/s390/char/tape_core.c b/drivers/s390/char/tape_core.c
index 3e0b2f6..380e6a6 100644
--- a/drivers/s390/char/tape_core.c
+++ b/drivers/s390/char/tape_core.c
@@ -677,7 +677,7 @@
 	switch (device->tape_state) {
 		case TS_INIT:
 			tape_state_set(device, TS_NOT_OPER);
-			/* fallthrough */
+			fallthrough;
 		case TS_NOT_OPER:
 			/*
 			 * Nothing to do.
@@ -950,7 +950,7 @@
 				break;
 			if (device->tape_state == TS_UNUSED)
 				break;
-			/* fallthrough */
+			fallthrough;
 		default:
 			if (device->tape_state == TS_BLKUSE)
 				break;
@@ -1118,7 +1118,7 @@
 			case -ETIMEDOUT:
 				DBF_LH(1, "(%08x): Request timed out\n",
 				       device->cdev_id);
-				/* fallthrough */
+				fallthrough;
 			case -EIO:
 				__tape_end_request(device, request, -EIO);
 				break;
diff --git a/drivers/s390/char/tape_std.c b/drivers/s390/char/tape_std.c
index 1f5fab6..f7e75d9 100644
--- a/drivers/s390/char/tape_std.c
+++ b/drivers/s390/char/tape_std.c
@@ -53,7 +53,6 @@
 tape_std_assign(struct tape_device *device)
 {
 	int                  rc;
-	struct timer_list    timeout;
 	struct tape_request *request;
 
 	request = tape_alloc_request(2, 11);
@@ -70,7 +69,7 @@
 	 * So we set up a timeout for this call.
 	 */
 	timer_setup(&request->timer, tape_std_assign_timeout, 0);
-	mod_timer(&timeout, jiffies + 2 * HZ);
+	mod_timer(&request->timer, jiffies + msecs_to_jiffies(2000));
 
 	rc = tape_do_io_interruptible(device, request);
 
diff --git a/drivers/s390/char/tape_std.h b/drivers/s390/char/tape_std.h
index 53ec8e2..dcc63ff 100644
--- a/drivers/s390/char/tape_std.h
+++ b/drivers/s390/char/tape_std.h
@@ -101,7 +101,6 @@
 void tape_std_read_backward(struct tape_device *device,
 			    struct tape_request *request);
 struct tape_request *tape_std_write_block(struct tape_device *, size_t);
-void tape_std_check_locate(struct tape_device *, struct tape_request *);
 
 /* Some non-mtop commands. */
 int tape_std_assign(struct tape_device *);
@@ -131,19 +130,8 @@
 int tape_std_mtweof(struct tape_device *, int);
 
 /* Event handlers */
-void tape_std_default_handler(struct tape_device *);
-void tape_std_unexpect_uchk_handler(struct tape_device *);
-void tape_std_irq(struct tape_device *);
 void tape_std_process_eov(struct tape_device *);
 
-// the error recovery stuff:
-void tape_std_error_recovery(struct tape_device *);
-void tape_std_error_recovery_has_failed(struct tape_device *,int error_id);
-void tape_std_error_recovery_succeded(struct tape_device *);
-void tape_std_error_recovery_do_retry(struct tape_device *);
-void tape_std_error_recovery_read_opposite(struct tape_device *);
-void tape_std_error_recovery_HWBUG(struct tape_device *, int condno);
-
 /* S390 tape types */
 enum s390_tape_type {
         tape_3480,
diff --git a/drivers/s390/char/tty3270.c b/drivers/s390/char/tty3270.c
index 98d7fc1..aec996d 100644
--- a/drivers/s390/char/tty3270.c
+++ b/drivers/s390/char/tty3270.c
@@ -556,8 +556,9 @@
  * Pass input line to tty.
  */
 static void
-tty3270_read_tasklet(struct raw3270_request *rrq)
+tty3270_read_tasklet(unsigned long data)
 {
+	struct raw3270_request *rrq = (struct raw3270_request *)data;
 	static char kreset_data = TW_KR;
 	struct tty3270 *tp = container_of(rrq->view, struct tty3270, view);
 	char *input;
@@ -652,8 +653,9 @@
  * Hang up the tty
  */
 static void
-tty3270_hangup_tasklet(struct tty3270 *tp)
+tty3270_hangup_tasklet(unsigned long data)
 {
+	struct tty3270 *tp = (struct tty3270 *)data;
 	tty_port_tty_hangup(&tp->port, true);
 	raw3270_put_view(&tp->view);
 }
@@ -752,11 +754,9 @@
 
 	tty_port_init(&tp->port);
 	timer_setup(&tp->timer, tty3270_update, 0);
-	tasklet_init(&tp->readlet,
-		     (void (*)(unsigned long)) tty3270_read_tasklet,
+	tasklet_init(&tp->readlet, tty3270_read_tasklet,
 		     (unsigned long) tp->read);
-	tasklet_init(&tp->hanglet,
-		     (void (*)(unsigned long)) tty3270_hangup_tasklet,
+	tasklet_init(&tp->hanglet, tty3270_hangup_tasklet,
 		     (unsigned long) tp);
 	INIT_WORK(&tp->resize_work, tty3270_resize_work);
 
diff --git a/drivers/s390/char/zcore.c b/drivers/s390/char/zcore.c
index 08f8124..1515fdc 100644
--- a/drivers/s390/char/zcore.c
+++ b/drivers/s390/char/zcore.c
@@ -1,8 +1,7 @@
 // SPDX-License-Identifier: GPL-1.0+
 /*
  * zcore module to export memory content and register sets for creating system
- * dumps on SCSI disks (zfcpdump). The "zcore/mem" debugfs file shows the same
- * dump format as s390 standalone dumps.
+ * dumps on SCSI/NVMe disks (zfcp/nvme dump).
  *
  * For more information please refer to Documentation/s390/zfcpdump.rst
  *
@@ -16,7 +15,6 @@
 #include <linux/init.h>
 #include <linux/slab.h>
 #include <linux/debugfs.h>
-#include <linux/memblock.h>
 
 #include <asm/asm-offsets.h>
 #include <asm/ipl.h>
@@ -33,8 +31,6 @@
 
 #define TRACE(x...) debug_sprintf_event(zcore_dbf, 1, x)
 
-#define CHUNK_INFO_SIZE	34 /* 2 16-byte char, each followed by blank */
-
 enum arch_id {
 	ARCH_S390	= 0,
 	ARCH_S390X	= 1,
@@ -48,7 +44,6 @@
 static struct debug_info *zcore_dbf;
 static int hsa_available;
 static struct dentry *zcore_dir;
-static struct dentry *zcore_memmap_file;
 static struct dentry *zcore_reipl_file;
 static struct dentry *zcore_hsa_file;
 static struct ipl_parameter_block *zcore_ipl_block;
@@ -139,46 +134,6 @@
 	hsa_available = 0;
 }
 
-static ssize_t zcore_memmap_read(struct file *filp, char __user *buf,
-				 size_t count, loff_t *ppos)
-{
-	return simple_read_from_buffer(buf, count, ppos, filp->private_data,
-				       memblock.memory.cnt * CHUNK_INFO_SIZE);
-}
-
-static int zcore_memmap_open(struct inode *inode, struct file *filp)
-{
-	struct memblock_region *reg;
-	char *buf;
-	int i = 0;
-
-	buf = kcalloc(memblock.memory.cnt, CHUNK_INFO_SIZE, GFP_KERNEL);
-	if (!buf) {
-		return -ENOMEM;
-	}
-	for_each_memblock(memory, reg) {
-		sprintf(buf + (i++ * CHUNK_INFO_SIZE), "%016llx %016llx ",
-			(unsigned long long) reg->base,
-			(unsigned long long) reg->size);
-	}
-	filp->private_data = buf;
-	return nonseekable_open(inode, filp);
-}
-
-static int zcore_memmap_release(struct inode *inode, struct file *filp)
-{
-	kfree(filp->private_data);
-	return 0;
-}
-
-static const struct file_operations zcore_memmap_fops = {
-	.owner		= THIS_MODULE,
-	.read		= zcore_memmap_read,
-	.open		= zcore_memmap_open,
-	.release	= zcore_memmap_release,
-	.llseek		= no_llseek,
-};
-
 static ssize_t zcore_reipl_write(struct file *filp, const char __user *buf,
 				 size_t count, loff_t *ppos)
 {
@@ -288,7 +243,7 @@
 	unsigned char arch;
 	int rc;
 
-	if (ipl_info.type != IPL_TYPE_FCP_DUMP)
+	if (!is_ipl_type_dump())
 		return -ENODATA;
 	if (OLDMEM_BASE)
 		return -ENODATA;
@@ -297,9 +252,16 @@
 	debug_register_view(zcore_dbf, &debug_sprintf_view);
 	debug_set_level(zcore_dbf, 6);
 
-	TRACE("devno:  %x\n", ipl_info.data.fcp.dev_id.devno);
-	TRACE("wwpn:   %llx\n", (unsigned long long) ipl_info.data.fcp.wwpn);
-	TRACE("lun:    %llx\n", (unsigned long long) ipl_info.data.fcp.lun);
+	if (ipl_info.type == IPL_TYPE_FCP_DUMP) {
+		TRACE("type:   fcp\n");
+		TRACE("devno:  %x\n", ipl_info.data.fcp.dev_id.devno);
+		TRACE("wwpn:   %llx\n", (unsigned long long) ipl_info.data.fcp.wwpn);
+		TRACE("lun:    %llx\n", (unsigned long long) ipl_info.data.fcp.lun);
+	} else if (ipl_info.type == IPL_TYPE_NVME_DUMP) {
+		TRACE("type:   nvme\n");
+		TRACE("fid:    %x\n", ipl_info.data.nvme.fid);
+		TRACE("nsid:   %x\n", ipl_info.data.nvme.nsid);
+	}
 
 	rc = sclp_sdias_init();
 	if (rc)
@@ -335,17 +297,11 @@
 		rc = -ENOMEM;
 		goto fail;
 	}
-	zcore_memmap_file = debugfs_create_file("memmap", S_IRUSR, zcore_dir,
-						NULL, &zcore_memmap_fops);
-	if (!zcore_memmap_file) {
-		rc = -ENOMEM;
-		goto fail_dir;
-	}
 	zcore_reipl_file = debugfs_create_file("reipl", S_IRUSR, zcore_dir,
 						NULL, &zcore_reipl_fops);
 	if (!zcore_reipl_file) {
 		rc = -ENOMEM;
-		goto fail_memmap_file;
+		goto fail_dir;
 	}
 	zcore_hsa_file = debugfs_create_file("hsa", S_IRUSR|S_IWUSR, zcore_dir,
 					     NULL, &zcore_hsa_fops);
@@ -357,8 +313,6 @@
 
 fail_reipl_file:
 	debugfs_remove(zcore_reipl_file);
-fail_memmap_file:
-	debugfs_remove(zcore_memmap_file);
 fail_dir:
 	debugfs_remove(zcore_dir);
 fail:
diff --git a/drivers/s390/cio/Makefile b/drivers/s390/cio/Makefile
index f6a8db0..a9235f1 100644
--- a/drivers/s390/cio/Makefile
+++ b/drivers/s390/cio/Makefile
@@ -5,7 +5,7 @@
 
 # The following is required for define_trace.h to find ./trace.h
 CFLAGS_trace.o := -I$(src)
-CFLAGS_vfio_ccw_fsm.o := -I$(src)
+CFLAGS_vfio_ccw_trace.o := -I$(src)
 
 obj-y += airq.o blacklist.o chsc.o cio.o css.o chp.o idset.o isc.o \
 	fcx.o itcw.o crw.o ccwreq.o trace.o ioasm.o
@@ -21,5 +21,5 @@
 obj-$(CONFIG_QDIO) += qdio.o
 
 vfio_ccw-objs += vfio_ccw_drv.o vfio_ccw_cp.o vfio_ccw_ops.o vfio_ccw_fsm.o \
-	vfio_ccw_async.o
+	vfio_ccw_async.o vfio_ccw_trace.o vfio_ccw_chp.o
 obj-$(CONFIG_VFIO_CCW) += vfio_ccw.o
diff --git a/drivers/s390/cio/blacklist.c b/drivers/s390/cio/blacklist.c
index 9cebff8..4dd2eb6 100644
--- a/drivers/s390/cio/blacklist.c
+++ b/drivers/s390/cio/blacklist.c
@@ -399,12 +399,12 @@
 				sizeof(struct ccwdev_iter));
 }
 
-static const struct file_operations cio_ignore_proc_fops = {
-	.open    = cio_ignore_proc_open,
-	.read    = seq_read,
-	.llseek  = seq_lseek,
-	.release = seq_release_private,
-	.write   = cio_ignore_write,
+static const struct proc_ops cio_ignore_proc_ops = {
+	.proc_open	= cio_ignore_proc_open,
+	.proc_read	= seq_read,
+	.proc_lseek	= seq_lseek,
+	.proc_release	= seq_release_private,
+	.proc_write	= cio_ignore_write,
 };
 
 static int
@@ -413,7 +413,7 @@
 	struct proc_dir_entry *entry;
 
 	entry = proc_create("cio_ignore", S_IFREG | S_IRUGO | S_IWUSR, NULL,
-			    &cio_ignore_proc_fops);
+			    &cio_ignore_proc_ops);
 	if (!entry)
 		return -ENOENT;
 	return 0;
diff --git a/drivers/s390/cio/ccwgroup.c b/drivers/s390/cio/ccwgroup.c
index b42a937..483a9ec 100644
--- a/drivers/s390/cio/ccwgroup.c
+++ b/drivers/s390/cio/ccwgroup.c
@@ -485,79 +485,10 @@
 		gdrv->shutdown(gdev);
 }
 
-static int ccwgroup_pm_prepare(struct device *dev)
-{
-	struct ccwgroup_device *gdev = to_ccwgroupdev(dev);
-	struct ccwgroup_driver *gdrv = to_ccwgroupdrv(gdev->dev.driver);
-
-	/* Fail while device is being set online/offline. */
-	if (atomic_read(&gdev->onoff))
-		return -EAGAIN;
-
-	if (!gdev->dev.driver || gdev->state != CCWGROUP_ONLINE)
-		return 0;
-
-	return gdrv->prepare ? gdrv->prepare(gdev) : 0;
-}
-
-static void ccwgroup_pm_complete(struct device *dev)
-{
-	struct ccwgroup_device *gdev = to_ccwgroupdev(dev);
-	struct ccwgroup_driver *gdrv = to_ccwgroupdrv(dev->driver);
-
-	if (!gdev->dev.driver || gdev->state != CCWGROUP_ONLINE)
-		return;
-
-	if (gdrv->complete)
-		gdrv->complete(gdev);
-}
-
-static int ccwgroup_pm_freeze(struct device *dev)
-{
-	struct ccwgroup_device *gdev = to_ccwgroupdev(dev);
-	struct ccwgroup_driver *gdrv = to_ccwgroupdrv(gdev->dev.driver);
-
-	if (!gdev->dev.driver || gdev->state != CCWGROUP_ONLINE)
-		return 0;
-
-	return gdrv->freeze ? gdrv->freeze(gdev) : 0;
-}
-
-static int ccwgroup_pm_thaw(struct device *dev)
-{
-	struct ccwgroup_device *gdev = to_ccwgroupdev(dev);
-	struct ccwgroup_driver *gdrv = to_ccwgroupdrv(gdev->dev.driver);
-
-	if (!gdev->dev.driver || gdev->state != CCWGROUP_ONLINE)
-		return 0;
-
-	return gdrv->thaw ? gdrv->thaw(gdev) : 0;
-}
-
-static int ccwgroup_pm_restore(struct device *dev)
-{
-	struct ccwgroup_device *gdev = to_ccwgroupdev(dev);
-	struct ccwgroup_driver *gdrv = to_ccwgroupdrv(gdev->dev.driver);
-
-	if (!gdev->dev.driver || gdev->state != CCWGROUP_ONLINE)
-		return 0;
-
-	return gdrv->restore ? gdrv->restore(gdev) : 0;
-}
-
-static const struct dev_pm_ops ccwgroup_pm_ops = {
-	.prepare = ccwgroup_pm_prepare,
-	.complete = ccwgroup_pm_complete,
-	.freeze = ccwgroup_pm_freeze,
-	.thaw = ccwgroup_pm_thaw,
-	.restore = ccwgroup_pm_restore,
-};
-
 static struct bus_type ccwgroup_bus_type = {
 	.name   = "ccwgroup",
 	.remove = ccwgroup_remove,
 	.shutdown = ccwgroup_shutdown,
-	.pm = &ccwgroup_pm_ops,
 };
 
 bool dev_is_ccwgroup(struct device *dev)
diff --git a/drivers/s390/cio/chp.c b/drivers/s390/cio/chp.c
index 1fd982b..93e2278 100644
--- a/drivers/s390/cio/chp.c
+++ b/drivers/s390/cio/chp.c
@@ -135,7 +135,7 @@
 	struct channel_path *chp;
 	struct device *device;
 
-	device = container_of(kobj, struct device, kobj);
+	device = kobj_to_dev(kobj);
 	chp = to_channelpath(device);
 	if (chp->cmg == -1)
 		return 0;
@@ -184,7 +184,7 @@
 	struct device *device;
 	unsigned int size;
 
-	device = container_of(kobj, struct device, kobj);
+	device = kobj_to_dev(kobj);
 	chp = to_channelpath(device);
 	css = to_css(chp->dev.parent);
 
diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c
index 980a78d..93aa7ea 100644
--- a/drivers/s390/cio/chsc.c
+++ b/drivers/s390/cio/chsc.c
@@ -57,6 +57,7 @@
 	case 0x0104:
 		return -EINVAL;
 	case 0x0004:
+	case 0x0106:		/* "Wrong Channel Parm" for the op 0x003d */
 		return -EOPNOTSUPP;
 	case 0x000b:
 	case 0x0107:		/* "Channel busy" for the op 0x003d */
@@ -64,6 +65,8 @@
 	case 0x0100:
 	case 0x0102:
 		return -ENOMEM;
+	case 0x0108:		/* "HW limit exceeded" for the op 0x003d */
+		return -EUSERS;
 	default:
 		return -EIO;
 	}
@@ -180,11 +183,12 @@
  * @scssc: request and response block for SADC
  * @summary_indicator_addr: summary indicator address
  * @subchannel_indicator_addr: subchannel indicator address
+ * @isc: Interruption Subclass for this subchannel
  *
  * Returns 0 on success.
  */
 int chsc_sadc(struct subchannel_id schid, struct chsc_scssc_area *scssc,
-	      u64 summary_indicator_addr, u64 subchannel_indicator_addr)
+	      u64 summary_indicator_addr, u64 subchannel_indicator_addr, u8 isc)
 {
 	memset(scssc, 0, sizeof(*scssc));
 	scssc->request.length = 0x0fe0;
@@ -196,7 +200,7 @@
 
 	scssc->ks = PAGE_DEFAULT_KEY >> 4;
 	scssc->kc = PAGE_DEFAULT_KEY >> 4;
-	scssc->isc = QDIO_AIRQ_ISC;
+	scssc->isc = isc;
 	scssc->schid = schid;
 
 	/* enable the time delay disablement facility */
@@ -1110,7 +1114,7 @@
 	return ret;
 }
 
-int __init chsc_get_cssid(int idx)
+int __init chsc_get_cssid_iid(int idx, u8 *cssid, u8 *iid)
 {
 	struct {
 		struct chsc_header request;
@@ -1121,7 +1125,8 @@
 		u32 reserved2[3];
 		struct {
 			u8 cssid;
-			u32 : 24;
+			u8 iid;
+			u32 : 16;
 		} list[0];
 	} *sdcal_area;
 	int ret;
@@ -1147,8 +1152,10 @@
 	}
 
 	if ((addr_t) &sdcal_area->list[idx] <
-	    (addr_t) &sdcal_area->response + sdcal_area->response.length)
-		ret = sdcal_area->list[idx].cssid;
+	    (addr_t) &sdcal_area->response + sdcal_area->response.length) {
+		*cssid = sdcal_area->list[idx].cssid;
+		*iid = sdcal_area->list[idx].iid;
+	}
 	else
 		ret = -ENODEV;
 exit:
@@ -1256,6 +1263,27 @@
 	return (rr->response.code == 0x0001) ? 0 : -EIO;
 }
 
+int chsc_stzi(void *page, void *result, size_t size)
+{
+	struct {
+		struct chsc_header request;
+		unsigned int rsvd0[3];
+		struct chsc_header response;
+		char data[];
+	} *rr;
+	int rc;
+
+	memset(page, 0, PAGE_SIZE);
+	rr = page;
+	rr->request.length = 0x0010;
+	rr->request.code = 0x003e;
+	rc = chsc(rr);
+	if (rc)
+		return -EIO;
+	memcpy(result, &rr->data, size);
+	return (rr->response.code == 0x0001) ? 0 : -EIO;
+}
+
 int chsc_siosl(struct subchannel_id schid)
 {
 	struct {
@@ -1333,36 +1361,34 @@
 EXPORT_SYMBOL_GPL(chsc_scm_info);
 
 /**
- * chsc_pnso_brinfo() - Perform Network-Subchannel Operation, Bridge Info.
+ * chsc_pnso() - Perform Network-Subchannel Operation
  * @schid:		id of the subchannel on which PNSO is performed
- * @brinfo_area:	request and response block for the operation
+ * @pnso_area:		request and response block for the operation
+ * @oc:			Operation Code
  * @resume_token:	resume token for multiblock response
  * @cnc:		Boolean change-notification control
  *
- * brinfo_area must be allocated by the caller with get_zeroed_page(GFP_KERNEL)
+ * pnso_area must be allocated by the caller with get_zeroed_page(GFP_KERNEL)
  *
  * Returns 0 on success.
  */
-int chsc_pnso_brinfo(struct subchannel_id schid,
-		struct chsc_pnso_area *brinfo_area,
-		struct chsc_brinfo_resume_token resume_token,
-		int cnc)
+int chsc_pnso(struct subchannel_id schid, struct chsc_pnso_area *pnso_area,
+	      u8 oc, struct chsc_pnso_resume_token resume_token, int cnc)
 {
-	memset(brinfo_area, 0, sizeof(*brinfo_area));
-	brinfo_area->request.length = 0x0030;
-	brinfo_area->request.code = 0x003d; /* network-subchannel operation */
-	brinfo_area->m	   = schid.m;
-	brinfo_area->ssid  = schid.ssid;
-	brinfo_area->sch   = schid.sch_no;
-	brinfo_area->cssid = schid.cssid;
-	brinfo_area->oc    = 0; /* Store-network-bridging-information list */
-	brinfo_area->resume_token = resume_token;
-	brinfo_area->n	   = (cnc != 0);
-	if (chsc(brinfo_area))
+	memset(pnso_area, 0, sizeof(*pnso_area));
+	pnso_area->request.length = 0x0030;
+	pnso_area->request.code = 0x003d; /* network-subchannel operation */
+	pnso_area->m	   = schid.m;
+	pnso_area->ssid  = schid.ssid;
+	pnso_area->sch	 = schid.sch_no;
+	pnso_area->cssid = schid.cssid;
+	pnso_area->oc	 = oc;
+	pnso_area->resume_token = resume_token;
+	pnso_area->n	   = (cnc != 0);
+	if (chsc(pnso_area))
 		return -EIO;
-	return chsc_error_from_response(brinfo_area->response.code);
+	return chsc_error_from_response(pnso_area->response.code);
 }
-EXPORT_SYMBOL_GPL(chsc_pnso_brinfo);
 
 int chsc_sgib(u32 origin)
 {
diff --git a/drivers/s390/cio/chsc.h b/drivers/s390/cio/chsc.h
index e57d68e..c2b83b6 100644
--- a/drivers/s390/cio/chsc.h
+++ b/drivers/s390/cio/chsc.h
@@ -163,7 +163,8 @@
 int chsc_get_channel_measurement_chars(struct channel_path *chp);
 int chsc_ssqd(struct subchannel_id schid, struct chsc_ssqd_area *ssqd);
 int chsc_sadc(struct subchannel_id schid, struct chsc_scssc_area *scssc,
-	      u64 summary_indicator_addr, u64 subchannel_indicator_addr);
+	      u64 summary_indicator_addr, u64 subchannel_indicator_addr,
+	      u8 isc);
 int chsc_sgib(u32 origin);
 int chsc_error_from_response(int response);
 
@@ -204,54 +205,10 @@
 
 int chsc_scm_info(struct chsc_scm_info *scm_area, u64 token);
 
-struct chsc_brinfo_resume_token {
-	u64 t1;
-	u64 t2;
-} __packed;
+int chsc_pnso(struct subchannel_id schid, struct chsc_pnso_area *pnso_area,
+	      u8 oc, struct chsc_pnso_resume_token resume_token, int cnc);
 
-struct chsc_brinfo_naihdr {
-	struct chsc_brinfo_resume_token resume_token;
-	u32:32;
-	u32 instance;
-	u32:24;
-	u8 naids;
-	u32 reserved[3];
-} __packed;
-
-struct chsc_pnso_area {
-	struct chsc_header request;
-	u8:2;
-	u8 m:1;
-	u8:5;
-	u8:2;
-	u8 ssid:2;
-	u8 fmt:4;
-	u16 sch;
-	u8:8;
-	u8 cssid;
-	u16:16;
-	u8 oc;
-	u32:24;
-	struct chsc_brinfo_resume_token resume_token;
-	u32 n:1;
-	u32:31;
-	u32 reserved[3];
-	struct chsc_header response;
-	u32:32;
-	struct chsc_brinfo_naihdr naihdr;
-	union {
-		struct qdio_brinfo_entry_l3_ipv6 l3_ipv6[0];
-		struct qdio_brinfo_entry_l3_ipv4 l3_ipv4[0];
-		struct qdio_brinfo_entry_l2	 l2[0];
-	} entries;
-} __packed __aligned(PAGE_SIZE);
-
-int chsc_pnso_brinfo(struct subchannel_id schid,
-		struct chsc_pnso_area *brinfo_area,
-		struct chsc_brinfo_resume_token resume_token,
-		int cnc);
-
-int __init chsc_get_cssid(int idx);
+int __init chsc_get_cssid_iid(int idx, u8 *cssid, u8 *iid);
 
 #ifdef CONFIG_SCM_BUS
 int scm_update_information(void);
diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c
index 7950ac5..cf2c3c4 100644
--- a/drivers/s390/cio/css.c
+++ b/drivers/s390/cio/css.c
@@ -433,8 +433,8 @@
 	struct subchannel *sch = to_subchannel(dev);
 	struct pmcw *pmcw = &sch->schib.pmcw;
 
-	if ((pmcw->st == SUBCHANNEL_TYPE_IO ||
-	     pmcw->st == SUBCHANNEL_TYPE_MSG) && pmcw->dnv)
+	if ((pmcw->st == SUBCHANNEL_TYPE_IO && pmcw->dnv) ||
+	    (pmcw->st == SUBCHANNEL_TYPE_MSG && pmcw->w))
 		return sysfs_emit(buf, "0.%x.%04x\n", sch->schid.ssid,
 				  pmcw->dev);
 	else
@@ -871,7 +871,7 @@
 	if (css_general_characteristics.mcss) {
 		css->global_pgid.pgid_high.ext_cssid.version = 0x80;
 		css->global_pgid.pgid_high.ext_cssid.cssid =
-			(css->cssid < 0) ? 0 : css->cssid;
+			css->id_valid ? css->cssid : 0;
 	} else {
 		css->global_pgid.pgid_high.cpu_addr = stap();
 	}
@@ -894,7 +894,7 @@
 {
 	struct channel_subsystem *css = to_css(dev);
 
-	if (css->cssid < 0)
+	if (!css->id_valid)
 		return -EINVAL;
 
 	return sprintf(buf, "%x\n", css->cssid);
@@ -992,7 +992,12 @@
 	css->device.dma_mask = &css->device.coherent_dma_mask;
 
 	mutex_init(&css->mutex);
-	css->cssid = chsc_get_cssid(nr);
+	ret = chsc_get_cssid_iid(nr, &css->cssid, &css->iid);
+	if (!ret) {
+		css->id_valid = true;
+		pr_info("Partition identifier %01x.%01x\n", css->cssid,
+			css->iid);
+	}
 	css_generate_pgid(css, (u32) (get_tod_clock() >> 32));
 
 	ret = device_register(&css->device);
@@ -1367,20 +1372,6 @@
 }
 subsys_initcall_sync(channel_subsystem_init_sync);
 
-void channel_subsystem_reinit(void)
-{
-	struct channel_path *chp;
-	struct chp_id chpid;
-
-	chsc_enable_facility(CHSC_SDA_OC_MSS);
-	chp_id_for_each(&chpid) {
-		chp = chpid_to_chp(chpid);
-		if (chp)
-			chp_update_desc(chp);
-	}
-	cmf_reactivate();
-}
-
 #ifdef CONFIG_PROC_FS
 static ssize_t cio_settle_write(struct file *file, const char __user *buf,
 				size_t count, loff_t *ppos)
@@ -1394,18 +1385,17 @@
 	return ret ? ret : count;
 }
 
-static const struct file_operations cio_settle_proc_fops = {
-	.open = nonseekable_open,
-	.write = cio_settle_write,
-	.llseek = no_llseek,
+static const struct proc_ops cio_settle_proc_ops = {
+	.proc_open	= nonseekable_open,
+	.proc_write	= cio_settle_write,
+	.proc_lseek	= no_llseek,
 };
 
 static int __init cio_settle_init(void)
 {
 	struct proc_dir_entry *entry;
 
-	entry = proc_create("cio_settle", S_IWUSR, NULL,
-			    &cio_settle_proc_fops);
+	entry = proc_create("cio_settle", S_IWUSR, NULL, &cio_settle_proc_ops);
 	if (!entry)
 		return -ENOMEM;
 	return 0;
diff --git a/drivers/s390/cio/css.h b/drivers/s390/cio/css.h
index 8d83290..3f322ea 100644
--- a/drivers/s390/cio/css.h
+++ b/drivers/s390/cio/css.h
@@ -115,7 +115,9 @@
 void css_update_ssd_info(struct subchannel *sch);
 
 struct channel_subsystem {
-	int cssid;
+	u8 cssid;
+	u8 iid;
+	bool id_valid; /* cssid,iid */
 	struct channel_path *chps[__MAX_CHPID + 1];
 	struct device device;
 	struct pgid global_pgid;
diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c
index 23e9227..33280ca 100644
--- a/drivers/s390/cio/device.c
+++ b/drivers/s390/cio/device.c
@@ -1267,7 +1267,7 @@
 		sch = to_subchannel(cdev->dev.parent);
 		if ((sch->schib.pmcw.pam & sch->opm) == sch->vpm)
 			break;
-		/* fall through */
+		fallthrough;
 	case DEV_STATE_DISCONNECTED:
 		CIO_MSG_EVENT(3, "recovery: trigger 0.%x.%04x\n",
 			      cdev->private->dev_id.ssid,
@@ -2096,7 +2096,7 @@
 	case CDEV_TODO_UNREG_EVAL:
 		if (!sch_is_pseudo_sch(sch))
 			css_schedule_eval(sch->schid);
-		/* fall-through */
+		fallthrough;
 	case CDEV_TODO_UNREG:
 		if (sch_is_pseudo_sch(sch))
 			ccw_device_unregister(cdev);
diff --git a/drivers/s390/cio/device.h b/drivers/s390/cio/device.h
index f5c427e..853b6a8 100644
--- a/drivers/s390/cio/device.h
+++ b/drivers/s390/cio/device.h
@@ -96,7 +96,6 @@
 int ccw_device_offline(struct ccw_device *);
 void ccw_device_update_sense_data(struct ccw_device *);
 int ccw_device_test_sense_data(struct ccw_device *);
-void ccw_device_schedule_sch_unregister(struct ccw_device *);
 int ccw_purge_blacklisted(void);
 void ccw_device_sched_todo(struct ccw_device *cdev, enum cdev_todo todo);
 struct ccw_device *get_ccwdev_by_dev_id(struct ccw_dev_id *dev_id);
diff --git a/drivers/s390/cio/device_ops.c b/drivers/s390/cio/device_ops.c
index ccecf6b..c533d1d 100644
--- a/drivers/s390/cio/device_ops.c
+++ b/drivers/s390/cio/device_ops.c
@@ -710,6 +710,114 @@
 }
 EXPORT_SYMBOL_GPL(ccw_device_get_schid);
 
+/**
+ * ccw_device_pnso() - Perform Network-Subchannel Operation
+ * @cdev:		device on which PNSO is performed
+ * @pnso_area:		request and response block for the operation
+ * @oc:			Operation Code
+ * @resume_token:	resume token for multiblock response
+ * @cnc:		Boolean change-notification control
+ *
+ * pnso_area must be allocated by the caller with get_zeroed_page(GFP_KERNEL)
+ *
+ * Returns 0 on success.
+ */
+int ccw_device_pnso(struct ccw_device *cdev,
+		    struct chsc_pnso_area *pnso_area, u8 oc,
+		    struct chsc_pnso_resume_token resume_token, int cnc)
+{
+	struct subchannel_id schid;
+
+	ccw_device_get_schid(cdev, &schid);
+	return chsc_pnso(schid, pnso_area, oc, resume_token, cnc);
+}
+EXPORT_SYMBOL_GPL(ccw_device_pnso);
+
+/**
+ * ccw_device_get_cssid() - obtain Channel Subsystem ID
+ * @cdev: device to obtain the CSSID for
+ * @cssid: The resulting Channel Subsystem ID
+ */
+int ccw_device_get_cssid(struct ccw_device *cdev, u8 *cssid)
+{
+	struct device *sch_dev = cdev->dev.parent;
+	struct channel_subsystem *css = to_css(sch_dev->parent);
+
+	if (css->id_valid)
+		*cssid = css->cssid;
+	return css->id_valid ? 0 : -ENODEV;
+}
+EXPORT_SYMBOL_GPL(ccw_device_get_cssid);
+
+/**
+ * ccw_device_get_iid() - obtain MIF-image ID
+ * @cdev: device to obtain the MIF-image ID for
+ * @iid: The resulting MIF-image ID
+ */
+int ccw_device_get_iid(struct ccw_device *cdev, u8 *iid)
+{
+	struct device *sch_dev = cdev->dev.parent;
+	struct channel_subsystem *css = to_css(sch_dev->parent);
+
+	if (css->id_valid)
+		*iid = css->iid;
+	return css->id_valid ? 0 : -ENODEV;
+}
+EXPORT_SYMBOL_GPL(ccw_device_get_iid);
+
+/**
+ * ccw_device_get_chpid() - obtain Channel Path ID
+ * @cdev: device to obtain the Channel Path ID for
+ * @chp_idx: Index of the channel path
+ * @chpid: The resulting Channel Path ID
+ */
+int ccw_device_get_chpid(struct ccw_device *cdev, int chp_idx, u8 *chpid)
+{
+	struct subchannel *sch = to_subchannel(cdev->dev.parent);
+	int mask;
+
+	if ((chp_idx < 0) || (chp_idx > 7))
+		return -EINVAL;
+	mask = 0x80 >> chp_idx;
+	if (!(sch->schib.pmcw.pim & mask))
+		return -ENODEV;
+
+	*chpid = sch->schib.pmcw.chpid[chp_idx];
+	return 0;
+}
+EXPORT_SYMBOL_GPL(ccw_device_get_chpid);
+
+/**
+ * ccw_device_get_chid() - obtain Channel ID associated with specified CHPID
+ * @cdev: device to obtain the Channel ID for
+ * @chp_idx: Index of the channel path
+ * @chid: The resulting Channel ID
+ */
+int ccw_device_get_chid(struct ccw_device *cdev, int chp_idx, u16 *chid)
+{
+	struct chp_id cssid_chpid;
+	struct channel_path *chp;
+	int rc;
+
+	chp_id_init(&cssid_chpid);
+	rc = ccw_device_get_chpid(cdev, chp_idx, &cssid_chpid.id);
+	if (rc)
+		return rc;
+	chp = chpid_to_chp(cssid_chpid);
+	if (!chp)
+		return -ENODEV;
+
+	mutex_lock(&chp->lock);
+	if (chp->desc_fmt1.flags & 0x10)
+		*chid = chp->desc_fmt1.chid;
+	else
+		rc = -ENODEV;
+	mutex_unlock(&chp->lock);
+
+	return rc;
+}
+EXPORT_SYMBOL_GPL(ccw_device_get_chid);
+
 /*
  * Allocate zeroed dma coherent 31 bit addressable memory using
  * the subchannels dma pool. Maximal size of allocation supported
@@ -717,13 +825,23 @@
  */
 void *ccw_device_dma_zalloc(struct ccw_device *cdev, size_t size)
 {
-	return cio_gp_dma_zalloc(cdev->private->dma_pool, &cdev->dev, size);
+	void *addr;
+
+	if (!get_device(&cdev->dev))
+		return NULL;
+	addr = cio_gp_dma_zalloc(cdev->private->dma_pool, &cdev->dev, size);
+	if (IS_ERR_OR_NULL(addr))
+		put_device(&cdev->dev);
+	return addr;
 }
 EXPORT_SYMBOL(ccw_device_dma_zalloc);
 
 void ccw_device_dma_free(struct ccw_device *cdev, void *cpu_addr, size_t size)
 {
+	if (!cpu_addr)
+		return;
 	cio_gp_dma_free(cdev->private->dma_pool, cpu_addr, size);
+	put_device(&cdev->dev);
 }
 EXPORT_SYMBOL(ccw_device_dma_free);
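
A minimal sketch of a caller for the new ccw_device_pnso() wrapper
above, following its kernel-doc requirement that the area come from
get_zeroed_page(GFP_KERNEL); the function name is illustrative, and
operation code 0 is the store-network-bridging-information list, per the
old chsc_pnso_brinfo() code this series replaces:

	static int query_bridge_info(struct ccw_device *cdev)
	{
		struct chsc_pnso_resume_token token = {};
		struct chsc_pnso_area *area;
		int rc;

		area = (struct chsc_pnso_area *)get_zeroed_page(GFP_KERNEL);
		if (!area)
			return -ENOMEM;
		/* oc 0 = bridge info, cnc 0 = no change notification */
		rc = ccw_device_pnso(cdev, area, 0, token, 0);
		/* on success, the response entries follow area->response */
		free_page((unsigned long)area);
		return rc;
	}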
 
diff --git a/drivers/s390/cio/idset.c b/drivers/s390/cio/idset.c
index 835de44..45f9c07 100644
--- a/drivers/s390/cio/idset.c
+++ b/drivers/s390/cio/idset.c
@@ -13,7 +13,7 @@
 struct idset {
 	int num_ssid;
 	int num_id;
-	unsigned long bitmap[0];
+	unsigned long bitmap[];
 };
 
 static inline unsigned long bitmap_size(int num_ssid, int num_id)
@@ -59,18 +59,6 @@
 	return test_bit(ssid * set->num_id + id, set->bitmap);
 }
 
-static inline int idset_get_first(struct idset *set, int *ssid, int *id)
-{
-	int bitnum;
-
-	bitnum = find_first_bit(set->bitmap, set->num_ssid * set->num_id);
-	if (bitnum >= set->num_ssid * set->num_id)
-		return 0;
-	*ssid = bitnum / set->num_id;
-	*id = bitnum % set->num_id;
-	return 1;
-}
-
 struct idset *idset_sch_new(void)
 {
 	return idset_new(max_ssid + 1, __MAX_SUBCHANNEL + 1);
diff --git a/drivers/s390/cio/qdio.h b/drivers/s390/cio/qdio.h
index 3b0a448..cd2df4f 100644
--- a/drivers/s390/cio/qdio.h
+++ b/drivers/s390/cio/qdio.h
@@ -15,7 +15,6 @@
 #define QDIO_BUSY_BIT_PATIENCE		(100 << 12)	/* 100 microseconds */
 #define QDIO_BUSY_BIT_RETRY_DELAY	10		/* 10 milliseconds */
 #define QDIO_BUSY_BIT_RETRIES		1000		/* = 10s retry time */
-#define QDIO_INPUT_THRESHOLD		(500 << 12)	/* 500 microseconds */
 
 enum qdio_irq_states {
 	QDIO_IRQ_STATE_INACTIVE,
@@ -82,6 +81,7 @@
 #define QDIO_SIGA_WRITE		0x00
 #define QDIO_SIGA_READ		0x01
 #define QDIO_SIGA_SYNC		0x02
+#define QDIO_SIGA_WRITEM	0x03
 #define QDIO_SIGA_WRITEQ	0x04
 #define QDIO_SIGA_QEBSM_FLAG	0x80
 
@@ -165,34 +165,21 @@
 } ____cacheline_aligned;
 
 struct qdio_queue_perf_stat {
-	/*
-	 * Sorted into order-2 buckets: 1, 2-3, 4-7, ... 64-127, 128.
-	 * Since max. 127 SBALs are scanned reuse entry for 128 as queue full
-	 * aka 127 SBALs found.
-	 */
+	/* Sorted into order-2 buckets: 1, 2-3, 4-7, ... 64-127, 128. */
 	unsigned int nr_sbals[8];
 	unsigned int nr_sbal_error;
 	unsigned int nr_sbal_nop;
 	unsigned int nr_sbal_total;
 };
 
-enum qdio_queue_irq_states {
-	QDIO_QUEUE_IRQS_DISABLED,
+enum qdio_irq_poll_states {
+	QDIO_IRQ_DISABLED,
 };
 
 struct qdio_input_q {
-	/* input buffer acknowledgement flag */
-	int polling;
-	/* first ACK'ed buffer */
-	int ack_start;
-	/* how much sbals are acknowledged with qebsm */
-	int ack_count;
-	/* last time of noticing incoming data */
-	u64 timestamp;
-	/* upper-layer polling flag */
-	unsigned long queue_irq_state;
-	/* callback to start upper-layer polling */
-	void (*queue_start_poll) (struct ccw_device *, int, unsigned long);
+	/* Batch of SBALs that we processed while polling the queue: */
+	unsigned int batch_start;
+	unsigned int batch_count;
 };
 
 struct qdio_output_q {
@@ -226,9 +213,6 @@
 	 */
 	int first_to_check;
 
-	/* beginning position for calling the program */
-	int first_to_kick;
-
 	/* number of buffers in use by the adapter */
 	atomic_t nr_buf_used;
 
@@ -252,13 +236,9 @@
 	/* input or output queue */
 	int is_input_q;
 
-	/* list of thinint input queues */
-	struct list_head entry;
-
 	/* upper-layer program handler */
 	qdio_handler_t (*handler);
 
-	struct dentry *debugfs_q;
 	struct qdio_irq *irq_ptr;
 	struct sl *sl;
 	/*
@@ -272,8 +252,8 @@
 	struct qib qib;
 	u32 *dsci;		/* address of device state change indicator */
 	struct ccw_device *cdev;
+	struct list_head entry;		/* list of thinint devices */
 	struct dentry *debugfs_dev;
-	struct dentry *debugfs_perf;
 
 	unsigned long int_parm;
 	struct subchannel_id schid;
@@ -301,6 +281,11 @@
 
 	struct qdio_q *input_qs[QDIO_MAX_QUEUES_PER_IRQ];
 	struct qdio_q *output_qs[QDIO_MAX_QUEUES_PER_IRQ];
+	unsigned int max_input_qs;
+	unsigned int max_output_qs;
+
+	void (*irq_poll)(struct ccw_device *cdev, unsigned long data);
+	unsigned long poll_state;
 
 	debug_info_t *debug_area;
 	struct mutex setup_mutex;
@@ -317,13 +302,15 @@
 
 #define qperf(__qdev, __attr)	((__qdev)->perf_stat.(__attr))
 
-#define qperf_inc(__q, __attr)						\
+#define QDIO_PERF_STAT_INC(__irq, __attr)				\
 ({									\
-	struct qdio_irq *qdev = (__q)->irq_ptr;				\
+	struct qdio_irq *qdev = __irq;					\
 	if (qdev->perf_stat_enabled)					\
 		(qdev->perf_stat.__attr)++;				\
 })
 
+#define qperf_inc(__q, __attr)	QDIO_PERF_STAT_INC((__q)->irq_ptr, __attr)
+
 static inline void account_sbals_error(struct qdio_q *q, int count)
 {
 	q->q_stats.nr_sbal_error += count;
@@ -355,14 +342,10 @@
 	for (i = 0; i < irq_ptr->nr_output_qs &&	\
 		({ q = irq_ptr->output_qs[i]; 1; }); i++)
 
-#define prev_buf(bufnr)	\
-	((bufnr + QDIO_MAX_BUFFERS_MASK) & QDIO_MAX_BUFFERS_MASK)
-#define next_buf(bufnr)	\
-	((bufnr + 1) & QDIO_MAX_BUFFERS_MASK)
-#define add_buf(bufnr, inc) \
-	((bufnr + inc) & QDIO_MAX_BUFFERS_MASK)
-#define sub_buf(bufnr, dec) \
-	((bufnr - dec) & QDIO_MAX_BUFFERS_MASK)
+#define add_buf(bufnr, inc)	QDIO_BUFNR((bufnr) + (inc))
+#define next_buf(bufnr)		add_buf(bufnr, 1)
+#define sub_buf(bufnr, dec)	QDIO_BUFNR((bufnr) - (dec))
+#define prev_buf(bufnr)		sub_buf(bufnr, 1)
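+/* Note: QDIO_BUFNR() masks the index into the 128-SBAL ring, so all four wrap. */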
 
 #define queue_irqs_enabled(q)			\
 	(test_bit(QDIO_QUEUE_IRQS_DISABLED, &q->u.in.queue_irq_state) == 0)
@@ -374,14 +357,11 @@
 /* prototypes for thin interrupt */
 int qdio_establish_thinint(struct qdio_irq *irq_ptr);
 void qdio_shutdown_thinint(struct qdio_irq *irq_ptr);
-void tiqdio_add_input_queues(struct qdio_irq *irq_ptr);
-void tiqdio_remove_input_queues(struct qdio_irq *irq_ptr);
+void tiqdio_add_device(struct qdio_irq *irq_ptr);
+void tiqdio_remove_device(struct qdio_irq *irq_ptr);
 void tiqdio_inbound_processing(unsigned long q);
-int tiqdio_allocate_memory(void);
-void tiqdio_free_memory(void);
-int tiqdio_register_thinints(void);
-void tiqdio_unregister_thinints(void);
-void clear_nonshared_ind(struct qdio_irq *);
+int qdio_thinint_init(void);
+void qdio_thinint_exit(void);
 int test_nonshared_ind(struct qdio_irq *);
 
 /* prototypes for setup */
@@ -396,12 +376,11 @@
 int qdio_setup_get_ssqd(struct qdio_irq *irq_ptr,
 			struct subchannel_id *schid,
 			struct qdio_ssqd_desc *data);
-int qdio_setup_irq(struct qdio_initialize *init_data);
-void qdio_print_subchannel_info(struct qdio_irq *irq_ptr,
-				struct ccw_device *cdev);
-void qdio_release_memory(struct qdio_irq *irq_ptr);
-int qdio_setup_create_sysfs(struct ccw_device *cdev);
-void qdio_setup_destroy_sysfs(struct ccw_device *cdev);
+int qdio_setup_irq(struct qdio_irq *irq_ptr, struct qdio_initialize *init_data);
+void qdio_shutdown_irq(struct qdio_irq *irq);
+void qdio_print_subchannel_info(struct qdio_irq *irq_ptr);
+void qdio_free_queues(struct qdio_irq *irq_ptr);
+void qdio_free_async_data(struct qdio_irq *irq_ptr);
 int qdio_setup_init(void);
 void qdio_setup_exit(void);
 int qdio_enable_async_operation(struct qdio_output_q *q);
diff --git a/drivers/s390/cio/qdio_debug.c b/drivers/s390/cio/qdio_debug.c
index 35410e6..863d17c 100644
--- a/drivers/s390/cio/qdio_debug.c
+++ b/drivers/s390/cio/qdio_debug.c
@@ -58,30 +58,16 @@
 	mutex_unlock(&qdio_dbf_list_mutex);
 }
 
-int qdio_allocate_dbf(struct qdio_initialize *init_data,
-		       struct qdio_irq *irq_ptr)
+int qdio_allocate_dbf(struct qdio_irq *irq_ptr)
 {
 	char text[QDIO_DBF_NAME_LEN];
 	struct qdio_dbf_entry *new_entry;
 
-	DBF_EVENT("qfmt:%1d", init_data->q_format);
-	DBF_HEX(init_data->adapter_name, 8);
-	DBF_EVENT("qpff%4x", init_data->qib_param_field_format);
-	DBF_HEX(&init_data->qib_param_field, sizeof(void *));
-	DBF_HEX(&init_data->input_slib_elements, sizeof(void *));
-	DBF_HEX(&init_data->output_slib_elements, sizeof(void *));
-	DBF_EVENT("niq:%1d noq:%1d", init_data->no_input_qs,
-		  init_data->no_output_qs);
-	DBF_HEX(&init_data->input_handler, sizeof(void *));
-	DBF_HEX(&init_data->output_handler, sizeof(void *));
-	DBF_HEX(&init_data->int_parm, sizeof(long));
-	DBF_HEX(&init_data->input_sbal_addr_array, sizeof(void *));
-	DBF_HEX(&init_data->output_sbal_addr_array, sizeof(void *));
 	DBF_EVENT("irq:%8lx", (unsigned long)irq_ptr);
 
 	/* allocate trace view for the interface */
 	snprintf(text, QDIO_DBF_NAME_LEN, "qdio_%s",
-					dev_name(&init_data->cdev->dev));
+		 dev_name(&irq_ptr->cdev->dev));
 	irq_ptr->debug_area = qdio_get_dbf_entry(text);
 	if (irq_ptr->debug_area)
 		DBF_DEV_EVENT(DBF_ERR, irq_ptr, "dbf reused");
@@ -124,13 +110,12 @@
 	seq_printf(m, "nr_used: %d  ftc: %d\n",
 		   atomic_read(&q->nr_buf_used), q->first_to_check);
 	if (q->is_input_q) {
-		seq_printf(m, "polling: %d  ack start: %d  ack count: %d\n",
-			   q->u.in.polling, q->u.in.ack_start,
-			   q->u.in.ack_count);
+		seq_printf(m, "batch start: %u  batch count: %u\n",
+			   q->u.in.batch_start, q->u.in.batch_count);
 		seq_printf(m, "DSCI: %x   IRQs disabled: %u\n",
 			   *(u8 *)q->irq_ptr->dsci,
-			   test_bit(QDIO_QUEUE_IRQS_DISABLED,
-			   &q->u.in.queue_irq_state));
+			   test_bit(QDIO_IRQ_DISABLED,
+				    &q->irq_ptr->poll_state));
 	}
 	seq_printf(m, "SBAL states:\n");
 	seq_printf(m, "|0      |8      |16     |24     |32     |40     |48     |56  63|\n");
@@ -180,7 +165,7 @@
 	}
 
 	seq_printf(m, "\n1          2..        4..        8..        "
-		   "16..       32..       64..       127\n");
+		   "16..       32..       64..       128\n");
 	for (i = 0; i < ARRAY_SIZE(q->q_stats.nr_sbals); i++)
 		seq_printf(m, "%-10u ", q->q_stats.nr_sbals[i]);
 	seq_printf(m, "\nError      NOP        Total\n%-10u %-10u %-10u\n\n",
@@ -191,6 +176,23 @@
 
 DEFINE_SHOW_ATTRIBUTE(qstat);
 
+static int ssqd_show(struct seq_file *m, void *v)
+{
+	struct ccw_device *cdev = m->private;
+	struct qdio_ssqd_desc ssqd;
+	int rc;
+
+	rc = qdio_get_ssqd_desc(cdev, &ssqd);
+	if (rc)
+		return rc;
+
+	seq_hex_dump(m, "", DUMP_PREFIX_NONE, 16, 4, &ssqd, sizeof(ssqd),
+		     false);
+	return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(ssqd);
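+/* Dumps the raw SSQD block as hex via the per-device "ssqd" debugfs file. */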
+
 static char *qperf_names[] = {
 	"Assumed adapter interrupts",
 	"QDIO interrupts",
@@ -285,53 +287,37 @@
 	.release = single_release,
 };
 
-static void setup_debugfs_entry(struct qdio_q *q)
+static void setup_debugfs_entry(struct dentry *parent, struct qdio_q *q)
 {
 	char name[QDIO_DEBUGFS_NAME_LEN];
 
 	snprintf(name, QDIO_DEBUGFS_NAME_LEN, "%s_%d",
 		 q->is_input_q ? "input" : "output",
 		 q->nr);
-	q->debugfs_q = debugfs_create_file(name, 0444,
-				q->irq_ptr->debugfs_dev, q, &qstat_fops);
-	if (IS_ERR(q->debugfs_q))
-		q->debugfs_q = NULL;
+	debugfs_create_file(name, 0444, parent, q, &qstat_fops);
 }
 
-void qdio_setup_debug_entries(struct qdio_irq *irq_ptr, struct ccw_device *cdev)
+void qdio_setup_debug_entries(struct qdio_irq *irq_ptr)
 {
 	struct qdio_q *q;
 	int i;
 
-	irq_ptr->debugfs_dev = debugfs_create_dir(dev_name(&cdev->dev),
+	irq_ptr->debugfs_dev = debugfs_create_dir(dev_name(&irq_ptr->cdev->dev),
 						  debugfs_root);
-	if (IS_ERR(irq_ptr->debugfs_dev))
-		irq_ptr->debugfs_dev = NULL;
-
-	irq_ptr->debugfs_perf = debugfs_create_file("statistics",
-				S_IFREG | S_IRUGO | S_IWUSR,
-				irq_ptr->debugfs_dev, irq_ptr,
-				&debugfs_perf_fops);
-	if (IS_ERR(irq_ptr->debugfs_perf))
-		irq_ptr->debugfs_perf = NULL;
+	debugfs_create_file("statistics", S_IFREG | S_IRUGO | S_IWUSR,
+			    irq_ptr->debugfs_dev, irq_ptr, &debugfs_perf_fops);
+	debugfs_create_file("ssqd", 0444, irq_ptr->debugfs_dev, irq_ptr->cdev,
+			    &ssqd_fops);
 
 	for_each_input_queue(irq_ptr, q, i)
-		setup_debugfs_entry(q);
+		setup_debugfs_entry(irq_ptr->debugfs_dev, q);
 	for_each_output_queue(irq_ptr, q, i)
-		setup_debugfs_entry(q);
+		setup_debugfs_entry(irq_ptr->debugfs_dev, q);
 }
 
 void qdio_shutdown_debug_entries(struct qdio_irq *irq_ptr)
 {
-	struct qdio_q *q;
-	int i;
-
-	for_each_input_queue(irq_ptr, q, i)
-		debugfs_remove(q->debugfs_q);
-	for_each_output_queue(irq_ptr, q, i)
-		debugfs_remove(q->debugfs_q);
-	debugfs_remove(irq_ptr->debugfs_perf);
-	debugfs_remove(irq_ptr->debugfs_dev);
+	debugfs_remove_recursive(irq_ptr->debugfs_dev);
 }
 
 int __init qdio_debug_init(void)
@@ -353,7 +339,7 @@
 void qdio_debug_exit(void)
 {
 	qdio_clear_dbf_list();
-	debugfs_remove(debugfs_root);
+	debugfs_remove_recursive(debugfs_root);
 	debug_unregister(qdio_dbf_setup);
 	debug_unregister(qdio_dbf_error);
 }
diff --git a/drivers/s390/cio/qdio_debug.h b/drivers/s390/cio/qdio_debug.h
index f85f5fa..0dfba08 100644
--- a/drivers/s390/cio/qdio_debug.h
+++ b/drivers/s390/cio/qdio_debug.h
@@ -64,10 +64,8 @@
 	debug_event(dev->debug_area, level, addr, len);
 }
 
-int qdio_allocate_dbf(struct qdio_initialize *init_data,
-		       struct qdio_irq *irq_ptr);
-void qdio_setup_debug_entries(struct qdio_irq *irq_ptr,
-			      struct ccw_device *cdev);
+int qdio_allocate_dbf(struct qdio_irq *irq_ptr);
+void qdio_setup_debug_entries(struct qdio_irq *irq_ptr);
 void qdio_shutdown_debug_entries(struct qdio_irq *irq_ptr);
 int qdio_debug_init(void);
 void qdio_debug_exit(void);
diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c
index 5b63c50..3e29c26 100644
--- a/drivers/s390/cio/qdio_main.c
+++ b/drivers/s390/cio/qdio_main.c
@@ -131,7 +131,7 @@
 	case 96:
 		/* not all buffers processed */
 		qperf_inc(q, eqbs_partial);
-		DBF_DEV_EVENT(DBF_WARN, q->irq_ptr, "EQBS part:%02x",
+		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "EQBS part:%02x",
 			tmp_count);
 		return count - tmp_count;
 	case 97:
@@ -143,7 +143,7 @@
 		DBF_ERROR("%4x EQBS ERROR", SCH_NO(q));
 		DBF_ERROR("%3d%3d%2d", count, tmp_count, nr);
 		q->handler(q->irq_ptr->cdev, QDIO_ERROR_GET_BUF_STATE, q->nr,
-			   q->first_to_kick, count, q->irq_ptr->int_parm);
+			   q->first_to_check, count, q->irq_ptr->int_parm);
 		return 0;
 	}
 }
@@ -191,7 +191,7 @@
 		DBF_ERROR("%4x SQBS ERROR", SCH_NO(q));
 		DBF_ERROR("%3d%3d%2d", count, tmp_count, nr);
 		q->handler(q->irq_ptr->cdev, QDIO_ERROR_SET_BUF_STATE, q->nr,
-			   q->first_to_kick, count, q->irq_ptr->int_parm);
+			   q->first_to_check, count, q->irq_ptr->int_parm);
 		return 0;
 	}
 }
@@ -254,10 +254,17 @@
 	if (is_qebsm(q))
 		return qdio_do_sqbs(q, state, bufnr, count);
 
+	/* Ensure that all preceding changes to the SBALs are visible: */
+	mb();
+
 	for (i = 0; i < count; i++) {
-		xchg(&q->slsb.val[bufnr], state);
+		WRITE_ONCE(q->slsb.val[bufnr], state);
 		bufnr = next_buf(bufnr);
 	}
+
+	/* Make our SLSB changes visible: */
+	mb();
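+	/*
+	 * Note: plain stores bracketed by two full barriers are cheaper
+	 * than issuing one serializing xchg() per SLSB byte.
+	 */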
+
 	return count;
 }
 
@@ -310,18 +317,19 @@
 		return qdio_siga_sync(q, q->mask, 0);
 }
 
-static int qdio_siga_output(struct qdio_q *q, unsigned int *busy_bit,
-	unsigned long aob)
+static int qdio_siga_output(struct qdio_q *q, unsigned int count,
+			    unsigned int *busy_bit, unsigned long aob)
 {
 	unsigned long schid = *((u32 *) &q->irq_ptr->schid);
 	unsigned int fc = QDIO_SIGA_WRITE;
 	u64 start_time = 0;
 	int retries = 0, cc;
-	unsigned long laob = 0;
 
-	if (aob) {
-		fc = QDIO_SIGA_WRITEQ;
-		laob = aob;
+	if (queue_type(q) == QDIO_IQDIO_QFMT && !multicast_outbound(q)) {
+		if (count > 1)
+			fc = QDIO_SIGA_WRITEM;
+		else if (aob)
+			fc = QDIO_SIGA_WRITEQ;
 	}
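+	/*
+	 * Note: WRITEM signals several SBALs with a single SIGA, while
+	 * WRITEQ hands the AOB to the adapter; both apply only to unicast
+	 * HiperSockets (IQDIO) queues.
+	 */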
 
 	if (is_qebsm(q)) {
@@ -329,7 +337,7 @@
 		fc |= QDIO_SIGA_QEBSM_FLAG;
 	}
 again:
-	cc = do_siga_output(schid, q->mask, busy_bit, fc, laob);
+	cc = do_siga_output(schid, q->mask, busy_bit, fc, aob);
 
 	/* hipersocket busy condition */
 	if (unlikely(*busy_bit)) {
@@ -392,40 +400,26 @@
 
 static inline void qdio_stop_polling(struct qdio_q *q)
 {
-	if (!q->u.in.polling)
+	if (!q->u.in.batch_count)
 		return;
 
-	q->u.in.polling = 0;
 	qperf_inc(q, stop_polling);
 
 	/* show the card that we are not polling anymore */
-	if (is_qebsm(q)) {
-		set_buf_states(q, q->u.in.ack_start, SLSB_P_INPUT_NOT_INIT,
-			       q->u.in.ack_count);
-		q->u.in.ack_count = 0;
-	} else
-		set_buf_state(q, q->u.in.ack_start, SLSB_P_INPUT_NOT_INIT);
+	set_buf_states(q, q->u.in.batch_start, SLSB_P_INPUT_NOT_INIT,
+		       q->u.in.batch_count);
+	q->u.in.batch_count = 0;
 }
 
 static inline void account_sbals(struct qdio_q *q, unsigned int count)
 {
-	int pos;
-
 	q->q_stats.nr_sbal_total += count;
-	if (count == QDIO_MAX_BUFFERS_MASK) {
-		q->q_stats.nr_sbals[7]++;
-		return;
-	}
-	pos = ilog2(count);
-	q->q_stats.nr_sbals[pos]++;
+	q->q_stats.nr_sbals[ilog2(count)]++;
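+	/* count is 1..128, so ilog2() indexes buckets 0..7 directly. */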
 }
 
 static void process_buffer_error(struct qdio_q *q, unsigned int start,
 				 int count)
 {
-	unsigned char state = (q->is_input_q) ? SLSB_P_INPUT_NOT_INIT :
-					SLSB_P_OUTPUT_NOT_INIT;
-
 	q->qdio_error = QDIO_ERROR_SLSB_STATE;
 
 	/* special handling for no target buffer empty */
@@ -433,7 +427,7 @@
 	    q->sbal[start]->element[15].sflags == 0x10) {
 		qperf_inc(q, target_full);
 		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "OUTFULL FTC:%02x", start);
-		goto set;
+		return;
 	}
 
 	DBF_ERROR("%4x BUF ERROR", SCH_NO(q));
@@ -442,59 +436,18 @@
 	DBF_ERROR("F14:%2x F15:%2x",
 		  q->sbal[start]->element[14].sflags,
 		  q->sbal[start]->element[15].sflags);
-
-set:
-	/*
-	 * Interrupts may be avoided as long as the error is present
-	 * so change the buffer state immediately to avoid starvation.
-	 */
-	set_buf_states(q, start, state, count);
 }
 
-static inline void inbound_primed(struct qdio_q *q, unsigned int start,
-				  int count)
+static inline void inbound_handle_work(struct qdio_q *q, unsigned int start,
+				       int count, bool auto_ack)
 {
-	int new;
+	/* ACK the newest SBAL: */
+	if (!auto_ack)
+		set_buf_state(q, add_buf(start, count - 1), SLSB_P_INPUT_ACK);
 
-	DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in prim:%1d %02x", q->nr, count);
-
-	/* for QEBSM the ACK was already set by EQBS */
-	if (is_qebsm(q)) {
-		if (!q->u.in.polling) {
-			q->u.in.polling = 1;
-			q->u.in.ack_count = count;
-			q->u.in.ack_start = start;
-			return;
-		}
-
-		/* delete the previous ACK's */
-		set_buf_states(q, q->u.in.ack_start, SLSB_P_INPUT_NOT_INIT,
-			       q->u.in.ack_count);
-		q->u.in.ack_count = count;
-		q->u.in.ack_start = start;
-		return;
-	}
-
-	/*
-	 * ACK the newest buffer. The ACK will be removed in qdio_stop_polling
-	 * or by the next inbound run.
-	 */
-	new = add_buf(start, count - 1);
-	if (q->u.in.polling) {
-		/* reset the previous ACK but first set the new one */
-		set_buf_state(q, new, SLSB_P_INPUT_ACK);
-		set_buf_state(q, q->u.in.ack_start, SLSB_P_INPUT_NOT_INIT);
-	} else {
-		q->u.in.polling = 1;
-		set_buf_state(q, new, SLSB_P_INPUT_ACK);
-	}
-
-	q->u.in.ack_start = new;
-	count--;
-	if (!count)
-		return;
-	/* need to change ALL buffers to get more interrupts */
-	set_buf_states(q, start, SLSB_P_INPUT_NOT_INIT, count);
+	if (!q->u.in.batch_count)
+		q->u.in.batch_start = start;
+	q->u.in.batch_count += count;
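+	/*
+	 * Note: the scan is sequential, so each new range is assumed to
+	 * extend the current batch contiguously.
+	 */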
 }
 
 static int get_inbound_buffer_frontier(struct qdio_q *q, unsigned int start)
@@ -504,11 +457,7 @@
 
 	q->timestamp = get_tod_clock_fast();
 
-	/*
-	 * Don't check 128 buffers, as otherwise qdio_inbound_q_moved
-	 * would return 0.
-	 */
-	count = min(atomic_read(&q->nr_buf_used), QDIO_MAX_BUFFERS_MASK);
+	count = atomic_read(&q->nr_buf_used);
 	if (!count)
 		return 0;
 
@@ -522,43 +471,46 @@
 
 	switch (state) {
 	case SLSB_P_INPUT_PRIMED:
-		inbound_primed(q, start, count);
+		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in prim:%1d %02x", q->nr,
+			      count);
+
+		inbound_handle_work(q, start, count, is_qebsm(q));
 		if (atomic_sub_return(count, &q->nr_buf_used) == 0)
 			qperf_inc(q, inbound_queue_full);
 		if (q->irq_ptr->perf_stat_enabled)
 			account_sbals(q, count);
 		return count;
 	case SLSB_P_INPUT_ERROR:
+		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in err:%1d %02x", q->nr,
+			      count);
+
 		process_buffer_error(q, start, count);
+		inbound_handle_work(q, start, count, false);
 		if (atomic_sub_return(count, &q->nr_buf_used) == 0)
 			qperf_inc(q, inbound_queue_full);
 		if (q->irq_ptr->perf_stat_enabled)
 			account_sbals_error(q, count);
 		return count;
 	case SLSB_CU_INPUT_EMPTY:
-	case SLSB_P_INPUT_NOT_INIT:
-	case SLSB_P_INPUT_ACK:
 		if (q->irq_ptr->perf_stat_enabled)
 			q->q_stats.nr_sbal_nop++;
 		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in nop:%1d %#02x",
 			      q->nr, start);
 		return 0;
+	case SLSB_P_INPUT_NOT_INIT:
+	case SLSB_P_INPUT_ACK:
+		/* We should never see this state; throw a WARN: */
 	default:
-		WARN_ON_ONCE(1);
+		dev_WARN_ONCE(&q->irq_ptr->cdev->dev, 1,
+			      "found state %#x at index %u on queue %u\n",
+			      state, start, q->nr);
 		return 0;
 	}
 }
 
 static int qdio_inbound_q_moved(struct qdio_q *q, unsigned int start)
 {
-	int count;
-
-	count = get_inbound_buffer_frontier(q, start);
-
-	if (count && !is_thinint_irq(q->irq_ptr) && MACHINE_IS_LPAR)
-		q->u.in.timestamp = get_tod_clock();
-
-	return count;
+	return get_inbound_buffer_frontier(q, start);
 }
 
 static inline int qdio_inbound_q_done(struct qdio_q *q, unsigned int start)
@@ -576,42 +528,7 @@
 		/* more work coming */
 		return 0;
 
-	if (is_thinint_irq(q->irq_ptr))
-		return 1;
-
-	/* don't poll under z/VM */
-	if (MACHINE_IS_VM)
-		return 1;
-
-	/*
-	 * At this point we know, that inbound first_to_check
-	 * has (probably) not moved (see qdio_inbound_processing).
-	 */
-	if (get_tod_clock_fast() > q->u.in.timestamp + QDIO_INPUT_THRESHOLD) {
-		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in done:%02x", start);
-		return 1;
-	} else
-		return 0;
-}
-
-static inline void qdio_handle_aobs(struct qdio_q *q, int start, int count)
-{
-	unsigned char state = 0;
-	int j, b = start;
-
-	for (j = 0; j < count; ++j) {
-		get_buf_state(q, b, &state, 0);
-		if (state == SLSB_P_OUTPUT_PENDING) {
-			struct qaob *aob = q->u.out.aobs[b];
-			if (aob == NULL)
-				continue;
-
-			q->u.out.sbal_state[b].flags |=
-				QDIO_OUTBUF_STATE_FLAG_PENDING;
-			q->u.out.aobs[b] = NULL;
-		}
-		b = next_buf(b);
-	}
+	return 1;
 }
 
 static inline unsigned long qdio_aob_for_buffer(struct qdio_output_q *q,
@@ -633,10 +550,9 @@
 	return phys_aob;
 }
 
-static void qdio_kick_handler(struct qdio_q *q, unsigned int count)
+static void qdio_kick_handler(struct qdio_q *q, unsigned int start,
+			      unsigned int count)
 {
-	int start = q->first_to_kick;
-
 	if (unlikely(q->irq_ptr->state != QDIO_IRQ_STATE_ACTIVE))
 		return;
 
@@ -653,7 +569,6 @@
 		   q->irq_ptr->int_parm);
 
 	/* for the next time */
-	q->first_to_kick = add_buf(start, count);
 	q->qdio_error = 0;
 }
 
@@ -677,9 +592,9 @@
 	if (count == 0)
 		return;
 
+	qdio_kick_handler(q, start, count);
 	start = add_buf(start, count);
 	q->first_to_check = start;
-	qdio_kick_handler(q, count);
 
 	if (!qdio_inbound_q_done(q, start)) {
 		/* means poll time is not yet over */
@@ -705,6 +620,19 @@
 	__qdio_inbound_processing(q);
 }
 
+static void qdio_check_pending(struct qdio_q *q, unsigned int index)
+{
+	unsigned char state;
+
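+	/*
+	 * A PENDING SBAL completes asynchronously: flag it so the upper
+	 * layer knows to wait for the matching QAOB on the CQ.
+	 */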
+	if (get_buf_state(q, index, &state, 0) > 0 &&
+	    state == SLSB_P_OUTPUT_PENDING &&
+	    q->u.out.aobs[index]) {
+		q->u.out.sbal_state[index].flags |=
+			QDIO_OUTBUF_STATE_FLAG_PENDING;
+		q->u.out.aobs[index] = NULL;
+	}
+}
+
 static int get_outbound_buffer_frontier(struct qdio_q *q, unsigned int start)
 {
 	unsigned char state = 0;
@@ -751,11 +679,14 @@
 		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "out primed:%1d",
 			      q->nr);
 		return 0;
-	case SLSB_P_OUTPUT_NOT_INIT:
 	case SLSB_P_OUTPUT_HALTED:
 		return 0;
+	case SLSB_P_OUTPUT_NOT_INIT:
+		/* We should never see this state; throw a WARN: */
 	default:
-		WARN_ON_ONCE(1);
+		dev_WARN_ONCE(&q->irq_ptr->cdev->dev, 1,
+			      "found state %#x at index %u on queue %u\n",
+			      state, start, q->nr);
 		return 0;
 	}
 }
@@ -774,14 +705,20 @@
 
 	if (count) {
 		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "out moved:%1d", q->nr);
-		if (q->u.out.use_cq)
-			qdio_handle_aobs(q, start, count);
+
+		if (q->u.out.use_cq) {
+			unsigned int i;
+
+			for (i = 0; i < count; i++)
+				qdio_check_pending(q, QDIO_BUFNR(start + i));
+		}
 	}
 
 	return count;
 }
 
-static int qdio_kick_outbound_q(struct qdio_q *q, unsigned long aob)
+static int qdio_kick_outbound_q(struct qdio_q *q, unsigned int count,
+				unsigned long aob)
 {
 	int retries = 0, cc;
 	unsigned int busy_bit;
@@ -793,7 +730,7 @@
 retry:
 	qperf_inc(q, siga_write);
 
-	cc = qdio_siga_output(q, &busy_bit, aob);
+	cc = qdio_siga_output(q, count, &busy_bit, aob);
 	switch (cc) {
 	case 0:
 		break;
@@ -834,7 +771,7 @@
 	count = qdio_outbound_q_moved(q, start);
 	if (count) {
 		q->first_to_check = add_buf(start, count);
-		qdio_kick_handler(q, count);
+		qdio_kick_handler(q, start, count);
 	}
 
 	if (queue_type(q) == QDIO_ZFCP_QFMT && !pci_out_supported(q->irq_ptr) &&
@@ -888,47 +825,17 @@
 			qdio_tasklet_schedule(out);
 }
 
-static void __tiqdio_inbound_processing(struct qdio_q *q)
+void tiqdio_inbound_processing(unsigned long data)
 {
-	unsigned int start = q->first_to_check;
-	int count;
+	struct qdio_q *q = (struct qdio_q *)data;
 
-	qperf_inc(q, tasklet_inbound);
 	if (need_siga_sync(q) && need_siga_sync_after_ai(q))
 		qdio_sync_queues(q);
 
 	/* The interrupt could be caused by a PCI request: */
 	qdio_check_outbound_pci_queues(q->irq_ptr);
 
-	count = qdio_inbound_q_moved(q, start);
-	if (count == 0)
-		return;
-
-	start = add_buf(start, count);
-	q->first_to_check = start;
-	qdio_kick_handler(q, count);
-
-	if (!qdio_inbound_q_done(q, start)) {
-		qperf_inc(q, tasklet_inbound_resched);
-		if (!qdio_tasklet_schedule(q))
-			return;
-	}
-
-	qdio_stop_polling(q);
-	/*
-	 * We need to check again to not lose initiative after
-	 * resetting the ACK state.
-	 */
-	if (!qdio_inbound_q_done(q, start)) {
-		qperf_inc(q, tasklet_inbound_resched2);
-		qdio_tasklet_schedule(q);
-	}
-}
-
-void tiqdio_inbound_processing(unsigned long data)
-{
-	struct qdio_q *q = (struct qdio_q *)data;
-	__tiqdio_inbound_processing(q);
+	__qdio_inbound_processing(q);
 }
 
 static inline void qdio_set_state(struct qdio_irq *irq_ptr,
@@ -958,19 +865,14 @@
 	if (unlikely(irq_ptr->state != QDIO_IRQ_STATE_ACTIVE))
 		return;
 
-	for_each_input_queue(irq_ptr, q, i) {
-		if (q->u.in.queue_start_poll) {
-			/* skip if polling is enabled or already in work */
-			if (test_and_set_bit(QDIO_QUEUE_IRQS_DISABLED,
-				     &q->u.in.queue_irq_state)) {
-				qperf_inc(q, int_discarded);
-				continue;
-			}
-			q->u.in.queue_start_poll(q->irq_ptr->cdev, q->nr,
-						 q->irq_ptr->int_parm);
-		} else {
+	if (irq_ptr->irq_poll) {
+		if (!test_and_set_bit(QDIO_IRQ_DISABLED, &irq_ptr->poll_state))
+			irq_ptr->irq_poll(irq_ptr->cdev, irq_ptr->int_parm);
+		else
+			QDIO_PERF_STAT_INC(irq_ptr, int_discarded);
+	} else {
+		for_each_input_queue(irq_ptr, q, i)
 			tasklet_schedule(&q->tasklet);
-		}
 	}
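+	/*
+	 * With irq_poll, one device-wide callback replaces the per-queue
+	 * tasklets: the driver polls all input queues itself.
+	 */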
 
 	if (!pci_out_supported(irq_ptr) || !irq_ptr->scan_threshold)
@@ -985,12 +887,11 @@
 	}
 }
 
-static void qdio_handle_activate_check(struct ccw_device *cdev,
-				unsigned long intparm, int cstat, int dstat)
+static void qdio_handle_activate_check(struct qdio_irq *irq_ptr,
+				       unsigned long intparm, int cstat,
+				       int dstat)
 {
-	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
 	struct qdio_q *q;
-	int count;
 
 	DBF_ERROR("%4x ACT CHECK", irq_ptr->schid.sch_no);
 	DBF_ERROR("intp :%lx", intparm);
@@ -1005,9 +906,8 @@
 		goto no_handler;
 	}
 
-	count = sub_buf(q->first_to_check, q->first_to_kick);
 	q->handler(q->irq_ptr->cdev, QDIO_ERROR_ACTIVATE,
-		   q->nr, q->first_to_kick, count, irq_ptr->int_parm);
+		   q->nr, q->first_to_check, 0, irq_ptr->int_parm);
 no_handler:
 	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_STOPPED);
 	/*
@@ -1017,11 +917,9 @@
 	lgr_info_log();
 }
 
-static void qdio_establish_handle_irq(struct ccw_device *cdev, int cstat,
+static void qdio_establish_handle_irq(struct qdio_irq *irq_ptr, int cstat,
 				      int dstat)
 {
-	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
-
 	DBF_DEV_EVENT(DBF_INFO, irq_ptr, "qest irq");
 
 	if (cstat)
@@ -1068,7 +966,7 @@
 
 	switch (irq_ptr->state) {
 	case QDIO_IRQ_STATE_INACTIVE:
-		qdio_establish_handle_irq(cdev, cstat, dstat);
+		qdio_establish_handle_irq(irq_ptr, cstat, dstat);
 		break;
 	case QDIO_IRQ_STATE_CLEANUP:
 		qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
@@ -1080,7 +978,7 @@
 			return;
 		}
 		if (cstat || dstat)
-			qdio_handle_activate_check(cdev, intparm, cstat,
+			qdio_handle_activate_check(irq_ptr, intparm, cstat,
 						   dstat);
 		break;
 	case QDIO_IRQ_STATE_STOPPED:
@@ -1113,9 +1011,8 @@
 }
 EXPORT_SYMBOL_GPL(qdio_get_ssqd_desc);
 
-static void qdio_shutdown_queues(struct ccw_device *cdev)
+static void qdio_shutdown_queues(struct qdio_irq *irq_ptr)
 {
-	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
 	struct qdio_q *q;
 	int i;
 
@@ -1128,6 +1025,33 @@
 	}
 }
 
+static int qdio_cancel_ccw(struct qdio_irq *irq, int how)
+{
+	struct ccw_device *cdev = irq->cdev;
+	int rc;
+
+	spin_lock_irq(get_ccwdev_lock(cdev));
+	qdio_set_state(irq, QDIO_IRQ_STATE_CLEANUP);
+	if (how & QDIO_FLAG_CLEANUP_USING_CLEAR)
+		rc = ccw_device_clear(cdev, QDIO_DOING_CLEANUP);
+	else
+		/* default behaviour is halt */
+		rc = ccw_device_halt(cdev, QDIO_DOING_CLEANUP);
+	spin_unlock_irq(get_ccwdev_lock(cdev));
+	if (rc) {
+		DBF_ERROR("%4x SHUTD ERR", irq->schid.sch_no);
+		DBF_ERROR("rc:%4d", rc);
+		return rc;
+	}
+
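+	/*
+	 * Best effort: give the subchannel up to 10s to become inactive;
+	 * a timeout is not reported as an error to the caller.
+	 */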
+	wait_event_interruptible_timeout(cdev->private->wait_q,
+					 irq->state == QDIO_IRQ_STATE_INACTIVE ||
+					 irq->state == QDIO_IRQ_STATE_ERR,
+					 10 * HZ);
+
+	return 0;
+}
+
 /**
  * qdio_shutdown - shut down a qdio subchannel
  * @cdev: associated ccw device
@@ -1162,41 +1086,13 @@
 	 */
 	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_STOPPED);
 
-	tiqdio_remove_input_queues(irq_ptr);
-	qdio_shutdown_queues(cdev);
+	tiqdio_remove_device(irq_ptr);
+	qdio_shutdown_queues(irq_ptr);
 	qdio_shutdown_debug_entries(irq_ptr);
 
-	/* cleanup subchannel */
-	spin_lock_irq(get_ccwdev_lock(cdev));
-
-	if (how & QDIO_FLAG_CLEANUP_USING_CLEAR)
-		rc = ccw_device_clear(cdev, QDIO_DOING_CLEANUP);
-	else
-		/* default behaviour is halt */
-		rc = ccw_device_halt(cdev, QDIO_DOING_CLEANUP);
-	if (rc) {
-		DBF_ERROR("%4x SHUTD ERR", irq_ptr->schid.sch_no);
-		DBF_ERROR("rc:%4d", rc);
-		goto no_cleanup;
-	}
-
-	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_CLEANUP);
-	spin_unlock_irq(get_ccwdev_lock(cdev));
-	wait_event_interruptible_timeout(cdev->private->wait_q,
-		irq_ptr->state == QDIO_IRQ_STATE_INACTIVE ||
-		irq_ptr->state == QDIO_IRQ_STATE_ERR,
-		10 * HZ);
-	spin_lock_irq(get_ccwdev_lock(cdev));
-
-no_cleanup:
+	rc = qdio_cancel_ccw(irq_ptr, how);
 	qdio_shutdown_thinint(irq_ptr);
-
-	/* restore interrupt handler */
-	if ((void *)cdev->handler == (void *)qdio_int_handler) {
-		cdev->handler = irq_ptr->orig_handler;
-		cdev->private->intparm = 0;
-	}
-	spin_unlock_irq(get_ccwdev_lock(cdev));
+	qdio_shutdown_irq(irq_ptr);
 
 	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
 	mutex_unlock(&irq_ptr->setup_mutex);
@@ -1227,43 +1123,47 @@
 	cdev->private->qdio_data = NULL;
 	mutex_unlock(&irq_ptr->setup_mutex);
 
-	qdio_release_memory(irq_ptr);
+	qdio_free_async_data(irq_ptr);
+	qdio_free_queues(irq_ptr);
+	free_page((unsigned long) irq_ptr->qdr);
+	free_page(irq_ptr->chsc_page);
+	free_page((unsigned long) irq_ptr);
 	return 0;
 }
 EXPORT_SYMBOL_GPL(qdio_free);
 
 /**
  * qdio_allocate - allocate qdio queues and associated data
- * @init_data: initialization data
+ * @cdev: associated ccw device
+ * @no_input_qs: allocate this number of Input Queues
+ * @no_output_qs: allocate this number of Output Queues
  */
-int qdio_allocate(struct qdio_initialize *init_data)
+int qdio_allocate(struct ccw_device *cdev, unsigned int no_input_qs,
+		  unsigned int no_output_qs)
 {
 	struct subchannel_id schid;
 	struct qdio_irq *irq_ptr;
+	int rc = -ENOMEM;
 
-	ccw_device_get_schid(init_data->cdev, &schid);
+	ccw_device_get_schid(cdev, &schid);
 	DBF_EVENT("qallocate:%4x", schid.sch_no);
 
-	if ((init_data->no_input_qs && !init_data->input_handler) ||
-	    (init_data->no_output_qs && !init_data->output_handler))
-		return -EINVAL;
-
-	if ((init_data->no_input_qs > QDIO_MAX_QUEUES_PER_IRQ) ||
-	    (init_data->no_output_qs > QDIO_MAX_QUEUES_PER_IRQ))
-		return -EINVAL;
-
-	if ((!init_data->input_sbal_addr_array) ||
-	    (!init_data->output_sbal_addr_array))
+	if (no_input_qs > QDIO_MAX_QUEUES_PER_IRQ ||
+	    no_output_qs > QDIO_MAX_QUEUES_PER_IRQ)
 		return -EINVAL;
 
 	/* irq_ptr must be in GFP_DMA since it contains ccw1.cda */
 	irq_ptr = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
 	if (!irq_ptr)
-		goto out_err;
+		return -ENOMEM;
 
+	irq_ptr->cdev = cdev;
 	mutex_init(&irq_ptr->setup_mutex);
-	if (qdio_allocate_dbf(init_data, irq_ptr))
-		goto out_rel;
+	if (qdio_allocate_dbf(irq_ptr))
+		goto err_dbf;
+
+	DBF_DEV_EVENT(DBF_ERR, irq_ptr, "alloc niq:%1u noq:%1u", no_input_qs,
+		      no_output_qs);
 
 	/*
 	 * Allocate a page for the chsc calls in qdio_establish.
@@ -1273,24 +1173,30 @@
 	 */
 	irq_ptr->chsc_page = get_zeroed_page(GFP_KERNEL);
 	if (!irq_ptr->chsc_page)
-		goto out_rel;
+		goto err_chsc;
 
 	/* qdr is used in ccw1.cda which is u32 */
 	irq_ptr->qdr = (struct qdr *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
 	if (!irq_ptr->qdr)
-		goto out_rel;
+		goto err_qdr;
 
-	if (qdio_allocate_qs(irq_ptr, init_data->no_input_qs,
-			     init_data->no_output_qs))
-		goto out_rel;
+	rc = qdio_allocate_qs(irq_ptr, no_input_qs, no_output_qs);
+	if (rc)
+		goto err_queues;
 
-	init_data->cdev->private->qdio_data = irq_ptr;
+	INIT_LIST_HEAD(&irq_ptr->entry);
+	cdev->private->qdio_data = irq_ptr;
 	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
 	return 0;
-out_rel:
-	qdio_release_memory(irq_ptr);
-out_err:
-	return -ENOMEM;
+
+err_queues:
+	free_page((unsigned long) irq_ptr->qdr);
+err_qdr:
+	free_page(irq_ptr->chsc_page);
+err_chsc:
+err_dbf:
+	free_page((unsigned long) irq_ptr);
+	return rc;
 }
 EXPORT_SYMBOL_GPL(qdio_allocate);
 
@@ -1316,33 +1222,62 @@
 	DBF_EVENT("use_cq:%d", use_cq);
 }
 
+static void qdio_trace_init_data(struct qdio_irq *irq,
+				 struct qdio_initialize *data)
+{
+	DBF_DEV_EVENT(DBF_ERR, irq, "qfmt:%1u", data->q_format);
+	DBF_DEV_EVENT(DBF_ERR, irq, "qpff%4x", data->qib_param_field_format);
+	DBF_DEV_HEX(irq, &data->qib_param_field, sizeof(void *), DBF_ERR);
+	DBF_DEV_HEX(irq, &data->input_slib_elements, sizeof(void *), DBF_ERR);
+	DBF_DEV_HEX(irq, &data->output_slib_elements, sizeof(void *), DBF_ERR);
+	DBF_DEV_EVENT(DBF_ERR, irq, "niq:%1u noq:%1u", data->no_input_qs,
+		      data->no_output_qs);
+	DBF_DEV_HEX(irq, &data->input_handler, sizeof(void *), DBF_ERR);
+	DBF_DEV_HEX(irq, &data->output_handler, sizeof(void *), DBF_ERR);
+	DBF_DEV_HEX(irq, &data->int_parm, sizeof(long), DBF_ERR);
+	DBF_DEV_HEX(irq, &data->input_sbal_addr_array, sizeof(void *), DBF_ERR);
+	DBF_DEV_HEX(irq, &data->output_sbal_addr_array, sizeof(void *),
+		    DBF_ERR);
+}
+
 /**
  * qdio_establish - establish queues on a qdio subchannel
+ * @cdev: associated ccw device
  * @init_data: initialization data
  */
-int qdio_establish(struct qdio_initialize *init_data)
+int qdio_establish(struct ccw_device *cdev,
+		   struct qdio_initialize *init_data)
 {
-	struct ccw_device *cdev = init_data->cdev;
+	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
 	struct subchannel_id schid;
-	struct qdio_irq *irq_ptr;
+	long timeout;
 	int rc;
 
 	ccw_device_get_schid(cdev, &schid);
 	DBF_EVENT("qestablish:%4x", schid.sch_no);
 
-	irq_ptr = cdev->private->qdio_data;
 	if (!irq_ptr)
 		return -ENODEV;
 
+	if (init_data->no_input_qs > irq_ptr->max_input_qs ||
+	    init_data->no_output_qs > irq_ptr->max_output_qs)
+		return -EINVAL;
+
+	if ((init_data->no_input_qs && !init_data->input_handler) ||
+	    (init_data->no_output_qs && !init_data->output_handler))
+		return -EINVAL;
+
+	if (!init_data->input_sbal_addr_array ||
+	    !init_data->output_sbal_addr_array)
+		return -EINVAL;
+
 	mutex_lock(&irq_ptr->setup_mutex);
-	qdio_setup_irq(init_data);
+	qdio_trace_init_data(irq_ptr, init_data);
+	qdio_setup_irq(irq_ptr, init_data);
 
 	rc = qdio_establish_thinint(irq_ptr);
-	if (rc) {
-		mutex_unlock(&irq_ptr->setup_mutex);
-		qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
-		return rc;
-	}
+	if (rc)
+		goto err_thinint;
 
 	/* establish q */
 	irq_ptr->ccw.cmd_code = irq_ptr->equeue.cmd;
@@ -1358,14 +1293,16 @@
 	if (rc) {
 		DBF_ERROR("%4x est IO ERR", irq_ptr->schid.sch_no);
 		DBF_ERROR("rc:%4x", rc);
-		mutex_unlock(&irq_ptr->setup_mutex);
-		qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
-		return rc;
+		goto err_ccw_start;
 	}
 
-	wait_event_interruptible_timeout(cdev->private->wait_q,
-		irq_ptr->state == QDIO_IRQ_STATE_ESTABLISHED ||
-		irq_ptr->state == QDIO_IRQ_STATE_ERR, HZ);
+	timeout = wait_event_interruptible_timeout(cdev->private->wait_q,
+						   irq_ptr->state == QDIO_IRQ_STATE_ESTABLISHED ||
+						   irq_ptr->state == QDIO_IRQ_STATE_ERR, HZ);
+	if (timeout <= 0) {
+		rc = (timeout == -ERESTARTSYS) ? -EINTR : -ETIME;
+		goto err_ccw_timeout;
+	}
 
 	if (irq_ptr->state != QDIO_IRQ_STATE_ESTABLISHED) {
 		mutex_unlock(&irq_ptr->setup_mutex);
@@ -1381,9 +1318,19 @@
 	qdio_init_buf_states(irq_ptr);
 
 	mutex_unlock(&irq_ptr->setup_mutex);
-	qdio_print_subchannel_info(irq_ptr, cdev);
-	qdio_setup_debug_entries(irq_ptr, cdev);
+	qdio_print_subchannel_info(irq_ptr);
+	qdio_setup_debug_entries(irq_ptr);
 	return 0;
+
+err_ccw_timeout:
+	qdio_cancel_ccw(irq_ptr, QDIO_FLAG_CLEANUP_USING_CLEAR);
+err_ccw_start:
+	qdio_shutdown_thinint(irq_ptr);
+err_thinint:
+	qdio_shutdown_irq(irq_ptr);
+	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
+	mutex_unlock(&irq_ptr->setup_mutex);
+	return rc;
 }
 EXPORT_SYMBOL_GPL(qdio_establish);
 
@@ -1393,14 +1340,13 @@
  */
 int qdio_activate(struct ccw_device *cdev)
 {
+	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
 	struct subchannel_id schid;
-	struct qdio_irq *irq_ptr;
 	int rc;
 
 	ccw_device_get_schid(cdev, &schid);
 	DBF_EVENT("qactivate:%4x", schid.sch_no);
 
-	irq_ptr = cdev->private->qdio_data;
 	if (!irq_ptr)
 		return -ENODEV;
 
@@ -1428,7 +1374,7 @@
 	}
 
 	if (is_thinint_irq(irq_ptr))
-		tiqdio_add_input_queues(irq_ptr);
+		tiqdio_add_device(irq_ptr);
 
 	/* wait for subchannel to become active */
 	msleep(5);
@@ -1448,25 +1394,6 @@
 }
 EXPORT_SYMBOL_GPL(qdio_activate);
 
-static inline int buf_in_between(int bufnr, int start, int count)
-{
-	int end = add_buf(start, count);
-
-	if (end > start) {
-		if (bufnr >= start && bufnr < end)
-			return 1;
-		else
-			return 0;
-	}
-
-	/* wrap-around case */
-	if ((bufnr >= start && bufnr <= QDIO_MAX_BUFFERS_PER_Q) ||
-	    (bufnr < end))
-		return 1;
-	else
-		return 0;
-}
-
 /**
  * handle_inbound - reset processed input buffers
  * @q: queue containing the buffers
@@ -1477,38 +1404,18 @@
 static int handle_inbound(struct qdio_q *q, unsigned int callflags,
 			  int bufnr, int count)
 {
-	int diff;
+	int overlap;
 
 	qperf_inc(q, inbound_call);
 
-	if (!q->u.in.polling)
-		goto set;
-
-	/* protect against stop polling setting an ACK for an emptied slsb */
-	if (count == QDIO_MAX_BUFFERS_PER_Q) {
-		/* overwriting everything, just delete polling status */
-		q->u.in.polling = 0;
-		q->u.in.ack_count = 0;
-		goto set;
-	} else if (buf_in_between(q->u.in.ack_start, bufnr, count)) {
-		if (is_qebsm(q)) {
-			/* partial overwrite, just update ack_start */
-			diff = add_buf(bufnr, count);
-			diff = sub_buf(diff, q->u.in.ack_start);
-			q->u.in.ack_count -= diff;
-			if (q->u.in.ack_count <= 0) {
-				q->u.in.polling = 0;
-				q->u.in.ack_count = 0;
-				goto set;
-			}
-			q->u.in.ack_start = add_buf(q->u.in.ack_start, diff);
-		}
-		else
-			/* the only ACK will be deleted, so stop polling */
-			q->u.in.polling = 0;
+	/* If any processed SBALs are returned to HW, adjust our tracking: */
+	overlap = min_t(int, count - sub_buf(q->u.in.batch_start, bufnr),
+			     q->u.in.batch_count);
+	if (overlap > 0) {
+		q->u.in.batch_start = add_buf(q->u.in.batch_start, overlap);
+		q->u.in.batch_count -= overlap;
 	}
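+	/*
+	 * Example (hypothetical numbers): returning bufnr = 10, count = 5
+	 * against a batch of 6 SBALs starting at 12 gives
+	 * overlap = min(5 - sub_buf(12, 10), 6) = 3, so the batch shrinks
+	 * to 3 SBALs starting at 15.
+	 */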
 
-set:
 	count = set_buf_states(q, bufnr, SLSB_CU_INPUT_EMPTY, count);
 	atomic_add(count, &q->nr_buf_used);
 
@@ -1526,7 +1433,7 @@
  * @count: how many buffers are filled
  */
 static int handle_outbound(struct qdio_q *q, unsigned int callflags,
-			   int bufnr, int count)
+			   unsigned int bufnr, unsigned int count)
 {
 	const unsigned int scan_threshold = q->irq_ptr->scan_threshold;
 	unsigned char state = 0;
@@ -1549,13 +1456,10 @@
 	if (queue_type(q) == QDIO_IQDIO_QFMT) {
 		unsigned long phys_aob = 0;
 
-		/* One SIGA-W per buffer required for unicast HSI */
-		WARN_ON_ONCE(count > 1 && !multicast_outbound(q));
-
-		if (q->u.out.use_cq)
+		if (q->u.out.use_cq && count == 1)
 			phys_aob = qdio_aob_for_buffer(&q->u.out, bufnr);
 
-		rc = qdio_kick_outbound_q(q, phys_aob);
+		rc = qdio_kick_outbound_q(q, count, phys_aob);
 	} else if (need_siga_sync(q)) {
 		rc = qdio_siga_sync_q(q);
 	} else if (count < QDIO_MAX_BUFFERS_PER_Q &&
@@ -1564,7 +1468,7 @@
 		/* The previous buffer is not processed yet, tack on. */
 		qperf_inc(q, fast_requeue);
 	} else {
-		rc = qdio_kick_outbound_q(q, 0);
+		rc = qdio_kick_outbound_q(q, count, 0);
 	}
 
 	/* Let drivers implement their own completion scanning: */
@@ -1593,12 +1497,11 @@
 int do_QDIO(struct ccw_device *cdev, unsigned int callflags,
 	    int q_nr, unsigned int bufnr, unsigned int count)
 {
-	struct qdio_irq *irq_ptr;
+	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
 
 	if (bufnr >= QDIO_MAX_BUFFERS_PER_Q || count > QDIO_MAX_BUFFERS_PER_Q)
 		return -EINVAL;
 
-	irq_ptr = cdev->private->qdio_data;
 	if (!irq_ptr)
 		return -ENODEV;
 
@@ -1620,26 +1523,26 @@
 EXPORT_SYMBOL_GPL(do_QDIO);
 
 /**
- * qdio_start_irq - process input buffers
+ * qdio_start_irq - enable interrupt processing for the device
  * @cdev: associated ccw_device for the qdio subchannel
- * @nr: input queue number
  *
  * Return codes
  *   0 - success
  *   1 - irqs not started since new data is available
  */
-int qdio_start_irq(struct ccw_device *cdev, int nr)
+int qdio_start_irq(struct ccw_device *cdev)
 {
 	struct qdio_q *q;
 	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
+	unsigned int i;
 
 	if (!irq_ptr)
 		return -ENODEV;
-	q = irq_ptr->input_qs[nr];
 
-	clear_nonshared_ind(irq_ptr);
-	qdio_stop_polling(q);
-	clear_bit(QDIO_QUEUE_IRQS_DISABLED, &q->u.in.queue_irq_state);
+	for_each_input_queue(irq_ptr, q, i)
+		qdio_stop_polling(q);
+
+	clear_bit(QDIO_IRQ_DISABLED, &irq_ptr->poll_state);
 
 	/*
 	 * We need to check again to not lose initiative after
@@ -1647,13 +1550,16 @@
 	 */
 	if (test_nonshared_ind(irq_ptr))
 		goto rescan;
-	if (!qdio_inbound_q_done(q, q->first_to_check))
-		goto rescan;
+
+	for_each_input_queue(irq_ptr, q, i) {
+		if (!qdio_inbound_q_done(q, q->first_to_check))
+			goto rescan;
+	}
+
 	return 0;
 
 rescan:
-	if (test_and_set_bit(QDIO_QUEUE_IRQS_DISABLED,
-			     &q->u.in.queue_irq_state))
+	if (test_and_set_bit(QDIO_IRQ_DISABLED, &irq_ptr->poll_state))
 		return 0;
 	else
 		return 1;
@@ -1741,117 +1647,25 @@
 /**
  * qdio_stop_irq - disable interrupt processing for the device
  * @cdev: associated ccw_device for the qdio subchannel
- * @nr: input queue number
  *
  * Return codes
  *   0 - interrupts were already disabled
  *   1 - interrupts successfully disabled
  */
-int qdio_stop_irq(struct ccw_device *cdev, int nr)
+int qdio_stop_irq(struct ccw_device *cdev)
 {
-	struct qdio_q *q;
 	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
 
 	if (!irq_ptr)
 		return -ENODEV;
-	q = irq_ptr->input_qs[nr];
 
-	if (test_and_set_bit(QDIO_QUEUE_IRQS_DISABLED,
-			     &q->u.in.queue_irq_state))
+	if (test_and_set_bit(QDIO_IRQ_DISABLED, &irq_ptr->poll_state))
 		return 0;
 	else
 		return 1;
 }
 EXPORT_SYMBOL(qdio_stop_irq);
 
-/**
- * qdio_pnso_brinfo() - perform network subchannel op #0 - bridge info.
- * @schid:		Subchannel ID.
- * @cnc:		Boolean Change-Notification Control
- * @response:		Response code will be stored at this address
- * @cb: 		Callback function will be executed for each element
- *			of the address list
- * @priv:		Pointer to pass to the callback function.
- *
- * Performs "Store-network-bridging-information list" operation and calls
- * the callback function for every entry in the list. If "change-
- * notification-control" is set, further changes in the address list
- * will be reported via the IPA command.
- */
-int qdio_pnso_brinfo(struct subchannel_id schid,
-		int cnc, u16 *response,
-		void (*cb)(void *priv, enum qdio_brinfo_entry_type type,
-				void *entry),
-		void *priv)
-{
-	struct chsc_pnso_area *rr;
-	int rc;
-	u32 prev_instance = 0;
-	int isfirstblock = 1;
-	int i, size, elems;
-
-	rr = (struct chsc_pnso_area *)get_zeroed_page(GFP_KERNEL);
-	if (rr == NULL)
-		return -ENOMEM;
-	do {
-		/* on the first iteration, naihdr.resume_token will be zero */
-		rc = chsc_pnso_brinfo(schid, rr, rr->naihdr.resume_token, cnc);
-		if (rc != 0 && rc != -EBUSY)
-			goto out;
-		if (rr->response.code != 1) {
-			rc = -EIO;
-			continue;
-		} else
-			rc = 0;
-
-		if (cb == NULL)
-			continue;
-
-		size = rr->naihdr.naids;
-		elems = (rr->response.length -
-				sizeof(struct chsc_header) -
-				sizeof(struct chsc_brinfo_naihdr)) /
-				size;
-
-		if (!isfirstblock && (rr->naihdr.instance != prev_instance)) {
-			/* Inform the caller that they need to scrap */
-			/* the data that was already reported via cb */
-				rc = -EAGAIN;
-				break;
-		}
-		isfirstblock = 0;
-		prev_instance = rr->naihdr.instance;
-		for (i = 0; i < elems; i++)
-			switch (size) {
-			case sizeof(struct qdio_brinfo_entry_l3_ipv6):
-				(*cb)(priv, l3_ipv6_addr,
-						&rr->entries.l3_ipv6[i]);
-				break;
-			case sizeof(struct qdio_brinfo_entry_l3_ipv4):
-				(*cb)(priv, l3_ipv4_addr,
-						&rr->entries.l3_ipv4[i]);
-				break;
-			case sizeof(struct qdio_brinfo_entry_l2):
-				(*cb)(priv, l2_addr_lnid,
-						&rr->entries.l2[i]);
-				break;
-			default:
-				WARN_ON_ONCE(1);
-				rc = -EIO;
-				goto out;
-			}
-	} while (rr->response.code == 0x0107 ||  /* channel busy */
-		  (rr->response.code == 1 && /* list stored */
-		   /* resume token is non-zero => list incomplete */
-		   (rr->naihdr.resume_token.t1 || rr->naihdr.resume_token.t2)));
-	(*response) = rr->response.code;
-
-out:
-	free_page((unsigned long)rr);
-	return rc;
-}
-EXPORT_SYMBOL_GPL(qdio_pnso_brinfo);
-
 static int __init init_QDIO(void)
 {
 	int rc;
@@ -1862,16 +1676,11 @@
 	rc = qdio_setup_init();
 	if (rc)
 		goto out_debug;
-	rc = tiqdio_allocate_memory();
+	rc = qdio_thinint_init();
 	if (rc)
 		goto out_cache;
-	rc = tiqdio_register_thinints();
-	if (rc)
-		goto out_ti;
 	return 0;
 
-out_ti:
-	tiqdio_free_memory();
 out_cache:
 	qdio_setup_exit();
 out_debug:
@@ -1881,8 +1690,7 @@
 
 static void __exit exit_QDIO(void)
 {
-	tiqdio_unregister_thinints();
-	tiqdio_free_memory();
+	qdio_thinint_exit();
 	qdio_setup_exit();
 	qdio_debug_exit();
 }
diff --git a/drivers/s390/cio/qdio_setup.c b/drivers/s390/cio/qdio_setup.c
index 9dc56aa..a5b2e16 100644
--- a/drivers/s390/cio/qdio_setup.c
+++ b/drivers/s390/cio/qdio_setup.c
@@ -9,6 +9,8 @@
 #include <linux/slab.h>
 #include <linux/export.h>
 #include <linux/io.h>
+
+#include <asm/ebcdic.h>
 #include <asm/qdio.h>
 
 #include "cio.h"
@@ -135,6 +137,27 @@
 	}
 }
 
+static void __qdio_free_queues(struct qdio_q **queues, unsigned int count)
+{
+	struct qdio_q *q;
+	unsigned int i;
+
+	for (i = 0; i < count; i++) {
+		q = queues[i];
+		free_page((unsigned long) q->slib);
+		kmem_cache_free(qdio_q_cache, q);
+	}
+}
+
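+/*
+ * Note: free against max_*_qs, not nr_*_qs: the queues are allocated
+ * up front by qdio_allocate(), before qdio_establish() sets the counts.
+ */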
+void qdio_free_queues(struct qdio_irq *irq_ptr)
+{
+	__qdio_free_queues(irq_ptr->input_qs, irq_ptr->max_input_qs);
+	irq_ptr->max_input_qs = 0;
+
+	__qdio_free_queues(irq_ptr->output_qs, irq_ptr->max_output_qs);
+	irq_ptr->max_output_qs = 0;
+}
+
 static int __qdio_allocate_qs(struct qdio_q **irq_ptr_qs, int nr_queues)
 {
 	struct qdio_q *q;
@@ -142,16 +165,18 @@
 
 	for (i = 0; i < nr_queues; i++) {
 		q = kmem_cache_zalloc(qdio_q_cache, GFP_KERNEL);
-		if (!q)
+		if (!q) {
+			__qdio_free_queues(irq_ptr_qs, i);
 			return -ENOMEM;
+		}
 
 		q->slib = (struct slib *) __get_free_page(GFP_KERNEL);
 		if (!q->slib) {
 			kmem_cache_free(qdio_q_cache, q);
+			__qdio_free_queues(irq_ptr_qs, i);
 			return -ENOMEM;
 		}
 		irq_ptr_qs[i] = q;
-		INIT_LIST_HEAD(&q->entry);
 	}
 	return 0;
 }
@@ -163,8 +188,16 @@
 	rc = __qdio_allocate_qs(irq_ptr->input_qs, nr_input_qs);
 	if (rc)
 		return rc;
+
 	rc = __qdio_allocate_qs(irq_ptr->output_qs, nr_output_qs);
-	return rc;
+	if (rc) {
+		__qdio_free_queues(irq_ptr->input_qs, nr_input_qs);
+		return rc;
+	}
+
+	irq_ptr->max_input_qs = nr_input_qs;
+	irq_ptr->max_output_qs = nr_output_qs;
+	return 0;
 }
 
 static void setup_queues_misc(struct qdio_q *q, struct qdio_irq *irq_ptr,
@@ -180,7 +213,6 @@
 	q->mask = 1 << (31 - i);
 	q->nr = i;
 	q->handler = handler;
-	INIT_LIST_HEAD(&q->entry);
 }
 
 static void setup_storage_lists(struct qdio_q *q, struct qdio_irq *irq_ptr,
@@ -215,8 +247,6 @@
 			 struct qdio_initialize *qdio_init)
 {
 	struct qdio_q *q;
-	struct qdio_buffer **input_sbal_array = qdio_init->input_sbal_addr_array;
-	struct qdio_buffer **output_sbal_array = qdio_init->output_sbal_addr_array;
 	struct qdio_outbuf_state *output_sbal_state_array =
 				  qdio_init->output_sbal_state_array;
 	int i;
@@ -226,11 +256,9 @@
 		setup_queues_misc(q, irq_ptr, qdio_init->input_handler, i);
 
 		q->is_input_q = 1;
-		q->u.in.queue_start_poll = qdio_init->queue_start_poll_array ?
-				qdio_init->queue_start_poll_array[i] : NULL;
 
-		setup_storage_lists(q, irq_ptr, input_sbal_array, i);
-		input_sbal_array += QDIO_MAX_BUFFERS_PER_Q;
+		setup_storage_lists(q, irq_ptr,
+				    qdio_init->input_sbal_addr_array[i], i);
 
 		if (is_thinint_irq(irq_ptr)) {
 			tasklet_init(&q->tasklet, tiqdio_inbound_processing,
@@ -249,8 +277,8 @@
 		output_sbal_state_array += QDIO_MAX_BUFFERS_PER_Q;
 
 		q->is_input_q = 0;
-		setup_storage_lists(q, irq_ptr, output_sbal_array, i);
-		output_sbal_array += QDIO_MAX_BUFFERS_PER_Q;
+		setup_storage_lists(q, irq_ptr,
+				    qdio_init->output_sbal_addr_array[i], i);
 
 		tasklet_init(&q->tasklet, qdio_outbound_processing,
 			     (unsigned long) q);
@@ -353,69 +381,46 @@
 	DBF_EVENT("3:%4x qib:%4x", irq_ptr->ssqd_desc.qdioac3, irq_ptr->qib.ac);
 }
 
-void qdio_release_memory(struct qdio_irq *irq_ptr)
+void qdio_free_async_data(struct qdio_irq *irq_ptr)
 {
 	struct qdio_q *q;
 	int i;
 
-	/*
-	 * Must check queue array manually since irq_ptr->nr_input_queues /
-	 * irq_ptr->nr_input_queues may not yet be set.
-	 */
-	for (i = 0; i < QDIO_MAX_QUEUES_PER_IRQ; i++) {
-		q = irq_ptr->input_qs[i];
-		if (q) {
-			free_page((unsigned long) q->slib);
-			kmem_cache_free(qdio_q_cache, q);
-		}
-	}
-	for (i = 0; i < QDIO_MAX_QUEUES_PER_IRQ; i++) {
+	for (i = 0; i < irq_ptr->max_output_qs; i++) {
 		q = irq_ptr->output_qs[i];
-		if (q) {
-			if (q->u.out.use_cq) {
-				int n;
+		if (q->u.out.use_cq) {
+			unsigned int n;
 
-				for (n = 0; n < QDIO_MAX_BUFFERS_PER_Q; ++n) {
-					struct qaob *aob = q->u.out.aobs[n];
-					if (aob) {
-						qdio_release_aob(aob);
-						q->u.out.aobs[n] = NULL;
-					}
+			for (n = 0; n < QDIO_MAX_BUFFERS_PER_Q; n++) {
+				struct qaob *aob = q->u.out.aobs[n];
+
+				if (aob) {
+					qdio_release_aob(aob);
+					q->u.out.aobs[n] = NULL;
 				}
-
-				qdio_disable_async_operation(&q->u.out);
 			}
-			free_page((unsigned long) q->slib);
-			kmem_cache_free(qdio_q_cache, q);
+
+			qdio_disable_async_operation(&q->u.out);
 		}
 	}
-	free_page((unsigned long) irq_ptr->qdr);
-	free_page(irq_ptr->chsc_page);
-	free_page((unsigned long) irq_ptr);
 }
 
-static void __qdio_allocate_fill_qdr(struct qdio_irq *irq_ptr,
-				     struct qdio_q **irq_ptr_qs,
-				     int i, int nr)
+static void qdio_fill_qdr_desc(struct qdesfmt0 *desc, struct qdio_q *queue)
 {
-	irq_ptr->qdr->qdf0[i + nr].sliba =
-		(unsigned long)irq_ptr_qs[i]->slib;
+	desc->sliba = virt_to_phys(queue->slib);
+	desc->sla = virt_to_phys(queue->sl);
+	desc->slsba = virt_to_phys(&queue->slsb);
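+	/* The machine reads the QDR directly, so store absolute addresses. */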
 
-	irq_ptr->qdr->qdf0[i + nr].sla =
-		(unsigned long)irq_ptr_qs[i]->sl;
-
-	irq_ptr->qdr->qdf0[i + nr].slsba =
-		(unsigned long)&irq_ptr_qs[i]->slsb.val[0];
-
-	irq_ptr->qdr->qdf0[i + nr].akey = PAGE_DEFAULT_KEY >> 4;
-	irq_ptr->qdr->qdf0[i + nr].bkey = PAGE_DEFAULT_KEY >> 4;
-	irq_ptr->qdr->qdf0[i + nr].ckey = PAGE_DEFAULT_KEY >> 4;
-	irq_ptr->qdr->qdf0[i + nr].dkey = PAGE_DEFAULT_KEY >> 4;
+	desc->akey = PAGE_DEFAULT_KEY >> 4;
+	desc->bkey = PAGE_DEFAULT_KEY >> 4;
+	desc->ckey = PAGE_DEFAULT_KEY >> 4;
+	desc->dkey = PAGE_DEFAULT_KEY >> 4;
 }
 
 static void setup_qdr(struct qdio_irq *irq_ptr,
 		      struct qdio_initialize *qdio_init)
 {
+	struct qdesfmt0 *desc = &irq_ptr->qdr->qdf0[0];
 	int i;
 
 	irq_ptr->qdr->qfmt = qdio_init->q_format;
@@ -424,15 +429,14 @@
 	irq_ptr->qdr->oqdcnt = qdio_init->no_output_qs;
 	irq_ptr->qdr->iqdsz = sizeof(struct qdesfmt0) / 4; /* size in words */
 	irq_ptr->qdr->oqdsz = sizeof(struct qdesfmt0) / 4;
-	irq_ptr->qdr->qiba = (unsigned long)&irq_ptr->qib;
+	irq_ptr->qdr->qiba = virt_to_phys(&irq_ptr->qib);
 	irq_ptr->qdr->qkey = PAGE_DEFAULT_KEY >> 4;
 
 	for (i = 0; i < qdio_init->no_input_qs; i++)
-		__qdio_allocate_fill_qdr(irq_ptr, irq_ptr->input_qs, i, 0);
+		qdio_fill_qdr_desc(desc++, irq_ptr->input_qs[i]);
 
 	for (i = 0; i < qdio_init->no_output_qs; i++)
-		__qdio_allocate_fill_qdr(irq_ptr, irq_ptr->output_qs, i,
-					 qdio_init->no_input_qs);
+		qdio_fill_qdr_desc(desc++, irq_ptr->output_qs[i]);
 }
 
 static void setup_qib(struct qdio_irq *irq_ptr,
@@ -450,13 +454,14 @@
 	if (init_data->no_output_qs)
 		irq_ptr->qib.osliba =
 			(unsigned long)(irq_ptr->output_qs[0]->slib);
-	memcpy(irq_ptr->qib.ebcnam, init_data->adapter_name, 8);
+	memcpy(irq_ptr->qib.ebcnam, dev_name(&irq_ptr->cdev->dev), 8);
+	ASCEBC(irq_ptr->qib.ebcnam, 8);
 }
 
-int qdio_setup_irq(struct qdio_initialize *init_data)
+int qdio_setup_irq(struct qdio_irq *irq_ptr, struct qdio_initialize *init_data)
 {
+	struct ccw_device *cdev = irq_ptr->cdev;
 	struct ciw *ciw;
-	struct qdio_irq *irq_ptr = init_data->cdev->private->qdio_data;
 
 	memset(&irq_ptr->qib, 0, sizeof(irq_ptr->qib));
 	memset(&irq_ptr->siga_flag, 0, sizeof(irq_ptr->siga_flag));
@@ -464,8 +469,9 @@
 	memset(&irq_ptr->ssqd_desc, 0, sizeof(irq_ptr->ssqd_desc));
 	memset(&irq_ptr->perf_stat, 0, sizeof(irq_ptr->perf_stat));
 
-	irq_ptr->debugfs_dev = irq_ptr->debugfs_perf = NULL;
-	irq_ptr->sch_token = irq_ptr->state = irq_ptr->perf_stat_enabled = 0;
+	irq_ptr->debugfs_dev = NULL;
+	irq_ptr->sch_token = irq_ptr->perf_stat_enabled = 0;
+	irq_ptr->state = QDIO_IRQ_STATE_INACTIVE;
 
 	/* wipes qib.ac, required by ar7063 */
 	memset(irq_ptr->qdr, 0, sizeof(struct qdr));
@@ -473,11 +479,17 @@
 	irq_ptr->int_parm = init_data->int_parm;
 	irq_ptr->nr_input_qs = init_data->no_input_qs;
 	irq_ptr->nr_output_qs = init_data->no_output_qs;
-	irq_ptr->cdev = init_data->cdev;
 	irq_ptr->scan_threshold = init_data->scan_threshold;
-	ccw_device_get_schid(irq_ptr->cdev, &irq_ptr->schid);
+	ccw_device_get_schid(cdev, &irq_ptr->schid);
 	setup_queues(irq_ptr, init_data);
 
+	if (init_data->irq_poll) {
+		irq_ptr->irq_poll = init_data->irq_poll;
+		set_bit(QDIO_IRQ_DISABLED, &irq_ptr->poll_state);
+	} else {
+		irq_ptr->irq_poll = NULL;
+	}
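+	/*
+	 * Polling drivers start with the IRQ disabled; qdio_start_irq()
+	 * enables it once the driver is ready.
+	 */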
+
 	setup_qib(irq_ptr, init_data);
 	set_impl_params(irq_ptr, init_data->qib_param_field_format,
 			init_data->qib_param_field,
@@ -489,37 +501,48 @@
 
 	/* qdr, qib, sls, slsbs, slibs, sbales are filled now */
 
+	/* set our IRQ handler */
+	spin_lock_irq(get_ccwdev_lock(cdev));
+	irq_ptr->orig_handler = cdev->handler;
+	cdev->handler = qdio_int_handler;
+	spin_unlock_irq(get_ccwdev_lock(cdev));
+
 	/* get qdio commands */
-	ciw = ccw_device_get_ciw(init_data->cdev, CIW_TYPE_EQUEUE);
+	ciw = ccw_device_get_ciw(cdev, CIW_TYPE_EQUEUE);
 	if (!ciw) {
 		DBF_ERROR("%4x NO EQ", irq_ptr->schid.sch_no);
 		return -EINVAL;
 	}
 	irq_ptr->equeue = *ciw;
 
-	ciw = ccw_device_get_ciw(init_data->cdev, CIW_TYPE_AQUEUE);
+	ciw = ccw_device_get_ciw(cdev, CIW_TYPE_AQUEUE);
 	if (!ciw) {
 		DBF_ERROR("%4x NO AQ", irq_ptr->schid.sch_no);
 		return -EINVAL;
 	}
 	irq_ptr->aqueue = *ciw;
 
-	/* set new interrupt handler */
-	spin_lock_irq(get_ccwdev_lock(irq_ptr->cdev));
-	irq_ptr->orig_handler = init_data->cdev->handler;
-	init_data->cdev->handler = qdio_int_handler;
-	spin_unlock_irq(get_ccwdev_lock(irq_ptr->cdev));
 	return 0;
 }
 
-void qdio_print_subchannel_info(struct qdio_irq *irq_ptr,
-				struct ccw_device *cdev)
+void qdio_shutdown_irq(struct qdio_irq *irq)
+{
+	struct ccw_device *cdev = irq->cdev;
+
+	/* restore IRQ handler */
+	spin_lock_irq(get_ccwdev_lock(cdev));
+	cdev->handler = irq->orig_handler;
+	cdev->private->intparm = 0;
+	spin_unlock_irq(get_ccwdev_lock(cdev));
+}
+
+void qdio_print_subchannel_info(struct qdio_irq *irq_ptr)
 {
 	char s[80];
 
 	snprintf(s, 80, "qdio: %s %s on SC %x using "
 		 "AI:%d QEBSM:%d PRI:%d TDD:%d SIGA:%s%s%s%s%s\n",
-		 dev_name(&cdev->dev),
+		 dev_name(&irq_ptr->cdev->dev),
 		 (irq_ptr->qib.qfmt == QDIO_QETH_QFMT) ? "OSA" :
 			((irq_ptr->qib.qfmt == QDIO_ZFCP_QFMT) ? "ZFCP" : "HS"),
 		 irq_ptr->schid.sch_no,
@@ -538,7 +561,7 @@
 int qdio_enable_async_operation(struct qdio_output_q *outq)
 {
 	outq->aobs = kcalloc(QDIO_MAX_BUFFERS_PER_Q, sizeof(struct qaob *),
-			     GFP_ATOMIC);
+			     GFP_KERNEL);
 	if (!outq->aobs) {
 		outq->use_cq = 0;
 		return -ENOMEM;
diff --git a/drivers/s390/cio/qdio_thinint.c b/drivers/s390/cio/qdio_thinint.c
index ddf780b..7a440e4 100644
--- a/drivers/s390/cio/qdio_thinint.c
+++ b/drivers/s390/cio/qdio_thinint.c
@@ -39,14 +39,6 @@
 static LIST_HEAD(tiq_list);
 static DEFINE_MUTEX(tiq_list_lock);
 
-/* Adapter interrupt definitions */
-static void tiqdio_thinint_handler(struct airq_struct *airq, bool floating);
-
-static struct airq_struct tiqdio_airq = {
-	.handler = tiqdio_thinint_handler,
-	.isc = QDIO_AIRQ_ISC,
-};
-
 static struct indicator_t *q_indicators;
 
 u64 last_ai_time;
@@ -74,31 +66,20 @@
 	atomic_dec(&ind->count);
 }
 
-void tiqdio_add_input_queues(struct qdio_irq *irq_ptr)
+void tiqdio_add_device(struct qdio_irq *irq_ptr)
 {
 	mutex_lock(&tiq_list_lock);
-	list_add_rcu(&irq_ptr->input_qs[0]->entry, &tiq_list);
+	list_add_rcu(&irq_ptr->entry, &tiq_list);
 	mutex_unlock(&tiq_list_lock);
 }
 
-void tiqdio_remove_input_queues(struct qdio_irq *irq_ptr)
+void tiqdio_remove_device(struct qdio_irq *irq_ptr)
 {
-	struct qdio_q *q;
-
-	q = irq_ptr->input_qs[0];
-	if (!q)
-		return;
-
 	mutex_lock(&tiq_list_lock);
-	list_del_rcu(&q->entry);
+	list_del_rcu(&irq_ptr->entry);
 	mutex_unlock(&tiq_list_lock);
 	synchronize_rcu();
-	INIT_LIST_HEAD(&q->entry);
-}
-
-static inline int has_multiple_inq_on_dsci(struct qdio_irq *irq_ptr)
-{
-	return irq_ptr->nr_input_qs > 1;
+	INIT_LIST_HEAD(&irq_ptr->entry);
 }
 
 static inline int references_shared_dsci(struct qdio_irq *irq_ptr)
@@ -106,26 +87,11 @@
 	return irq_ptr->dsci == &q_indicators[TIQDIO_SHARED_IND].ind;
 }
 
-static inline int shared_ind(struct qdio_irq *irq_ptr)
-{
-	return references_shared_dsci(irq_ptr) ||
-		has_multiple_inq_on_dsci(irq_ptr);
-}
-
-void clear_nonshared_ind(struct qdio_irq *irq_ptr)
-{
-	if (!is_thinint_irq(irq_ptr))
-		return;
-	if (shared_ind(irq_ptr))
-		return;
-	xchg(irq_ptr->dsci, 0);
-}
-
 int test_nonshared_ind(struct qdio_irq *irq_ptr)
 {
 	if (!is_thinint_irq(irq_ptr))
 		return 0;
-	if (shared_ind(irq_ptr))
+	if (references_shared_dsci(irq_ptr))
 		return 0;
 	if (*irq_ptr->dsci)
 		return 1;
@@ -145,32 +111,24 @@
 	struct qdio_q *q;
 	int i;
 
-	if (!references_shared_dsci(irq) &&
-	    has_multiple_inq_on_dsci(irq))
+	if (!references_shared_dsci(irq))
 		xchg(irq->dsci, 0);
 
+	if (irq->irq_poll) {
+		if (!test_and_set_bit(QDIO_IRQ_DISABLED, &irq->poll_state))
+			irq->irq_poll(irq->cdev, irq->int_parm);
+		else
+			QDIO_PERF_STAT_INC(irq, int_discarded);
+
+		return;
+	}
+
 	for_each_input_queue(irq, q, i) {
-		if (q->u.in.queue_start_poll) {
-			/* skip if polling is enabled or already in work */
-			if (test_and_set_bit(QDIO_QUEUE_IRQS_DISABLED,
-					     &q->u.in.queue_irq_state)) {
-				qperf_inc(q, int_discarded);
-				continue;
-			}
-
-			/* avoid dsci clear here, done after processing */
-			q->u.in.queue_start_poll(irq->cdev, q->nr,
-						 irq->int_parm);
-		} else {
-			if (!shared_ind(irq))
-				xchg(irq->dsci, 0);
-
-			/*
-			 * Call inbound processing but not directly
-			 * since that could starve other thinint queues.
-			 */
-			tasklet_schedule(&q->tasklet);
-		}
+		/*
+		 * Call inbound processing but not directly
+		 * since that could starve other thinint queues.
+		 */
+		tasklet_schedule(&q->tasklet);
 	}
 }
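
The irq_poll branch above is a one-shot gate: the first adapter interrupt atomically sets QDIO_IRQ_DISABLED and invokes the driver's poll callback exactly once; interrupts arriving while the driver polls are counted as discarded. Condensed, with the re-enable side shown as an assumption (it is not part of this hunk):

	/* interrupt side, as in tiqdio_call_inq_handlers() above */
	if (!test_and_set_bit(QDIO_IRQ_DISABLED, &irq->poll_state))
		irq->irq_poll(irq->cdev, irq->int_parm);	/* enter polling */
	else
		QDIO_PERF_STAT_INC(irq, int_discarded);		/* already polling */

	/* assumed driver side, once polling finds no more work */
	clear_bit(QDIO_IRQ_DISABLED, &irq->poll_state);		/* reopen the gate */
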
 
@@ -182,7 +140,7 @@
 static void tiqdio_thinint_handler(struct airq_struct *airq, bool floating)
 {
 	u32 si_used = clear_shared_ind();
-	struct qdio_q *q;
+	struct qdio_irq *irq;
 
 	last_ai_time = S390_lowcore.int_clock;
 	inc_irq_stat(IRQIO_QAI);
@@ -190,12 +148,8 @@
 	/* protect tiq_list entries, only changed in activate or shutdown */
 	rcu_read_lock();
 
-	/* check for work on all inbound thinint queues */
-	list_for_each_entry_rcu(q, &tiq_list, entry) {
-		struct qdio_irq *irq;
-
+	list_for_each_entry_rcu(irq, &tiq_list, entry) {
 		/* only process queues from changed sets */
-		irq = q->irq_ptr;
 		if (unlikely(references_shared_dsci(irq))) {
 			if (!si_used)
 				continue;
@@ -204,11 +158,16 @@
 
 		tiqdio_call_inq_handlers(irq);
 
-		qperf_inc(q, adapter_int);
+		QDIO_PERF_STAT_INC(irq, adapter_int);
 	}
 	rcu_read_unlock();
 }
 
+static struct airq_struct tiqdio_airq = {
+	.handler = tiqdio_thinint_handler,
+	.isc = QDIO_AIRQ_ISC,
+};
+
 static int set_subchannel_ind(struct qdio_irq *irq_ptr, int reset)
 {
 	struct chsc_scssc_area *scssc = (void *)irq_ptr->chsc_page;
@@ -224,7 +183,7 @@
 	}
 
 	rc = chsc_sadc(irq_ptr->schid, scssc, summary_indicator_addr,
-		       subchannel_indicator_addr);
+		       subchannel_indicator_addr, tiqdio_airq.isc);
 	if (rc) {
 		DBF_ERROR("%4x SSI r:%4x", irq_ptr->schid.sch_no,
 			  scssc->response.code);
@@ -238,34 +197,6 @@
 	return rc;
 }
 
-/* allocate non-shared indicators and shared indicator */
-int __init tiqdio_allocate_memory(void)
-{
-	q_indicators = kcalloc(TIQDIO_NR_INDICATORS,
-			       sizeof(struct indicator_t),
-			       GFP_KERNEL);
-	if (!q_indicators)
-		return -ENOMEM;
-	return 0;
-}
-
-void tiqdio_free_memory(void)
-{
-	kfree(q_indicators);
-}
-
-int __init tiqdio_register_thinints(void)
-{
-	int rc;
-
-	rc = register_adapter_interrupt(&tiqdio_airq);
-	if (rc) {
-		DBF_EVENT("RTI:%x", rc);
-		return rc;
-	}
-	return 0;
-}
-
 int qdio_establish_thinint(struct qdio_irq *irq_ptr)
 {
 	int rc;
@@ -293,8 +224,27 @@
 	put_indicator(irq_ptr->dsci);
 }
 
-void __exit tiqdio_unregister_thinints(void)
+int __init qdio_thinint_init(void)
+{
+	int rc;
+
+	q_indicators = kcalloc(TIQDIO_NR_INDICATORS, sizeof(struct indicator_t),
+			       GFP_KERNEL);
+	if (!q_indicators)
+		return -ENOMEM;
+
+	rc = register_adapter_interrupt(&tiqdio_airq);
+	if (rc) {
+		DBF_EVENT("RTI:%x", rc);
+		kfree(q_indicators);
+		return rc;
+	}
+	return 0;
+}
+
+void __exit qdio_thinint_exit(void)
 {
 	WARN_ON(!list_empty(&tiq_list));
 	unregister_adapter_interrupt(&tiqdio_airq);
+	kfree(q_indicators);
 }
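
The tiq_list handling above follows the standard RCU-protected list idiom: writers serialize on a mutex and use the _rcu list primitives, while the interrupt handler walks the list under rcu_read_lock() without taking any lock. The same idiom on a hypothetical object:

	#include <linux/list.h>
	#include <linux/mutex.h>
	#include <linux/rcupdate.h>

	struct obj { struct list_head entry; };

	static LIST_HEAD(obj_list);
	static DEFINE_MUTEX(obj_list_lock);

	static void obj_add(struct obj *o)
	{
		mutex_lock(&obj_list_lock);		/* writers are serialized */
		list_add_rcu(&o->entry, &obj_list);
		mutex_unlock(&obj_list_lock);
	}

	static void obj_remove(struct obj *o)
	{
		mutex_lock(&obj_list_lock);
		list_del_rcu(&o->entry);
		mutex_unlock(&obj_list_lock);
		synchronize_rcu();			/* wait out current readers */
	}

	static void obj_scan(void)			/* e.g. from an interrupt */
	{
		struct obj *o;

		rcu_read_lock();
		list_for_each_entry_rcu(o, &obj_list, entry)
			;				/* process o, lock-free */
		rcu_read_unlock();
	}
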
diff --git a/drivers/s390/cio/vfio_ccw_chp.c b/drivers/s390/cio/vfio_ccw_chp.c
new file mode 100644
index 0000000..13b26a1
--- /dev/null
+++ b/drivers/s390/cio/vfio_ccw_chp.c
@@ -0,0 +1,149 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Channel path related status regions for vfio_ccw
+ *
+ * Copyright IBM Corp. 2020
+ *
+ * Author(s): Farhan Ali <alifm@linux.ibm.com>
+ *            Eric Farman <farman@linux.ibm.com>
+ */
+
+#include <linux/slab.h>
+#include <linux/vfio.h>
+#include "vfio_ccw_private.h"
+
+static ssize_t vfio_ccw_schib_region_read(struct vfio_ccw_private *private,
+					  char __user *buf, size_t count,
+					  loff_t *ppos)
+{
+	unsigned int i = VFIO_CCW_OFFSET_TO_INDEX(*ppos) - VFIO_CCW_NUM_REGIONS;
+	loff_t pos = *ppos & VFIO_CCW_OFFSET_MASK;
+	struct ccw_schib_region *region;
+	int ret;
+
+	if (pos + count > sizeof(*region))
+		return -EINVAL;
+
+	mutex_lock(&private->io_mutex);
+	region = private->region[i].data;
+
+	if (cio_update_schib(private->sch)) {
+		ret = -ENODEV;
+		goto out;
+	}
+
+	memcpy(region, &private->sch->schib, sizeof(*region));
+
+	if (copy_to_user(buf, (void *)region + pos, count)) {
+		ret = -EFAULT;
+		goto out;
+	}
+
+	ret = count;
+
+out:
+	mutex_unlock(&private->io_mutex);
+	return ret;
+}
+
+static ssize_t vfio_ccw_schib_region_write(struct vfio_ccw_private *private,
+					   const char __user *buf, size_t count,
+					   loff_t *ppos)
+{
+	return -EINVAL;
+}
+
+
+static void vfio_ccw_schib_region_release(struct vfio_ccw_private *private,
+					  struct vfio_ccw_region *region)
+{
+
+}
+
+static const struct vfio_ccw_regops vfio_ccw_schib_region_ops = {
+	.read = vfio_ccw_schib_region_read,
+	.write = vfio_ccw_schib_region_write,
+	.release = vfio_ccw_schib_region_release,
+};
+
+int vfio_ccw_register_schib_dev_regions(struct vfio_ccw_private *private)
+{
+	return vfio_ccw_register_dev_region(private,
+					    VFIO_REGION_SUBTYPE_CCW_SCHIB,
+					    &vfio_ccw_schib_region_ops,
+					    sizeof(struct ccw_schib_region),
+					    VFIO_REGION_INFO_FLAG_READ,
+					    private->schib_region);
+}
+
+static ssize_t vfio_ccw_crw_region_read(struct vfio_ccw_private *private,
+					char __user *buf, size_t count,
+					loff_t *ppos)
+{
+	unsigned int i = VFIO_CCW_OFFSET_TO_INDEX(*ppos) - VFIO_CCW_NUM_REGIONS;
+	loff_t pos = *ppos & VFIO_CCW_OFFSET_MASK;
+	struct ccw_crw_region *region;
+	struct vfio_ccw_crw *crw;
+	int ret;
+
+	if (pos + count > sizeof(*region))
+		return -EINVAL;
+
+	crw = list_first_entry_or_null(&private->crw,
+				       struct vfio_ccw_crw, next);
+
+	if (crw)
+		list_del(&crw->next);
+
+	mutex_lock(&private->io_mutex);
+	region = private->region[i].data;
+
+	if (crw)
+		memcpy(&region->crw, &crw->crw, sizeof(region->crw));
+
+	if (copy_to_user(buf, (void *)region + pos, count))
+		ret = -EFAULT;
+	else
+		ret = count;
+
+	region->crw = 0;
+
+	mutex_unlock(&private->io_mutex);
+
+	kfree(crw);
+
+	/* Notify the guest if more CRWs are on our queue */
+	if (!list_empty(&private->crw) && private->crw_trigger)
+		eventfd_signal(private->crw_trigger, 1);
+
+	return ret;
+}
+
+static ssize_t vfio_ccw_crw_region_write(struct vfio_ccw_private *private,
+					 const char __user *buf, size_t count,
+					 loff_t *ppos)
+{
+	return -EINVAL;
+}
+
+static void vfio_ccw_crw_region_release(struct vfio_ccw_private *private,
+					struct vfio_ccw_region *region)
+{
+
+}
+
+static const struct vfio_ccw_regops vfio_ccw_crw_region_ops = {
+	.read = vfio_ccw_crw_region_read,
+	.write = vfio_ccw_crw_region_write,
+	.release = vfio_ccw_crw_region_release,
+};
+
+int vfio_ccw_register_crw_dev_regions(struct vfio_ccw_private *private)
+{
+	return vfio_ccw_register_dev_region(private,
+					    VFIO_REGION_SUBTYPE_CCW_CRW,
+					    &vfio_ccw_crw_region_ops,
+					    sizeof(struct ccw_crw_region),
+					    VFIO_REGION_INFO_FLAG_READ,
+					    private->crw_region);
+}
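
Both read handlers in this new file share one skeleton: decode the region index and offset from *ppos, bounds-check the request against the region size, and copy out under io_mutex. Stripped down (hypothetical helper, reusing VFIO_CCW_OFFSET_MASK from above):

	static ssize_t region_read(const void *region, size_t region_size,
				   char __user *buf, size_t count, loff_t *ppos)
	{
		loff_t pos = *ppos & VFIO_CCW_OFFSET_MASK;

		if (pos + count > region_size)	/* no reads past the region */
			return -EINVAL;

		if (copy_to_user(buf, region + pos, count))
			return -EFAULT;

		return count;
	}
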
diff --git a/drivers/s390/cio/vfio_ccw_cp.c b/drivers/s390/cio/vfio_ccw_cp.c
index 9628e0f..8d1b277 100644
--- a/drivers/s390/cio/vfio_ccw_cp.c
+++ b/drivers/s390/cio/vfio_ccw_cp.c
@@ -8,6 +8,7 @@
  *            Xiao Feng Ren <renxiaof@linux.vnet.ibm.com>
  */
 
+#include <linux/ratelimit.h>
 #include <linux/mm.h>
 #include <linux/slab.h>
 #include <linux/iommu.h>
@@ -625,15 +626,16 @@
  * the target channel program from @orb->cmd.iova to the new ccwchain(s).
  *
  * Limitations:
- * 1. Supports only prefetch enabled mode.
- * 2. Supports idal(c64) ccw chaining.
- * 3. Supports 4k idaw.
+ * 1. Supports idal(c64) ccw chaining.
+ * 2. Supports 4k idaw.
  *
  * Returns:
  *   %0 on success and a negative error value on failure.
  */
 int cp_init(struct channel_program *cp, struct device *mdev, union orb *orb)
 {
+	/* custom ratelimit used to avoid flood during guest IPL */
+	static DEFINE_RATELIMIT_STATE(ratelimit_state, 5 * HZ, 1);
 	int ret;
 
 	/* this is an error in the caller */
@@ -641,11 +643,14 @@
 		return -EBUSY;
 
 	/*
-	 * XXX:
-	 * Only support prefetch enable mode now.
+	 * We only support prefetching the channel program. We assume all channel
+	 * programs executed by supported guests likewise support prefetching.
+	 * Executing a channel program that does not specify prefetching will
+	 * typically not cause an error, but a warning is issued to help identify
+	 * the problem if something does break.
 	 */
-	if (!orb->cmd.pfch)
-		return -EOPNOTSUPP;
+	if (!orb->cmd.pfch && __ratelimit(&ratelimit_state))
+		dev_warn(mdev, "Prefetching channel program even though prefetch not specified in ORB");
 
 	INIT_LIST_HEAD(&cp->ccwchain_list);
 	memcpy(&cp->orb, orb, sizeof(*orb));
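
On the ratelimit above: DEFINE_RATELIMIT_STATE(name, interval, burst) with (5 * HZ, 1) permits at most one message per five-second window, and __ratelimit() returns nonzero when the message may be emitted. Self-contained sketch of the same pattern (hypothetical caller):

	#include <linux/device.h>
	#include <linux/ratelimit.h>

	static void warn_no_prefetch(struct device *dev)
	{
		/* at most one warning every 5 seconds, as in cp_init() above */
		static DEFINE_RATELIMIT_STATE(rs, 5 * HZ, 1);

		if (__ratelimit(&rs))
			dev_warn(dev, "prefetch not requested, prefetching anyway\n");
	}
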
diff --git a/drivers/s390/cio/vfio_ccw_cp.h b/drivers/s390/cio/vfio_ccw_cp.h
index 7cdc380..ba31240 100644
--- a/drivers/s390/cio/vfio_ccw_cp.h
+++ b/drivers/s390/cio/vfio_ccw_cp.h
@@ -15,6 +15,7 @@
 #include <asm/scsw.h>
 
 #include "orb.h"
+#include "vfio_ccw_trace.h"
 
 /*
  * Max length for ccw chain.
diff --git a/drivers/s390/cio/vfio_ccw_drv.c b/drivers/s390/cio/vfio_ccw_drv.c
index fd590d1..9b61e9b 100644
--- a/drivers/s390/cio/vfio_ccw_drv.c
+++ b/drivers/s390/cio/vfio_ccw_drv.c
@@ -19,6 +19,7 @@
 
 #include <asm/isc.h>
 
+#include "chp.h"
 #include "ioasm.h"
 #include "css.h"
 #include "vfio_ccw_private.h"
@@ -26,6 +27,8 @@
 struct workqueue_struct *vfio_ccw_work_q;
 static struct kmem_cache *vfio_ccw_io_region;
 static struct kmem_cache *vfio_ccw_cmd_region;
+static struct kmem_cache *vfio_ccw_schib_region;
+static struct kmem_cache *vfio_ccw_crw_region;
 
 debug_info_t *vfio_ccw_debug_msg_id;
 debug_info_t *vfio_ccw_debug_trace_id;
@@ -113,6 +116,16 @@
 		eventfd_signal(private->io_trigger, 1);
 }
 
+static void vfio_ccw_crw_todo(struct work_struct *work)
+{
+	struct vfio_ccw_private *private;
+
+	private = container_of(work, struct vfio_ccw_private, crw_work);
+
+	if (!list_empty(&private->crw) && private->crw_trigger)
+		eventfd_signal(private->crw_trigger, 1);
+}
+
 /*
  * Css driver callbacks
  */
@@ -124,6 +137,18 @@
 	vfio_ccw_fsm_event(private, VFIO_CCW_EVENT_INTERRUPT);
 }
 
+static void vfio_ccw_free_regions(struct vfio_ccw_private *private)
+{
+	if (private->crw_region)
+		kmem_cache_free(vfio_ccw_crw_region, private->crw_region);
+	if (private->schib_region)
+		kmem_cache_free(vfio_ccw_schib_region, private->schib_region);
+	if (private->cmd_region)
+		kmem_cache_free(vfio_ccw_cmd_region, private->cmd_region);
+	if (private->io_region)
+		kmem_cache_free(vfio_ccw_io_region, private->io_region);
+}
+
 static int vfio_ccw_sch_probe(struct subchannel *sch)
 {
 	struct pmcw *pmcw = &sch->schib.pmcw;
@@ -155,6 +180,18 @@
 	if (!private->cmd_region)
 		goto out_free;
 
+	private->schib_region = kmem_cache_zalloc(vfio_ccw_schib_region,
+						  GFP_KERNEL | GFP_DMA);
+
+	if (!private->schib_region)
+		goto out_free;
+
+	private->crw_region = kmem_cache_zalloc(vfio_ccw_crw_region,
+						GFP_KERNEL | GFP_DMA);
+
+	if (!private->crw_region)
+		goto out_free;
+
 	private->sch = sch;
 	dev_set_drvdata(&sch->dev, private);
 	mutex_init(&private->io_mutex);
@@ -167,7 +204,9 @@
 	if (ret)
 		goto out_free;
 
+	INIT_LIST_HEAD(&private->crw);
 	INIT_WORK(&private->io_work, vfio_ccw_sch_io_todo);
+	INIT_WORK(&private->crw_work, vfio_ccw_crw_todo);
 	atomic_set(&private->avail, 1);
 	private->state = VFIO_CCW_STATE_STANDBY;
 
@@ -189,10 +228,7 @@
 	cio_disable_subchannel(sch);
 out_free:
 	dev_set_drvdata(&sch->dev, NULL);
-	if (private->cmd_region)
-		kmem_cache_free(vfio_ccw_cmd_region, private->cmd_region);
-	if (private->io_region)
-		kmem_cache_free(vfio_ccw_io_region, private->io_region);
+	vfio_ccw_free_regions(private);
 	kfree(private->cp.guest_cp);
 	kfree(private);
 	return ret;
@@ -201,15 +237,20 @@
 static int vfio_ccw_sch_remove(struct subchannel *sch)
 {
 	struct vfio_ccw_private *private = dev_get_drvdata(&sch->dev);
+	struct vfio_ccw_crw *crw, *temp;
 
 	vfio_ccw_sch_quiesce(sch);
 
+	list_for_each_entry_safe(crw, temp, &private->crw, next) {
+		list_del(&crw->next);
+		kfree(crw);
+	}
+
 	vfio_ccw_mdev_unreg(sch);
 
 	dev_set_drvdata(&sch->dev, NULL);
 
-	kmem_cache_free(vfio_ccw_cmd_region, private->cmd_region);
-	kmem_cache_free(vfio_ccw_io_region, private->io_region);
+	vfio_ccw_free_regions(private);
 	kfree(private->cp.guest_cp);
 	kfree(private);
 
@@ -266,6 +307,83 @@
 	return rc;
 }
 
+static void vfio_ccw_queue_crw(struct vfio_ccw_private *private,
+			       unsigned int rsc,
+			       unsigned int erc,
+			       unsigned int rsid)
+{
+	struct vfio_ccw_crw *crw;
+
+	/*
+	 * If unable to allocate a CRW, just drop the event and
+	 * carry on.  The guest will either see a later one or
+	 * learn when it issues its own store subchannel.
+	 */
+	crw = kzalloc(sizeof(*crw), GFP_ATOMIC);
+	if (!crw)
+		return;
+
+	/*
+	 * Build the CRW based on the inputs given to us.
+	 */
+	crw->crw.rsc = rsc;
+	crw->crw.erc = erc;
+	crw->crw.rsid = rsid;
+
+	list_add_tail(&crw->next, &private->crw);
+	queue_work(vfio_ccw_work_q, &private->crw_work);
+}
+
+static int vfio_ccw_chp_event(struct subchannel *sch,
+			      struct chp_link *link, int event)
+{
+	struct vfio_ccw_private *private = dev_get_drvdata(&sch->dev);
+	int mask = chp_ssd_get_mask(&sch->ssd_info, link);
+	int retry = 255;
+
+	if (!private || !mask)
+		return 0;
+
+	trace_vfio_ccw_chp_event(private->sch->schid, mask, event);
+	VFIO_CCW_MSG_EVENT(2, "%pUl (%x.%x.%04x): mask=0x%x event=%d\n",
+			   mdev_uuid(private->mdev), sch->schid.cssid,
+			   sch->schid.ssid, sch->schid.sch_no,
+			   mask, event);
+
+	if (cio_update_schib(sch))
+		return -ENODEV;
+
+	switch (event) {
+	case CHP_VARY_OFF:
+		/* Path logically turned off */
+		sch->opm &= ~mask;
+		sch->lpm &= ~mask;
+		if (sch->schib.pmcw.lpum & mask)
+			cio_cancel_halt_clear(sch, &retry);
+		break;
+	case CHP_OFFLINE:
+		/* Path is gone */
+		if (sch->schib.pmcw.lpum & mask)
+			cio_cancel_halt_clear(sch, &retry);
+		vfio_ccw_queue_crw(private, CRW_RSC_CPATH, CRW_ERC_PERRN,
+				   link->chpid.id);
+		break;
+	case CHP_VARY_ON:
+		/* Path logically turned on */
+		sch->opm |= mask;
+		sch->lpm |= mask;
+		break;
+	case CHP_ONLINE:
+		/* Path became available */
+		sch->lpm |= mask & sch->opm;
+		vfio_ccw_queue_crw(private, CRW_RSC_CPATH, CRW_ERC_INIT,
+				   link->chpid.id);
+		break;
+	}
+
+	return 0;
+}
+
 static struct css_device_id vfio_ccw_sch_ids[] = {
 	{ .match_flags = 0x1, .type = SUBCHANNEL_TYPE_IO, },
 	{ /* end of list */ },
@@ -283,6 +401,7 @@
 	.remove = vfio_ccw_sch_remove,
 	.shutdown = vfio_ccw_sch_shutdown,
 	.sch_event = vfio_ccw_sch_event,
+	.chp_event = vfio_ccw_chp_event,
 };
 
 static int __init vfio_ccw_debug_init(void)
@@ -312,6 +431,14 @@
 	debug_unregister(vfio_ccw_debug_trace_id);
 }
 
+static void vfio_ccw_destroy_regions(void)
+{
+	kmem_cache_destroy(vfio_ccw_crw_region);
+	kmem_cache_destroy(vfio_ccw_schib_region);
+	kmem_cache_destroy(vfio_ccw_cmd_region);
+	kmem_cache_destroy(vfio_ccw_io_region);
+}
+
 static int __init vfio_ccw_sch_init(void)
 {
 	int ret;
@@ -344,6 +471,26 @@
 		goto out_err;
 	}
 
+	vfio_ccw_schib_region = kmem_cache_create_usercopy("vfio_ccw_schib_region",
+					sizeof(struct ccw_schib_region), 0,
+					SLAB_ACCOUNT, 0,
+					sizeof(struct ccw_schib_region), NULL);
+
+	if (!vfio_ccw_schib_region) {
+		ret = -ENOMEM;
+		goto out_err;
+	}
+
+	vfio_ccw_crw_region = kmem_cache_create_usercopy("vfio_ccw_crw_region",
+					sizeof(struct ccw_crw_region), 0,
+					SLAB_ACCOUNT, 0,
+					sizeof(struct ccw_crw_region), NULL);
+
+	if (!vfio_ccw_crw_region) {
+		ret = -ENOMEM;
+		goto out_err;
+	}
+
 	isc_register(VFIO_CCW_ISC);
 	ret = css_driver_register(&vfio_ccw_sch_driver);
 	if (ret) {
@@ -354,8 +501,7 @@
 	return ret;
 
 out_err:
-	kmem_cache_destroy(vfio_ccw_cmd_region);
-	kmem_cache_destroy(vfio_ccw_io_region);
+	vfio_ccw_destroy_regions();
 	destroy_workqueue(vfio_ccw_work_q);
 	vfio_ccw_debug_exit();
 	return ret;
@@ -365,8 +511,7 @@
 {
 	css_driver_unregister(&vfio_ccw_sch_driver);
 	isc_unregister(VFIO_CCW_ISC);
-	kmem_cache_destroy(vfio_ccw_io_region);
-	kmem_cache_destroy(vfio_ccw_cmd_region);
+	vfio_ccw_destroy_regions();
 	destroy_workqueue(vfio_ccw_work_q);
 	vfio_ccw_debug_exit();
 }
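
The CRW plumbing added to this file is a small producer/consumer pipeline: the channel-path event callback may run in atomic context (hence GFP_ATOMIC), so it only queues the CRW; a workqueue item signals the eventfd from process context, and userspace drains one CRW per read of the CRW region. Condensed from the hunks above:

	/* producer (vfio_ccw_queue_crw), possibly atomic context */
	crw = kzalloc(sizeof(*crw), GFP_ATOMIC);
	if (crw) {				/* on failure the event is dropped */
		crw->crw.rsc = rsc;
		crw->crw.erc = erc;
		crw->crw.rsid = rsid;
		list_add_tail(&crw->next, &private->crw);
		queue_work(vfio_ccw_work_q, &private->crw_work);
	}

	/* consumer (vfio_ccw_crw_todo), process context: poke userspace */
	if (!list_empty(&private->crw) && private->crw_trigger)
		eventfd_signal(private->crw_trigger, 1);
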
diff --git a/drivers/s390/cio/vfio_ccw_fsm.c b/drivers/s390/cio/vfio_ccw_fsm.c
index 4a1e727..e435a9c 100644
--- a/drivers/s390/cio/vfio_ccw_fsm.c
+++ b/drivers/s390/cio/vfio_ccw_fsm.c
@@ -15,9 +15,6 @@
 #include "ioasm.h"
 #include "vfio_ccw_private.h"
 
-#define CREATE_TRACE_POINTS
-#include "vfio_ccw_trace.h"
-
 static int fsm_io_helper(struct vfio_ccw_private *private)
 {
 	struct subchannel *sch;
@@ -321,8 +318,9 @@
 	}
 
 err_out:
-	trace_vfio_ccw_io_fctl(scsw->cmd.fctl, schid,
-			       io_region->ret_code, errstr);
+	private->state = VFIO_CCW_STATE_IDLE;
+	trace_vfio_ccw_fsm_io_request(scsw->cmd.fctl, schid,
+				      io_region->ret_code, errstr);
 }
 
 /*
@@ -344,6 +342,10 @@
 		/* should not happen? */
 		cmd_region->ret_code = -EINVAL;
 	}
+
+	trace_vfio_ccw_fsm_async_request(get_schid(private),
+					 cmd_region->command,
+					 cmd_region->ret_code);
 }
 
 /*
diff --git a/drivers/s390/cio/vfio_ccw_ops.c b/drivers/s390/cio/vfio_ccw_ops.c
index 15df0a5..2280f51 100644
--- a/drivers/s390/cio/vfio_ccw_ops.c
+++ b/drivers/s390/cio/vfio_ccw_ops.c
@@ -172,8 +172,22 @@
 
 	ret = vfio_ccw_register_async_dev_regions(private);
 	if (ret)
-		vfio_unregister_notifier(mdev_dev(mdev), VFIO_IOMMU_NOTIFY,
-					 &private->nb);
+		goto out_unregister;
+
+	ret = vfio_ccw_register_schib_dev_regions(private);
+	if (ret)
+		goto out_unregister;
+
+	ret = vfio_ccw_register_crw_dev_regions(private);
+	if (ret)
+		goto out_unregister;
+
+	return ret;
+
+out_unregister:
+	vfio_ccw_unregister_dev_regions(private);
+	vfio_unregister_notifier(mdev_dev(mdev), VFIO_IOMMU_NOTIFY,
+				 &private->nb);
 	return ret;
 }
 
@@ -181,7 +195,6 @@
 {
 	struct vfio_ccw_private *private =
 		dev_get_drvdata(mdev_parent_dev(mdev));
-	int i;
 
 	if ((private->state != VFIO_CCW_STATE_NOT_OPER) &&
 	    (private->state != VFIO_CCW_STATE_STANDBY)) {
@@ -191,15 +204,9 @@
 	}
 
 	cp_free(&private->cp);
+	vfio_ccw_unregister_dev_regions(private);
 	vfio_unregister_notifier(mdev_dev(mdev), VFIO_IOMMU_NOTIFY,
 				 &private->nb);
-
-	for (i = 0; i < private->num_regions; i++)
-		private->region[i].ops->release(private, &private->region[i]);
-
-	private->num_regions = 0;
-	kfree(private->region);
-	private->region = NULL;
 }
 
 static ssize_t vfio_ccw_mdev_read_io_region(struct vfio_ccw_private *private,
@@ -269,8 +276,6 @@
 	}
 
 	vfio_ccw_fsm_event(private, VFIO_CCW_EVENT_IO_REQ);
-	if (region->ret_code != 0)
-		private->state = VFIO_CCW_STATE_IDLE;
 	ret = (region->ret_code != 0) ? region->ret_code : count;
 
 out_unlock:
@@ -384,17 +389,22 @@
 
 static int vfio_ccw_mdev_get_irq_info(struct vfio_irq_info *info)
 {
-	if (info->index != VFIO_CCW_IO_IRQ_INDEX)
+	switch (info->index) {
+	case VFIO_CCW_IO_IRQ_INDEX:
+	case VFIO_CCW_CRW_IRQ_INDEX:
+		info->count = 1;
+		info->flags = VFIO_IRQ_INFO_EVENTFD;
+		break;
+	default:
 		return -EINVAL;
-
-	info->count = 1;
-	info->flags = VFIO_IRQ_INFO_EVENTFD;
+	}
 
 	return 0;
 }
 
 static int vfio_ccw_mdev_set_irqs(struct mdev_device *mdev,
 				  uint32_t flags,
+				  uint32_t index,
 				  void __user *data)
 {
 	struct vfio_ccw_private *private;
@@ -404,7 +414,17 @@
 		return -EINVAL;
 
 	private = dev_get_drvdata(mdev_parent_dev(mdev));
-	ctx = &private->io_trigger;
+
+	switch (index) {
+	case VFIO_CCW_IO_IRQ_INDEX:
+		ctx = &private->io_trigger;
+		break;
+	case VFIO_CCW_CRW_IRQ_INDEX:
+		ctx = &private->crw_trigger;
+		break;
+	default:
+		return -EINVAL;
+	}
 
 	switch (flags & VFIO_IRQ_SET_DATA_TYPE_MASK) {
 	case VFIO_IRQ_SET_DATA_NONE:
@@ -482,6 +502,17 @@
 	return 0;
 }
 
+void vfio_ccw_unregister_dev_regions(struct vfio_ccw_private *private)
+{
+	int i;
+
+	for (i = 0; i < private->num_regions; i++)
+		private->region[i].ops->release(private, &private->region[i]);
+	private->num_regions = 0;
+	kfree(private->region);
+	private->region = NULL;
+}
+
 static ssize_t vfio_ccw_mdev_ioctl(struct mdev_device *mdev,
 				   unsigned int cmd,
 				   unsigned long arg)
@@ -565,7 +596,7 @@
 			return ret;
 
 		data = (void __user *)(arg + minsz);
-		return vfio_ccw_mdev_set_irqs(mdev, hdr.flags, data);
+		return vfio_ccw_mdev_set_irqs(mdev, hdr.flags, hdr.index, data);
 	}
 	case VFIO_DEVICE_RESET:
 		return vfio_ccw_mdev_reset(mdev);
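
The index switch in vfio_ccw_mdev_set_irqs() above only selects which eventfd_ctx pointer to update; turning the file descriptor from userspace into that context happens in the unchanged VFIO_IRQ_SET_DATA_EVENTFD branch, which is not part of this hunk. A sketch of the usual VFIO wiring, stated as an assumption about that omitted code:

	#include <linux/err.h>
	#include <linux/eventfd.h>

	static int set_trigger(struct eventfd_ctx **ctx, int32_t fd)
	{
		struct eventfd_ctx *efdctx;

		if (fd == -1) {			/* userspace tears the trigger down */
			if (*ctx)
				eventfd_ctx_put(*ctx);
			*ctx = NULL;
			return 0;
		}

		efdctx = eventfd_ctx_fdget(fd);	/* take a reference on the eventfd */
		if (IS_ERR(efdctx))
			return PTR_ERR(efdctx);
		if (*ctx)
			eventfd_ctx_put(*ctx);
		*ctx = efdctx;
		return 0;
	}
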
diff --git a/drivers/s390/cio/vfio_ccw_private.h b/drivers/s390/cio/vfio_ccw_private.h
index bbe9bab..8723156 100644
--- a/drivers/s390/cio/vfio_ccw_private.h
+++ b/drivers/s390/cio/vfio_ccw_private.h
@@ -17,6 +17,7 @@
 #include <linux/eventfd.h>
 #include <linux/workqueue.h>
 #include <linux/vfio_ccw.h>
+#include <asm/crw.h>
 #include <asm/debug.h>
 
 #include "css.h"
@@ -53,8 +54,16 @@
 				 unsigned int subtype,
 				 const struct vfio_ccw_regops *ops,
 				 size_t size, u32 flags, void *data);
+void vfio_ccw_unregister_dev_regions(struct vfio_ccw_private *private);
 
 int vfio_ccw_register_async_dev_regions(struct vfio_ccw_private *private);
+int vfio_ccw_register_schib_dev_regions(struct vfio_ccw_private *private);
+int vfio_ccw_register_crw_dev_regions(struct vfio_ccw_private *private);
+
+struct vfio_ccw_crw {
+	struct list_head	next;
+	struct crw		crw;
+};
 
 /**
  * struct vfio_ccw_private
@@ -68,6 +77,8 @@
  * @io_mutex: protect against concurrent update of I/O regions
  * @region: additional regions for other subchannel operations
  * @cmd_region: MMIO region for asynchronous I/O commands other than START
+ * @schib_region: MMIO region for SCHIB information
+ * @crw_region: MMIO region for getting channel report words
  * @num_regions: number of additional regions
  * @cp: channel program for the current I/O operation
  * @irb: irb info received from interrupt
@@ -86,14 +97,19 @@
 	struct mutex		io_mutex;
 	struct vfio_ccw_region *region;
 	struct ccw_cmd_region	*cmd_region;
+	struct ccw_schib_region *schib_region;
+	struct ccw_crw_region	*crw_region;
 	int num_regions;
 
 	struct channel_program	cp;
 	struct irb		irb;
 	union scsw		scsw;
+	struct list_head	crw;
 
 	struct eventfd_ctx	*io_trigger;
+	struct eventfd_ctx	*crw_trigger;
 	struct work_struct	io_work;
+	struct work_struct	crw_work;
 } __aligned(8);
 
 extern int vfio_ccw_mdev_reg(struct subchannel *sch);
@@ -135,6 +151,7 @@
 static inline void vfio_ccw_fsm_event(struct vfio_ccw_private *private,
 				     int event)
 {
+	trace_vfio_ccw_fsm_event(private->sch->schid, private->state, event);
 	vfio_ccw_jumptable[private->state][event](private, event);
 }
 
diff --git a/drivers/s390/cio/vfio_ccw_trace.c b/drivers/s390/cio/vfio_ccw_trace.c
new file mode 100644
index 0000000..4a02059
--- /dev/null
+++ b/drivers/s390/cio/vfio_ccw_trace.c
@@ -0,0 +1,15 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Tracepoint definitions for vfio_ccw
+ *
+ * Copyright IBM Corp. 2019
+ * Author(s): Eric Farman <farman@linux.ibm.com>
+ */
+
+#define CREATE_TRACE_POINTS
+#include "vfio_ccw_trace.h"
+
+EXPORT_TRACEPOINT_SYMBOL(vfio_ccw_chp_event);
+EXPORT_TRACEPOINT_SYMBOL(vfio_ccw_fsm_async_request);
+EXPORT_TRACEPOINT_SYMBOL(vfio_ccw_fsm_event);
+EXPORT_TRACEPOINT_SYMBOL(vfio_ccw_fsm_io_request);
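
CREATE_TRACE_POINTS must be defined in exactly one translation unit before including the trace header, so the tracepoint bodies are emitted once; every other vfio_ccw source includes the header plainly and can fire the events. Hypothetical caller:

	#include "vfio_ccw_trace.h"

	static void fsm_step(struct subchannel_id schid, int state, int event)
	{
		/* shows up as vfio_ccw:vfio_ccw_fsm_event in the trace buffer */
		trace_vfio_ccw_fsm_event(schid, state, event);
	}
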
diff --git a/drivers/s390/cio/vfio_ccw_trace.h b/drivers/s390/cio/vfio_ccw_trace.h
index b1da53d..62fb305 100644
--- a/drivers/s390/cio/vfio_ccw_trace.h
+++ b/drivers/s390/cio/vfio_ccw_trace.h
@@ -1,5 +1,5 @@
-/* SPDX-License-Identifier: GPL-2.0
- * Tracepoints for vfio_ccw driver
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Tracepoints for vfio_ccw driver
  *
  * Copyright IBM Corp. 2018
  *
@@ -7,6 +7,8 @@
  *            Halil Pasic <pasic@linux.vnet.ibm.com>
  */
 
+#include "cio.h"
+
 #undef TRACE_SYSTEM
 #define TRACE_SYSTEM vfio_ccw
 
@@ -15,28 +17,118 @@
 
 #include <linux/tracepoint.h>
 
-TRACE_EVENT(vfio_ccw_io_fctl,
+TRACE_EVENT(vfio_ccw_chp_event,
+	TP_PROTO(struct subchannel_id schid,
+		 int mask,
+		 int event),
+	TP_ARGS(schid, mask, event),
+
+	TP_STRUCT__entry(
+		__field(u8, cssid)
+		__field(u8, ssid)
+		__field(u16, sch_no)
+		__field(int, mask)
+		__field(int, event)
+	),
+
+	TP_fast_assign(
+		__entry->cssid = schid.cssid;
+		__entry->ssid = schid.ssid;
+		__entry->sch_no = schid.sch_no;
+		__entry->mask = mask;
+		__entry->event = event;
+	),
+
+	TP_printk("schid=%x.%x.%04x mask=0x%x event=%d",
+		  __entry->cssid,
+		  __entry->ssid,
+		  __entry->sch_no,
+		  __entry->mask,
+		  __entry->event)
+);
+
+TRACE_EVENT(vfio_ccw_fsm_async_request,
+	TP_PROTO(struct subchannel_id schid,
+		 int command,
+		 int errno),
+	TP_ARGS(schid, command, errno),
+
+	TP_STRUCT__entry(
+		__field(u8, cssid)
+		__field(u8, ssid)
+		__field(u16, sch_no)
+		__field(int, command)
+		__field(int, errno)
+	),
+
+	TP_fast_assign(
+		__entry->cssid = schid.cssid;
+		__entry->ssid = schid.ssid;
+		__entry->sch_no = schid.sch_no;
+		__entry->command = command;
+		__entry->errno = errno;
+	),
+
+	TP_printk("schid=%x.%x.%04x command=0x%x errno=%d",
+		  __entry->cssid,
+		  __entry->ssid,
+		  __entry->sch_no,
+		  __entry->command,
+		  __entry->errno)
+);
+
+TRACE_EVENT(vfio_ccw_fsm_event,
+	TP_PROTO(struct subchannel_id schid, int state, int event),
+	TP_ARGS(schid, state, event),
+
+	TP_STRUCT__entry(
+		__field(u8, cssid)
+		__field(u8, ssid)
+		__field(u16, schno)
+		__field(int, state)
+		__field(int, event)
+	),
+
+	TP_fast_assign(
+		__entry->cssid = schid.cssid;
+		__entry->ssid = schid.ssid;
+		__entry->schno = schid.sch_no;
+		__entry->state = state;
+		__entry->event = event;
+	),
+
+	TP_printk("schid=%x.%x.%04x state=%d event=%d",
+		__entry->cssid, __entry->ssid, __entry->schno,
+		__entry->state,
+		__entry->event)
+);
+
+TRACE_EVENT(vfio_ccw_fsm_io_request,
 	TP_PROTO(int fctl, struct subchannel_id schid, int errno, char *errstr),
 	TP_ARGS(fctl, schid, errno, errstr),
 
 	TP_STRUCT__entry(
+		__field(u8, cssid)
+		__field(u8, ssid)
+		__field(u16, sch_no)
 		__field(int, fctl)
-		__field_struct(struct subchannel_id, schid)
 		__field(int, errno)
 		__field(char*, errstr)
 	),
 
 	TP_fast_assign(
+		__entry->cssid = schid.cssid;
+		__entry->ssid = schid.ssid;
+		__entry->sch_no = schid.sch_no;
 		__entry->fctl = fctl;
-		__entry->schid = schid;
 		__entry->errno = errno;
 		__entry->errstr = errstr;
 	),
 
-	TP_printk("schid=%x.%x.%04x fctl=%x errno=%d info=%s",
-		  __entry->schid.cssid,
-		  __entry->schid.ssid,
-		  __entry->schid.sch_no,
+	TP_printk("schid=%x.%x.%04x fctl=0x%x errno=%d info=%s",
+		  __entry->cssid,
+		  __entry->ssid,
+		  __entry->sch_no,
 		  __entry->fctl,
 		  __entry->errno,
 		  __entry->errstr)
diff --git a/drivers/s390/crypto/Makefile b/drivers/s390/crypto/Makefile
index 52aa95c..22d2db6 100644
--- a/drivers/s390/crypto/Makefile
+++ b/drivers/s390/crypto/Makefile
@@ -7,7 +7,8 @@
 obj-$(subst m,y,$(CONFIG_ZCRYPT)) += ap.o
 # zcrypt_api.o and zcrypt_msgtype*.o depend on ap.o
 zcrypt-objs := zcrypt_api.o zcrypt_card.o zcrypt_queue.o
-zcrypt-objs += zcrypt_msgtype6.o zcrypt_msgtype50.o zcrypt_ccamisc.o
+zcrypt-objs += zcrypt_msgtype6.o zcrypt_msgtype50.o
+zcrypt-objs += zcrypt_ccamisc.o zcrypt_ep11misc.o
 obj-$(CONFIG_ZCRYPT) += zcrypt.o
 # adapter drivers depend on ap.o and zcrypt.o
 obj-$(CONFIG_ZCRYPT) += zcrypt_cex2c.o zcrypt_cex2a.o zcrypt_cex4.o
diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c
index 5256e3c..c00a288 100644
--- a/drivers/s390/crypto/ap_bus.c
+++ b/drivers/s390/crypto/ap_bus.c
@@ -18,13 +18,13 @@
 #include <linux/init.h>
 #include <linux/delay.h>
 #include <linux/err.h>
+#include <linux/freezer.h>
 #include <linux/interrupt.h>
 #include <linux/workqueue.h>
 #include <linux/slab.h>
 #include <linux/notifier.h>
 #include <linux/kthread.h>
 #include <linux/mutex.h>
-#include <linux/suspend.h>
 #include <asm/airq.h>
 #include <linux/atomic.h>
 #include <asm/isc.h>
@@ -62,8 +62,10 @@
 
 static struct device *ap_root_device;
 
-DEFINE_SPINLOCK(ap_list_lock);
-LIST_HEAD(ap_card_list);
+/* Hashtable of all queue devices on the AP bus */
+DEFINE_HASHTABLE(ap_queues, 8);
+/* lock used for the ap_queues hashtable */
+DEFINE_SPINLOCK(ap_queues_lock);
 
 /* Default permissions (ioctl, card and domain masking) */
 struct ap_perms ap_perms;
@@ -71,8 +73,7 @@
 DEFINE_MUTEX(ap_perms_mutex);
 EXPORT_SYMBOL(ap_perms_mutex);
 
-static struct ap_config_info *ap_configuration;
-static bool initialised;
+static struct ap_config_info *ap_qci_info;
 
 /*
  * AP bus related debug feature things.
@@ -91,7 +92,7 @@
  * Tasklet & timer for AP request polling and interrupts
  */
 static void ap_tasklet_fn(unsigned long);
-static DECLARE_TASKLET(ap_tasklet, ap_tasklet_fn, 0);
+static DECLARE_TASKLET_OLD(ap_tasklet, ap_tasklet_fn);
 static DECLARE_WAIT_QUEUE_HEAD(ap_poll_wait);
 static struct task_struct *ap_poll_kthread;
 static DEFINE_MUTEX(ap_poll_thread_mutex);
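
DECLARE_TASKLET() changed in v5.10 so that the callback receives a struct tasklet_struct * instead of an unsigned long; DECLARE_TASKLET_OLD() keeps the legacy signature (with a data value of 0) for not-yet-converted code, which is why only the declaration macro changes here. The legacy form:

	#include <linux/interrupt.h>

	static void my_bh(unsigned long data);	/* data is always 0 with _OLD */
	static DECLARE_TASKLET_OLD(my_tasklet, my_bh);

	static void my_bh(unsigned long data)
	{
		/* deferred work, kicked off via tasklet_schedule(&my_tasklet) */
	}
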
@@ -103,22 +104,17 @@
  */
 static unsigned long long poll_timeout = 250000;
 
-/* Suspend flag */
-static int ap_suspend_flag;
-/* Maximum domain id */
-static int ap_max_domain_id;
-/*
- * Flag to check if domain was set through module parameter domain=. This is
- * important when supsend and resume is done in a z/VM environment where the
- * domain might change.
- */
-static int user_set_domain;
+/* Maximum domain id, if not given via qci */
+static int ap_max_domain_id = 15;
+/* Maximum adapter id, if not given via qci */
+static int ap_max_adapter_id = 63;
+
 static struct bus_type ap_bus_type;
 
 /* Adapter interrupt definitions */
 static void ap_interrupt_handler(struct airq_struct *airq, bool floating);
 
-static int ap_airq_flag;
+static bool ap_irq_flag;
 
 static struct airq_struct ap_airq = {
 	.handler = ap_interrupt_handler,
@@ -126,15 +122,6 @@
 };
 
 /**
- * ap_using_interrupts() - Returns non-zero if interrupt support is
- * available.
- */
-static inline int ap_using_interrupts(void)
-{
-	return ap_airq_flag;
-}
-
-/**
  * ap_airq_ptr() - Get the address of the adapter interrupt indicator
  *
  * Returns the address of the local-summary-indicator of the adapter
@@ -143,7 +130,7 @@
  */
 void *ap_airq_ptr(void)
 {
-	if (ap_using_interrupts())
+	if (ap_irq_flag)
 		return ap_airq.lsi_ptr;
 	return NULL;
 }
@@ -159,12 +146,12 @@
 }
 
 /**
- * ap_configuration_available(): Test if AP configuration
- * information is available.
+ * ap_qci_available(): Test if AP configuration
+ * information can be queried via QCI subfunction.
  *
- * Returns 1 if AP configuration information is available.
+ * Returns 1 if subfunction PQAP(QCI) is available.
  */
-static int ap_configuration_available(void)
+static int ap_qci_available(void)
 {
 	return test_facility(12);
 }
@@ -187,22 +174,22 @@
  */
 static inline int ap_qact_available(void)
 {
-	if (ap_configuration)
-		return ap_configuration->qact;
+	if (ap_qci_info)
+		return ap_qci_info->qact;
 	return 0;
 }
 
 /*
- * ap_query_configuration(): Fetch cryptographic config info
+ * ap_fetch_qci_info(): Fetch cryptographic config info
  *
  * Returns the ap configuration info fetched via PQAP(QCI).
  * On success 0 is returned, on failure a negative errno
  * is returned, e.g. if the PQAP(QCI) instruction is not
  * available, the return value will be -EOPNOTSUPP.
  */
-static inline int ap_query_configuration(struct ap_config_info *info)
+static inline int ap_fetch_qci_info(struct ap_config_info *info)
 {
-	if (!ap_configuration_available())
+	if (!ap_qci_available())
 		return -EOPNOTSUPP;
 	if (!info)
 		return -EINVAL;
@@ -210,20 +197,39 @@
 }
 
 /**
- * ap_init_configuration(): Allocate and query configuration array.
- */
-static void ap_init_configuration(void)
-{
-	if (!ap_configuration_available())
-		return;
+ * ap_init_qci_info(): Allocate and query qci config info.
+ * It also updates the static variables ap_max_domain_id
+ * and ap_max_adapter_id if this info is available.
 
-	ap_configuration = kzalloc(sizeof(*ap_configuration), GFP_KERNEL);
-	if (!ap_configuration)
+ */
+static void __init ap_init_qci_info(void)
+{
+	if (!ap_qci_available()) {
+		AP_DBF_INFO("%s QCI not supported\n", __func__);
 		return;
-	if (ap_query_configuration(ap_configuration) != 0) {
-		kfree(ap_configuration);
-		ap_configuration = NULL;
+	}
+
+	ap_qci_info = kzalloc(sizeof(*ap_qci_info), GFP_KERNEL);
+	if (!ap_qci_info)
 		return;
+	if (ap_fetch_qci_info(ap_qci_info) != 0) {
+		kfree(ap_qci_info);
+		ap_qci_info = NULL;
+		return;
+	}
+	AP_DBF_INFO("%s successful fetched initial qci info\n", __func__);
+
+	if (ap_qci_info->apxa) {
+		if (ap_qci_info->Na) {
+			ap_max_adapter_id = ap_qci_info->Na;
+			AP_DBF_INFO("%s new ap_max_adapter_id is %d\n",
+				    __func__, ap_max_adapter_id);
+		}
+		if (ap_qci_info->Nd) {
+			ap_max_domain_id = ap_qci_info->Nd;
+			AP_DBF_INFO("%s new ap_max_domain_id is %d\n",
+				    __func__, ap_max_domain_id);
+		}
 	}
 }
 
@@ -238,7 +244,6 @@
 
 /*
  * ap_test_config_card_id(): Test, whether an AP card ID is configured.
- * @id AP card ID
  *
  * Returns 0 if the card is not configured
  *	   1 if the card is configured or
@@ -246,16 +251,16 @@
  */
 static inline int ap_test_config_card_id(unsigned int id)
 {
-	if (!ap_configuration)	/* QCI not supported */
-		/* only ids 0...3F may be probed */
-		return id < 0x40 ? 1 : 0;
-	return ap_test_config(ap_configuration->apm, id);
+	if (id > ap_max_adapter_id)
+		return 0;
+	if (ap_qci_info)
+		return ap_test_config(ap_qci_info->apm, id);
+	return 1;
 }
 
 /*
  * ap_test_config_usage_domain(): Test, whether an AP usage domain
  * is configured.
- * @domain AP usage domain ID
  *
  * Returns 0 if the usage domain is not configured
  *	   1 if the usage domain is configured or
@@ -263,9 +268,11 @@
  */
 int ap_test_config_usage_domain(unsigned int domain)
 {
-	if (!ap_configuration)	/* QCI not supported */
-		return domain < 16;
-	return ap_test_config(ap_configuration->aqm, domain);
+	if (domain > ap_max_domain_id)
+		return 0;
+	if (ap_qci_info)
+		return ap_test_config(ap_qci_info->aqm, domain);
+	return 1;
 }
 EXPORT_SYMBOL(ap_test_config_usage_domain);
 
@@ -279,43 +286,48 @@
  */
 int ap_test_config_ctrl_domain(unsigned int domain)
 {
-	if (!ap_configuration)	/* QCI not supported */
+	if (!ap_qci_info || domain > ap_max_domain_id)
 		return 0;
-	return ap_test_config(ap_configuration->adm, domain);
+	return ap_test_config(ap_qci_info->adm, domain);
 }
 EXPORT_SYMBOL(ap_test_config_ctrl_domain);
 
-/**
- * ap_query_queue(): Check if an AP queue is available.
- * @qid: The AP queue number
- * @queue_depth: Pointer to queue depth value
- * @device_type: Pointer to device type value
- * @facilities: Pointer to facility indicator
+/*
+ * ap_queue_info(): Check and get AP queue info.
+ * Returns true if TAPQ succeeded and the info is filled or
+ * false otherwise.
  */
-static int ap_query_queue(ap_qid_t qid, int *queue_depth, int *device_type,
-			  unsigned int *facilities)
+static bool ap_queue_info(ap_qid_t qid, int *q_type,
+			  unsigned int *q_fac, int *q_depth, bool *q_decfg)
 {
 	struct ap_queue_status status;
-	unsigned long info;
-	int nd;
+	unsigned long info = 0;
 
-	if (!ap_test_config_card_id(AP_QID_CARD(qid)))
-		return -ENODEV;
+	/* make sure we don't run into a specification exception */
+	if (AP_QID_CARD(qid) > ap_max_adapter_id ||
+	    AP_QID_QUEUE(qid) > ap_max_domain_id)
+		return false;
 
+	/* call TAPQ on this APQN */
 	status = ap_test_queue(qid, ap_apft_available(), &info);
 	switch (status.response_code) {
 	case AP_RESPONSE_NORMAL:
-		*queue_depth = (int)(info & 0xff);
-		*device_type = (int)((info >> 24) & 0xff);
-		*facilities = (unsigned int)(info >> 32);
-		/* Update maximum domain id */
-		nd = (info >> 16) & 0xff;
-		/* if N bit is available, z13 and newer */
-		if ((info & (1UL << 57)) && nd > 0)
-			ap_max_domain_id = nd;
-		else /* older machine types */
-			ap_max_domain_id = 15;
-		switch (*device_type) {
+	case AP_RESPONSE_RESET_IN_PROGRESS:
+	case AP_RESPONSE_DECONFIGURED:
+	case AP_RESPONSE_CHECKSTOPPED:
+	case AP_RESPONSE_BUSY:
+		/*
+		 * According to the architecture, the info field is filled
+		 * in all of these cases. An all-zero info is not possible,
+		 * as at least one of the mode bits is always set.
+		 */
+		if (WARN_ON_ONCE(!info))
+			return false;
+		*q_type = (int)((info >> 24) & 0xff);
+		*q_fac = (unsigned int)(info >> 32);
+		*q_depth = (int)(info & 0xff);
+		*q_decfg = status.response_code == AP_RESPONSE_DECONFIGURED;
+		switch (*q_type) {
 			/* For CEX2 and CEX3 the available functions
 			 * are not reflected by the facilities bits.
 			 * Instead it is coded into the type. So here
@@ -323,45 +335,39 @@
 			 */
 		case AP_DEVICE_TYPE_CEX2A:
 		case AP_DEVICE_TYPE_CEX3A:
-			*facilities |= 0x08000000;
+			*q_fac |= 0x08000000;
 			break;
 		case AP_DEVICE_TYPE_CEX2C:
 		case AP_DEVICE_TYPE_CEX3C:
-			*facilities |= 0x10000000;
+			*q_fac |= 0x10000000;
 			break;
 		default:
 			break;
 		}
-		return 0;
-	case AP_RESPONSE_Q_NOT_AVAIL:
-	case AP_RESPONSE_DECONFIGURED:
-	case AP_RESPONSE_CHECKSTOPPED:
-	case AP_RESPONSE_INVALID_ADDRESS:
-		return -ENODEV;
-	case AP_RESPONSE_RESET_IN_PROGRESS:
-	case AP_RESPONSE_OTHERWISE_CHANGED:
-	case AP_RESPONSE_BUSY:
-		return -EBUSY;
+		return true;
 	default:
-		BUG();
+		/*
+		 * A response code which indicates that there is no info available.
+		 */
+		return false;
 	}
 }
 
-void ap_wait(enum ap_wait wait)
+void ap_wait(enum ap_sm_wait wait)
 {
 	ktime_t hr_time;
 
 	switch (wait) {
-	case AP_WAIT_AGAIN:
-	case AP_WAIT_INTERRUPT:
-		if (ap_using_interrupts())
+	case AP_SM_WAIT_AGAIN:
+	case AP_SM_WAIT_INTERRUPT:
+		if (ap_irq_flag)
 			break;
 		if (ap_poll_kthread) {
 			wake_up(&ap_poll_wait);
 			break;
 		}
-		/* Fall through */
-	case AP_WAIT_TIMEOUT:
+		fallthrough;
+	case AP_SM_WAIT_TIMEOUT:
 		spin_lock_bh(&ap_poll_timer_lock);
 		if (!hrtimer_is_queued(&ap_poll_timer)) {
 			hr_time = poll_timeout;
@@ -370,7 +376,7 @@
 		}
 		spin_unlock_bh(&ap_poll_timer_lock);
 		break;
-	case AP_WAIT_NONE:
+	case AP_SM_WAIT_NONE:
 	default:
 		break;
 	}
@@ -386,10 +392,8 @@
 {
 	struct ap_queue *aq = from_timer(aq, t, timeout);
 
-	if (ap_suspend_flag)
-		return;
 	spin_lock_bh(&aq->lock);
-	ap_wait(ap_sm_event(aq, AP_EVENT_TIMEOUT));
+	ap_wait(ap_sm_event(aq, AP_SM_EVENT_TIMEOUT));
 	spin_unlock_bh(&aq->lock);
 }
 
@@ -401,8 +405,7 @@
  */
 static enum hrtimer_restart ap_poll_timeout(struct hrtimer *unused)
 {
-	if (!ap_suspend_flag)
-		tasklet_schedule(&ap_tasklet);
+	tasklet_schedule(&ap_tasklet);
 	return HRTIMER_NORESTART;
 }
 
@@ -413,8 +416,7 @@
 static void ap_interrupt_handler(struct airq_struct *airq, bool floating)
 {
 	inc_irq_stat(IRQIO_APB);
-	if (!ap_suspend_flag)
-		tasklet_schedule(&ap_tasklet);
+	tasklet_schedule(&ap_tasklet);
 }
 
 /**
@@ -425,45 +427,41 @@
  */
 static void ap_tasklet_fn(unsigned long dummy)
 {
-	struct ap_card *ac;
+	int bkt;
 	struct ap_queue *aq;
-	enum ap_wait wait = AP_WAIT_NONE;
+	enum ap_sm_wait wait = AP_SM_WAIT_NONE;
 
 	/* Reset the indicator if interrupts are used. Thus new interrupts can
 	 * be received. Doing it at the beginning of the tasklet is therefore
 	 * important so that no requests on any AP get lost.
 	 */
-	if (ap_using_interrupts())
+	if (ap_irq_flag)
 		xchg(ap_airq.lsi_ptr, 0);
 
-	spin_lock_bh(&ap_list_lock);
-	for_each_ap_card(ac) {
-		for_each_ap_queue(aq, ac) {
-			spin_lock_bh(&aq->lock);
-			wait = min(wait, ap_sm_event_loop(aq, AP_EVENT_POLL));
-			spin_unlock_bh(&aq->lock);
-		}
+	spin_lock_bh(&ap_queues_lock);
+	hash_for_each(ap_queues, bkt, aq, hnode) {
+		spin_lock_bh(&aq->lock);
+		wait = min(wait, ap_sm_event_loop(aq, AP_SM_EVENT_POLL));
+		spin_unlock_bh(&aq->lock);
 	}
-	spin_unlock_bh(&ap_list_lock);
+	spin_unlock_bh(&ap_queues_lock);
 
 	ap_wait(wait);
 }
 
 static int ap_pending_requests(void)
 {
-	struct ap_card *ac;
+	int bkt;
 	struct ap_queue *aq;
 
-	spin_lock_bh(&ap_list_lock);
-	for_each_ap_card(ac) {
-		for_each_ap_queue(aq, ac) {
-			if (aq->queue_count == 0)
-				continue;
-			spin_unlock_bh(&ap_list_lock);
-			return 1;
-		}
+	spin_lock_bh(&ap_queues_lock);
+	hash_for_each(ap_queues, bkt, aq, hnode) {
+		if (aq->queue_count == 0)
+			continue;
+		spin_unlock_bh(&ap_queues_lock);
+		return 1;
 	}
-	spin_unlock_bh(&ap_list_lock);
+	spin_unlock_bh(&ap_queues_lock);
 	return 0;
 }
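
The card/queue list walks are replaced throughout this file by a hashtable of queue devices keyed by qid; DEFINE_HASHTABLE(ap_queues, 8) above creates 2^8 buckets. The <linux/hashtable.h> idiom on a hypothetical object (note that ap_get_qdev() below walks all buckets with hash_for_each(), whereas hash_for_each_possible() would visit only the key's bucket):

	#include <linux/hashtable.h>
	#include <linux/spinlock.h>

	struct obj {
		int key;
		struct hlist_node hnode;
	};

	static DEFINE_HASHTABLE(objs, 8);	/* 2^8 buckets */
	static DEFINE_SPINLOCK(objs_lock);

	static void obj_add(struct obj *o)
	{
		spin_lock_bh(&objs_lock);
		hash_add(objs, &o->hnode, o->key);	/* key selects the bucket */
		spin_unlock_bh(&objs_lock);
	}

	static struct obj *obj_find(int key)
	{
		struct obj *o;

		spin_lock_bh(&objs_lock);
		hash_for_each_possible(objs, o, hnode, key)
			if (o->key == key)
				goto out;
		o = NULL;
	out:
		spin_unlock_bh(&objs_lock);
		return o;
	}
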
 
@@ -486,7 +484,7 @@
 	while (!kthread_should_stop()) {
 		add_wait_queue(&ap_poll_wait, &wait);
 		set_current_state(TASK_INTERRUPTIBLE);
-		if (ap_suspend_flag || !ap_pending_requests()) {
+		if (!ap_pending_requests()) {
 			schedule();
 			try_to_freeze();
 		}
@@ -507,7 +505,7 @@
 {
 	int rc;
 
-	if (ap_using_interrupts() || ap_poll_kthread)
+	if (ap_irq_flag || ap_poll_kthread)
 		return 0;
 	mutex_lock(&ap_poll_thread_mutex);
 	ap_poll_kthread = kthread_run(ap_poll_thread, NULL, "appoll");
@@ -587,51 +585,6 @@
 	return retval;
 }
 
-static int ap_dev_suspend(struct device *dev)
-{
-	struct ap_device *ap_dev = to_ap_dev(dev);
-
-	if (ap_dev->drv && ap_dev->drv->suspend)
-		ap_dev->drv->suspend(ap_dev);
-	return 0;
-}
-
-static int ap_dev_resume(struct device *dev)
-{
-	struct ap_device *ap_dev = to_ap_dev(dev);
-
-	if (ap_dev->drv && ap_dev->drv->resume)
-		ap_dev->drv->resume(ap_dev);
-	return 0;
-}
-
-static void ap_bus_suspend(void)
-{
-	AP_DBF(DBF_DEBUG, "%s running\n", __func__);
-
-	ap_suspend_flag = 1;
-	/*
-	 * Disable scanning for devices, thus we do not want to scan
-	 * for them after removing.
-	 */
-	flush_work(&ap_scan_work);
-	tasklet_disable(&ap_tasklet);
-}
-
-static int __ap_card_devices_unregister(struct device *dev, void *dummy)
-{
-	if (is_card_dev(dev))
-		device_unregister(dev);
-	return 0;
-}
-
-static int __ap_queue_devices_unregister(struct device *dev, void *dummy)
-{
-	if (is_queue_dev(dev))
-		device_unregister(dev);
-	return 0;
-}
-
 static int __ap_queue_devices_with_id_unregister(struct device *dev, void *data)
 {
 	if (is_queue_dev(dev) &&
@@ -640,67 +593,10 @@
 	return 0;
 }
 
-static void ap_bus_resume(void)
-{
-	int rc;
-
-	AP_DBF(DBF_DEBUG, "%s running\n", __func__);
-
-	/* remove all queue devices */
-	bus_for_each_dev(&ap_bus_type, NULL, NULL,
-			 __ap_queue_devices_unregister);
-	/* remove all card devices */
-	bus_for_each_dev(&ap_bus_type, NULL, NULL,
-			 __ap_card_devices_unregister);
-
-	/* Reset thin interrupt setting */
-	if (ap_interrupts_available() && !ap_using_interrupts()) {
-		rc = register_adapter_interrupt(&ap_airq);
-		ap_airq_flag = (rc == 0);
-	}
-	if (!ap_interrupts_available() && ap_using_interrupts()) {
-		unregister_adapter_interrupt(&ap_airq);
-		ap_airq_flag = 0;
-	}
-	/* Reset domain */
-	if (!user_set_domain)
-		ap_domain_index = -1;
-	/* Get things going again */
-	ap_suspend_flag = 0;
-	if (ap_airq_flag)
-		xchg(ap_airq.lsi_ptr, 0);
-	tasklet_enable(&ap_tasklet);
-	queue_work(system_long_wq, &ap_scan_work);
-}
-
-static int ap_power_event(struct notifier_block *this, unsigned long event,
-			  void *ptr)
-{
-	switch (event) {
-	case PM_HIBERNATION_PREPARE:
-	case PM_SUSPEND_PREPARE:
-		ap_bus_suspend();
-		break;
-	case PM_POST_HIBERNATION:
-	case PM_POST_SUSPEND:
-		ap_bus_resume();
-		break;
-	default:
-		break;
-	}
-	return NOTIFY_DONE;
-}
-static struct notifier_block ap_power_notifier = {
-	.notifier_call = ap_power_event,
-};
-
-static SIMPLE_DEV_PM_OPS(ap_bus_pm_ops, ap_dev_suspend, ap_dev_resume);
-
 static struct bus_type ap_bus_type = {
 	.name = "ap",
 	.match = &ap_bus_match,
 	.uevent = &ap_uevent,
-	.pm = &ap_bus_pm_ops,
 };
 
 static int __ap_revise_reserved(struct device *dev, void *dummy)
@@ -717,8 +613,8 @@
 		drvres = to_ap_drv(dev->driver)->flags
 			& AP_DRIVER_FLAG_DEFAULT;
 		if (!!devres != !!drvres) {
-			AP_DBF(DBF_DEBUG, "reprobing queue=%02x.%04x\n",
-			       card, queue);
+			AP_DBF_DBG("reprobing queue=%02x.%04x\n",
+				   card, queue);
 			rc = device_reprobe(dev);
 		}
 	}
@@ -775,7 +671,10 @@
 {
 	struct ap_device *ap_dev = to_ap_dev(dev);
 	struct ap_driver *ap_drv = to_ap_drv(dev->driver);
-	int card, queue, devres, drvres, rc;
+	int card, queue, devres, drvres, rc = -ENODEV;
+
+	if (!get_device(dev))
+		return rc;
 
 	if (is_queue_dev(dev)) {
 		/*
@@ -792,31 +691,30 @@
 		mutex_unlock(&ap_perms_mutex);
 		drvres = ap_drv->flags & AP_DRIVER_FLAG_DEFAULT;
 		if (!!devres != !!drvres)
-			return -ENODEV;
+			goto out;
 	}
 
 	/* Add queue/card to list of active queues/cards */
-	spin_lock_bh(&ap_list_lock);
-	if (is_card_dev(dev))
-		list_add(&to_ap_card(dev)->list, &ap_card_list);
-	else
-		list_add(&to_ap_queue(dev)->list,
-			 &to_ap_queue(dev)->card->queues);
-	spin_unlock_bh(&ap_list_lock);
+	spin_lock_bh(&ap_queues_lock);
+	if (is_queue_dev(dev))
+		hash_add(ap_queues, &to_ap_queue(dev)->hnode,
+			 to_ap_queue(dev)->qid);
+	spin_unlock_bh(&ap_queues_lock);
 
 	ap_dev->drv = ap_drv;
 	rc = ap_drv->probe ? ap_drv->probe(ap_dev) : -ENODEV;
 
 	if (rc) {
-		spin_lock_bh(&ap_list_lock);
-		if (is_card_dev(dev))
-			list_del_init(&to_ap_card(dev)->list);
-		else
-			list_del_init(&to_ap_queue(dev)->list);
-		spin_unlock_bh(&ap_list_lock);
+		spin_lock_bh(&ap_queues_lock);
+		if (is_queue_dev(dev))
+			hash_del(&to_ap_queue(dev)->hnode);
+		spin_unlock_bh(&ap_queues_lock);
 		ap_dev->drv = NULL;
 	}
 
+out:
+	if (rc)
+		put_device(dev);
 	return rc;
 }
 
@@ -838,24 +736,40 @@
 		ap_queue_remove(to_ap_queue(dev));
 
 	/* Remove queue/card from list of active queues/cards */
-	spin_lock_bh(&ap_list_lock);
-	if (is_card_dev(dev))
-		list_del_init(&to_ap_card(dev)->list);
-	else
-		list_del_init(&to_ap_queue(dev)->list);
-	spin_unlock_bh(&ap_list_lock);
+	spin_lock_bh(&ap_queues_lock);
+	if (is_queue_dev(dev))
+		hash_del(&to_ap_queue(dev)->hnode);
+	spin_unlock_bh(&ap_queues_lock);
+
+	put_device(dev);
 
 	return 0;
 }
 
+struct ap_queue *ap_get_qdev(ap_qid_t qid)
+{
+	int bkt;
+	struct ap_queue *aq;
+
+	spin_lock_bh(&ap_queues_lock);
+	hash_for_each(ap_queues, bkt, aq, hnode) {
+		if (aq->qid == qid) {
+			get_device(&aq->ap_dev.device);
+			spin_unlock_bh(&ap_queues_lock);
+			return aq;
+		}
+	}
+	spin_unlock_bh(&ap_queues_lock);
+
+	return NULL;
+}
+EXPORT_SYMBOL(ap_get_qdev);
+
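
Note that ap_get_qdev() returns the queue with a device reference held via get_device(), so every successful lookup must be paired with a put_device(). Hypothetical caller:

	static int use_queue(ap_qid_t qid)
	{
		struct ap_queue *aq = ap_get_qdev(qid);

		if (!aq)
			return -ENODEV;		/* no such APQN registered */

		/* ... use aq; the device cannot vanish underneath us ... */

		put_device(&aq->ap_dev.device);	/* drop the lookup reference */
		return 0;
	}
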
 int ap_driver_register(struct ap_driver *ap_drv, struct module *owner,
 		       char *name)
 {
 	struct device_driver *drv = &ap_drv->driver;
 
-	if (!initialised)
-		return -ENODEV;
-
 	drv->bus = &ap_bus_type;
 	drv->probe = ap_device_probe;
 	drv->remove = ap_device_remove;
@@ -873,8 +787,6 @@
 
 void ap_bus_force_rescan(void)
 {
-	if (ap_suspend_flag)
-		return;
 	/* processing an asynchronous bus rescan */
 	del_timer(&ap_config_timer);
 	queue_work(system_long_wq, &ap_scan_work);
@@ -887,7 +799,7 @@
 */
 void ap_bus_cfg_chg(void)
 {
-	AP_DBF(DBF_INFO, "%s config change, forcing bus rescan\n", __func__);
+	AP_DBF_DBG("%s config change, forcing bus rescan\n", __func__);
 
 	ap_bus_force_rescan();
 }
@@ -1021,7 +933,7 @@
 
 static ssize_t ap_domain_show(struct bus_type *bus, char *buf)
 {
-	return snprintf(buf, PAGE_SIZE, "%d\n", ap_domain_index);
+	return scnprintf(buf, PAGE_SIZE, "%d\n", ap_domain_index);
 }
 
 static ssize_t ap_domain_store(struct bus_type *bus,
@@ -1033,11 +945,12 @@
 	    domain < 0 || domain > ap_max_domain_id ||
 	    !test_bit_inv(domain, ap_perms.aqm))
 		return -EINVAL;
+
 	spin_lock_bh(&ap_domain_lock);
 	ap_domain_index = domain;
 	spin_unlock_bh(&ap_domain_lock);
 
-	AP_DBF(DBF_DEBUG, "stored new default domain=%d\n", domain);
+	AP_DBF_INFO("stored new default domain=%d\n", domain);
 
 	return count;
 }
@@ -1046,60 +959,60 @@
 
 static ssize_t ap_control_domain_mask_show(struct bus_type *bus, char *buf)
 {
-	if (!ap_configuration)	/* QCI not supported */
-		return snprintf(buf, PAGE_SIZE, "not supported\n");
+	if (!ap_qci_info)	/* QCI not supported */
+		return scnprintf(buf, PAGE_SIZE, "not supported\n");
 
-	return snprintf(buf, PAGE_SIZE,
-			"0x%08x%08x%08x%08x%08x%08x%08x%08x\n",
-			ap_configuration->adm[0], ap_configuration->adm[1],
-			ap_configuration->adm[2], ap_configuration->adm[3],
-			ap_configuration->adm[4], ap_configuration->adm[5],
-			ap_configuration->adm[6], ap_configuration->adm[7]);
+	return scnprintf(buf, PAGE_SIZE,
+			 "0x%08x%08x%08x%08x%08x%08x%08x%08x\n",
+			 ap_qci_info->adm[0], ap_qci_info->adm[1],
+			 ap_qci_info->adm[2], ap_qci_info->adm[3],
+			 ap_qci_info->adm[4], ap_qci_info->adm[5],
+			 ap_qci_info->adm[6], ap_qci_info->adm[7]);
 }
 
 static BUS_ATTR_RO(ap_control_domain_mask);
 
 static ssize_t ap_usage_domain_mask_show(struct bus_type *bus, char *buf)
 {
-	if (!ap_configuration)	/* QCI not supported */
-		return snprintf(buf, PAGE_SIZE, "not supported\n");
+	if (!ap_qci_info)	/* QCI not supported */
+		return scnprintf(buf, PAGE_SIZE, "not supported\n");
 
-	return snprintf(buf, PAGE_SIZE,
-			"0x%08x%08x%08x%08x%08x%08x%08x%08x\n",
-			ap_configuration->aqm[0], ap_configuration->aqm[1],
-			ap_configuration->aqm[2], ap_configuration->aqm[3],
-			ap_configuration->aqm[4], ap_configuration->aqm[5],
-			ap_configuration->aqm[6], ap_configuration->aqm[7]);
+	return scnprintf(buf, PAGE_SIZE,
+			 "0x%08x%08x%08x%08x%08x%08x%08x%08x\n",
+			 ap_qci_info->aqm[0], ap_qci_info->aqm[1],
+			 ap_qci_info->aqm[2], ap_qci_info->aqm[3],
+			 ap_qci_info->aqm[4], ap_qci_info->aqm[5],
+			 ap_qci_info->aqm[6], ap_qci_info->aqm[7]);
 }
 
 static BUS_ATTR_RO(ap_usage_domain_mask);
 
 static ssize_t ap_adapter_mask_show(struct bus_type *bus, char *buf)
 {
-	if (!ap_configuration)	/* QCI not supported */
-		return snprintf(buf, PAGE_SIZE, "not supported\n");
+	if (!ap_qci_info)	/* QCI not supported */
+		return scnprintf(buf, PAGE_SIZE, "not supported\n");
 
-	return snprintf(buf, PAGE_SIZE,
-			"0x%08x%08x%08x%08x%08x%08x%08x%08x\n",
-			ap_configuration->apm[0], ap_configuration->apm[1],
-			ap_configuration->apm[2], ap_configuration->apm[3],
-			ap_configuration->apm[4], ap_configuration->apm[5],
-			ap_configuration->apm[6], ap_configuration->apm[7]);
+	return scnprintf(buf, PAGE_SIZE,
+			 "0x%08x%08x%08x%08x%08x%08x%08x%08x\n",
+			 ap_qci_info->apm[0], ap_qci_info->apm[1],
+			 ap_qci_info->apm[2], ap_qci_info->apm[3],
+			 ap_qci_info->apm[4], ap_qci_info->apm[5],
+			 ap_qci_info->apm[6], ap_qci_info->apm[7]);
 }
 
 static BUS_ATTR_RO(ap_adapter_mask);
 
 static ssize_t ap_interrupts_show(struct bus_type *bus, char *buf)
 {
-	return snprintf(buf, PAGE_SIZE, "%d\n",
-			ap_using_interrupts() ? 1 : 0);
+	return scnprintf(buf, PAGE_SIZE, "%d\n",
+			 ap_irq_flag ? 1 : 0);
 }
 
 static BUS_ATTR_RO(ap_interrupts);
 
 static ssize_t config_time_show(struct bus_type *bus, char *buf)
 {
-	return snprintf(buf, PAGE_SIZE, "%d\n", ap_config_time);
+	return scnprintf(buf, PAGE_SIZE, "%d\n", ap_config_time);
 }
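
The blanket snprintf() -> scnprintf() conversion in these show handlers matters for the return value: snprintf() returns the length the output would have had without truncation (possibly more than PAGE_SIZE), while scnprintf() returns the number of bytes actually written, which is what a sysfs show() hook must report. For instance:

	/* hypothetical show() hook */
	static ssize_t demo_show(struct bus_type *bus, char *buf)
	{
		/* never returns more than what actually landed in buf */
		return scnprintf(buf, PAGE_SIZE, "%d\n", 42);
	}
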
 
 static ssize_t config_time_store(struct bus_type *bus,
@@ -1118,7 +1031,7 @@
 
 static ssize_t poll_thread_show(struct bus_type *bus, char *buf)
 {
-	return snprintf(buf, PAGE_SIZE, "%d\n", ap_poll_kthread ? 1 : 0);
+	return scnprintf(buf, PAGE_SIZE, "%d\n", ap_poll_kthread ? 1 : 0);
 }
 
 static ssize_t poll_thread_store(struct bus_type *bus,
@@ -1141,7 +1054,7 @@
 
 static ssize_t poll_timeout_show(struct bus_type *bus, char *buf)
 {
-	return snprintf(buf, PAGE_SIZE, "%llu\n", poll_timeout);
+	return scnprintf(buf, PAGE_SIZE, "%llu\n", poll_timeout);
 }
 
 static ssize_t poll_timeout_store(struct bus_type *bus, const char *buf,
@@ -1170,27 +1083,28 @@
 
 static ssize_t ap_max_domain_id_show(struct bus_type *bus, char *buf)
 {
-	int max_domain_id;
-
-	if (ap_configuration)
-		max_domain_id = ap_max_domain_id ? : -1;
-	else
-		max_domain_id = 15;
-	return snprintf(buf, PAGE_SIZE, "%d\n", max_domain_id);
+	return scnprintf(buf, PAGE_SIZE, "%d\n", ap_max_domain_id);
 }
 
 static BUS_ATTR_RO(ap_max_domain_id);
 
+static ssize_t ap_max_adapter_id_show(struct bus_type *bus, char *buf)
+{
+	return scnprintf(buf, PAGE_SIZE, "%d\n", ap_max_adapter_id);
+}
+
+static BUS_ATTR_RO(ap_max_adapter_id);
+
 static ssize_t apmask_show(struct bus_type *bus, char *buf)
 {
 	int rc;
 
 	if (mutex_lock_interruptible(&ap_perms_mutex))
 		return -ERESTARTSYS;
-	rc = snprintf(buf, PAGE_SIZE,
-		      "0x%016lx%016lx%016lx%016lx\n",
-		      ap_perms.apm[0], ap_perms.apm[1],
-		      ap_perms.apm[2], ap_perms.apm[3]);
+	rc = scnprintf(buf, PAGE_SIZE,
+		       "0x%016lx%016lx%016lx%016lx\n",
+		       ap_perms.apm[0], ap_perms.apm[1],
+		       ap_perms.apm[2], ap_perms.apm[3]);
 	mutex_unlock(&ap_perms_mutex);
 
 	return rc;
@@ -1218,10 +1132,10 @@
 
 	if (mutex_lock_interruptible(&ap_perms_mutex))
 		return -ERESTARTSYS;
-	rc = snprintf(buf, PAGE_SIZE,
-		      "0x%016lx%016lx%016lx%016lx\n",
-		      ap_perms.aqm[0], ap_perms.aqm[1],
-		      ap_perms.aqm[2], ap_perms.aqm[3]);
+	rc = scnprintf(buf, PAGE_SIZE,
+		       "0x%016lx%016lx%016lx%016lx\n",
+		       ap_perms.aqm[0], ap_perms.aqm[1],
+		       ap_perms.aqm[2], ap_perms.aqm[3]);
 	mutex_unlock(&ap_perms_mutex);
 
 	return rc;
@@ -1253,6 +1167,7 @@
 	&bus_attr_ap_interrupts,
 	&bus_attr_poll_timeout,
 	&bus_attr_ap_max_domain_id,
+	&bus_attr_ap_max_adapter_id,
 	&bus_attr_apmask,
 	&bus_attr_aqmask,
 	NULL,
@@ -1264,47 +1179,42 @@
  */
 static void ap_select_domain(void)
 {
-	int count, max_count, best_domain;
 	struct ap_queue_status status;
-	int i, j;
+	int card, dom;
 
 	/*
-	 * We want to use a single domain. Either the one specified with
-	 * the "domain=" parameter or the domain with the maximum number
-	 * of devices.
+	 * Choose the default domain. Either the one specified with
+	 * the "domain=" parameter or the first domain with at least
+	 * one valid APQN.
 	 */
 	spin_lock_bh(&ap_domain_lock);
 	if (ap_domain_index >= 0) {
 		/* Domain has already been selected. */
-		spin_unlock_bh(&ap_domain_lock);
-		return;
+		goto out;
 	}
-	best_domain = -1;
-	max_count = 0;
-	for (i = 0; i < AP_DOMAINS; i++) {
-		if (!ap_test_config_usage_domain(i) ||
-		    !test_bit_inv(i, ap_perms.aqm))
+	for (dom = 0; dom <= ap_max_domain_id; dom++) {
+		if (!ap_test_config_usage_domain(dom) ||
+		    !test_bit_inv(dom, ap_perms.aqm))
 			continue;
-		count = 0;
-		for (j = 0; j < AP_DEVICES; j++) {
-			if (!ap_test_config_card_id(j))
+		for (card = 0; card <= ap_max_adapter_id; card++) {
+			if (!ap_test_config_card_id(card) ||
+			    !test_bit_inv(card, ap_perms.apm))
 				continue;
-			status = ap_test_queue(AP_MKQID(j, i),
+			status = ap_test_queue(AP_MKQID(card, dom),
 					       ap_apft_available(),
 					       NULL);
-			if (status.response_code != AP_RESPONSE_NORMAL)
-				continue;
-			count++;
+			if (status.response_code == AP_RESPONSE_NORMAL)
+				break;
 		}
-		if (count > max_count) {
-			max_count = count;
-			best_domain = i;
-		}
+		if (card <= ap_max_adapter_id)
+			break;
 	}
-	if (best_domain >= 0) {
-		ap_domain_index = best_domain;
-		AP_DBF(DBF_DEBUG, "new ap_domain_index=%d\n", ap_domain_index);
+	if (dom <= ap_max_domain_id) {
+		ap_domain_index = dom;
+		AP_DBF_INFO("%s new default domain is %d\n",
+			    __func__, ap_domain_index);
 	}
+out:
 	spin_unlock_bh(&ap_domain_lock);
 }
 
@@ -1318,8 +1228,11 @@
 	int comp_type = 0;
 
 	/* < CEX2A is not supported */
-	if (rawtype < AP_DEVICE_TYPE_CEX2A)
+	if (rawtype < AP_DEVICE_TYPE_CEX2A) {
+		AP_DBF_WARN("get_comp_type queue=%02x.%04x unsupported type %d\n",
+			    AP_QID_CARD(qid), AP_QID_QUEUE(qid), rawtype);
 		return 0;
+	}
 	/* up to CEX7 known and fully supported */
 	if (rawtype <= AP_DEVICE_TYPE_CEX7)
 		return rawtype;
@@ -1341,11 +1254,12 @@
 			comp_type = apinfo.cat;
 	}
 	if (!comp_type)
-		AP_DBF(DBF_WARN, "queue=%02x.%04x unable to map type %d\n",
-		       AP_QID_CARD(qid), AP_QID_QUEUE(qid), rawtype);
+		AP_DBF_WARN("get_comp_type queue=%02x.%04x unable to map type %d\n",
+			    AP_QID_CARD(qid), AP_QID_QUEUE(qid), rawtype);
 	else if (comp_type != rawtype)
-		AP_DBF(DBF_INFO, "queue=%02x.%04x map type %d to %d\n",
-		       AP_QID_CARD(qid), AP_QID_QUEUE(qid), rawtype, comp_type);
+		AP_DBF_INFO("get_comp_type queue=%02x.%04x map type %d to %d\n",
+			    AP_QID_CARD(qid), AP_QID_QUEUE(qid),
+			    rawtype, comp_type);
 	return comp_type;
 }
 
@@ -1379,156 +1293,280 @@
 
 /*
  * Helper function for ap_scan_bus().
- * Does the scan bus job for the given adapter id.
+ * Remove card device and associated queue devices.
  */
-static void _ap_scan_bus_adapter(int id)
+static inline void ap_scan_rm_card_dev_and_queue_devs(struct ap_card *ac)
 {
+	bus_for_each_dev(&ap_bus_type, NULL,
+			 (void *)(long) ac->id,
+			 __ap_queue_devices_with_id_unregister);
+	device_unregister(&ac->ap_dev.device);
+}
+
+/*
+ * Helper function for ap_scan_bus().
+ * Does the scan bus job for all the domains within
+ * a valid adapter given by an ap_card ptr.
+ */
+static inline void ap_scan_domains(struct ap_card *ac)
+{
+	bool decfg;
 	ap_qid_t qid;
 	unsigned int func;
-	struct ap_card *ac;
 	struct device *dev;
 	struct ap_queue *aq;
-	int rc, dom, depth, type, comp_type, borked;
-
-	/* check if there is a card device registered with this id */
-	dev = bus_find_device(&ap_bus_type, NULL,
-			      (void *)(long) id,
-			      __match_card_device_with_id);
-	ac = dev ? to_ap_card(dev) : NULL;
-	if (!ap_test_config_card_id(id)) {
-		if (dev) {
-			/* Card device has been removed from configuration */
-			bus_for_each_dev(&ap_bus_type, NULL,
-					 (void *)(long) id,
-					 __ap_queue_devices_with_id_unregister);
-			device_unregister(dev);
-			put_device(dev);
-		}
-		return;
-	}
+	int rc, dom, depth, type;
 
 	/*
-	 * This card id is enabled in the configuration. If we already have
-	 * a card device with this id, check if type and functions are still
-	 * the very same. Also verify that at least one queue is available.
+	 * Go through the configuration for the domains and compare them
+	 * to the existing queue devices. Also take care of the config
+	 * and error state for the queue devices.
 	 */
-	if (ac) {
-		/* find the first valid queue */
-		for (dom = 0; dom < AP_DOMAINS; dom++) {
-			qid = AP_MKQID(id, dom);
-			if (ap_query_queue(qid, &depth, &type, &func) == 0)
-				break;
-		}
-		borked = 0;
-		if (dom >= AP_DOMAINS) {
-			/* no accessible queue on this card */
-			borked = 1;
-		} else if (ac->raw_hwtype != type) {
-			/* card type has changed */
-			AP_DBF(DBF_INFO, "card=%02x type changed.\n", id);
-			borked = 1;
-		} else if (ac->functions != func) {
-			/* card functions have changed */
-			AP_DBF(DBF_INFO, "card=%02x functions changed.\n", id);
-			borked = 1;
-		}
-		if (borked) {
-			/* unregister card device and associated queues */
-			bus_for_each_dev(&ap_bus_type, NULL,
-					 (void *)(long) id,
-					 __ap_queue_devices_with_id_unregister);
-			device_unregister(dev);
-			put_device(dev);
-			/* go back if there is no valid queue on this card */
-			if (dom >= AP_DOMAINS)
-				return;
-			ac = NULL;
-		}
-	}
 
-	/*
-	 * Go through all possible queue ids. Check and maybe create or release
-	 * queue devices for this card. If there exists no card device yet,
-	 * create a card device also.
-	 */
-	for (dom = 0; dom < AP_DOMAINS; dom++) {
-		qid = AP_MKQID(id, dom);
+	for (dom = 0; dom <= ap_max_domain_id; dom++) {
+		qid = AP_MKQID(ac->id, dom);
 		dev = bus_find_device(&ap_bus_type, NULL,
 				      (void *)(long) qid,
 				      __match_queue_device_with_qid);
 		aq = dev ? to_ap_queue(dev) : NULL;
 		if (!ap_test_config_usage_domain(dom)) {
 			if (dev) {
-				/* Queue device exists but has been
-				 * removed from configuration.
-				 */
+				AP_DBF_INFO("%s(%d,%d) not in config any more, rm queue device\n",
+					    __func__, ac->id, dom);
 				device_unregister(dev);
 				put_device(dev);
 			}
 			continue;
 		}
-		/* try to fetch infos about this queue */
-		rc = ap_query_queue(qid, &depth, &type, &func);
-		if (dev) {
-			if (rc == -ENODEV)
-				borked = 1;
-			else {
-				spin_lock_bh(&aq->lock);
-				borked = aq->state == AP_STATE_BORKED;
-				spin_unlock_bh(&aq->lock);
-			}
-			if (borked) {
-				/* Remove broken device */
-				AP_DBF(DBF_DEBUG,
-				       "removing broken queue=%02x.%04x\n",
-				       id, dom);
+		/* domain is valid, get info from this APQN */
+		if (!ap_queue_info(qid, &type, &func, &depth, &decfg)) {
+			if (aq) {
+				AP_DBF_INFO(
+					"%s(%d,%d) ap_queue_info() not successful, rm queue device\n",
+					__func__, ac->id, dom);
 				device_unregister(dev);
+				put_device(dev);
 			}
-			put_device(dev);
 			continue;
 		}
-		if (rc)
-			continue;
-		/* a new queue device is needed, check out comp type */
-		comp_type = ap_get_compatible_type(qid, type, func);
-		if (!comp_type)
-			continue;
-		/* maybe a card device needs to be created first */
-		if (!ac) {
-			ac = ap_card_create(id, depth, type, comp_type, func);
-			if (!ac)
+		/* if no queue device exists, create a new one */
+		if (!aq) {
+			aq = ap_queue_create(qid, ac->ap_dev.device_type);
+			if (!aq) {
+				AP_DBF_WARN("%s(%d,%d) ap_queue_create() failed\n",
+					    __func__, ac->id, dom);
 				continue;
-			ac->ap_dev.device.bus = &ap_bus_type;
-			ac->ap_dev.device.parent = ap_root_device;
-			dev_set_name(&ac->ap_dev.device, "card%02x", id);
-			/* Register card device with AP bus */
-			rc = device_register(&ac->ap_dev.device);
+			}
+			aq->card = ac;
+			aq->config = !decfg;
+			dev = &aq->ap_dev.device;
+			dev->bus = &ap_bus_type;
+			dev->parent = &ac->ap_dev.device;
+			dev_set_name(dev, "%02x.%04x", ac->id, dom);
+			/* register queue device */
+			rc = device_register(dev);
 			if (rc) {
-				put_device(&ac->ap_dev.device);
-				ac = NULL;
-				break;
+				AP_DBF_WARN("%s(%d,%d) device_register() failed\n",
+					    __func__, ac->id, dom);
+				goto put_dev_and_continue;
 			}
 			/* get it and thus adjust reference counter */
-			get_device(&ac->ap_dev.device);
+			get_device(dev);
+			if (decfg)
+				AP_DBF_INFO("%s(%d,%d) new (decfg) queue device created\n",
+					    __func__, ac->id, dom);
+			else
+				AP_DBF_INFO("%s(%d,%d) new queue device created\n",
+					    __func__, ac->id, dom);
+			goto put_dev_and_continue;
 		}
-		/* now create the new queue device */
-		aq = ap_queue_create(qid, comp_type);
-		if (!aq)
-			continue;
-		aq->card = ac;
-		aq->ap_dev.device.bus = &ap_bus_type;
-		aq->ap_dev.device.parent = &ac->ap_dev.device;
-		dev_set_name(&aq->ap_dev.device, "%02x.%04x", id, dom);
-		/* Register queue device */
-		rc = device_register(&aq->ap_dev.device);
-		if (rc) {
-			put_device(&aq->ap_dev.device);
-			continue;
+		/* Check config state on the already existing queue device */
+		spin_lock_bh(&aq->lock);
+		if (decfg && aq->config) {
+			/* config off this queue device */
+			aq->config = false;
+			if (aq->dev_state > AP_DEV_STATE_UNINITIATED) {
+				aq->dev_state = AP_DEV_STATE_ERROR;
+				aq->last_err_rc = AP_RESPONSE_DECONFIGURED;
+			}
+			spin_unlock_bh(&aq->lock);
+			AP_DBF_INFO("%s(%d,%d) queue device config off\n",
+				    __func__, ac->id, dom);
+			/* 'receive' pending messages with -EAGAIN */
+			ap_flush_queue(aq);
+			goto put_dev_and_continue;
 		}
-	} /* end domain loop */
+		if (!decfg && !aq->config) {
+			/* config on this queue device */
+			aq->config = true;
+			if (aq->dev_state > AP_DEV_STATE_UNINITIATED) {
+				aq->dev_state = AP_DEV_STATE_OPERATING;
+				aq->sm_state = AP_SM_STATE_RESET_START;
+			}
+			spin_unlock_bh(&aq->lock);
+			AP_DBF_INFO("%s(%d,%d) queue device config on\n",
+				    __func__, ac->id, dom);
+			goto put_dev_and_continue;
+		}
+		/* handle other error states */
+		if (!decfg && aq->dev_state == AP_DEV_STATE_ERROR) {
+			spin_unlock_bh(&aq->lock);
+			/* 'receive' pending messages with -EAGAIN */
+			ap_flush_queue(aq);
+			/* re-init (with reset) the queue device */
+			ap_queue_init_state(aq);
+			AP_DBF_INFO("%s(%d,%d) queue device reinit enforced\n",
+				    __func__, ac->id, dom);
+			goto put_dev_and_continue;
+		}
+		spin_unlock_bh(&aq->lock);
+put_dev_and_continue:
+		put_device(dev);
+	}
+}
 
-	if (ac)
-		put_device(&ac->ap_dev.device);
+/*
+ * Helper function for ap_scan_bus().
+ * Does the scan bus job for the given adapter id.
+ */
+static inline void ap_scan_adapter(int ap)
+{
+	bool decfg;
+	ap_qid_t qid;
+	unsigned int func;
+	struct device *dev;
+	struct ap_card *ac;
+	int rc, dom, depth, type, comp_type;
+
+	/* Is there currently a card device for this adapter? */
+	dev = bus_find_device(&ap_bus_type, NULL,
+			      (void *)(long) ap,
+			      __match_card_device_with_id);
+	ac = dev ? to_ap_card(dev) : NULL;
+
+	/* Adapter not in configuration? */
+	if (!ap_test_config_card_id(ap)) {
+		if (ac) {
+			AP_DBF_INFO("%s(%d) ap not in config any more, rm card and queue devices\n",
+				    __func__, ap);
+			ap_scan_rm_card_dev_and_queue_devs(ac);
+			put_device(dev);
+		}
+		return;
+	}
+
+	/*
+	 * Adapter ap is valid in the current configuration. So do some checks:
+	 * If no card device exists, build one. If a card device exists, check
+	 * for type and functions changed. For all this we need to find a valid
+	 * APQN first.
+	 */
+
+	for (dom = 0; dom <= ap_max_domain_id; dom++)
+		if (ap_test_config_usage_domain(dom)) {
+			qid = AP_MKQID(ap, dom);
+			if (ap_queue_info(qid, &type, &func, &depth, &decfg))
+				break;
+		}
+	if (dom > ap_max_domain_id) {
+		/* Could not find a valid APQN for this adapter */
+		if (ac) {
+			AP_DBF_INFO(
+				"%s(%d) no type info (no APQN found), rm card and queue devices\n",
+				__func__, ap);
+			ap_scan_rm_card_dev_and_queue_devs(ac);
+			put_device(dev);
+		} else {
+			AP_DBF_DBG("%s(%d) no type info (no APQN found), ignored\n",
+				   __func__, ap);
+		}
+		return;
+	}
+	if (!type) {
+		/* No adapter type info available, an unusable adapter */
+		if (ac) {
+			AP_DBF_INFO("%s(%d) no valid type (0) info, rm card and queue devices\n",
+				    __func__, ap);
+			ap_scan_rm_card_dev_and_queue_devs(ac);
+			put_device(dev);
+		} else {
+			AP_DBF_DBG("%s(%d) no valid type (0) info, ignored\n",
+				   __func__, ap);
+		}
+		return;
+	}
+
+	if (ac) {
+		/* Check APQN against existing card device for changes */
+		if (ac->raw_hwtype != type) {
+			AP_DBF_INFO("%s(%d) hwtype %d changed, rm card and queue devices\n",
+				    __func__, ap, type);
+			ap_scan_rm_card_dev_and_queue_devs(ac);
+			put_device(dev);
+			ac = NULL;
+		} else if (ac->functions != func) {
+			AP_DBF_INFO("%s(%d) functions 0x%08x changed, rm card and queue devices\n",
+				    __func__, ap, func);
+			ap_scan_rm_card_dev_and_queue_devs(ac);
+			put_device(dev);
+			ac = NULL;
+		} else {
+			if (decfg && ac->config) {
+				ac->config = false;
+				AP_DBF_INFO("%s(%d) card device config off\n",
+					    __func__, ap);
+
+			}
+			if (!decfg && !ac->config) {
+				ac->config = true;
+				AP_DBF_INFO("%s(%d) card device config on\n",
+					    __func__, ap);
+			}
+		}
+	}
+
+	if (!ac) {
+		/* Build a new card device */
+		comp_type = ap_get_compatible_type(qid, type, func);
+		if (!comp_type) {
+			AP_DBF_WARN("%s(%d) type %d, can't get compatibility type\n",
+				    __func__, ap, type);
+			return;
+		}
+		ac = ap_card_create(ap, depth, type, comp_type, func);
+		if (!ac) {
+			AP_DBF_WARN("%s(%d) ap_card_create() failed\n",
+				    __func__, ap);
+			return;
+		}
+		ac->config = !decfg;
+		dev = &ac->ap_dev.device;
+		dev->bus = &ap_bus_type;
+		dev->parent = ap_root_device;
+		dev_set_name(dev, "card%02x", ap);
+		/* Register the new card device with AP bus */
+		rc = device_register(dev);
+		if (rc) {
+			AP_DBF_WARN("%s(%d) device_register() failed\n",
+				    __func__, ap);
+			put_device(dev);
+			return;
+		}
+		/* get it and thus adjust reference counter */
+		get_device(dev);
+		if (decfg)
+			AP_DBF_INFO("%s(%d) new (decfg) card device type=%d func=0x%08x created\n",
+				    __func__, ap, type, func);
+		else
+			AP_DBF_INFO("%s(%d) new card device type=%d func=0x%08x created\n",
+				    __func__, ap, type, func);
+	}
+
+	/* Verify the domains and the queue devices for this card */
+	ap_scan_domains(ac);
+
+	/* release the card device */
+	put_device(&ac->ap_dev.device);
 }
 
 /**
@@ -1537,16 +1575,16 @@
  */
 static void ap_scan_bus(struct work_struct *unused)
 {
-	int id;
+	int ap;
 
-	AP_DBF(DBF_DEBUG, "%s running\n", __func__);
-
-	ap_query_configuration(ap_configuration);
+	ap_fetch_qci_info(ap_qci_info);
 	ap_select_domain();
 
+	AP_DBF_DBG("%s running\n", __func__);
+
 	/* loop over all possible adapters */
-	for (id = 0; id < AP_DEVICES; id++)
-		_ap_scan_bus_adapter(id);
+	for (ap = 0; ap <= ap_max_adapter_id; ap++)
+		ap_scan_adapter(ap);
 
 	/* check if there is at least one queue available with default domain */
 	if (ap_domain_index >= 0) {
@@ -1557,9 +1595,8 @@
 		if (dev)
 			put_device(dev);
 		else
-			AP_DBF(DBF_INFO,
-			       "no queue device with default domain %d available\n",
-			       ap_domain_index);
+			AP_DBF_INFO("no queue device with default domain %d available\n",
+				    ap_domain_index);
 	}
 
 	mod_timer(&ap_config_timer, jiffies + ap_config_time * HZ);
@@ -1567,8 +1604,6 @@
 
 static void ap_config_timeout(struct timer_list *unused)
 {
-	if (ap_suspend_flag)
-		return;
 	queue_work(system_long_wq, &ap_scan_work);
 }
 
@@ -1611,7 +1646,6 @@
  */
 static int __init ap_module_init(void)
 {
-	int max_domain_id;
 	int rc, i;
 
 	rc = ap_debug_init();
@@ -1623,33 +1657,28 @@
 		return -ENODEV;
 	}
 
+	/* init ap_queue hashtable */
+	hash_init(ap_queues);
+
 	/* set up the AP permissions (ioctls, ap and aq masks) */
 	ap_perms_init();
 
 	/* Get AP configuration data if available */
-	ap_init_configuration();
+	ap_init_qci_info();
 
-	if (ap_configuration)
-		max_domain_id =
-			ap_max_domain_id ? ap_max_domain_id : AP_DOMAINS - 1;
-	else
-		max_domain_id = 15;
-	if (ap_domain_index < -1 || ap_domain_index > max_domain_id ||
+	/* check default domain setting */
+	if (ap_domain_index < -1 || ap_domain_index > ap_max_domain_id ||
 	    (ap_domain_index >= 0 &&
 	     !test_bit_inv(ap_domain_index, ap_perms.aqm))) {
 		pr_warn("%d is not a valid cryptographic domain\n",
 			ap_domain_index);
 		ap_domain_index = -1;
 	}
-	/* In resume callback we need to know if the user had set the domain.
-	 * If so, we can not just reset it.
-	 */
-	if (ap_domain_index >= 0)
-		user_set_domain = 1;
 
+	/* enable interrupts if available */
 	if (ap_interrupts_available()) {
 		rc = register_adapter_interrupt(&ap_airq);
-		ap_airq_flag = (rc == 0);
+		ap_irq_flag = (rc == 0);
 	}
 
 	/* Create /sys/bus/ap. */
@@ -1677,7 +1706,6 @@
 	 */
 	if (MACHINE_IS_VM)
 		poll_timeout = 1500000;
-	spin_lock_init(&ap_poll_timer_lock);
 	hrtimer_init(&ap_poll_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
 	ap_poll_timer.function = ap_poll_timeout;
 
@@ -1688,17 +1716,10 @@
 			goto out_work;
 	}
 
-	rc = register_pm_notifier(&ap_power_notifier);
-	if (rc)
-		goto out_pm;
-
 	queue_work(system_long_wq, &ap_scan_work);
-	initialised = true;
 
 	return 0;
 
-out_pm:
-	ap_poll_thread_stop();
 out_work:
 	hrtimer_cancel(&ap_poll_timer);
 	root_device_unregister(ap_root_device);
@@ -1707,9 +1728,9 @@
 		bus_remove_file(&ap_bus_type, ap_bus_attrs[i]);
 	bus_unregister(&ap_bus_type);
 out:
-	if (ap_using_interrupts())
+	if (ap_irq_flag)
 		unregister_adapter_interrupt(&ap_airq);
-	kfree(ap_configuration);
+	kfree(ap_qci_info);
 	return rc;
 }
 device_initcall(ap_module_init);
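
The reworked scan code above bounds its loops with ap_max_adapter_id and ap_max_domain_id instead of the compile-time AP_DEVICES/AP_DOMAINS limits, and exports the former through the new ap_max_adapter_id bus attribute. A minimal userspace sketch (not part of the patch; the sysfs paths follow from the bus attributes registered above) that derives the same scan range:

#include <stdio.h>

static int read_sysfs_int(const char *path)
{
	FILE *f = fopen(path, "r");
	int val = -1;

	if (f) {
		if (fscanf(f, "%d", &val) != 1)
			val = -1;
		fclose(f);
	}
	return val;
}

int main(void)
{
	int max_ap = read_sysfs_int("/sys/bus/ap/ap_max_adapter_id");
	int max_dom = read_sysfs_int("/sys/bus/ap/ap_max_domain_id");

	/* same bounds as the loops in ap_scan_bus() and ap_scan_domains() */
	printf("scan range: cards 0..%d, domains 0..%d\n", max_ap, max_dom);
	return 0;
}
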
diff --git a/drivers/s390/crypto/ap_bus.h b/drivers/s390/crypto/ap_bus.h
index 4348fdf..ccdbd95 100644
--- a/drivers/s390/crypto/ap_bus.h
+++ b/drivers/s390/crypto/ap_bus.h
@@ -15,6 +15,7 @@
 
 #include <linux/device.h>
 #include <linux/types.h>
+#include <linux/hashtable.h>
 #include <asm/isc.h>
 #include <asm/ap.h>
 
@@ -27,8 +28,8 @@
 
 extern int ap_domain_index;
 
-extern spinlock_t ap_list_lock;
-extern struct list_head ap_card_list;
+extern DECLARE_HASHTABLE(ap_queues, 8);
+extern spinlock_t ap_queues_lock;
 
 static inline int ap_test_bit(unsigned int *ptr, unsigned int nr)
 {
@@ -49,6 +50,7 @@
 #define AP_RESPONSE_NO_FIRST_PART	0x13
 #define AP_RESPONSE_MESSAGE_TOO_BIG	0x15
 #define AP_RESPONSE_REQ_FAC_NOT_INST	0x16
+#define AP_RESPONSE_INVALID_DOMAIN	0x42
 
 /*
  * Known device types
@@ -76,46 +78,47 @@
 #define AP_FUNC_APXA  6
 
 /*
- * AP interrupt states
+ * AP queue state machine states
  */
-#define AP_INTR_DISABLED	0	/* AP interrupt disabled */
-#define AP_INTR_ENABLED		1	/* AP interrupt enabled */
-
-/*
- * AP device states
- */
-enum ap_state {
-	AP_STATE_RESET_START,
-	AP_STATE_RESET_WAIT,
-	AP_STATE_SETIRQ_WAIT,
-	AP_STATE_IDLE,
-	AP_STATE_WORKING,
-	AP_STATE_QUEUE_FULL,
-	AP_STATE_SUSPEND_WAIT,
-	AP_STATE_REMOVE,	/* about to be removed from driver */
-	AP_STATE_UNBOUND,	/* momentary not bound to a driver */
-	AP_STATE_BORKED,	/* broken */
-	NR_AP_STATES
+enum ap_sm_state {
+	AP_SM_STATE_RESET_START = 0,
+	AP_SM_STATE_RESET_WAIT,
+	AP_SM_STATE_SETIRQ_WAIT,
+	AP_SM_STATE_IDLE,
+	AP_SM_STATE_WORKING,
+	AP_SM_STATE_QUEUE_FULL,
+	NR_AP_SM_STATES
 };
 
 /*
- * AP device events
+ * AP queue state machine events
  */
-enum ap_event {
-	AP_EVENT_POLL,
-	AP_EVENT_TIMEOUT,
-	NR_AP_EVENTS
+enum ap_sm_event {
+	AP_SM_EVENT_POLL,
+	AP_SM_EVENT_TIMEOUT,
+	NR_AP_SM_EVENTS
 };
 
 /*
- * AP wait behaviour
+ * AP queue state wait behaviour
  */
-enum ap_wait {
-	AP_WAIT_AGAIN,		/* retry immediately */
-	AP_WAIT_TIMEOUT,	/* wait for timeout */
-	AP_WAIT_INTERRUPT,	/* wait for thin interrupt (if available) */
-	AP_WAIT_NONE,		/* no wait */
-	NR_AP_WAIT
+enum ap_sm_wait {
+	AP_SM_WAIT_AGAIN = 0,	/* retry immediately */
+	AP_SM_WAIT_TIMEOUT,	/* wait for timeout */
+	AP_SM_WAIT_INTERRUPT,	/* wait for thin interrupt (if available) */
+	AP_SM_WAIT_NONE,	/* no wait */
+	NR_AP_SM_WAIT
+};
+
+/*
+ * AP queue device states
+ */
+enum ap_dev_state {
+	AP_DEV_STATE_UNINITIATED = 0,	/* fresh and virgin, not touched */
+	AP_DEV_STATE_OPERATING,		/* queue dev is working normal */
+	AP_DEV_STATE_SHUTDOWN,		/* remove/unbind/shutdown in progress */
+	AP_DEV_STATE_ERROR,		/* device is in error state */
+	NR_AP_DEV_STATES
 };
 
 struct ap_device;
@@ -136,8 +139,6 @@
 
 	int (*probe)(struct ap_device *);
 	void (*remove)(struct ap_device *);
-	void (*suspend)(struct ap_device *);
-	void (*resume)(struct ap_device *);
 };
 
 #define to_ap_drv(x) container_of((x), struct ap_driver, driver)
@@ -155,13 +156,12 @@
 
 struct ap_card {
 	struct ap_device ap_dev;
-	struct list_head list;		/* Private list of AP cards. */
-	struct list_head queues;	/* List of assoc. AP queues */
 	void *private;			/* ap driver private pointer. */
 	int raw_hwtype;			/* AP raw hardware type. */
 	unsigned int functions;		/* AP device function bitfield. */
 	int queue_depth;		/* AP queue depth.*/
 	int id;				/* AP card number. */
+	bool config;			/* configured state */
 	atomic64_t total_request_count;	/* # requests ever for this AP device.*/
 };
 
@@ -169,14 +169,15 @@
 
 struct ap_queue {
 	struct ap_device ap_dev;
-	struct list_head list;		/* Private list of AP queues. */
+	struct hlist_node hnode;	/* Node for the ap_queues hashtable */
 	struct ap_card *card;		/* Ptr to assoc. AP card. */
 	spinlock_t lock;		/* Per device lock. */
 	void *private;			/* ap driver private pointer. */
+	enum ap_dev_state dev_state;	/* queue device state */
+	bool config;			/* configured state */
 	ap_qid_t qid;			/* AP queue id. */
-	int interrupt;			/* indicate if interrupts are enabled */
+	bool interrupt;			/* indicate if interrupts are enabled */
 	int queue_count;		/* # messages currently on AP queue. */
-	enum ap_state state;		/* State of the AP device. */
 	int pendingq_count;		/* # requests on pendingq list. */
 	int requestq_count;		/* # requests on requestq list. */
 	u64 total_request_count;	/* # requests ever for this AP device.*/
@@ -185,26 +186,54 @@
 	struct list_head pendingq;	/* List of message sent to AP queue. */
 	struct list_head requestq;	/* List of message yet to be sent. */
 	struct ap_message *reply;	/* Per device reply message. */
+	enum ap_sm_state sm_state;	/* ap queue state machine state */
+	int last_err_rc;		/* last error state response code */
 };
 
 #define to_ap_queue(x) container_of((x), struct ap_queue, ap_dev.device)
 
-typedef enum ap_wait (ap_func_t)(struct ap_queue *queue);
+typedef enum ap_sm_wait (ap_func_t)(struct ap_queue *queue);
+
+/* failure injection cmd struct */
+struct ap_fi {
+	union {
+		u16 cmd;		/* fi flags + action */
+		struct {
+			u8 flags;	/* fi flags only */
+			u8 action;	/* fi action only */
+		};
+	};
+};
+
+/* all currently known fi actions */
+enum ap_fi_actions {
+	AP_FI_ACTION_CCA_AGENT_FF   = 0x01,
+	AP_FI_ACTION_CCA_DOM_INVAL  = 0x02,
+	AP_FI_ACTION_NQAP_QID_INVAL = 0x03,
+};
+
+/* all currently known fi flags */
+enum ap_fi_flags {
+	AP_FI_FLAG_NO_RETRY	  = 0x01,
+	AP_FI_FLAG_TOGGLE_SPECIAL = 0x02,
+};
 
 struct ap_message {
 	struct list_head list;		/* Request queueing. */
 	unsigned long long psmid;	/* Message id. */
-	void *message;			/* Pointer to message buffer. */
-	size_t length;			/* Message length. */
+	void *msg;			/* Pointer to message buffer. */
+	unsigned int len;		/* Message length. */
+	u16 flags;			/* Flags, see AP_MSG_FLAG_xxx */
+	struct ap_fi fi;		/* Failure Injection cmd */
 	int rc;				/* Return code for this message */
-
 	void *private;			/* ap driver private pointer. */
-	unsigned int special:1;		/* Used for special commands. */
 	/* receive is called from tasklet context */
 	void (*receive)(struct ap_queue *, struct ap_message *,
 			struct ap_message *);
 };
 
+#define AP_MSG_FLAG_SPECIAL  1		/* flag msg as 'special' with NQAP */
+
 /**
  * ap_init_message() - Initialize ap_message.
  * Initialize a message before using. Otherwise this might result in
@@ -222,16 +251,10 @@
  */
 static inline void ap_release_message(struct ap_message *ap_msg)
 {
-	kzfree(ap_msg->message);
-	kzfree(ap_msg->private);
+	kfree_sensitive(ap_msg->msg);
+	kfree_sensitive(ap_msg->private);
 }
 
-#define for_each_ap_card(_ac) \
-	list_for_each_entry(_ac, &ap_card_list, list)
-
-#define for_each_ap_queue(_aq, _ac) \
-	list_for_each_entry(_aq, &(_ac)->queues, list)
-
 /*
  * Note: don't use ap_send/ap_recv after using ap_queue_message
  * for the first time. Otherwise the ap message queue will get
@@ -240,15 +263,15 @@
 int ap_send(ap_qid_t, unsigned long long, void *, size_t);
 int ap_recv(ap_qid_t, unsigned long long *, void *, size_t);
 
-enum ap_wait ap_sm_event(struct ap_queue *aq, enum ap_event event);
-enum ap_wait ap_sm_event_loop(struct ap_queue *aq, enum ap_event event);
+enum ap_sm_wait ap_sm_event(struct ap_queue *aq, enum ap_sm_event event);
+enum ap_sm_wait ap_sm_event_loop(struct ap_queue *aq, enum ap_sm_event event);
 
-void ap_queue_message(struct ap_queue *aq, struct ap_message *ap_msg);
+int ap_queue_message(struct ap_queue *aq, struct ap_message *ap_msg);
 void ap_cancel_message(struct ap_queue *aq, struct ap_message *ap_msg);
 void ap_flush_queue(struct ap_queue *aq);
 
 void *ap_airq_ptr(void);
-void ap_wait(enum ap_wait wait);
+void ap_wait(enum ap_sm_wait wait);
 void ap_request_timeout(struct timer_list *t);
 void ap_bus_force_rescan(void);
 
@@ -259,8 +282,6 @@
 struct ap_queue *ap_queue_create(ap_qid_t qid, int device_type);
 void ap_queue_prepare_remove(struct ap_queue *aq);
 void ap_queue_remove(struct ap_queue *aq);
-void ap_queue_suspend(struct ap_device *ap_dev);
-void ap_queue_resume(struct ap_device *ap_dev);
 void ap_queue_init_state(struct ap_queue *aq);
 
 struct ap_card *ap_card_create(int id, int queue_depth, int raw_device_type,
@@ -275,6 +296,16 @@
 extern struct mutex ap_perms_mutex;
 
 /*
+ * Get ap_queue device for this qid.
+ * Returns ptr to the struct ap_queue device or NULL if there
+ * was no ap_queue device with this qid found. When something is
+ * found, the reference count of the embedded device is increased.
+ * So the caller has to decrease the reference count after use
+ * with a call to put_device(&aq->ap_dev.device).
+ */
+struct ap_queue *ap_get_qdev(ap_qid_t qid);
+
+/*
  * check APQN for owned/reserved by ap bus and default driver(s).
  * Checks if this APQN is or will be in use by the ap bus
  * and the default set of drivers.
diff --git a/drivers/s390/crypto/ap_card.c b/drivers/s390/crypto/ap_card.c
index e85bfca..d98bdd2 100644
--- a/drivers/s390/crypto/ap_card.c
+++ b/drivers/s390/crypto/ap_card.c
@@ -12,6 +12,7 @@
 #include <linux/init.h>
 #include <linux/slab.h>
 #include <asm/facility.h>
+#include <asm/sclp.h>
 
 #include "ap_bus.h"
 
@@ -23,7 +24,7 @@
 {
 	struct ap_card *ac = to_ap_card(dev);
 
-	return snprintf(buf, PAGE_SIZE, "%d\n", ac->ap_dev.device_type);
+	return scnprintf(buf, PAGE_SIZE, "%d\n", ac->ap_dev.device_type);
 }
 
 static DEVICE_ATTR_RO(hwtype);
@@ -33,7 +34,7 @@
 {
 	struct ap_card *ac = to_ap_card(dev);
 
-	return snprintf(buf, PAGE_SIZE, "%d\n", ac->raw_hwtype);
+	return scnprintf(buf, PAGE_SIZE, "%d\n", ac->raw_hwtype);
 }
 
 static DEVICE_ATTR_RO(raw_hwtype);
@@ -43,7 +44,7 @@
 {
 	struct ap_card *ac = to_ap_card(dev);
 
-	return snprintf(buf, PAGE_SIZE, "%d\n", ac->queue_depth);
+	return scnprintf(buf, PAGE_SIZE, "%d\n", ac->queue_depth);
 }
 
 static DEVICE_ATTR_RO(depth);
@@ -53,7 +54,7 @@
 {
 	struct ap_card *ac = to_ap_card(dev);
 
-	return snprintf(buf, PAGE_SIZE, "0x%08X\n", ac->functions);
+	return scnprintf(buf, PAGE_SIZE, "0x%08X\n", ac->functions);
 }
 
 static DEVICE_ATTR_RO(ap_functions);
@@ -66,23 +67,25 @@
 	u64 req_cnt;
 
 	req_cnt = 0;
-	spin_lock_bh(&ap_list_lock);
+	spin_lock_bh(&ap_queues_lock);
 	req_cnt = atomic64_read(&ac->total_request_count);
-	spin_unlock_bh(&ap_list_lock);
-	return snprintf(buf, PAGE_SIZE, "%llu\n", req_cnt);
+	spin_unlock_bh(&ap_queues_lock);
+	return scnprintf(buf, PAGE_SIZE, "%llu\n", req_cnt);
 }
 
 static ssize_t request_count_store(struct device *dev,
 				   struct device_attribute *attr,
 				   const char *buf, size_t count)
 {
-	struct ap_card *ac = to_ap_card(dev);
+	int bkt;
 	struct ap_queue *aq;
+	struct ap_card *ac = to_ap_card(dev);
 
-	spin_lock_bh(&ap_list_lock);
-	for_each_ap_queue(aq, ac)
-		aq->total_request_count = 0;
-	spin_unlock_bh(&ap_list_lock);
+	spin_lock_bh(&ap_queues_lock);
+	hash_for_each(ap_queues, bkt, aq, hnode)
+		if (ac == aq->card)
+			aq->total_request_count = 0;
+	spin_unlock_bh(&ap_queues_lock);
 	atomic64_set(&ac->total_request_count, 0);
 
 	return count;
@@ -93,16 +96,18 @@
 static ssize_t requestq_count_show(struct device *dev,
 				   struct device_attribute *attr, char *buf)
 {
-	struct ap_card *ac = to_ap_card(dev);
+	int bkt;
 	struct ap_queue *aq;
 	unsigned int reqq_cnt;
+	struct ap_card *ac = to_ap_card(dev);
 
 	reqq_cnt = 0;
-	spin_lock_bh(&ap_list_lock);
-	for_each_ap_queue(aq, ac)
-		reqq_cnt += aq->requestq_count;
-	spin_unlock_bh(&ap_list_lock);
-	return snprintf(buf, PAGE_SIZE, "%d\n", reqq_cnt);
+	spin_lock_bh(&ap_queues_lock);
+	hash_for_each(ap_queues, bkt, aq, hnode)
+		if (ac == aq->card)
+			reqq_cnt += aq->requestq_count;
+	spin_unlock_bh(&ap_queues_lock);
+	return scnprintf(buf, PAGE_SIZE, "%d\n", reqq_cnt);
 }
 
 static DEVICE_ATTR_RO(requestq_count);
@@ -110,16 +115,18 @@
 static ssize_t pendingq_count_show(struct device *dev,
 				   struct device_attribute *attr, char *buf)
 {
-	struct ap_card *ac = to_ap_card(dev);
+	int bkt;
 	struct ap_queue *aq;
 	unsigned int penq_cnt;
+	struct ap_card *ac = to_ap_card(dev);
 
 	penq_cnt = 0;
-	spin_lock_bh(&ap_list_lock);
-	for_each_ap_queue(aq, ac)
-		penq_cnt += aq->pendingq_count;
-	spin_unlock_bh(&ap_list_lock);
-	return snprintf(buf, PAGE_SIZE, "%d\n", penq_cnt);
+	spin_lock_bh(&ap_queues_lock);
+	hash_for_each(ap_queues, bkt, aq, hnode)
+		if (ac == aq->card)
+			penq_cnt += aq->pendingq_count;
+	spin_unlock_bh(&ap_queues_lock);
+	return scnprintf(buf, PAGE_SIZE, "%d\n", penq_cnt);
 }
 
 static DEVICE_ATTR_RO(pendingq_count);
@@ -127,11 +134,44 @@
 static ssize_t modalias_show(struct device *dev,
 			     struct device_attribute *attr, char *buf)
 {
-	return sprintf(buf, "ap:t%02X\n", to_ap_dev(dev)->device_type);
+	return scnprintf(buf, PAGE_SIZE, "ap:t%02X\n",
+			 to_ap_dev(dev)->device_type);
 }
 
 static DEVICE_ATTR_RO(modalias);
 
+static ssize_t config_show(struct device *dev,
+			   struct device_attribute *attr, char *buf)
+{
+	struct ap_card *ac = to_ap_card(dev);
+
+	return scnprintf(buf, PAGE_SIZE, "%d\n", ac->config ? 1 : 0);
+}
+
+static ssize_t config_store(struct device *dev,
+			    struct device_attribute *attr,
+			    const char *buf, size_t count)
+{
+	int rc = 0, cfg;
+	struct ap_card *ac = to_ap_card(dev);
+
+	if (sscanf(buf, "%d\n", &cfg) != 1 || cfg < 0 || cfg > 1)
+		return -EINVAL;
+
+	if (cfg && !ac->config)
+		rc = sclp_ap_configure(ac->id);
+	else if (!cfg && ac->config)
+		rc = sclp_ap_deconfigure(ac->id);
+	if (rc)
+		return rc;
+
+	ac->config = cfg ? true : false;
+
+	return count;
+}
+
+static DEVICE_ATTR_RW(config);
+
 static struct attribute *ap_card_dev_attrs[] = {
 	&dev_attr_hwtype.attr,
 	&dev_attr_raw_hwtype.attr,
@@ -141,6 +181,7 @@
 	&dev_attr_requestq_count.attr,
 	&dev_attr_pendingq_count.attr,
 	&dev_attr_modalias.attr,
+	&dev_attr_config.attr,
 	NULL
 };
 
@@ -162,11 +203,6 @@
 {
 	struct ap_card *ac = to_ap_card(dev);
 
-	if (!list_empty(&ac->list)) {
-		spin_lock_bh(&ap_list_lock);
-		list_del_init(&ac->list);
-		spin_unlock_bh(&ap_list_lock);
-	}
 	kfree(ac);
 }
 
@@ -178,8 +214,6 @@
 	ac = kzalloc(sizeof(*ac), GFP_KERNEL);
 	if (!ac)
 		return NULL;
-	INIT_LIST_HEAD(&ac->list);
-	INIT_LIST_HEAD(&ac->queues);
 	ac->ap_dev.device.release = ap_card_device_release;
 	ac->ap_dev.device.type = &ap_card_type;
 	ac->ap_dev.device_type = comp_type;
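
The new read-write config attribute above lets userspace configure or deconfigure a crypto adapter via SCLP. A small illustration (assuming a card01 device exists under /sys/bus/ap/devices) of driving it from C:

#include <stdio.h>

int main(void)
{
	/* writing "0" ends up in sclp_ap_deconfigure(), "1" in sclp_ap_configure() */
	FILE *f = fopen("/sys/bus/ap/devices/card01/config", "w");

	if (!f) {
		perror("config");
		return 1;
	}
	fputs("0\n", f);
	fclose(f);
	return 0;
}
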
diff --git a/drivers/s390/crypto/ap_debug.h b/drivers/s390/crypto/ap_debug.h
index dc675eb..34b0350 100644
--- a/drivers/s390/crypto/ap_debug.h
+++ b/drivers/s390/crypto/ap_debug.h
@@ -20,6 +20,14 @@
 
 #define AP_DBF(...)					\
 	debug_sprintf_event(ap_dbf_info, ##__VA_ARGS__)
+#define AP_DBF_ERR(...)					\
+	debug_sprintf_event(ap_dbf_info, DBF_ERR, ##__VA_ARGS__)
+#define AP_DBF_WARN(...)					\
+	debug_sprintf_event(ap_dbf_info, DBF_WARN, ##__VA_ARGS__)
+#define AP_DBF_INFO(...)					\
+	debug_sprintf_event(ap_dbf_info, DBF_INFO, ##__VA_ARGS__)
+#define AP_DBF_DBG(...)					\
+	debug_sprintf_event(ap_dbf_info, DBF_DEBUG, ##__VA_ARGS__)
 
 extern debug_info_t *ap_dbf_info;
 
diff --git a/drivers/s390/crypto/ap_queue.c b/drivers/s390/crypto/ap_queue.c
index a317ab4..ff0018f 100644
--- a/drivers/s390/crypto/ap_queue.c
+++ b/drivers/s390/crypto/ap_queue.c
@@ -19,7 +19,7 @@
 static void __ap_flush_queue(struct ap_queue *aq);
 
 /**
- * ap_queue_enable_interruption(): Enable interruption on an AP queue.
+ * ap_queue_enable_irq(): Enable interrupt support on this AP queue.
  * @qid: The AP queue number
  * @ind: the notification indicator byte
  *
@@ -27,7 +27,7 @@
  * value it waits a while and tests the AP queue if interrupts
  * have been switched on using ap_test_queue().
  */
-static int ap_queue_enable_interruption(struct ap_queue *aq, void *ind)
+static int ap_queue_enable_irq(struct ap_queue *aq, void *ind)
 {
 	struct ap_queue_status status;
 	struct ap_qirq_ctrl qirqctrl = { 0 };
@@ -69,9 +69,9 @@
  */
 static inline struct ap_queue_status
 __ap_send(ap_qid_t qid, unsigned long long psmid, void *msg, size_t length,
-	  unsigned int special)
+	  int special)
 {
-	if (special == 1)
+	if (special)
 		qid |= 0x400000UL;
 	return ap_nqap(qid, psmid, msg, length);
 }
@@ -119,9 +119,9 @@
 
 /* State machine definitions and helpers */
 
-static enum ap_wait ap_sm_nop(struct ap_queue *aq)
+static enum ap_sm_wait ap_sm_nop(struct ap_queue *aq)
 {
-	return AP_WAIT_NONE;
+	return AP_SM_WAIT_NONE;
 }
 
 /**
@@ -129,18 +129,21 @@
  *	not change the state of the device.
  * @aq: pointer to the AP queue
  *
- * Returns AP_WAIT_NONE, AP_WAIT_AGAIN, or AP_WAIT_INTERRUPT
+ * Returns AP_SM_WAIT_NONE, AP_SM_WAIT_AGAIN, or AP_SM_WAIT_INTERRUPT
  */
 static struct ap_queue_status ap_sm_recv(struct ap_queue *aq)
 {
 	struct ap_queue_status status;
 	struct ap_message *ap_msg;
+	bool found = false;
 
 	status = ap_dqap(aq->qid, &aq->reply->psmid,
-			 aq->reply->message, aq->reply->length);
+			 aq->reply->msg, aq->reply->len);
 	switch (status.response_code) {
 	case AP_RESPONSE_NORMAL:
-		aq->queue_count--;
+		aq->queue_count = max_t(int, 0, aq->queue_count - 1);
+		if (!status.queue_empty && !aq->queue_count)
+			aq->queue_count++;
 		if (aq->queue_count > 0)
 			mod_timer(&aq->timeout,
 				  jiffies + aq->request_timeout);
@@ -150,9 +153,15 @@
 			list_del_init(&ap_msg->list);
 			aq->pendingq_count--;
 			ap_msg->receive(aq, ap_msg, aq->reply);
+			found = true;
 			break;
 		}
-		/* fall through */
+		if (!found) {
+			AP_DBF_WARN("%s unassociated reply psmid=0x%016llx on 0x%02x.%04x\n",
+				    __func__, aq->reply->psmid,
+				    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
+		}
+		fallthrough;
 	case AP_RESPONSE_NO_PENDING_REPLY:
 		if (!status.queue_empty || aq->queue_count <= 0)
 			break;
@@ -172,56 +181,36 @@
  * ap_sm_read(): Receive pending reply messages from an AP queue.
  * @aq: pointer to the AP queue
  *
- * Returns AP_WAIT_NONE, AP_WAIT_AGAIN, or AP_WAIT_INTERRUPT
+ * Returns AP_SM_WAIT_NONE, AP_SM_WAIT_AGAIN, or AP_SM_WAIT_INTERRUPT
  */
-static enum ap_wait ap_sm_read(struct ap_queue *aq)
+static enum ap_sm_wait ap_sm_read(struct ap_queue *aq)
 {
 	struct ap_queue_status status;
 
 	if (!aq->reply)
-		return AP_WAIT_NONE;
+		return AP_SM_WAIT_NONE;
 	status = ap_sm_recv(aq);
 	switch (status.response_code) {
 	case AP_RESPONSE_NORMAL:
 		if (aq->queue_count > 0) {
-			aq->state = AP_STATE_WORKING;
-			return AP_WAIT_AGAIN;
+			aq->sm_state = AP_SM_STATE_WORKING;
+			return AP_SM_WAIT_AGAIN;
 		}
-		aq->state = AP_STATE_IDLE;
-		return AP_WAIT_NONE;
+		aq->sm_state = AP_SM_STATE_IDLE;
+		return AP_SM_WAIT_NONE;
 	case AP_RESPONSE_NO_PENDING_REPLY:
 		if (aq->queue_count > 0)
-			return AP_WAIT_INTERRUPT;
-		aq->state = AP_STATE_IDLE;
-		return AP_WAIT_NONE;
+			return aq->interrupt ?
+				AP_SM_WAIT_INTERRUPT : AP_SM_WAIT_TIMEOUT;
+		aq->sm_state = AP_SM_STATE_IDLE;
+		return AP_SM_WAIT_NONE;
 	default:
-		aq->state = AP_STATE_BORKED;
-		return AP_WAIT_NONE;
-	}
-}
-
-/**
- * ap_sm_suspend_read(): Receive pending reply messages from an AP queue
- * without changing the device state in between. In suspend mode we don't
- * allow sending new requests, therefore just fetch pending replies.
- * @aq: pointer to the AP queue
- *
- * Returns AP_WAIT_NONE or AP_WAIT_AGAIN
- */
-static enum ap_wait ap_sm_suspend_read(struct ap_queue *aq)
-{
-	struct ap_queue_status status;
-
-	if (!aq->reply)
-		return AP_WAIT_NONE;
-	status = ap_sm_recv(aq);
-	switch (status.response_code) {
-	case AP_RESPONSE_NORMAL:
-		if (aq->queue_count > 0)
-			return AP_WAIT_AGAIN;
-		/* fall through */
-	default:
-		return AP_WAIT_NONE;
+		aq->dev_state = AP_DEV_STATE_ERROR;
+		aq->last_err_rc = status.response_code;
+		AP_DBF_WARN("%s RC 0x%02x on 0x%02x.%04x -> AP_DEV_STATE_ERROR\n",
+			    __func__, status.response_code,
+			    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
+		return AP_SM_WAIT_NONE;
 	}
 }
 
@@ -229,48 +218,65 @@
  * ap_sm_write(): Send messages from the request queue to an AP queue.
  * @aq: pointer to the AP queue
  *
- * Returns AP_WAIT_NONE, AP_WAIT_AGAIN, or AP_WAIT_INTERRUPT
+ * Returns AP_SM_WAIT_NONE, AP_SM_WAIT_AGAIN, or AP_SM_WAIT_INTERRUPT
  */
-static enum ap_wait ap_sm_write(struct ap_queue *aq)
+static enum ap_sm_wait ap_sm_write(struct ap_queue *aq)
 {
 	struct ap_queue_status status;
 	struct ap_message *ap_msg;
+	ap_qid_t qid = aq->qid;
 
 	if (aq->requestq_count <= 0)
-		return AP_WAIT_NONE;
+		return AP_SM_WAIT_NONE;
 	/* Start the next request on the queue. */
 	ap_msg = list_entry(aq->requestq.next, struct ap_message, list);
-	status = __ap_send(aq->qid, ap_msg->psmid,
-			   ap_msg->message, ap_msg->length, ap_msg->special);
+#ifdef CONFIG_ZCRYPT_DEBUG
+	if (ap_msg->fi.action == AP_FI_ACTION_NQAP_QID_INVAL) {
+		AP_DBF_WARN("%s fi cmd 0x%04x: forcing invalid qid 0xFF00\n",
+			    __func__, ap_msg->fi.cmd);
+		qid = 0xFF00;
+	}
+#endif
+	status = __ap_send(qid, ap_msg->psmid,
+			   ap_msg->msg, ap_msg->len,
+			   ap_msg->flags & AP_MSG_FLAG_SPECIAL);
 	switch (status.response_code) {
 	case AP_RESPONSE_NORMAL:
-		aq->queue_count++;
+		aq->queue_count = max_t(int, 1, aq->queue_count + 1);
 		if (aq->queue_count == 1)
 			mod_timer(&aq->timeout, jiffies + aq->request_timeout);
 		list_move_tail(&ap_msg->list, &aq->pendingq);
 		aq->requestq_count--;
 		aq->pendingq_count++;
 		if (aq->queue_count < aq->card->queue_depth) {
-			aq->state = AP_STATE_WORKING;
-			return AP_WAIT_AGAIN;
+			aq->sm_state = AP_SM_STATE_WORKING;
+			return AP_SM_WAIT_AGAIN;
 		}
-		/* fall through */
+		fallthrough;
 	case AP_RESPONSE_Q_FULL:
-		aq->state = AP_STATE_QUEUE_FULL;
-		return AP_WAIT_INTERRUPT;
+		aq->sm_state = AP_SM_STATE_QUEUE_FULL;
+		return aq->interrupt ?
+			AP_SM_WAIT_INTERRUPT : AP_SM_WAIT_TIMEOUT;
 	case AP_RESPONSE_RESET_IN_PROGRESS:
-		aq->state = AP_STATE_RESET_WAIT;
-		return AP_WAIT_TIMEOUT;
+		aq->sm_state = AP_SM_STATE_RESET_WAIT;
+		return AP_SM_WAIT_TIMEOUT;
+	case AP_RESPONSE_INVALID_DOMAIN:
+		AP_DBF(DBF_WARN, "AP_RESPONSE_INVALID_DOMAIN on NQAP\n");
+		fallthrough;
 	case AP_RESPONSE_MESSAGE_TOO_BIG:
 	case AP_RESPONSE_REQ_FAC_NOT_INST:
 		list_del_init(&ap_msg->list);
 		aq->requestq_count--;
 		ap_msg->rc = -EINVAL;
 		ap_msg->receive(aq, ap_msg, NULL);
-		return AP_WAIT_AGAIN;
+		return AP_SM_WAIT_AGAIN;
 	default:
-		aq->state = AP_STATE_BORKED;
-		return AP_WAIT_NONE;
+		aq->dev_state = AP_DEV_STATE_ERROR;
+		aq->last_err_rc = status.response_code;
+		AP_DBF_WARN("%s RC 0x%02x on 0x%02x.%04x -> AP_DEV_STATE_ERROR\n",
+			    __func__, status.response_code,
+			    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
+		return AP_SM_WAIT_NONE;
 	}
 }
 
@@ -278,9 +284,9 @@
  * ap_sm_read_write(): Send and receive messages to/from an AP queue.
  * @aq: pointer to the AP queue
  *
- * Returns AP_WAIT_NONE, AP_WAIT_AGAIN, or AP_WAIT_INTERRUPT
+ * Returns AP_SM_WAIT_NONE, AP_SM_WAIT_AGAIN, or AP_SM_WAIT_INTERRUPT
  */
-static enum ap_wait ap_sm_read_write(struct ap_queue *aq)
+static enum ap_sm_wait ap_sm_read_write(struct ap_queue *aq)
 {
 	return min(ap_sm_read(aq), ap_sm_write(aq));
 }
@@ -291,7 +297,7 @@
  *
  * Submit the Reset command to an AP queue.
  */
-static enum ap_wait ap_sm_reset(struct ap_queue *aq)
+static enum ap_sm_wait ap_sm_reset(struct ap_queue *aq)
 {
 	struct ap_queue_status status;
 
@@ -299,17 +305,16 @@
 	switch (status.response_code) {
 	case AP_RESPONSE_NORMAL:
 	case AP_RESPONSE_RESET_IN_PROGRESS:
-		aq->state = AP_STATE_RESET_WAIT;
-		aq->interrupt = AP_INTR_DISABLED;
-		return AP_WAIT_TIMEOUT;
-	case AP_RESPONSE_BUSY:
-		return AP_WAIT_TIMEOUT;
-	case AP_RESPONSE_Q_NOT_AVAIL:
-	case AP_RESPONSE_DECONFIGURED:
-	case AP_RESPONSE_CHECKSTOPPED:
+		aq->sm_state = AP_SM_STATE_RESET_WAIT;
+		aq->interrupt = false;
+		return AP_SM_WAIT_TIMEOUT;
 	default:
-		aq->state = AP_STATE_BORKED;
-		return AP_WAIT_NONE;
+		aq->dev_state = AP_DEV_STATE_ERROR;
+		aq->last_err_rc = status.response_code;
+		AP_DBF_WARN("%s RC 0x%02x on 0x%02x.%04x -> AP_DEV_STATE_ERROR\n",
+			    __func__, status.response_code,
+			    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
+		return AP_SM_WAIT_NONE;
 	}
 }
 
@@ -319,7 +324,7 @@
  *
  * Returns AP_POLL_IMMEDIATELY, AP_POLL_AFTER_TIMEROUT or 0.
  */
-static enum ap_wait ap_sm_reset_wait(struct ap_queue *aq)
+static enum ap_sm_wait ap_sm_reset_wait(struct ap_queue *aq)
 {
 	struct ap_queue_status status;
 	void *lsi_ptr;
@@ -334,21 +339,25 @@
 	switch (status.response_code) {
 	case AP_RESPONSE_NORMAL:
 		lsi_ptr = ap_airq_ptr();
-		if (lsi_ptr && ap_queue_enable_interruption(aq, lsi_ptr) == 0)
-			aq->state = AP_STATE_SETIRQ_WAIT;
+		if (lsi_ptr && ap_queue_enable_irq(aq, lsi_ptr) == 0)
+			aq->sm_state = AP_SM_STATE_SETIRQ_WAIT;
 		else
-			aq->state = (aq->queue_count > 0) ?
-				AP_STATE_WORKING : AP_STATE_IDLE;
-		return AP_WAIT_AGAIN;
+			aq->sm_state = (aq->queue_count > 0) ?
+				AP_SM_STATE_WORKING : AP_SM_STATE_IDLE;
+		return AP_SM_WAIT_AGAIN;
 	case AP_RESPONSE_BUSY:
 	case AP_RESPONSE_RESET_IN_PROGRESS:
-		return AP_WAIT_TIMEOUT;
+		return AP_SM_WAIT_TIMEOUT;
 	case AP_RESPONSE_Q_NOT_AVAIL:
 	case AP_RESPONSE_DECONFIGURED:
 	case AP_RESPONSE_CHECKSTOPPED:
 	default:
-		aq->state = AP_STATE_BORKED;
-		return AP_WAIT_NONE;
+		aq->dev_state = AP_DEV_STATE_ERROR;
+		aq->last_err_rc = status.response_code;
+		AP_DBF_WARN("%s RC 0x%02x on 0x%02x.%04x -> AP_DEV_STATE_ERROR\n",
+			    __func__, status.response_code,
+			    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
+		return AP_SM_WAIT_NONE;
 	}
 }
 
@@ -358,7 +367,7 @@
  *
  * Returns AP_POLL_IMMEDIATELY, AP_POLL_AFTER_TIMEROUT or 0.
  */
-static enum ap_wait ap_sm_setirq_wait(struct ap_queue *aq)
+static enum ap_sm_wait ap_sm_setirq_wait(struct ap_queue *aq)
 {
 	struct ap_queue_status status;
 
@@ -371,107 +380,76 @@
 
 	if (status.irq_enabled == 1) {
 		/* Irqs are now enabled */
-		aq->interrupt = AP_INTR_ENABLED;
-		aq->state = (aq->queue_count > 0) ?
-			AP_STATE_WORKING : AP_STATE_IDLE;
+		aq->interrupt = true;
+		aq->sm_state = (aq->queue_count > 0) ?
+			AP_SM_STATE_WORKING : AP_SM_STATE_IDLE;
 	}
 
 	switch (status.response_code) {
 	case AP_RESPONSE_NORMAL:
 		if (aq->queue_count > 0)
-			return AP_WAIT_AGAIN;
-		/* fallthrough */
+			return AP_SM_WAIT_AGAIN;
+		fallthrough;
 	case AP_RESPONSE_NO_PENDING_REPLY:
-		return AP_WAIT_TIMEOUT;
+		return AP_SM_WAIT_TIMEOUT;
 	default:
-		aq->state = AP_STATE_BORKED;
-		return AP_WAIT_NONE;
+		aq->dev_state = AP_DEV_STATE_ERROR;
+		aq->last_err_rc = status.response_code;
+		AP_DBF_WARN("%s RC 0x%02x on 0x%02x.%04x -> AP_DEV_STATE_ERROR\n",
+			    __func__, status.response_code,
+			    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
+		return AP_SM_WAIT_NONE;
 	}
 }
 
 /*
  * AP state machine jump table
  */
-static ap_func_t *ap_jumptable[NR_AP_STATES][NR_AP_EVENTS] = {
-	[AP_STATE_RESET_START] = {
-		[AP_EVENT_POLL] = ap_sm_reset,
-		[AP_EVENT_TIMEOUT] = ap_sm_nop,
+static ap_func_t *ap_jumptable[NR_AP_SM_STATES][NR_AP_SM_EVENTS] = {
+	[AP_SM_STATE_RESET_START] = {
+		[AP_SM_EVENT_POLL] = ap_sm_reset,
+		[AP_SM_EVENT_TIMEOUT] = ap_sm_nop,
 	},
-	[AP_STATE_RESET_WAIT] = {
-		[AP_EVENT_POLL] = ap_sm_reset_wait,
-		[AP_EVENT_TIMEOUT] = ap_sm_nop,
+	[AP_SM_STATE_RESET_WAIT] = {
+		[AP_SM_EVENT_POLL] = ap_sm_reset_wait,
+		[AP_SM_EVENT_TIMEOUT] = ap_sm_nop,
 	},
-	[AP_STATE_SETIRQ_WAIT] = {
-		[AP_EVENT_POLL] = ap_sm_setirq_wait,
-		[AP_EVENT_TIMEOUT] = ap_sm_nop,
+	[AP_SM_STATE_SETIRQ_WAIT] = {
+		[AP_SM_EVENT_POLL] = ap_sm_setirq_wait,
+		[AP_SM_EVENT_TIMEOUT] = ap_sm_nop,
 	},
-	[AP_STATE_IDLE] = {
-		[AP_EVENT_POLL] = ap_sm_write,
-		[AP_EVENT_TIMEOUT] = ap_sm_nop,
+	[AP_SM_STATE_IDLE] = {
+		[AP_SM_EVENT_POLL] = ap_sm_write,
+		[AP_SM_EVENT_TIMEOUT] = ap_sm_nop,
 	},
-	[AP_STATE_WORKING] = {
-		[AP_EVENT_POLL] = ap_sm_read_write,
-		[AP_EVENT_TIMEOUT] = ap_sm_reset,
+	[AP_SM_STATE_WORKING] = {
+		[AP_SM_EVENT_POLL] = ap_sm_read_write,
+		[AP_SM_EVENT_TIMEOUT] = ap_sm_reset,
 	},
-	[AP_STATE_QUEUE_FULL] = {
-		[AP_EVENT_POLL] = ap_sm_read,
-		[AP_EVENT_TIMEOUT] = ap_sm_reset,
-	},
-	[AP_STATE_SUSPEND_WAIT] = {
-		[AP_EVENT_POLL] = ap_sm_suspend_read,
-		[AP_EVENT_TIMEOUT] = ap_sm_nop,
-	},
-	[AP_STATE_REMOVE] = {
-		[AP_EVENT_POLL] = ap_sm_nop,
-		[AP_EVENT_TIMEOUT] = ap_sm_nop,
-	},
-	[AP_STATE_UNBOUND] = {
-		[AP_EVENT_POLL] = ap_sm_nop,
-		[AP_EVENT_TIMEOUT] = ap_sm_nop,
-	},
-	[AP_STATE_BORKED] = {
-		[AP_EVENT_POLL] = ap_sm_nop,
-		[AP_EVENT_TIMEOUT] = ap_sm_nop,
+	[AP_SM_STATE_QUEUE_FULL] = {
+		[AP_SM_EVENT_POLL] = ap_sm_read,
+		[AP_SM_EVENT_TIMEOUT] = ap_sm_reset,
 	},
 };
 
-enum ap_wait ap_sm_event(struct ap_queue *aq, enum ap_event event)
+enum ap_sm_wait ap_sm_event(struct ap_queue *aq, enum ap_sm_event event)
 {
-	return ap_jumptable[aq->state][event](aq);
+	if (aq->dev_state > AP_DEV_STATE_UNINITIATED)
+		return ap_jumptable[aq->sm_state][event](aq);
+	else
+		return AP_SM_WAIT_NONE;
 }
 
-enum ap_wait ap_sm_event_loop(struct ap_queue *aq, enum ap_event event)
+enum ap_sm_wait ap_sm_event_loop(struct ap_queue *aq, enum ap_sm_event event)
 {
-	enum ap_wait wait;
+	enum ap_sm_wait wait;
 
-	while ((wait = ap_sm_event(aq, event)) == AP_WAIT_AGAIN)
+	while ((wait = ap_sm_event(aq, event)) == AP_SM_WAIT_AGAIN)
 		;
 	return wait;
 }
 
 /*
- * Power management for queue devices
- */
-void ap_queue_suspend(struct ap_device *ap_dev)
-{
-	struct ap_queue *aq = to_ap_queue(&ap_dev->device);
-
-	/* Poll on the device until all requests are finished. */
-	spin_lock_bh(&aq->lock);
-	aq->state = AP_STATE_SUSPEND_WAIT;
-	while (ap_sm_event(aq, AP_EVENT_POLL) != AP_WAIT_NONE)
-		;
-	aq->state = AP_STATE_BORKED;
-	spin_unlock_bh(&aq->lock);
-}
-EXPORT_SYMBOL(ap_queue_suspend);
-
-void ap_queue_resume(struct ap_device *ap_dev)
-{
-}
-EXPORT_SYMBOL(ap_queue_resume);
-
-/*
  * AP queue related attributes.
  */
 static ssize_t request_count_show(struct device *dev,
@@ -479,12 +457,20 @@
 				  char *buf)
 {
 	struct ap_queue *aq = to_ap_queue(dev);
+	bool valid = false;
 	u64 req_cnt;
 
 	spin_lock_bh(&aq->lock);
-	req_cnt = aq->total_request_count;
+	if (aq->dev_state > AP_DEV_STATE_UNINITIATED) {
+		req_cnt = aq->total_request_count;
+		valid = true;
+	}
 	spin_unlock_bh(&aq->lock);
-	return snprintf(buf, PAGE_SIZE, "%llu\n", req_cnt);
+
+	if (valid)
+		return scnprintf(buf, PAGE_SIZE, "%llu\n", req_cnt);
+	else
+		return scnprintf(buf, PAGE_SIZE, "-\n");
 }
 
 static ssize_t request_count_store(struct device *dev,
@@ -509,9 +495,10 @@
 	unsigned int reqq_cnt = 0;
 
 	spin_lock_bh(&aq->lock);
-	reqq_cnt = aq->requestq_count;
+	if (aq->dev_state > AP_DEV_STATE_UNINITIATED)
+		reqq_cnt = aq->requestq_count;
 	spin_unlock_bh(&aq->lock);
-	return snprintf(buf, PAGE_SIZE, "%d\n", reqq_cnt);
+	return scnprintf(buf, PAGE_SIZE, "%d\n", reqq_cnt);
 }
 
 static DEVICE_ATTR_RO(requestq_count);
@@ -523,9 +510,10 @@
 	unsigned int penq_cnt = 0;
 
 	spin_lock_bh(&aq->lock);
-	penq_cnt = aq->pendingq_count;
+	if (aq->dev_state > AP_DEV_STATE_UNINITIATED)
+		penq_cnt = aq->pendingq_count;
 	spin_unlock_bh(&aq->lock);
-	return snprintf(buf, PAGE_SIZE, "%d\n", penq_cnt);
+	return scnprintf(buf, PAGE_SIZE, "%d\n", penq_cnt);
 }
 
 static DEVICE_ATTR_RO(pendingq_count);
@@ -537,17 +525,17 @@
 	int rc = 0;
 
 	spin_lock_bh(&aq->lock);
-	switch (aq->state) {
-	case AP_STATE_RESET_START:
-	case AP_STATE_RESET_WAIT:
-		rc = snprintf(buf, PAGE_SIZE, "Reset in progress.\n");
+	switch (aq->sm_state) {
+	case AP_SM_STATE_RESET_START:
+	case AP_SM_STATE_RESET_WAIT:
+		rc = scnprintf(buf, PAGE_SIZE, "Reset in progress.\n");
 		break;
-	case AP_STATE_WORKING:
-	case AP_STATE_QUEUE_FULL:
-		rc = snprintf(buf, PAGE_SIZE, "Reset Timer armed.\n");
+	case AP_SM_STATE_WORKING:
+	case AP_SM_STATE_QUEUE_FULL:
+		rc = scnprintf(buf, PAGE_SIZE, "Reset Timer armed.\n");
 		break;
 	default:
-		rc = snprintf(buf, PAGE_SIZE, "No Reset Timer set.\n");
+		rc = scnprintf(buf, PAGE_SIZE, "No Reset Timer set.\n");
 	}
 	spin_unlock_bh(&aq->lock);
 	return rc;
@@ -561,8 +549,8 @@
 
 	spin_lock_bh(&aq->lock);
 	__ap_flush_queue(aq);
-	aq->state = AP_STATE_RESET_START;
-	ap_wait(ap_sm_event(aq, AP_EVENT_POLL));
+	aq->sm_state = AP_SM_STATE_RESET_START;
+	ap_wait(ap_sm_event(aq, AP_SM_EVENT_POLL));
 	spin_unlock_bh(&aq->lock);
 
 	AP_DBF(DBF_INFO, "reset queue=%02x.%04x triggered by user\n",
@@ -580,24 +568,150 @@
 	int rc = 0;
 
 	spin_lock_bh(&aq->lock);
-	if (aq->state == AP_STATE_SETIRQ_WAIT)
-		rc = snprintf(buf, PAGE_SIZE, "Enable Interrupt pending.\n");
-	else if (aq->interrupt == AP_INTR_ENABLED)
-		rc = snprintf(buf, PAGE_SIZE, "Interrupts enabled.\n");
+	if (aq->sm_state == AP_SM_STATE_SETIRQ_WAIT)
+		rc = scnprintf(buf, PAGE_SIZE, "Enable Interrupt pending.\n");
+	else if (aq->interrupt)
+		rc = scnprintf(buf, PAGE_SIZE, "Interrupts enabled.\n");
 	else
-		rc = snprintf(buf, PAGE_SIZE, "Interrupts disabled.\n");
+		rc = scnprintf(buf, PAGE_SIZE, "Interrupts disabled.\n");
 	spin_unlock_bh(&aq->lock);
 	return rc;
 }
 
 static DEVICE_ATTR_RO(interrupt);
 
+static ssize_t config_show(struct device *dev,
+			   struct device_attribute *attr, char *buf)
+{
+	struct ap_queue *aq = to_ap_queue(dev);
+	int rc;
+
+	spin_lock_bh(&aq->lock);
+	rc = scnprintf(buf, PAGE_SIZE, "%d\n", aq->config ? 1 : 0);
+	spin_unlock_bh(&aq->lock);
+	return rc;
+}
+
+static DEVICE_ATTR_RO(config);
+
+#ifdef CONFIG_ZCRYPT_DEBUG
+static ssize_t states_show(struct device *dev,
+			   struct device_attribute *attr, char *buf)
+{
+	struct ap_queue *aq = to_ap_queue(dev);
+	int rc = 0;
+
+	spin_lock_bh(&aq->lock);
+	/* queue device state */
+	switch (aq->dev_state) {
+	case AP_DEV_STATE_UNINITIATED:
+		rc = scnprintf(buf, PAGE_SIZE, "UNINITIATED\n");
+		break;
+	case AP_DEV_STATE_OPERATING:
+		rc = scnprintf(buf, PAGE_SIZE, "OPERATING");
+		break;
+	case AP_DEV_STATE_SHUTDOWN:
+		rc = scnprintf(buf, PAGE_SIZE, "SHUTDOWN");
+		break;
+	case AP_DEV_STATE_ERROR:
+		rc = scnprintf(buf, PAGE_SIZE, "ERROR");
+		break;
+	default:
+		rc = scnprintf(buf, PAGE_SIZE, "UNKNOWN");
+	}
+	/* state machine state */
+	if (aq->dev_state) {
+		switch (aq->sm_state) {
+		case AP_SM_STATE_RESET_START:
+			rc += scnprintf(buf + rc, PAGE_SIZE - rc,
+					" [RESET_START]\n");
+			break;
+		case AP_SM_STATE_RESET_WAIT:
+			rc += scnprintf(buf + rc, PAGE_SIZE - rc,
+					" [RESET_WAIT]\n");
+			break;
+		case AP_SM_STATE_SETIRQ_WAIT:
+			rc += scnprintf(buf + rc, PAGE_SIZE - rc,
+					" [SETIRQ_WAIT]\n");
+			break;
+		case AP_SM_STATE_IDLE:
+			rc += scnprintf(buf + rc, PAGE_SIZE - rc,
+					" [IDLE]\n");
+			break;
+		case AP_SM_STATE_WORKING:
+			rc += scnprintf(buf + rc, PAGE_SIZE - rc,
+					" [WORKING]\n");
+			break;
+		case AP_SM_STATE_QUEUE_FULL:
+			rc += scnprintf(buf + rc, PAGE_SIZE - rc,
+					" [FULL]\n");
+			break;
+		default:
+			rc += scnprintf(buf + rc, PAGE_SIZE - rc,
+					" [UNKNOWN]\n");
+		}
+	}
+	spin_unlock_bh(&aq->lock);
+
+	return rc;
+}
+static DEVICE_ATTR_RO(states);
+
+static ssize_t last_err_rc_show(struct device *dev,
+				struct device_attribute *attr, char *buf)
+{
+	struct ap_queue *aq = to_ap_queue(dev);
+	int rc;
+
+	spin_lock_bh(&aq->lock);
+	rc = aq->last_err_rc;
+	spin_unlock_bh(&aq->lock);
+
+	switch (rc) {
+	case AP_RESPONSE_NORMAL:
+		return scnprintf(buf, PAGE_SIZE, "NORMAL\n");
+	case AP_RESPONSE_Q_NOT_AVAIL:
+		return scnprintf(buf, PAGE_SIZE, "Q_NOT_AVAIL\n");
+	case AP_RESPONSE_RESET_IN_PROGRESS:
+		return scnprintf(buf, PAGE_SIZE, "RESET_IN_PROGRESS\n");
+	case AP_RESPONSE_DECONFIGURED:
+		return scnprintf(buf, PAGE_SIZE, "DECONFIGURED\n");
+	case AP_RESPONSE_CHECKSTOPPED:
+		return scnprintf(buf, PAGE_SIZE, "CHECKSTOPPED\n");
+	case AP_RESPONSE_BUSY:
+		return scnprintf(buf, PAGE_SIZE, "BUSY\n");
+	case AP_RESPONSE_INVALID_ADDRESS:
+		return scnprintf(buf, PAGE_SIZE, "INVALID_ADDRESS\n");
+	case AP_RESPONSE_OTHERWISE_CHANGED:
+		return scnprintf(buf, PAGE_SIZE, "OTHERWISE_CHANGED\n");
+	case AP_RESPONSE_Q_FULL:
+		return scnprintf(buf, PAGE_SIZE, "Q_FULL/NO_PENDING_REPLY\n");
+	case AP_RESPONSE_INDEX_TOO_BIG:
+		return scnprintf(buf, PAGE_SIZE, "INDEX_TOO_BIG\n");
+	case AP_RESPONSE_NO_FIRST_PART:
+		return scnprintf(buf, PAGE_SIZE, "NO_FIRST_PART\n");
+	case AP_RESPONSE_MESSAGE_TOO_BIG:
+		return scnprintf(buf, PAGE_SIZE, "MESSAGE_TOO_BIG\n");
+	case AP_RESPONSE_REQ_FAC_NOT_INST:
+		return scnprintf(buf, PAGE_SIZE, "REQ_FAC_NOT_INST\n");
+	default:
+		return scnprintf(buf, PAGE_SIZE, "response code %d\n", rc);
+	}
+}
+static DEVICE_ATTR_RO(last_err_rc);
+#endif
+
 static struct attribute *ap_queue_dev_attrs[] = {
 	&dev_attr_request_count.attr,
 	&dev_attr_requestq_count.attr,
 	&dev_attr_pendingq_count.attr,
 	&dev_attr_reset.attr,
 	&dev_attr_interrupt.attr,
+	&dev_attr_config.attr,
+#ifdef CONFIG_ZCRYPT_DEBUG
+	&dev_attr_states.attr,
+	&dev_attr_last_err_rc.attr,
+#endif
 	NULL
 };
 
@@ -619,11 +733,10 @@
 {
 	struct ap_queue *aq = to_ap_queue(dev);
 
-	if (!list_empty(&aq->list)) {
-		spin_lock_bh(&ap_list_lock);
-		list_del_init(&aq->list);
-		spin_unlock_bh(&ap_list_lock);
-	}
+	spin_lock_bh(&ap_queues_lock);
+	hash_del(&aq->hnode);
+	spin_unlock_bh(&ap_queues_lock);
+
 	kfree(aq);
 }
 
@@ -638,10 +751,8 @@
 	aq->ap_dev.device.type = &ap_queue_type;
 	aq->ap_dev.device_type = device_type;
 	aq->qid = qid;
-	aq->state = AP_STATE_UNBOUND;
-	aq->interrupt = AP_INTR_DISABLED;
+	aq->interrupt = false;
 	spin_lock_init(&aq->lock);
-	INIT_LIST_HEAD(&aq->list);
 	INIT_LIST_HEAD(&aq->pendingq);
 	INIT_LIST_HEAD(&aq->requestq);
 	timer_setup(&aq->timeout, ap_request_timeout, 0);
@@ -654,7 +765,7 @@
 	aq->reply = reply;
 
 	spin_lock_bh(&aq->lock);
-	ap_wait(ap_sm_event(aq, AP_EVENT_POLL));
+	ap_wait(ap_sm_event(aq, AP_SM_EVENT_POLL));
 	spin_unlock_bh(&aq->lock);
 }
 EXPORT_SYMBOL(ap_queue_init_reply);
@@ -664,22 +775,30 @@
  * @aq: The AP device to queue the message to
  * @ap_msg: The message that is to be added
  */
-void ap_queue_message(struct ap_queue *aq, struct ap_message *ap_msg)
+int ap_queue_message(struct ap_queue *aq, struct ap_message *ap_msg)
 {
-	/* For asynchronous message handling a valid receive-callback
-	 * is required.
-	 */
+	int rc = 0;
+
+	/* msg needs to have a valid receive-callback */
 	BUG_ON(!ap_msg->receive);
 
 	spin_lock_bh(&aq->lock);
-	/* Queue the message. */
-	list_add_tail(&ap_msg->list, &aq->requestq);
-	aq->requestq_count++;
-	aq->total_request_count++;
-	atomic64_inc(&aq->card->total_request_count);
+
+	/* only allow to queue new messages if device state is ok */
+	if (aq->dev_state == AP_DEV_STATE_OPERATING) {
+		list_add_tail(&ap_msg->list, &aq->requestq);
+		aq->requestq_count++;
+		aq->total_request_count++;
+		atomic64_inc(&aq->card->total_request_count);
+	} else
+		rc = -ENODEV;
+
 	/* Send/receive as many request from the queue as possible. */
-	ap_wait(ap_sm_event_loop(aq, AP_EVENT_POLL));
+	ap_wait(ap_sm_event_loop(aq, AP_SM_EVENT_POLL));
+
 	spin_unlock_bh(&aq->lock);
+
+	return rc;
 }
 EXPORT_SYMBOL(ap_queue_message);
 
@@ -750,8 +869,8 @@
 	spin_lock_bh(&aq->lock);
 	/* flush queue */
 	__ap_flush_queue(aq);
-	/* set REMOVE state to prevent new messages are queued in */
-	aq->state = AP_STATE_REMOVE;
+	/* move queue device state to SHUTDOWN in progress */
+	aq->dev_state = AP_DEV_STATE_SHUTDOWN;
 	spin_unlock_bh(&aq->lock);
 	del_timer_sync(&aq->timeout);
 }
@@ -759,23 +878,23 @@
 void ap_queue_remove(struct ap_queue *aq)
 {
 	/*
-	 * all messages have been flushed and the state is
-	 * AP_STATE_REMOVE. Now reset with zero which also
-	 * clears the irq registration and move the state
-	 * to AP_STATE_UNBOUND to signal that this queue
-	 * is not used by any driver currently.
+	 * all messages have been flushed and the device state
+	 * is SHUTDOWN. Now reset with zero which also clears
+	 * the irq registration and move the device state
+	 * to the initial value AP_DEV_STATE_UNINITIATED.
 	 */
 	spin_lock_bh(&aq->lock);
 	ap_zapq(aq->qid);
-	aq->state = AP_STATE_UNBOUND;
+	aq->dev_state = AP_DEV_STATE_UNINITIATED;
 	spin_unlock_bh(&aq->lock);
 }
 
 void ap_queue_init_state(struct ap_queue *aq)
 {
 	spin_lock_bh(&aq->lock);
-	aq->state = AP_STATE_RESET_START;
-	ap_wait(ap_sm_event(aq, AP_EVENT_POLL));
+	aq->dev_state = AP_DEV_STATE_OPERATING;
+	aq->sm_state = AP_SM_STATE_RESET_START;
+	ap_wait(ap_sm_event(aq, AP_SM_EVENT_POLL));
 	spin_unlock_bh(&aq->lock);
 }
 EXPORT_SYMBOL(ap_queue_init_state);
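
ap_queue_message() now reports -ENODEV instead of silently queueing when the device state is not OPERATING, so callers have to check the return value. A hedged caller sketch (example_submit() is illustrative only, not part of the patch):

static int example_submit(struct ap_queue *aq, struct ap_message *ap_msg)
{
	int rc;

	rc = ap_queue_message(aq, ap_msg);	/* 0 on success, -ENODEV otherwise */
	if (rc)
		AP_DBF_WARN("%s queue 0x%02x.%04x not operating\n", __func__,
			    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
	return rc;
}
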
diff --git a/drivers/s390/crypto/pkey_api.c b/drivers/s390/crypto/pkey_api.c
index 0658aa5..dd84995 100644
--- a/drivers/s390/crypto/pkey_api.c
+++ b/drivers/s390/crypto/pkey_api.c
@@ -25,13 +25,15 @@
 
 #include "zcrypt_api.h"
 #include "zcrypt_ccamisc.h"
+#include "zcrypt_ep11misc.h"
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("IBM Corporation");
 MODULE_DESCRIPTION("s390 protected key interface");
 
-#define KEYBLOBBUFSIZE 8192  /* key buffer size used for internal processing */
-#define MAXAPQNSINLIST 64    /* max 64 apqns within a apqn list */
+#define KEYBLOBBUFSIZE 8192	/* key buffer size used for internal processing */
+#define PROTKEYBLOBBUFSIZE 256	/* protected key buffer size used internal */
+#define MAXAPQNSINLIST 64	/* max 64 apqns within a apqn list */
 
 /*
  * debug feature data and functions
@@ -68,6 +70,17 @@
 	u8  protkey[MAXPROTKEYSIZE]; /* the protected key blob */
 } __packed;
 
+/* inside view of a clear key token (type 0x00 version 0x02) */
+struct clearaeskeytoken {
+	u8  type;	 /* 0x00 for PAES specific key tokens */
+	u8  res0[3];
+	u8  version;	 /* 0x02 for clear AES key token */
+	u8  res1[3];
+	u32 keytype;	 /* key type, one of the PKEY_KEYTYPE values */
+	u32 len;	 /* bytes actually stored in clearkey[] */
+	u8  clearkey[]; /* clear key value */
+} __packed;
+
 /*
  * Create a protected key from a clear key value.
  */
@@ -175,6 +188,73 @@
 }
 
 /*
+ * Construct EP11 key with given clear key value.
+ */
+static int pkey_clr2ep11key(const u8 *clrkey, size_t clrkeylen,
+			    u8 *keybuf, size_t *keybuflen)
+{
+	int i, rc;
+	u16 card, dom;
+	u32 nr_apqns, *apqns = NULL;
+
+	/* build a list of apqns suitable for ep11 keys with cpacf support */
+	rc = ep11_findcard2(&apqns, &nr_apqns, 0xFFFF, 0xFFFF,
+			    ZCRYPT_CEX7, EP11_API_V, NULL);
+	if (rc)
+		goto out;
+
+	/* go through the list of apqns and try to build an ep11 key */
+	for (rc = -ENODEV, i = 0; i < nr_apqns; i++) {
+		card = apqns[i] >> 16;
+		dom = apqns[i] & 0xFFFF;
+		rc = ep11_clr2keyblob(card, dom, clrkeylen * 8,
+				      0, clrkey, keybuf, keybuflen);
+		if (rc == 0)
+			break;
+	}
+
+out:
+	kfree(apqns);
+	if (rc)
+		DEBUG_DBG("%s failed rc=%d\n", __func__, rc);
+	return rc;
+}
+
+/*
+ * Find card and transform EP11 secure key into protected key.
+ */
+static int pkey_ep11key2pkey(const u8 *key, struct pkey_protkey *pkey)
+{
+	int i, rc;
+	u16 card, dom;
+	u32 nr_apqns, *apqns = NULL;
+	struct ep11keyblob *kb = (struct ep11keyblob *) key;
+
+	/* build a list of apqns suitable for this key */
+	rc = ep11_findcard2(&apqns, &nr_apqns, 0xFFFF, 0xFFFF,
+			    ZCRYPT_CEX7, EP11_API_V, kb->wkvp);
+	if (rc)
+		goto out;
+
+	/* go through the list of apqns and try to derive a pkey */
+	for (rc = -ENODEV, i = 0; i < nr_apqns; i++) {
+		card = apqns[i] >> 16;
+		dom = apqns[i] & 0xFFFF;
+		pkey->len = sizeof(pkey->protkey);
+		rc = ep11_kblob2protkey(card, dom, key, kb->head.len,
+					pkey->protkey, &pkey->len, &pkey->type);
+		if (rc == 0)
+			break;
+	}
+
+out:
+	kfree(apqns);
+	if (rc)
+		DEBUG_DBG("%s failed rc=%d\n", __func__, rc);
+	return rc;
+}
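
Both helper loops above unpack the u32 entries returned by ep11_findcard2():
each entry carries the card index in the upper 16 bits and the domain in the
lower 16. A small standalone illustration of that packing (the macro name
here is local to the example, not the kernel's AP_MKQID family, which uses a
narrower 16-bit qid):

#include <stdio.h>

/* findcard2-style packing: card index in bits 31..16, domain in 15..0. */
#define PACK_APQN(card, dom)  ((((unsigned int)(card) & 0xFFFF) << 16) | \
			       ((unsigned int)(dom) & 0xFFFF))

int main(void)
{
	unsigned int apqn = PACK_APQN(10, 47);
	unsigned int card = apqn >> 16;	   /* as in the loops above */
	unsigned int dom = apqn & 0xFFFF;

	printf("apqn=0x%08x card=%u dom=%u\n", apqn, card, dom);
	return 0;
}
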
+
+/*
  * Verify key and give back some info about the key.
  */
 static int pkey_verifykey(const struct pkey_seckey *seckey,
@@ -307,26 +387,96 @@
 static int pkey_nonccatok2pkey(const u8 *key, u32 keylen,
 			       struct pkey_protkey *protkey)
 {
+	int rc = -EINVAL;
+	u8 *tmpbuf = NULL;
 	struct keytoken_header *hdr = (struct keytoken_header *)key;
-	struct protaeskeytoken *t;
 
 	switch (hdr->version) {
-	case TOKVER_PROTECTED_KEY:
-		if (keylen != sizeof(struct protaeskeytoken))
-			return -EINVAL;
+	case TOKVER_PROTECTED_KEY: {
+		struct protaeskeytoken *t;
 
+		if (keylen != sizeof(struct protaeskeytoken))
+			goto out;
 		t = (struct protaeskeytoken *)key;
 		protkey->len = t->len;
 		protkey->type = t->keytype;
 		memcpy(protkey->protkey, t->protkey,
 		       sizeof(protkey->protkey));
+		rc = pkey_verifyprotkey(protkey);
+		break;
+	}
+	case TOKVER_CLEAR_KEY: {
+		struct clearaeskeytoken *t;
+		struct pkey_clrkey ckey;
+		union u_tmpbuf {
+			u8 skey[SECKEYBLOBSIZE];
+			u8 ep11key[MAXEP11AESKEYBLOBSIZE];
+		};
+		size_t tmpbuflen = sizeof(union u_tmpbuf);
 
-		return pkey_verifyprotkey(protkey);
+		if (keylen < sizeof(struct clearaeskeytoken))
+			goto out;
+		t = (struct clearaeskeytoken *)key;
+		if (keylen != sizeof(*t) + t->len)
+			goto out;
+		if ((t->keytype == PKEY_KEYTYPE_AES_128 && t->len == 16)
+		    || (t->keytype == PKEY_KEYTYPE_AES_192 && t->len == 24)
+		    || (t->keytype == PKEY_KEYTYPE_AES_256 && t->len == 32))
+			memcpy(ckey.clrkey, t->clearkey, t->len);
+		else
+			goto out;
+		/* alloc temp key buffer space */
+		tmpbuf = kmalloc(tmpbuflen, GFP_ATOMIC);
+		if (!tmpbuf) {
+			rc = -ENOMEM;
+			goto out;
+		}
+		/* try direct way with the PCKMO instruction */
+		rc = pkey_clr2protkey(t->keytype, &ckey, protkey);
+		if (rc == 0)
+			break;
+		/* PCKMO failed, so try the CCA secure key way */
+		rc = cca_clr2seckey(0xFFFF, 0xFFFF, t->keytype,
+				    ckey.clrkey, tmpbuf);
+		if (rc == 0)
+			rc = pkey_skey2pkey(tmpbuf, protkey);
+		if (rc == 0)
+			break;
+		/* if the CCA way also failed, let's try via EP11 */
+		rc = pkey_clr2ep11key(ckey.clrkey, t->len,
+				      tmpbuf, &tmpbuflen);
+		if (rc == 0)
+			rc = pkey_ep11key2pkey(tmpbuf, protkey);
+		/* now we should really have a protected key */
+		if (rc == 0)
+			break;
+		DEBUG_ERR("%s unable to build protected key from clear\n",
+			  __func__);
+		break;
+	}
+	case TOKVER_EP11_AES: {
+		/* check ep11 key for exportable as protected key */
+		rc = ep11_check_aes_key(debug_info, 3, key, keylen, 1);
+		if (rc)
+			goto out;
+		rc = pkey_ep11key2pkey(key, protkey);
+		break;
+	}
+	case TOKVER_EP11_AES_WITH_HEADER:
+		/* check ep11 key with header for exportable as protected key */
+		rc = ep11_check_aes_key_with_hdr(debug_info, 3, key, keylen, 1);
+		if (rc)
+			goto out;
+		rc = pkey_ep11key2pkey(key + sizeof(struct ep11kblob_header),
+				       protkey);
+		break;
 	default:
 		DEBUG_ERR("%s unknown/unsupported non-CCA token version %d\n",
 			  __func__, hdr->version);
-		return -EINVAL;
+		rc = -EINVAL;
 	}
+
+out:
+	kfree(tmpbuf);
+	return rc;
 }
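
The TOKVER_CLEAR_KEY branch above is a three-stage fallback: the cheap PCKMO
instruction first, then a detour through a CCA secure key, then through an
EP11 key blob, and an error is logged only when every stage failed. A
compilable sketch of that control flow with the converters reduced to stubs:

#include <errno.h>
#include <stdio.h>

/* Stubs standing in for pkey_clr2protkey(), cca_clr2seckey()+pkey_skey2pkey()
 * and pkey_clr2ep11key()+pkey_ep11key2pkey(). */
static int try_pckmo(void) { return -ENODEV; }	/* no CPACF support */
static int try_cca(void)   { return -ENODEV; }	/* no CCA card */
static int try_ep11(void)  { return 0; }	/* EP11 card works */

static int clrkey_to_protkey(void)
{
	int rc;

	rc = try_pckmo();	/* cheapest: direct CPU instruction */
	if (rc == 0)
		return 0;
	rc = try_cca();		/* fall back: clear -> CCA secure -> prot */
	if (rc == 0)
		return 0;
	rc = try_ep11();	/* last resort: clear -> EP11 blob -> prot */
	return rc;		/* only log an error if all three failed */
}

int main(void)
{
	printf("rc=%d\n", clrkey_to_protkey());
	return 0;
}
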
 
 /*
@@ -405,6 +555,10 @@
 		if (*keybufsize < SECKEYBLOBSIZE)
 			return -EINVAL;
 		break;
+	case PKEY_TYPE_EP11:
+		if (*keybufsize < MINEP11AESKEYBLOBSIZE)
+			return -EINVAL;
+		break;
 	default:
 		return -EINVAL;
 	}
@@ -421,7 +575,10 @@
 	for (i = 0, rc = -ENODEV; i < nr_apqns; i++) {
 		card = apqns[i].card;
 		dom = apqns[i].domain;
-		if (ktype == PKEY_TYPE_CCA_DATA) {
+		if (ktype == PKEY_TYPE_EP11) {
+			rc = ep11_genaeskey(card, dom, ksize, kflags,
+					    keybuf, keybufsize);
+		} else if (ktype == PKEY_TYPE_CCA_DATA) {
 			rc = cca_genseckey(card, dom, ksize, keybuf);
 			*keybufsize = (rc ? 0 : SECKEYBLOBSIZE);
 		} else /* TOKVER_CCA_VLSC */
@@ -452,6 +609,10 @@
 		if (*keybufsize < SECKEYBLOBSIZE)
 			return -EINVAL;
 		break;
+	case PKEY_TYPE_EP11:
+		if (*keybufsize < MINEP11AESKEYBLOBSIZE)
+			return -EINVAL;
+		break;
 	default:
 		return -EINVAL;
 	}
@@ -468,7 +629,10 @@
 	for (i = 0, rc = -ENODEV; i < nr_apqns; i++) {
 		card = apqns[i].card;
 		dom = apqns[i].domain;
-		if (ktype == PKEY_TYPE_CCA_DATA) {
+		if (ktype == PKEY_TYPE_EP11) {
+			rc = ep11_clr2keyblob(card, dom, ksize, kflags,
+					      clrkey, keybuf, keybufsize);
+		} else if (ktype == PKEY_TYPE_CCA_DATA) {
 			rc = cca_clr2seckey(card, dom, ksize,
 					    clrkey, keybuf);
 			*keybufsize = (rc ? 0 : SECKEYBLOBSIZE);
@@ -491,11 +655,11 @@
 	u32 _nr_apqns, *_apqns = NULL;
 	struct keytoken_header *hdr = (struct keytoken_header *)key;
 
-	if (keylen < sizeof(struct keytoken_header) ||
-	    hdr->type != TOKTYPE_CCA_INTERNAL)
+	if (keylen < sizeof(struct keytoken_header))
 		return -EINVAL;
 
-	if (hdr->version == TOKVER_CCA_AES) {
+	if (hdr->type == TOKTYPE_CCA_INTERNAL
+	    && hdr->version == TOKVER_CCA_AES) {
 		struct secaeskeytoken *t = (struct secaeskeytoken *)key;
 
 		rc = cca_check_secaeskeytoken(debug_info, 3, key, 0);
@@ -507,13 +671,14 @@
 			*ksize = (enum pkey_key_size) t->bitsize;
 
 		rc = cca_findcard2(&_apqns, &_nr_apqns, *cardnr, *domain,
-				   ZCRYPT_CEX3C, t->mkvp, 0, 1);
+				   ZCRYPT_CEX3C, AES_MK_SET, t->mkvp, 0, 1);
 		if (rc == 0 && flags)
 			*flags = PKEY_FLAGS_MATCH_CUR_MKVP;
 		if (rc == -ENODEV) {
 			rc = cca_findcard2(&_apqns, &_nr_apqns,
 					   *cardnr, *domain,
-					   ZCRYPT_CEX3C, 0, t->mkvp, 1);
+					   ZCRYPT_CEX3C, AES_MK_SET,
+					   0, t->mkvp, 1);
 			if (rc == 0 && flags)
 				*flags = PKEY_FLAGS_MATCH_ALT_MKVP;
 		}
@@ -523,7 +688,8 @@
 		*cardnr = ((struct pkey_apqn *)_apqns)->card;
 		*domain = ((struct pkey_apqn *)_apqns)->domain;
 
-	} else if (hdr->version == TOKVER_CCA_VLSC) {
+	} else if (hdr->type == TOKTYPE_CCA_INTERNAL
+		   && hdr->version == TOKVER_CCA_VLSC) {
 		struct cipherkeytoken *t = (struct cipherkeytoken *)key;
 
 		rc = cca_check_secaescipherkey(debug_info, 3, key, 0, 1);
@@ -542,13 +708,14 @@
 		}
 
 		rc = cca_findcard2(&_apqns, &_nr_apqns, *cardnr, *domain,
-				   ZCRYPT_CEX6, t->mkvp0, 0, 1);
+				   ZCRYPT_CEX6, AES_MK_SET, t->mkvp0, 0, 1);
 		if (rc == 0 && flags)
 			*flags = PKEY_FLAGS_MATCH_CUR_MKVP;
 		if (rc == -ENODEV) {
 			rc = cca_findcard2(&_apqns, &_nr_apqns,
 					   *cardnr, *domain,
-					   ZCRYPT_CEX6, 0, t->mkvp0, 1);
+					   ZCRYPT_CEX6, AES_MK_SET,
+					   0, t->mkvp0, 1);
 			if (rc == 0 && flags)
 				*flags = PKEY_FLAGS_MATCH_ALT_MKVP;
 		}
@@ -558,6 +725,29 @@
 		*cardnr = ((struct pkey_apqn *)_apqns)->card;
 		*domain = ((struct pkey_apqn *)_apqns)->domain;
 
+	} else if (hdr->type == TOKTYPE_NON_CCA
+		   && hdr->version == TOKVER_EP11_AES) {
+		struct ep11keyblob *kb = (struct ep11keyblob *)key;
+
+		rc = ep11_check_aes_key(debug_info, 3, key, keylen, 1);
+		if (rc)
+			goto out;
+		if (ktype)
+			*ktype = PKEY_TYPE_EP11;
+		if (ksize)
+			*ksize = kb->head.keybitlen;
+
+		rc = ep11_findcard2(&_apqns, &_nr_apqns, *cardnr, *domain,
+				    ZCRYPT_CEX7, EP11_API_V, kb->wkvp);
+		if (rc)
+			goto out;
+
+		if (flags)
+			*flags = PKEY_FLAGS_MATCH_CUR_MKVP;
+
+		*cardnr = ((struct pkey_apqn *)_apqns)->card;
+		*domain = ((struct pkey_apqn *)_apqns)->domain;
+
 	} else
 		rc = -EINVAL;
 
@@ -580,30 +770,32 @@
 	if (keylen < sizeof(struct keytoken_header))
 		return -EINVAL;
 
-	switch (hdr->type) {
-	case TOKTYPE_NON_CCA:
-		return pkey_nonccatok2pkey(key, keylen, pkey);
-	case TOKTYPE_CCA_INTERNAL:
-		switch (hdr->version) {
-		case TOKVER_CCA_AES:
+	if (hdr->type == TOKTYPE_CCA_INTERNAL) {
+		if (hdr->version == TOKVER_CCA_AES) {
 			if (keylen != sizeof(struct secaeskeytoken))
 				return -EINVAL;
 			if (cca_check_secaeskeytoken(debug_info, 3, key, 0))
 				return -EINVAL;
-			break;
-		case TOKVER_CCA_VLSC:
+		} else if (hdr->version == TOKVER_CCA_VLSC) {
 			if (keylen < hdr->len || keylen > MAXCCAVLSCTOKENSIZE)
 				return -EINVAL;
 			if (cca_check_secaescipherkey(debug_info, 3, key, 0, 1))
 				return -EINVAL;
-			break;
-		default:
+		} else {
 			DEBUG_ERR("%s unknown CCA internal token version %d\n",
 				  __func__, hdr->version);
 			return -EINVAL;
 		}
-		break;
-	default:
+	} else if (hdr->type == TOKTYPE_NON_CCA) {
+		if (hdr->version == TOKVER_EP11_AES) {
+			if (keylen < sizeof(struct ep11keyblob))
+				return -EINVAL;
+			if (ep11_check_aes_key(debug_info, 3, key, keylen, 1))
+				return -EINVAL;
+		} else {
+			return pkey_nonccatok2pkey(key, keylen, pkey);
+		}
+	} else {
 		DEBUG_ERR("%s unknown/unsupported blob type %d\n",
 			  __func__, hdr->type);
 		return -EINVAL;
@@ -613,12 +805,22 @@
 	for (i = 0, rc = -ENODEV; i < nr_apqns; i++) {
 		card = apqns[i].card;
 		dom = apqns[i].domain;
-		if (hdr->version == TOKVER_CCA_AES)
+		if (hdr->type == TOKTYPE_CCA_INTERNAL
+		    && hdr->version == TOKVER_CCA_AES)
 			rc = cca_sec2protkey(card, dom, key, pkey->protkey,
 					     &pkey->len, &pkey->type);
-		else /* TOKVER_CCA_VLSC */
+		else if (hdr->type == TOKTYPE_CCA_INTERNAL
+			 && hdr->version == TOKVER_CCA_VLSC)
 			rc = cca_cipher2protkey(card, dom, key, pkey->protkey,
 						&pkey->len, &pkey->type);
+		else { /* EP11 AES secure key blob */
+			struct ep11keyblob *kb = (struct ep11keyblob *) key;
+
+			pkey->len = sizeof(pkey->protkey);
+			rc = ep11_kblob2protkey(card, dom, key, kb->head.len,
+						pkey->protkey, &pkey->len,
+						&pkey->type);
+		}
 		if (rc == 0)
 			break;
 	}
@@ -629,16 +831,48 @@
 static int pkey_apqns4key(const u8 *key, size_t keylen, u32 flags,
 			  struct pkey_apqn *apqns, size_t *nr_apqns)
 {
-	int rc = EINVAL;
+	int rc;
 	u32 _nr_apqns, *_apqns = NULL;
 	struct keytoken_header *hdr = (struct keytoken_header *)key;
 
-	if (keylen < sizeof(struct keytoken_header) ||
-	    hdr->type != TOKTYPE_CCA_INTERNAL ||
-	    flags == 0)
+	if (keylen < sizeof(struct keytoken_header) || flags == 0)
 		return -EINVAL;
 
-	if (hdr->version == TOKVER_CCA_AES || hdr->version == TOKVER_CCA_VLSC) {
+	if (hdr->type == TOKTYPE_NON_CCA
+	    && (hdr->version == TOKVER_EP11_AES_WITH_HEADER
+		|| hdr->version == TOKVER_EP11_ECC_WITH_HEADER)
+	    && is_ep11_keyblob(key + sizeof(struct ep11kblob_header))) {
+		int minhwtype = 0, api = 0;
+		struct ep11keyblob *kb = (struct ep11keyblob *)
+			(key + sizeof(struct ep11kblob_header));
+
+		if (flags != PKEY_FLAGS_MATCH_CUR_MKVP)
+			return -EINVAL;
+		if (kb->attr & EP11_BLOB_PKEY_EXTRACTABLE) {
+			minhwtype = ZCRYPT_CEX7;
+			api = EP11_API_V;
+		}
+		rc = ep11_findcard2(&_apqns, &_nr_apqns, 0xFFFF, 0xFFFF,
+				    minhwtype, api, kb->wkvp);
+		if (rc)
+			goto out;
+	} else if (hdr->type == TOKTYPE_NON_CCA
+		   && hdr->version == TOKVER_EP11_AES
+		   && is_ep11_keyblob(key)) {
+		int minhwtype = 0, api = 0;
+		struct ep11keyblob *kb = (struct ep11keyblob *) key;
+
+		if (flags != PKEY_FLAGS_MATCH_CUR_MKVP)
+			return -EINVAL;
+		if (kb->attr & EP11_BLOB_PKEY_EXTRACTABLE) {
+			minhwtype = ZCRYPT_CEX7;
+			api = EP11_API_V;
+		}
+		rc = ep11_findcard2(&_apqns, &_nr_apqns, 0xFFFF, 0xFFFF,
+				    minhwtype, api, kb->wkvp);
+		if (rc)
+			goto out;
+	} else if (hdr->type == TOKTYPE_CCA_INTERNAL) {
 		int minhwtype = ZCRYPT_CEX3C;
 		u64 cur_mkvp = 0, old_mkvp = 0;
 
@@ -649,7 +883,7 @@
 				cur_mkvp = t->mkvp;
 			if (flags & PKEY_FLAGS_MATCH_ALT_MKVP)
 				old_mkvp = t->mkvp;
-		} else {
+		} else if (hdr->version == TOKVER_CCA_VLSC) {
 			struct cipherkeytoken *t = (struct cipherkeytoken *)key;
 
 			minhwtype = ZCRYPT_CEX6;
@@ -657,19 +891,43 @@
 				cur_mkvp = t->mkvp0;
 			if (flags & PKEY_FLAGS_MATCH_ALT_MKVP)
 				old_mkvp = t->mkvp0;
+		} else {
+			/* unknown cca internal token type */
+			return -EINVAL;
 		}
 		rc = cca_findcard2(&_apqns, &_nr_apqns, 0xFFFF, 0xFFFF,
-				   minhwtype, cur_mkvp, old_mkvp, 1);
+				   minhwtype, AES_MK_SET,
+				   cur_mkvp, old_mkvp, 1);
 		if (rc)
 			goto out;
-		if (apqns) {
-			if (*nr_apqns < _nr_apqns)
-				rc = -ENOSPC;
-			else
-				memcpy(apqns, _apqns, _nr_apqns * sizeof(u32));
+	} else if (hdr->type == TOKTYPE_CCA_INTERNAL_PKA) {
+		u64 cur_mkvp = 0, old_mkvp = 0;
+		struct eccprivkeytoken *t = (struct eccprivkeytoken *)key;
+
+		if (t->secid == 0x20) {
+			if (flags & PKEY_FLAGS_MATCH_CUR_MKVP)
+				cur_mkvp = t->mkvp;
+			if (flags & PKEY_FLAGS_MATCH_ALT_MKVP)
+				old_mkvp = t->mkvp;
+		} else {
+			/* unknown cca internal 2 token type */
+			return -EINVAL;
 		}
-		*nr_apqns = _nr_apqns;
+		rc = cca_findcard2(&_apqns, &_nr_apqns, 0xFFFF, 0xFFFF,
+				   ZCRYPT_CEX7, APKA_MK_SET,
+				   cur_mkvp, old_mkvp, 1);
+		if (rc)
+			goto out;
+	} else
+		return -EINVAL;
+
+	if (apqns) {
+		if (*nr_apqns < _nr_apqns)
+			rc = -ENOSPC;
+		else
+			memcpy(apqns, _apqns, _nr_apqns * sizeof(u32));
 	}
+	*nr_apqns = _nr_apqns;
 
 out:
 	kfree(_apqns);
@@ -680,7 +938,7 @@
 			      u8 cur_mkvp[32], u8 alt_mkvp[32], u32 flags,
 			      struct pkey_apqn *apqns, size_t *nr_apqns)
 {
-	int rc = -EINVAL;
+	int rc;
 	u32 _nr_apqns, *_apqns = NULL;
 
 	if (ktype == PKEY_TYPE_CCA_DATA || ktype == PKEY_TYPE_CCA_CIPHER) {
@@ -694,61 +952,174 @@
 		if (ktype == PKEY_TYPE_CCA_CIPHER)
 			minhwtype = ZCRYPT_CEX6;
 		rc = cca_findcard2(&_apqns, &_nr_apqns, 0xFFFF, 0xFFFF,
-				   minhwtype, cur_mkvp, old_mkvp, 1);
+				   minhwtype, AES_MK_SET,
+				   cur_mkvp, old_mkvp, 1);
 		if (rc)
 			goto out;
-		if (apqns) {
-			if (*nr_apqns < _nr_apqns)
-				rc = -ENOSPC;
-			else
-				memcpy(apqns, _apqns, _nr_apqns * sizeof(u32));
-		}
-		*nr_apqns = _nr_apqns;
+	} else if (ktype == PKEY_TYPE_CCA_ECC) {
+		u64 cur_mkvp = 0, old_mkvp = 0;
+
+		if (flags & PKEY_FLAGS_MATCH_CUR_MKVP)
+			cur_mkvp = *((u64 *) cur_mkvp);
+		if (flags & PKEY_FLAGS_MATCH_ALT_MKVP)
+			old_mkvp = *((u64 *) alt_mkvp);
+		rc = cca_findcard2(&_apqns, &_nr_apqns, 0xFFFF, 0xFFFF,
+				   ZCRYPT_CEX7, APKA_MK_SET,
+				   cur_mkvp, old_mkvp, 1);
+		if (rc)
+			goto out;
+
+	} else if (ktype == PKEY_TYPE_EP11 ||
+		   ktype == PKEY_TYPE_EP11_AES ||
+		   ktype == PKEY_TYPE_EP11_ECC) {
+		u8 *wkvp = NULL;
+
+		if (flags & PKEY_FLAGS_MATCH_CUR_MKVP)
+			wkvp = cur_mkvp;
+		rc = ep11_findcard2(&_apqns, &_nr_apqns, 0xFFFF, 0xFFFF,
+				    ZCRYPT_CEX7, EP11_API_V, wkvp);
+		if (rc)
+			goto out;
+
+	} else
+		return -EINVAL;
+
+	if (apqns) {
+		if (*nr_apqns < _nr_apqns)
+			rc = -ENOSPC;
+		else
+			memcpy(apqns, _apqns, _nr_apqns * sizeof(u32));
 	}
+	*nr_apqns = _nr_apqns;
 
 out:
 	kfree(_apqns);
 	return rc;
 }
 
+static int pkey_keyblob2pkey3(const struct pkey_apqn *apqns, size_t nr_apqns,
+			      const u8 *key, size_t keylen, u32 *protkeytype,
+			      u8 *protkey, u32 *protkeylen)
+{
+	int i, card, dom, rc;
+	struct keytoken_header *hdr = (struct keytoken_header *)key;
+
+	/* check for at least one apqn given */
+	if (!apqns || !nr_apqns)
+		return -EINVAL;
+
+	if (keylen < sizeof(struct keytoken_header))
+		return -EINVAL;
+
+	if (hdr->type == TOKTYPE_NON_CCA
+	    && hdr->version == TOKVER_EP11_AES_WITH_HEADER
+	    && is_ep11_keyblob(key + sizeof(struct ep11kblob_header))) {
+		/* EP11 AES key blob with header */
+		if (ep11_check_aes_key_with_hdr(debug_info, 3, key, keylen, 1))
+			return -EINVAL;
+	} else if (hdr->type == TOKTYPE_NON_CCA
+		   && hdr->version == TOKVER_EP11_ECC_WITH_HEADER
+		   && is_ep11_keyblob(key + sizeof(struct ep11kblob_header))) {
+		/* EP11 ECC key blob with header */
+		if (ep11_check_ecc_key_with_hdr(debug_info, 3, key, keylen, 1))
+			return -EINVAL;
+	} else if (hdr->type == TOKTYPE_NON_CCA
+		   && hdr->version == TOKVER_EP11_AES
+		   && is_ep11_keyblob(key)) {
+		/* EP11 AES key blob with header in session field */
+		if (ep11_check_aes_key(debug_info, 3, key, keylen, 1))
+			return -EINVAL;
+	} else	if (hdr->type == TOKTYPE_CCA_INTERNAL) {
+		if (hdr->version == TOKVER_CCA_AES) {
+			/* CCA AES data key */
+			if (keylen != sizeof(struct secaeskeytoken))
+				return -EINVAL;
+			if (cca_check_secaeskeytoken(debug_info, 3, key, 0))
+				return -EINVAL;
+		} else if (hdr->version == TOKVER_CCA_VLSC) {
+			/* CCA AES cipher key */
+			if (keylen < hdr->len || keylen > MAXCCAVLSCTOKENSIZE)
+				return -EINVAL;
+			if (cca_check_secaescipherkey(debug_info, 3, key, 0, 1))
+				return -EINVAL;
+		} else {
+			DEBUG_ERR("%s unknown CCA internal token version %d\n",
+				  __func__, hdr->version);
+			return -EINVAL;
+		}
+	} else if (hdr->type == TOKTYPE_CCA_INTERNAL_PKA) {
+		/* CCA ECC (private) key */
+		if (keylen < sizeof(struct eccprivkeytoken))
+			return -EINVAL;
+		if (cca_check_sececckeytoken(debug_info, 3, key, keylen, 1))
+			return -EINVAL;
+	} else if (hdr->type == TOKTYPE_NON_CCA) {
+		struct pkey_protkey pkey;
+
+		rc = pkey_nonccatok2pkey(key, keylen, &pkey);
+		if (rc)
+			return rc;
+		memcpy(protkey, pkey.protkey, pkey.len);
+		*protkeylen = pkey.len;
+		*protkeytype = pkey.type;
+		return 0;
+	} else {
+		DEBUG_ERR("%s unknown/unsupported blob type %d\n",
+			  __func__, hdr->type);
+		return -EINVAL;
+	}
+
+	/* simply try all apqns from the list */
+	for (rc = -ENODEV, i = 0; rc && i < nr_apqns; i++) {
+		card = apqns[i].card;
+		dom = apqns[i].domain;
+		if (hdr->type == TOKTYPE_NON_CCA
+		    && (hdr->version == TOKVER_EP11_AES_WITH_HEADER
+			|| hdr->version == TOKVER_EP11_ECC_WITH_HEADER)
+		    && is_ep11_keyblob(key + sizeof(struct ep11kblob_header)))
+			rc = ep11_kblob2protkey(card, dom, key, hdr->len,
+						protkey, protkeylen, protkeytype);
+		else if (hdr->type == TOKTYPE_NON_CCA
+			 && hdr->version == TOKVER_EP11_AES
+			 && is_ep11_keyblob(key))
+			rc = ep11_kblob2protkey(card, dom, key, hdr->len,
+						protkey, protkeylen, protkeytype);
+		else if (hdr->type == TOKTYPE_CCA_INTERNAL &&
+			 hdr->version == TOKVER_CCA_AES)
+			rc = cca_sec2protkey(card, dom, key, protkey,
+					     protkeylen, protkeytype);
+		else if (hdr->type == TOKTYPE_CCA_INTERNAL &&
+			 hdr->version == TOKVER_CCA_VLSC)
+			rc = cca_cipher2protkey(card, dom, key, protkey,
+						protkeylen, protkeytype);
+		else if (hdr->type == TOKTYPE_CCA_INTERNAL_PKA)
+			rc = cca_ecc2protkey(card, dom, key, protkey,
+					     protkeylen, protkeytype);
+		else
+			return -EINVAL;
+	}
+
+	return rc;
+}
+
 /*
  * File io functions
  */
 
 static void *_copy_key_from_user(void __user *ukey, size_t keylen)
 {
-	void *kkey;
-
 	if (!ukey || keylen < MINKEYBLOBSIZE || keylen > KEYBLOBBUFSIZE)
 		return ERR_PTR(-EINVAL);
-	kkey = kmalloc(keylen, GFP_KERNEL);
-	if (!kkey)
-		return ERR_PTR(-ENOMEM);
-	if (copy_from_user(kkey, ukey, keylen)) {
-		kfree(kkey);
-		return ERR_PTR(-EFAULT);
-	}
 
-	return kkey;
+	return memdup_user(ukey, keylen);
 }
 
 static void *_copy_apqns_from_user(void __user *uapqns, size_t nr_apqns)
 {
-	void *kapqns = NULL;
-	size_t nbytes;
+	if (!uapqns || nr_apqns == 0)
+		return NULL;
 
-	if (uapqns && nr_apqns > 0) {
-		nbytes = nr_apqns * sizeof(struct pkey_apqn);
-		kapqns = kmalloc(nbytes, GFP_KERNEL);
-		if (!kapqns)
-			return ERR_PTR(-ENOMEM);
-		if (copy_from_user(kapqns, uapqns, nbytes)) {
-			kfree(kapqns);
-			return ERR_PTR(-EFAULT);
-		}
-	}
-
-	return kapqns;
+	return memdup_user(uapqns, nr_apqns * sizeof(struct pkey_apqn));
 }
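
memdup_user() folds the previous kmalloc()/copy_from_user()/kfree() sequence
into one call that returns either the duplicated buffer or an ERR_PTR. A
rough userspace analogue of the pattern, with errno standing in for ERR_PTR
(illustrative only):

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Userspace stand-in for the kernel's memdup_user(): duplicate a buffer,
 * reporting failure through errno instead of an ERR_PTR. */
static void *memdup(const void *src, size_t len)
{
	void *p = malloc(len);

	if (!p) {
		errno = ENOMEM;
		return NULL;
	}
	memcpy(p, src, len);	/* copy_from_user() in the kernel version */
	return p;
}

int main(void)
{
	const char key[] = "example key blob";
	char *kkey = memdup(key, sizeof(key));

	if (!kkey)
		return 1;
	printf("%s\n", kkey);
	free(kkey);
	return 0;
}
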
 
 static long pkey_unlocked_ioctl(struct file *filp, unsigned int cmd,
@@ -1131,6 +1502,55 @@
 		kfree(apqns);
 		break;
 	}
+	case PKEY_KBLOB2PROTK3: {
+		struct pkey_kblob2pkey3 __user *utp = (void __user *) arg;
+		struct pkey_kblob2pkey3 ktp;
+		struct pkey_apqn *apqns = NULL;
+		u32 protkeylen = PROTKEYBLOBBUFSIZE;
+		u8 *kkey, *protkey;
+
+		if (copy_from_user(&ktp, utp, sizeof(ktp)))
+			return -EFAULT;
+		apqns = _copy_apqns_from_user(ktp.apqns, ktp.apqn_entries);
+		if (IS_ERR(apqns))
+			return PTR_ERR(apqns);
+		kkey = _copy_key_from_user(ktp.key, ktp.keylen);
+		if (IS_ERR(kkey)) {
+			kfree(apqns);
+			return PTR_ERR(kkey);
+		}
+		protkey = kmalloc(protkeylen, GFP_KERNEL);
+		if (!protkey) {
+			kfree(apqns);
+			kfree(kkey);
+			return -ENOMEM;
+		}
+		rc = pkey_keyblob2pkey3(apqns, ktp.apqn_entries, kkey,
+					ktp.keylen, &ktp.pkeytype,
+					protkey, &protkeylen);
+		DEBUG_DBG("%s pkey_keyblob2pkey3()=%d\n", __func__, rc);
+		kfree(apqns);
+		kfree(kkey);
+		if (rc) {
+			kfree(protkey);
+			break;
+		}
+		if (ktp.pkey && ktp.pkeylen) {
+			if (protkeylen > ktp.pkeylen) {
+				kfree(protkey);
+				return -EINVAL;
+			}
+			if (copy_to_user(ktp.pkey, protkey, protkeylen)) {
+				kfree(protkey);
+				return -EFAULT;
+			}
+		}
+		kfree(protkey);
+		ktp.pkeylen = protkeylen;
+		if (copy_to_user(utp, &ktp, sizeof(ktp)))
+			return -EFAULT;
+		break;
+	}
 	default:
 		/* unknown/unsupported ioctl cmd */
 		return -ENOTTY;
@@ -1379,8 +1799,9 @@
 					    bool is_xts, char *buf, loff_t off,
 					    size_t count)
 {
-	size_t keysize;
-	int rc;
+	int i, rc, card, dom;
+	u32 nr_apqns, *apqns = NULL;
+	size_t keysize = CCACIPHERTOKENSIZE;
 
 	if (off != 0 || count < CCACIPHERTOKENSIZE)
 		return -EINVAL;
@@ -1388,22 +1809,31 @@
 		if (count < 2 * CCACIPHERTOKENSIZE)
 			return -EINVAL;
 
-	keysize = CCACIPHERTOKENSIZE;
-	rc = cca_gencipherkey(-1, -1, keybits, 0, buf, &keysize);
+	/* build a list of apqns able to generate a cipher key */
+	rc = cca_findcard2(&apqns, &nr_apqns, 0xFFFF, 0xFFFF,
+			   ZCRYPT_CEX6, 0, 0, 0, 0);
 	if (rc)
 		return rc;
-	memset(buf + keysize, 0, CCACIPHERTOKENSIZE - keysize);
+
+	memset(buf, 0, is_xts ? 2 * keysize : keysize);
+
+	/* simply try all apqns from the list */
+	for (i = 0, rc = -ENODEV; i < nr_apqns; i++) {
+		card = apqns[i] >> 16;
+		dom = apqns[i] & 0xFFFF;
+		rc = cca_gencipherkey(card, dom, keybits, 0, buf, &keysize);
+		if (rc == 0)
+			break;
+	}
+	if (rc)
+		return rc;
 
 	if (is_xts) {
 		keysize = CCACIPHERTOKENSIZE;
-		rc = cca_gencipherkey(-1, -1, keybits, 0,
-				      buf + CCACIPHERTOKENSIZE, &keysize);
-		if (rc)
-			return rc;
-		memset(buf + CCACIPHERTOKENSIZE + keysize, 0,
-		       CCACIPHERTOKENSIZE - keysize);
-
-		return 2 * CCACIPHERTOKENSIZE;
+		buf += CCACIPHERTOKENSIZE;
+		rc = cca_gencipherkey(card, dom, keybits, 0, buf, &keysize);
+		if (rc == 0)
+			return 2 * CCACIPHERTOKENSIZE;
 	}
 
 	return CCACIPHERTOKENSIZE;
@@ -1479,10 +1909,134 @@
 	.bin_attrs = ccacipher_attrs,
 };
 
+/*
+ * Sysfs attribute read function for all ep11 aes key binary attributes.
+ * The implementation can not deal with partial reads, because a new random
+ * secure key blob is generated with each read. In case of partial reads
+ * (i.e. off != 0 or count < key blob size) -EINVAL is returned.
+ * This function and the sysfs attributes using it provide EP11 key blobs
+ * padded to the upper limit of MAXEP11AESKEYBLOBSIZE which is currently
+ * 320 bytes.
+ */
+static ssize_t pkey_ep11_aes_attr_read(enum pkey_key_size keybits,
+				       bool is_xts, char *buf, loff_t off,
+				       size_t count)
+{
+	int i, rc, card, dom;
+	u32 nr_apqns, *apqns = NULL;
+	size_t keysize = MAXEP11AESKEYBLOBSIZE;
+
+	if (off != 0 || count < MAXEP11AESKEYBLOBSIZE)
+		return -EINVAL;
+	if (is_xts)
+		if (count < 2 * MAXEP11AESKEYBLOBSIZE)
+			return -EINVAL;
+
+	/* build a list of apqns able to generate a cipher key */
+	rc = ep11_findcard2(&apqns, &nr_apqns, 0xFFFF, 0xFFFF,
+			    ZCRYPT_CEX7, EP11_API_V, NULL);
+	if (rc)
+		return rc;
+
+	memset(buf, 0, is_xts ? 2 * keysize : keysize);
+
+	/* simply try all apqns from the list */
+	for (i = 0, rc = -ENODEV; i < nr_apqns; i++) {
+		card = apqns[i] >> 16;
+		dom = apqns[i] & 0xFFFF;
+		rc = ep11_genaeskey(card, dom, keybits, 0, buf, &keysize);
+		if (rc == 0)
+			break;
+	}
+	if (rc)
+		return rc;
+
+	if (is_xts) {
+		keysize = MAXEP11AESKEYBLOBSIZE;
+		buf += MAXEP11AESKEYBLOBSIZE;
+		rc = ep11_genaeskey(card, dom, keybits, 0, buf, &keysize);
+		if (rc == 0)
+			return 2 * MAXEP11AESKEYBLOBSIZE;
+	}
+
+	return MAXEP11AESKEYBLOBSIZE;
+}
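
Since each read of these attributes produces a fresh random key blob and
partial reads are rejected with -EINVAL, a consumer has to fetch the whole
blob in a single read(). A hedged userspace sketch -- the sysfs path is an
assumption derived from the "ep11" attribute group registered below:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

#define BLOBSIZE 320	/* MAXEP11AESKEYBLOBSIZE, per the comment above */

int main(void)
{
	/* Hypothetical path -- derived from the "ep11" attribute group;
	 * adjust to the actual pkey device location on your system. */
	const char *path = "/sys/devices/virtual/misc/pkey/ep11/ep11_aes_256";
	unsigned char blob[BLOBSIZE];
	ssize_t n;
	int fd = open(path, O_RDONLY);

	if (fd < 0)
		return 1;
	n = read(fd, blob, sizeof(blob));   /* must cover the full blob */
	close(fd);
	if (n != BLOBSIZE)
		return 1;	/* short/partial reads return -EINVAL */
	printf("got %zd byte EP11 AES key blob\n", n);
	return 0;
}
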
+
+static ssize_t ep11_aes_128_read(struct file *filp,
+				 struct kobject *kobj,
+				 struct bin_attribute *attr,
+				 char *buf, loff_t off,
+				 size_t count)
+{
+	return pkey_ep11_aes_attr_read(PKEY_SIZE_AES_128, false, buf,
+				       off, count);
+}
+
+static ssize_t ep11_aes_192_read(struct file *filp,
+				 struct kobject *kobj,
+				 struct bin_attribute *attr,
+				 char *buf, loff_t off,
+				 size_t count)
+{
+	return pkey_ep11_aes_attr_read(PKEY_SIZE_AES_192, false, buf,
+				       off, count);
+}
+
+static ssize_t ep11_aes_256_read(struct file *filp,
+				 struct kobject *kobj,
+				 struct bin_attribute *attr,
+				 char *buf, loff_t off,
+				 size_t count)
+{
+	return pkey_ep11_aes_attr_read(PKEY_SIZE_AES_256, false, buf,
+				       off, count);
+}
+
+static ssize_t ep11_aes_128_xts_read(struct file *filp,
+				     struct kobject *kobj,
+				     struct bin_attribute *attr,
+				     char *buf, loff_t off,
+				     size_t count)
+{
+	return pkey_ep11_aes_attr_read(PKEY_SIZE_AES_128, true, buf,
+				       off, count);
+}
+
+static ssize_t ep11_aes_256_xts_read(struct file *filp,
+				     struct kobject *kobj,
+				     struct bin_attribute *attr,
+				     char *buf, loff_t off,
+				     size_t count)
+{
+	return pkey_ep11_aes_attr_read(PKEY_SIZE_AES_256, true, buf,
+				       off, count);
+}
+
+static BIN_ATTR_RO(ep11_aes_128, MAXEP11AESKEYBLOBSIZE);
+static BIN_ATTR_RO(ep11_aes_192, MAXEP11AESKEYBLOBSIZE);
+static BIN_ATTR_RO(ep11_aes_256, MAXEP11AESKEYBLOBSIZE);
+static BIN_ATTR_RO(ep11_aes_128_xts, 2 * MAXEP11AESKEYBLOBSIZE);
+static BIN_ATTR_RO(ep11_aes_256_xts, 2 * MAXEP11AESKEYBLOBSIZE);
+
+static struct bin_attribute *ep11_attrs[] = {
+	&bin_attr_ep11_aes_128,
+	&bin_attr_ep11_aes_192,
+	&bin_attr_ep11_aes_256,
+	&bin_attr_ep11_aes_128_xts,
+	&bin_attr_ep11_aes_256_xts,
+	NULL
+};
+
+static struct attribute_group ep11_attr_group = {
+	.name	   = "ep11",
+	.bin_attrs = ep11_attrs,
+};
+
 static const struct attribute_group *pkey_attr_groups[] = {
 	&protkey_attr_group,
 	&ccadata_attr_group,
 	&ccacipher_attr_group,
+	&ep11_attr_group,
 	NULL,
 };
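
For reference, a hedged userspace sketch of the new PKEY_KBLOB2PROTK3 ioctl
wired up above. The struct and ioctl names come from the pkey UAPI header;
the key blob content and APQN are placeholders, and a real call needs a
valid CCA or EP11 secure key blob:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <asm/pkey.h>	/* struct pkey_kblob2pkey3, PKEY_KBLOB2PROTK3 */

int main(void)
{
	unsigned char keyblob[320];	/* placeholder: a real EP11/CCA blob */
	unsigned char protkey[256];	/* PROTKEYBLOBBUFSIZE in the driver */
	struct pkey_apqn apqn = { .card = 0, .domain = 0 };	/* placeholder */
	struct pkey_kblob2pkey3 ktp;
	int fd, rc;

	memset(keyblob, 0, sizeof(keyblob));
	memset(&ktp, 0, sizeof(ktp));
	ktp.apqns = &apqn;
	ktp.apqn_entries = 1;
	ktp.key = keyblob;
	ktp.keylen = sizeof(keyblob);
	ktp.pkey = protkey;
	ktp.pkeylen = sizeof(protkey);	/* in: buffer size, out: actual len */

	fd = open("/dev/pkey", O_RDWR);
	if (fd < 0)
		return 1;
	rc = ioctl(fd, PKEY_KBLOB2PROTK3, &ktp);
	close(fd);
	if (rc)
		return 1;	/* expected here: the blob is a placeholder */
	printf("protected key: %u bytes, type %u\n", ktp.pkeylen, ktp.pkeytype);
	return 0;
}
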
 
diff --git a/drivers/s390/crypto/vfio_ap_ops.c b/drivers/s390/crypto/vfio_ap_ops.c
index 1ec0114..72eb8f9 100644
--- a/drivers/s390/crypto/vfio_ap_ops.c
+++ b/drivers/s390/crypto/vfio_ap_ops.c
@@ -86,7 +86,7 @@
 		case AP_RESPONSE_RESET_IN_PROGRESS:
 			if (!status.irq_enabled)
 				return;
-			/* Fall through */
+			fallthrough;
 		case AP_RESPONSE_BUSY:
 			msleep(20);
 			break;
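
fallthrough is the kernel's pseudo-keyword for __attribute__((__fallthrough__))
(defined in <linux/compiler_attributes.h>) and replaces the old parsed
/* Fall through */ comments. A minimal standalone illustration of the
annotation, assuming GCC 7+ or a recent Clang:

#include <stdio.h>

/* The kernel defines this in <linux/compiler_attributes.h>; expanded here
 * so the example builds standalone. */
#define fallthrough __attribute__((__fallthrough__))

static const char *classify(int response)
{
	switch (response) {
	case 1:			/* e.g. AP_RESPONSE_RESET_IN_PROGRESS */
		puts("reset in progress");
		fallthrough;	/* deliberate: also treated as busy */
	case 2:			/* e.g. AP_RESPONSE_BUSY */
		return "busy";
	default:
		return "other";
	}
}

int main(void)
{
	printf("%s\n", classify(1));
	return 0;
}
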
diff --git a/drivers/s390/crypto/zcrypt_api.c b/drivers/s390/crypto/zcrypt_api.c
index ec41a8a..3b9eda3 100644
--- a/drivers/s390/crypto/zcrypt_api.c
+++ b/drivers/s390/crypto/zcrypt_api.c
@@ -25,6 +25,7 @@
 #include <linux/debugfs.h>
 #include <linux/cdev.h>
 #include <linux/ctype.h>
+#include <linux/capability.h>
 #include <asm/debug.h>
 
 #define CREATE_TRACE_POINTS
@@ -36,6 +37,7 @@
 #include "zcrypt_msgtype6.h"
 #include "zcrypt_msgtype50.h"
 #include "zcrypt_ccamisc.h"
+#include "zcrypt_ep11misc.h"
 
 /*
  * Module description.
@@ -601,13 +603,13 @@
 				       unsigned int pref_weight)
 {
 	if (!pref_zc)
-		return false;
+		return true;
 	weight += atomic_read(&zc->load);
 	pref_weight += atomic_read(&pref_zc->load);
 	if (weight == pref_weight)
-		return atomic64_read(&zc->card->total_request_count) >
+		return atomic64_read(&zc->card->total_request_count) <
 			atomic64_read(&pref_zc->card->total_request_count);
-	return weight > pref_weight;
+	return weight < pref_weight;
 }
 
 static inline bool zcrypt_queue_compare(struct zcrypt_queue *zq,
@@ -616,30 +618,39 @@
 					unsigned int pref_weight)
 {
 	if (!pref_zq)
-		return false;
+		return true;
 	weight += atomic_read(&zq->load);
 	pref_weight += atomic_read(&pref_zq->load);
 	if (weight == pref_weight)
-		return zq->queue->total_request_count >
+		return zq->queue->total_request_count <
 			pref_zq->queue->total_request_count;
-	return weight > pref_weight;
+	return weight < pref_weight;
 }
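
Note that this hunk inverts the contract of the two compare helpers: they now
return true when the candidate should become the new preference -- a missing
preference always loses, lower combined weight wins, and ties go to the
device with fewer total requests -- which is why the call sites below test
!zcrypt_card_compare()/!zcrypt_queue_compare() to skip a losing candidate. A
compilable sketch of a selection loop under that contract:

#include <stdbool.h>
#include <stdio.h>

struct card { unsigned int load, requests; };

/* Same contract as the reworked zcrypt_card_compare(): true means the
 * candidate should replace the current preference. */
static bool card_better(struct card *c, struct card *pref,
			unsigned int wgt, unsigned int pref_wgt)
{
	if (!pref)
		return true;
	wgt += c->load;
	pref_wgt += pref->load;
	if (wgt == pref_wgt)
		return c->requests < pref->requests;
	return wgt < pref_wgt;
}

int main(void)
{
	struct card cards[] = { { 5, 100 }, { 2, 50 }, { 2, 40 } };
	struct card *pref = NULL;
	unsigned int pref_wgt = 0, wgt = 10;	/* same speed rating here */

	for (unsigned int i = 0; i < 3; i++) {
		if (!card_better(&cards[i], pref, wgt, pref_wgt))
			continue;
		pref = &cards[i];
		pref_wgt = wgt;	/* the call sites store wgt + penalties */
	}
	printf("picked card with load=%u requests=%u\n",
	       pref->load, pref->requests);
	return 0;
}
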
 
 /*
  * zcrypt ioctls.
  */
 static long zcrypt_rsa_modexpo(struct ap_perms *perms,
+			       struct zcrypt_track *tr,
 			       struct ica_rsa_modexpo *mex)
 {
 	struct zcrypt_card *zc, *pref_zc;
 	struct zcrypt_queue *zq, *pref_zq;
-	unsigned int weight, pref_weight;
+	struct ap_message ap_msg;
+	unsigned int wgt = 0, pref_wgt = 0;
 	unsigned int func_code;
-	int qid = 0, rc = -ENODEV;
+	int cpen, qpen, qid = 0, rc = -ENODEV;
 	struct module *mod;
 
 	trace_s390_zcrypt_req(mex, TP_ICARSAMODEXPO);
 
+	ap_init_message(&ap_msg);
+
+#ifdef CONFIG_ZCRYPT_DEBUG
+	if (tr && tr->fi.cmd)
+		ap_msg.fi.cmd = tr->fi.cmd;
+#endif
+
 	if (mex->outputdatalength < mex->inputdatalength) {
 		func_code = 0;
 		rc = -EINVAL;
@@ -661,8 +672,9 @@
 	pref_zq = NULL;
 	spin_lock(&zcrypt_list_lock);
 	for_each_zcrypt_card(zc) {
-		/* Check for online accelarator and CCA cards */
-		if (!zc->online || !(zc->card->functions & 0x18000000))
+		/* Check for useable accelerator or CCA card */
+		if (!zc->online || !zc->card->config ||
+		    !(zc->card->functions & 0x18000000))
 			continue;
 		/* Check for size limits */
 		if (zc->min_mod_size > mex->inputdatalength ||
@@ -672,26 +684,35 @@
 		if (!zcrypt_check_card(perms, zc->card->id))
 			continue;
 		/* get weight index of the card device	*/
-		weight = zc->speed_rating[func_code];
-		if (zcrypt_card_compare(zc, pref_zc, weight, pref_weight))
+		wgt = zc->speed_rating[func_code];
+		/* penalty if this msg was previously sent via this card */
+		cpen = (tr && tr->again_counter && tr->last_qid &&
+			AP_QID_CARD(tr->last_qid) == zc->card->id) ?
+			TRACK_AGAIN_CARD_WEIGHT_PENALTY : 0;
+		if (!zcrypt_card_compare(zc, pref_zc, wgt + cpen, pref_wgt))
 			continue;
 		for_each_zcrypt_queue(zq, zc) {
-			/* check if device is online and eligible */
-			if (!zq->online || !zq->ops->rsa_modexpo)
+			/* check if device is useable and eligible */
+			if (!zq->online || !zq->ops->rsa_modexpo ||
+			    !zq->queue->config)
 				continue;
 			/* check if device node has admission for this queue */
 			if (!zcrypt_check_queue(perms,
 						AP_QID_QUEUE(zq->queue->qid)))
 				continue;
-			if (zcrypt_queue_compare(zq, pref_zq,
-						 weight, pref_weight))
+			/* penalty if the msg was previously sent at this qid */
+			qpen = (tr && tr->again_counter && tr->last_qid &&
+				tr->last_qid == zq->queue->qid) ?
+				TRACK_AGAIN_QUEUE_WEIGHT_PENALTY : 0;
+			if (!zcrypt_queue_compare(zq, pref_zq,
+						  wgt + cpen + qpen, pref_wgt))
 				continue;
 			pref_zc = zc;
 			pref_zq = zq;
-			pref_weight = weight;
+			pref_wgt = wgt + cpen + qpen;
 		}
 	}
-	pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, &mod, weight);
+	pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, &mod, wgt);
 	spin_unlock(&zcrypt_list_lock);
 
 	if (!pref_zq) {
@@ -700,30 +721,44 @@
 	}
 
 	qid = pref_zq->queue->qid;
-	rc = pref_zq->ops->rsa_modexpo(pref_zq, mex);
+	rc = pref_zq->ops->rsa_modexpo(pref_zq, mex, &ap_msg);
 
 	spin_lock(&zcrypt_list_lock);
-	zcrypt_drop_queue(pref_zc, pref_zq, mod, weight);
+	zcrypt_drop_queue(pref_zc, pref_zq, mod, wgt);
 	spin_unlock(&zcrypt_list_lock);
 
 out:
+	ap_release_message(&ap_msg);
+	if (tr) {
+		tr->last_rc = rc;
+		tr->last_qid = qid;
+	}
 	trace_s390_zcrypt_rep(mex, func_code, rc,
 			      AP_QID_CARD(qid), AP_QID_QUEUE(qid));
 	return rc;
 }
 
 static long zcrypt_rsa_crt(struct ap_perms *perms,
+			   struct zcrypt_track *tr,
 			   struct ica_rsa_modexpo_crt *crt)
 {
 	struct zcrypt_card *zc, *pref_zc;
 	struct zcrypt_queue *zq, *pref_zq;
-	unsigned int weight, pref_weight;
+	struct ap_message ap_msg;
+	unsigned int wgt = 0, pref_wgt = 0;
 	unsigned int func_code;
-	int qid = 0, rc = -ENODEV;
+	int cpen, qpen, qid = 0, rc = -ENODEV;
 	struct module *mod;
 
 	trace_s390_zcrypt_req(crt, TP_ICARSACRT);
 
+	ap_init_message(&ap_msg);
+
+#ifdef CONFIG_ZCRYPT_DEBUG
+	if (tr && tr->fi.cmd)
+		ap_msg.fi.cmd = tr->fi.cmd;
+#endif
+
 	if (crt->outputdatalength < crt->inputdatalength) {
 		func_code = 0;
 		rc = -EINVAL;
@@ -745,8 +780,9 @@
 	pref_zq = NULL;
 	spin_lock(&zcrypt_list_lock);
 	for_each_zcrypt_card(zc) {
-		/* Check for online accelarator and CCA cards */
-		if (!zc->online || !(zc->card->functions & 0x18000000))
+		/* Check for useable accelerator or CCA card */
+		if (!zc->online || !zc->card->config ||
+		    !(zc->card->functions & 0x18000000))
 			continue;
 		/* Check for size limits */
 		if (zc->min_mod_size > crt->inputdatalength ||
@@ -756,26 +792,35 @@
 		if (!zcrypt_check_card(perms, zc->card->id))
 			continue;
 		/* get weight index of the card device	*/
-		weight = zc->speed_rating[func_code];
-		if (zcrypt_card_compare(zc, pref_zc, weight, pref_weight))
+		wgt = zc->speed_rating[func_code];
+		/* penalty if this msg was previously sent via this card */
+		cpen = (tr && tr->again_counter && tr->last_qid &&
+			AP_QID_CARD(tr->last_qid) == zc->card->id) ?
+			TRACK_AGAIN_CARD_WEIGHT_PENALTY : 0;
+		if (!zcrypt_card_compare(zc, pref_zc, wgt + cpen, pref_wgt))
 			continue;
 		for_each_zcrypt_queue(zq, zc) {
-			/* check if device is online and eligible */
-			if (!zq->online || !zq->ops->rsa_modexpo_crt)
+			/* check if device is useable and eligible */
+			if (!zq->online || !zq->ops->rsa_modexpo_crt ||
+			    !zq->queue->config)
 				continue;
 			/* check if device node has admission for this queue */
 			if (!zcrypt_check_queue(perms,
 						AP_QID_QUEUE(zq->queue->qid)))
 				continue;
-			if (zcrypt_queue_compare(zq, pref_zq,
-						 weight, pref_weight))
+			/* penalty if the msg was previously sent at this qid */
+			qpen = (tr && tr->again_counter && tr->last_qid &&
+				tr->last_qid == zq->queue->qid) ?
+				TRACK_AGAIN_QUEUE_WEIGHT_PENALTY : 0;
+			if (!zcrypt_queue_compare(zq, pref_zq,
+						  wgt + cpen + qpen, pref_wgt))
 				continue;
 			pref_zc = zc;
 			pref_zq = zq;
-			pref_weight = weight;
+			pref_wgt = wgt + cpen + qpen;
 		}
 	}
-	pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, &mod, weight);
+	pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, &mod, wgt);
 	spin_unlock(&zcrypt_list_lock);
 
 	if (!pref_zq) {
@@ -784,35 +829,52 @@
 	}
 
 	qid = pref_zq->queue->qid;
-	rc = pref_zq->ops->rsa_modexpo_crt(pref_zq, crt);
+	rc = pref_zq->ops->rsa_modexpo_crt(pref_zq, crt, &ap_msg);
 
 	spin_lock(&zcrypt_list_lock);
-	zcrypt_drop_queue(pref_zc, pref_zq, mod, weight);
+	zcrypt_drop_queue(pref_zc, pref_zq, mod, wgt);
 	spin_unlock(&zcrypt_list_lock);
 
 out:
+	ap_release_message(&ap_msg);
+	if (tr) {
+		tr->last_rc = rc;
+		tr->last_qid = qid;
+	}
 	trace_s390_zcrypt_rep(crt, func_code, rc,
 			      AP_QID_CARD(qid), AP_QID_QUEUE(qid));
 	return rc;
 }
 
-static long _zcrypt_send_cprb(struct ap_perms *perms,
+static long _zcrypt_send_cprb(bool userspace, struct ap_perms *perms,
+			      struct zcrypt_track *tr,
 			      struct ica_xcRB *xcRB)
 {
 	struct zcrypt_card *zc, *pref_zc;
 	struct zcrypt_queue *zq, *pref_zq;
 	struct ap_message ap_msg;
-	unsigned int weight, pref_weight;
+	unsigned int wgt = 0, pref_wgt = 0;
 	unsigned int func_code;
 	unsigned short *domain, tdom;
-	int qid = 0, rc = -ENODEV;
+	int cpen, qpen, qid = 0, rc = -ENODEV;
 	struct module *mod;
 
 	trace_s390_zcrypt_req(xcRB, TB_ZSECSENDCPRB);
 
 	xcRB->status = 0;
 	ap_init_message(&ap_msg);
-	rc = get_cprb_fc(xcRB, &ap_msg, &func_code, &domain);
+
+#ifdef CONFIG_ZCRYPT_DEBUG
+	if (tr && tr->fi.cmd)
+		ap_msg.fi.cmd = tr->fi.cmd;
+	if (tr && tr->fi.action == AP_FI_ACTION_CCA_AGENT_FF) {
+		ZCRYPT_DBF_WARN("%s fi cmd 0x%04x: forcing invalid agent_ID 'FF'\n",
+				__func__, tr->fi.cmd);
+		xcRB->agent_ID = 0x4646;
+	}
+#endif
+
+	rc = get_cprb_fc(userspace, xcRB, &ap_msg, &func_code, &domain);
 	if (rc)
 		goto out;
 
@@ -821,7 +883,7 @@
 	 * domain but a control only domain, use the default domain as target.
 	 */
 	tdom = *domain;
-	if (tdom >= 0 && tdom < AP_DOMAINS &&
+	if (tdom < AP_DOMAINS &&
 	    !ap_test_config_usage_domain(tdom) &&
 	    ap_test_config_ctrl_domain(tdom) &&
 	    ap_domain_index >= 0)
@@ -831,8 +893,9 @@
 	pref_zq = NULL;
 	spin_lock(&zcrypt_list_lock);
 	for_each_zcrypt_card(zc) {
-		/* Check for online CCA cards */
-		if (!zc->online || !(zc->card->functions & 0x10000000))
+		/* Check for useable CCA card */
+		if (!zc->online || !zc->card->config ||
+		    !(zc->card->functions & 0x10000000))
 			continue;
 		/* Check for user selected CCA card */
 		if (xcRB->user_defined != AUTOSELECT &&
@@ -842,29 +905,38 @@
 		if (!zcrypt_check_card(perms, zc->card->id))
 			continue;
 		/* get weight index of the card device	*/
-		weight = speed_idx_cca(func_code) * zc->speed_rating[SECKEY];
-		if (zcrypt_card_compare(zc, pref_zc, weight, pref_weight))
+		wgt = speed_idx_cca(func_code) * zc->speed_rating[SECKEY];
+		/* penalty if this msg was previously sent via this card */
+		cpen = (tr && tr->again_counter && tr->last_qid &&
+			AP_QID_CARD(tr->last_qid) == zc->card->id) ?
+			TRACK_AGAIN_CARD_WEIGHT_PENALTY : 0;
+		if (!zcrypt_card_compare(zc, pref_zc, wgt + cpen, pref_wgt))
 			continue;
 		for_each_zcrypt_queue(zq, zc) {
-			/* check if device is online and eligible */
+			/* check if device is useable and eligible */
 			if (!zq->online ||
 			    !zq->ops->send_cprb ||
-			    (tdom != (unsigned short) AUTOSELECT &&
+			    !zq->queue->config ||
+			    (tdom != AUTOSEL_DOM &&
 			     tdom != AP_QID_QUEUE(zq->queue->qid)))
 				continue;
 			/* check if device node has admission for this queue */
 			if (!zcrypt_check_queue(perms,
 						AP_QID_QUEUE(zq->queue->qid)))
 				continue;
-			if (zcrypt_queue_compare(zq, pref_zq,
-						 weight, pref_weight))
+			/* penalty if the msg was previously sent at this qid */
+			qpen = (tr && tr->again_counter && tr->last_qid &&
+				tr->last_qid == zq->queue->qid) ?
+				TRACK_AGAIN_QUEUE_WEIGHT_PENALTY : 0;
+			if (!zcrypt_queue_compare(zq, pref_zq,
+						  wgt + cpen + qpen, pref_wgt))
 				continue;
 			pref_zc = zc;
 			pref_zq = zq;
-			pref_weight = weight;
+			pref_wgt = wgt + cpen + qpen;
 		}
 	}
-	pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, &mod, weight);
+	pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, &mod, wgt);
 	spin_unlock(&zcrypt_list_lock);
 
 	if (!pref_zq) {
@@ -874,17 +946,29 @@
 
 	/* in case of auto select, provide the correct domain */
 	qid = pref_zq->queue->qid;
-	if (*domain == (unsigned short) AUTOSELECT)
+	if (*domain == AUTOSEL_DOM)
 		*domain = AP_QID_QUEUE(qid);
 
-	rc = pref_zq->ops->send_cprb(pref_zq, xcRB, &ap_msg);
+#ifdef CONFIG_ZCRYPT_DEBUG
+	if (tr && tr->fi.action == AP_FI_ACTION_CCA_DOM_INVAL) {
+		ZCRYPT_DBF_WARN("%s fi cmd 0x%04x: forcing invalid domain\n",
+				__func__, tr->fi.cmd);
+		*domain = 99;
+	}
+#endif
+
+	rc = pref_zq->ops->send_cprb(userspace, pref_zq, xcRB, &ap_msg);
 
 	spin_lock(&zcrypt_list_lock);
-	zcrypt_drop_queue(pref_zc, pref_zq, mod, weight);
+	zcrypt_drop_queue(pref_zc, pref_zq, mod, wgt);
 	spin_unlock(&zcrypt_list_lock);
 
 out:
 	ap_release_message(&ap_msg);
+	if (tr) {
+		tr->last_rc = rc;
+		tr->last_qid = qid;
+	}
 	trace_s390_zcrypt_rep(xcRB, func_code, rc,
 			      AP_QID_CARD(qid), AP_QID_QUEUE(qid));
 	return rc;
@@ -892,7 +976,7 @@
 
 long zcrypt_send_cprb(struct ica_xcRB *xcRB)
 {
-	return _zcrypt_send_cprb(&ap_perms, xcRB);
+	return _zcrypt_send_cprb(false, &ap_perms, NULL, xcRB);
 }
 EXPORT_SYMBOL(zcrypt_send_cprb);
 
@@ -901,7 +985,7 @@
 				 struct ep11_target_dev *targets)
 {
 	while (target_num-- > 0) {
-		if (dev_id == targets->ap_id)
+		if (targets->ap_id == dev_id || targets->ap_id == AUTOSEL_AP)
 			return true;
 		targets++;
 	}
@@ -912,31 +996,40 @@
 				  unsigned short target_num,
 				  struct ep11_target_dev *targets)
 {
+	int card = AP_QID_CARD(dev_qid), dom = AP_QID_QUEUE(dev_qid);
+
 	while (target_num-- > 0) {
-		if (AP_MKQID(targets->ap_id, targets->dom_id) == dev_qid)
+		if ((targets->ap_id == card || targets->ap_id == AUTOSEL_AP) &&
+		    (targets->dom_id == dom || targets->dom_id == AUTOSEL_DOM))
 			return true;
 		targets++;
 	}
 	return false;
 }
 
-static long zcrypt_send_ep11_cprb(struct ap_perms *perms,
-				  struct ep11_urb *xcrb)
+static long _zcrypt_send_ep11_cprb(bool userspace, struct ap_perms *perms,
+				   struct zcrypt_track *tr,
+				   struct ep11_urb *xcrb)
 {
 	struct zcrypt_card *zc, *pref_zc;
 	struct zcrypt_queue *zq, *pref_zq;
 	struct ep11_target_dev *targets;
 	unsigned short target_num;
-	unsigned int weight, pref_weight;
+	unsigned int wgt = 0, pref_wgt = 0;
 	unsigned int func_code;
 	struct ap_message ap_msg;
-	int qid = 0, rc = -ENODEV;
+	int cpen, qpen, qid = 0, rc = -ENODEV;
 	struct module *mod;
 
 	trace_s390_zcrypt_req(xcrb, TP_ZSENDEP11CPRB);
 
 	ap_init_message(&ap_msg);
 
+#ifdef CONFIG_ZCRYPT_DEBUG
+	if (tr && tr->fi.cmd)
+		ap_msg.fi.cmd = tr->fi.cmd;
+#endif
+
 	target_num = (unsigned short) xcrb->targets_num;
 
 	/* empty list indicates autoselect (all available targets) */
@@ -952,7 +1045,7 @@
 		}
 
 		uptr = (struct ep11_target_dev __force __user *) xcrb->targets;
-		if (copy_from_user(targets, uptr,
+		if (z_copy_from_user(userspace, targets, uptr,
-				   target_num * sizeof(*targets))) {
+				     target_num * sizeof(*targets))) {
 			func_code = 0;
 			rc = -EFAULT;
@@ -960,7 +1053,7 @@
 		}
 	}
 
-	rc = get_ep11cprb_fc(xcrb, &ap_msg, &func_code);
+	rc = get_ep11cprb_fc(userspace, xcrb, &ap_msg, &func_code);
 	if (rc)
 		goto out_free;
 
@@ -968,8 +1061,9 @@
 	pref_zq = NULL;
 	spin_lock(&zcrypt_list_lock);
 	for_each_zcrypt_card(zc) {
-		/* Check for online EP11 cards */
-		if (!zc->online || !(zc->card->functions & 0x04000000))
+		/* Check for useable EP11 card */
+		if (!zc->online || !zc->card->config ||
+		    !(zc->card->functions & 0x04000000))
 			continue;
 		/* Check for user selected EP11 card */
 		if (targets &&
@@ -979,13 +1073,18 @@
 		if (!zcrypt_check_card(perms, zc->card->id))
 			continue;
 		/* get weight index of the card device	*/
-		weight = speed_idx_ep11(func_code) * zc->speed_rating[SECKEY];
-		if (zcrypt_card_compare(zc, pref_zc, weight, pref_weight))
+		wgt = speed_idx_ep11(func_code) * zc->speed_rating[SECKEY];
+		/* penalty if this msg was previously sent via this card */
+		cpen = (tr && tr->again_counter && tr->last_qid &&
+			AP_QID_CARD(tr->last_qid) == zc->card->id) ?
+			TRACK_AGAIN_CARD_WEIGHT_PENALTY : 0;
+		if (!zcrypt_card_compare(zc, pref_zc, wgt + cpen, pref_wgt))
 			continue;
 		for_each_zcrypt_queue(zq, zc) {
-			/* check if device is online and eligible */
+			/* check if device is useable and eligible */
 			if (!zq->online ||
 			    !zq->ops->send_ep11_cprb ||
+			    !zq->queue->config ||
 			    (targets &&
 			     !is_desired_ep11_queue(zq->queue->qid,
 						    target_num, targets)))
@@ -994,15 +1093,19 @@
 			if (!zcrypt_check_queue(perms,
 						AP_QID_QUEUE(zq->queue->qid)))
 				continue;
-			if (zcrypt_queue_compare(zq, pref_zq,
-						 weight, pref_weight))
+			/* penalty if the msg was previously sent at this qid */
+			qpen = (tr && tr->again_counter && tr->last_qid &&
+				tr->last_qid == zq->queue->qid) ?
+				TRACK_AGAIN_QUEUE_WEIGHT_PENALTY : 0;
+			if (!zcrypt_queue_compare(zq, pref_zq,
+						  wgt + cpen + qpen, pref_wgt))
 				continue;
 			pref_zc = zc;
 			pref_zq = zq;
-			pref_weight = weight;
+			pref_wgt = wgt + cpen + qpen;
 		}
 	}
-	pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, &mod, weight);
+	pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, &mod, wgt);
 	spin_unlock(&zcrypt_list_lock);
 
 	if (!pref_zq) {
@@ -1011,26 +1114,36 @@
 	}
 
 	qid = pref_zq->queue->qid;
-	rc = pref_zq->ops->send_ep11_cprb(pref_zq, xcrb, &ap_msg);
+	rc = pref_zq->ops->send_ep11_cprb(userspace, pref_zq, xcrb, &ap_msg);
 
 	spin_lock(&zcrypt_list_lock);
-	zcrypt_drop_queue(pref_zc, pref_zq, mod, weight);
+	zcrypt_drop_queue(pref_zc, pref_zq, mod, wgt);
 	spin_unlock(&zcrypt_list_lock);
 
 out_free:
 	kfree(targets);
 out:
 	ap_release_message(&ap_msg);
+	if (tr) {
+		tr->last_rc = rc;
+		tr->last_qid = qid;
+	}
 	trace_s390_zcrypt_rep(xcrb, func_code, rc,
 			      AP_QID_CARD(qid), AP_QID_QUEUE(qid));
 	return rc;
 }
 
+long zcrypt_send_ep11_cprb(struct ep11_urb *xcrb)
+{
+	return _zcrypt_send_ep11_cprb(false, &ap_perms, NULL, xcrb);
+}
+EXPORT_SYMBOL(zcrypt_send_ep11_cprb);
+
 static long zcrypt_rng(char *buffer)
 {
 	struct zcrypt_card *zc, *pref_zc;
 	struct zcrypt_queue *zq, *pref_zq;
-	unsigned int weight, pref_weight;
+	unsigned int wgt = 0, pref_wgt = 0;
 	unsigned int func_code;
 	struct ap_message ap_msg;
 	unsigned int domain;
@@ -1048,26 +1161,27 @@
 	pref_zq = NULL;
 	spin_lock(&zcrypt_list_lock);
 	for_each_zcrypt_card(zc) {
-		/* Check for online CCA cards */
-		if (!zc->online || !(zc->card->functions & 0x10000000))
+		/* Check for useable CCA card */
+		if (!zc->online || !zc->card->config ||
+		    !(zc->card->functions & 0x10000000))
 			continue;
 		/* get weight index of the card device	*/
-		weight = zc->speed_rating[func_code];
-		if (zcrypt_card_compare(zc, pref_zc, weight, pref_weight))
+		wgt = zc->speed_rating[func_code];
+		if (!zcrypt_card_compare(zc, pref_zc, wgt, pref_wgt))
 			continue;
 		for_each_zcrypt_queue(zq, zc) {
-			/* check if device is online and eligible */
-			if (!zq->online || !zq->ops->rng)
+			/* check if device is useable and eligible */
+			if (!zq->online || !zq->ops->rng ||
+			    !zq->queue->config)
 				continue;
-			if (zcrypt_queue_compare(zq, pref_zq,
-						 weight, pref_weight))
+			if (!zcrypt_queue_compare(zq, pref_zq, wgt, pref_wgt))
 				continue;
 			pref_zc = zc;
 			pref_zq = zq;
-			pref_weight = weight;
+			pref_wgt = wgt;
 		}
 	}
-	pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, &mod, weight);
+	pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, &mod, wgt);
 	spin_unlock(&zcrypt_list_lock);
 
 	if (!pref_zq) {
@@ -1079,7 +1193,7 @@
 	rc = pref_zq->ops->rng(pref_zq, buffer, &ap_msg);
 
 	spin_lock(&zcrypt_list_lock);
-	zcrypt_drop_queue(pref_zc, pref_zq, mod, weight);
+	zcrypt_drop_queue(pref_zc, pref_zq, mod, wgt);
 	spin_unlock(&zcrypt_list_lock);
 
 out:
@@ -1288,6 +1402,187 @@
 	return requestq_count;
 }
 
+static int icarsamodexpo_ioctl(struct ap_perms *perms, unsigned long arg)
+{
+	int rc;
+	struct zcrypt_track tr;
+	struct ica_rsa_modexpo mex;
+	struct ica_rsa_modexpo __user *umex = (void __user *) arg;
+
+	memset(&tr, 0, sizeof(tr));
+	if (copy_from_user(&mex, umex, sizeof(mex)))
+		return -EFAULT;
+
+#ifdef CONFIG_ZCRYPT_DEBUG
+	if (mex.inputdatalength & (1U << 31)) {
+		if (!capable(CAP_SYS_ADMIN))
+			return -EPERM;
+		tr.fi.cmd = (u16)(mex.inputdatalength >> 16);
+	}
+	mex.inputdatalength &= 0x0000FFFF;
+#endif
+
+	do {
+		rc = zcrypt_rsa_modexpo(perms, &tr, &mex);
+		if (rc == -EAGAIN)
+			tr.again_counter++;
+#ifdef CONFIG_ZCRYPT_DEBUG
+		if (rc == -EAGAIN && (tr.fi.flags & AP_FI_FLAG_NO_RETRY))
+			break;
+#endif
+	} while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX);
+	/* on failure: retry once again after a requested rescan */
+	if ((rc == -ENODEV) && (zcrypt_process_rescan()))
+		do {
+			rc = zcrypt_rsa_modexpo(perms, &tr, &mex);
+			if (rc == -EAGAIN)
+				tr.again_counter++;
+		} while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX);
+	if (rc == -EAGAIN && tr.again_counter >= TRACK_AGAIN_MAX)
+		rc = -EIO;
+	if (rc) {
+		ZCRYPT_DBF(DBF_DEBUG, "ioctl ICARSAMODEXPO rc=%d\n", rc);
+		return rc;
+	}
+	return put_user(mex.outputdatalength, &umex->outputdatalength);
+}
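
All four ioctl wrappers introduced in this hunk share the same retry shape:
retry on -EAGAIN up to a bounded count, optionally one more round after an AP
bus rescan when -ENODEV was seen, and map a persistent -EAGAIN to -EIO (the
CONFIG_ZCRYPT_DEBUG fault-injection bits piggyback on the high bits of the
length fields and are stripped before use). A standalone sketch of that
control flow; TRACK_AGAIN_MAX is assumed to match the value in the
corresponding zcrypt_api.h:

#include <errno.h>
#include <stdio.h>

#define TRACK_AGAIN_MAX 10	/* assumption: value from zcrypt_api.h */

static int attempts;

static int send_request(void)	/* stand-in for zcrypt_rsa_modexpo() */
{
	return ++attempts < 3 ? -EAGAIN : 0;
}

static int rescan(void)	/* stand-in for zcrypt_process_rescan() */
{
	return 1;
}

int main(void)
{
	int rc, again_counter = 0;

	do {
		rc = send_request();
		if (rc == -EAGAIN)
			again_counter++;
	} while (rc == -EAGAIN && again_counter < TRACK_AGAIN_MAX);
	/* on -ENODEV: rescan the AP bus once, then retry the same way */
	if (rc == -ENODEV && rescan())
		do {
			rc = send_request();
			if (rc == -EAGAIN)
				again_counter++;
		} while (rc == -EAGAIN && again_counter < TRACK_AGAIN_MAX);
	if (rc == -EAGAIN && again_counter >= TRACK_AGAIN_MAX)
		rc = -EIO;	/* give up: too many requeue cycles */

	printf("rc=%d after %d attempts\n", rc, attempts);
	return 0;
}
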
+
+static int icarsacrt_ioctl(struct ap_perms *perms, unsigned long arg)
+{
+	int rc;
+	struct zcrypt_track tr;
+	struct ica_rsa_modexpo_crt crt;
+	struct ica_rsa_modexpo_crt __user *ucrt = (void __user *) arg;
+
+	memset(&tr, 0, sizeof(tr));
+	if (copy_from_user(&crt, ucrt, sizeof(crt)))
+		return -EFAULT;
+
+#ifdef CONFIG_ZCRYPT_DEBUG
+	if (crt.inputdatalength & (1U << 31)) {
+		if (!capable(CAP_SYS_ADMIN))
+			return -EPERM;
+		tr.fi.cmd = (u16)(crt.inputdatalength >> 16);
+	}
+	crt.inputdatalength &= 0x0000FFFF;
+#endif
+
+	do {
+		rc = zcrypt_rsa_crt(perms, &tr, &crt);
+		if (rc == -EAGAIN)
+			tr.again_counter++;
+#ifdef CONFIG_ZCRYPT_DEBUG
+		if (rc == -EAGAIN && (tr.fi.flags & AP_FI_FLAG_NO_RETRY))
+			break;
+#endif
+	} while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX);
+	/* on failure: retry once again after a requested rescan */
+	if ((rc == -ENODEV) && (zcrypt_process_rescan()))
+		do {
+			rc = zcrypt_rsa_crt(perms, &tr, &crt);
+			if (rc == -EAGAIN)
+				tr.again_counter++;
+		} while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX);
+	if (rc == -EAGAIN && tr.again_counter >= TRACK_AGAIN_MAX)
+		rc = -EIO;
+	if (rc) {
+		ZCRYPT_DBF(DBF_DEBUG, "ioctl ICARSACRT rc=%d\n", rc);
+		return rc;
+	}
+	return put_user(crt.outputdatalength, &ucrt->outputdatalength);
+}
+
+static int zsecsendcprb_ioctl(struct ap_perms *perms, unsigned long arg)
+{
+	int rc;
+	struct ica_xcRB xcRB;
+	struct zcrypt_track tr;
+	struct ica_xcRB __user *uxcRB = (void __user *) arg;
+
+	memset(&tr, 0, sizeof(tr));
+	if (copy_from_user(&xcRB, uxcRB, sizeof(xcRB)))
+		return -EFAULT;
+
+#ifdef CONFIG_ZCRYPT_DEBUG
+	if (xcRB.status & (1U << 31)) {
+		if (!capable(CAP_SYS_ADMIN))
+			return -EPERM;
+		tr.fi.cmd = (u16)(xcRB.status >> 16);
+	}
+	xcRB.status &= 0x0000FFFF;
+#endif
+
+	do {
+		rc = _zcrypt_send_cprb(true, perms, &tr, &xcRB);
+		if (rc == -EAGAIN)
+			tr.again_counter++;
+#ifdef CONFIG_ZCRYPT_DEBUG
+		if (rc == -EAGAIN && (tr.fi.flags & AP_FI_FLAG_NO_RETRY))
+			break;
+#endif
+	} while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX);
+	/* on failure: retry once again after a requested rescan */
+	if ((rc == -ENODEV) && (zcrypt_process_rescan()))
+		do {
+			rc = _zcrypt_send_cprb(true, perms, &tr, &xcRB);
+			if (rc == -EAGAIN)
+				tr.again_counter++;
+		} while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX);
+	if (rc == -EAGAIN && tr.again_counter >= TRACK_AGAIN_MAX)
+		rc = -EIO;
+	if (rc)
+		ZCRYPT_DBF(DBF_DEBUG, "ioctl ZSENDCPRB rc=%d status=0x%x\n",
+			   rc, xcRB.status);
+	if (copy_to_user(uxcRB, &xcRB, sizeof(xcRB)))
+		return -EFAULT;
+	return rc;
+}
+
+static int zsendep11cprb_ioctl(struct ap_perms *perms, unsigned long arg)
+{
+	int rc;
+	struct ep11_urb xcrb;
+	struct zcrypt_track tr;
+	struct ep11_urb __user *uxcrb = (void __user *)arg;
+
+	memset(&tr, 0, sizeof(tr));
+	if (copy_from_user(&xcrb, uxcrb, sizeof(xcrb)))
+		return -EFAULT;
+
+#ifdef CONFIG_ZCRYPT_DEBUG
+	if (xcrb.req_len & (1ULL << 63)) {
+		if (!capable(CAP_SYS_ADMIN))
+			return -EPERM;
+		tr.fi.cmd = (u16)(xcrb.req_len >> 48);
+	}
+	xcrb.req_len &= 0x0000FFFFFFFFFFFFULL;
+#endif
+
+	do {
+		rc = _zcrypt_send_ep11_cprb(true, perms, &tr, &xcrb);
+		if (rc == -EAGAIN)
+			tr.again_counter++;
+#ifdef CONFIG_ZCRYPT_DEBUG
+		if (rc == -EAGAIN && (tr.fi.flags & AP_FI_FLAG_NO_RETRY))
+			break;
+#endif
+	} while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX);
+	/* on failure: retry once again after a requested rescan */
+	if ((rc == -ENODEV) && (zcrypt_process_rescan()))
+		do {
+			rc = _zcrypt_send_ep11_cprb(true, perms, &tr, &xcrb);
+			if (rc == -EAGAIN)
+				tr.again_counter++;
+		} while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX);
+	if (rc == -EAGAIN && tr.again_counter >= TRACK_AGAIN_MAX)
+		rc = -EIO;
+	if (rc)
+		ZCRYPT_DBF(DBF_DEBUG, "ioctl ZSENDEP11CPRB rc=%d\n", rc);
+	if (copy_to_user(uxcrb, &xcrb, sizeof(xcrb)))
+		return -EFAULT;
+	return rc;
+}
+
 static long zcrypt_unlocked_ioctl(struct file *filp, unsigned int cmd,
 				  unsigned long arg)
 {
@@ -1300,87 +1595,14 @@
 		return rc;
 
 	switch (cmd) {
-	case ICARSAMODEXPO: {
-		struct ica_rsa_modexpo __user *umex = (void __user *) arg;
-		struct ica_rsa_modexpo mex;
-
-		if (copy_from_user(&mex, umex, sizeof(mex)))
-			return -EFAULT;
-		do {
-			rc = zcrypt_rsa_modexpo(perms, &mex);
-		} while (rc == -EAGAIN);
-		/* on failure: retry once again after a requested rescan */
-		if ((rc == -ENODEV) && (zcrypt_process_rescan()))
-			do {
-				rc = zcrypt_rsa_modexpo(perms, &mex);
-			} while (rc == -EAGAIN);
-		if (rc) {
-			ZCRYPT_DBF(DBF_DEBUG, "ioctl ICARSAMODEXPO rc=%d\n", rc);
-			return rc;
-		}
-		return put_user(mex.outputdatalength, &umex->outputdatalength);
-	}
-	case ICARSACRT: {
-		struct ica_rsa_modexpo_crt __user *ucrt = (void __user *) arg;
-		struct ica_rsa_modexpo_crt crt;
-
-		if (copy_from_user(&crt, ucrt, sizeof(crt)))
-			return -EFAULT;
-		do {
-			rc = zcrypt_rsa_crt(perms, &crt);
-		} while (rc == -EAGAIN);
-		/* on failure: retry once again after a requested rescan */
-		if ((rc == -ENODEV) && (zcrypt_process_rescan()))
-			do {
-				rc = zcrypt_rsa_crt(perms, &crt);
-			} while (rc == -EAGAIN);
-		if (rc) {
-			ZCRYPT_DBF(DBF_DEBUG, "ioctl ICARSACRT rc=%d\n", rc);
-			return rc;
-		}
-		return put_user(crt.outputdatalength, &ucrt->outputdatalength);
-	}
-	case ZSECSENDCPRB: {
-		struct ica_xcRB __user *uxcRB = (void __user *) arg;
-		struct ica_xcRB xcRB;
-
-		if (copy_from_user(&xcRB, uxcRB, sizeof(xcRB)))
-			return -EFAULT;
-		do {
-			rc = _zcrypt_send_cprb(perms, &xcRB);
-		} while (rc == -EAGAIN);
-		/* on failure: retry once again after a requested rescan */
-		if ((rc == -ENODEV) && (zcrypt_process_rescan()))
-			do {
-				rc = _zcrypt_send_cprb(perms, &xcRB);
-			} while (rc == -EAGAIN);
-		if (rc)
-			ZCRYPT_DBF(DBF_DEBUG, "ioctl ZSENDCPRB rc=%d status=0x%x\n",
-				   rc, xcRB.status);
-		if (copy_to_user(uxcRB, &xcRB, sizeof(xcRB)))
-			return -EFAULT;
-		return rc;
-	}
-	case ZSENDEP11CPRB: {
-		struct ep11_urb __user *uxcrb = (void __user *)arg;
-		struct ep11_urb xcrb;
-
-		if (copy_from_user(&xcrb, uxcrb, sizeof(xcrb)))
-			return -EFAULT;
-		do {
-			rc = zcrypt_send_ep11_cprb(perms, &xcrb);
-		} while (rc == -EAGAIN);
-		/* on failure: retry once again after a requested rescan */
-		if ((rc == -ENODEV) && (zcrypt_process_rescan()))
-			do {
-				rc = zcrypt_send_ep11_cprb(perms, &xcrb);
-			} while (rc == -EAGAIN);
-		if (rc)
-			ZCRYPT_DBF(DBF_DEBUG, "ioctl ZSENDEP11CPRB rc=%d\n", rc);
-		if (copy_to_user(uxcrb, &xcrb, sizeof(xcrb)))
-			return -EFAULT;
-		return rc;
-	}
+	case ICARSAMODEXPO:
+		return icarsamodexpo_ioctl(perms, arg);
+	case ICARSACRT:
+		return icarsacrt_ioctl(perms, arg);
+	case ZSECSENDCPRB:
+		return zsecsendcprb_ioctl(perms, arg);
+	case ZSENDEP11CPRB:
+		return zsendep11cprb_ioctl(perms, arg);
 	case ZCRYPT_DEVICE_STATUS: {
 		struct zcrypt_device_status_ext *device_status;
 		size_t total_size = MAX_ZDEV_ENTRIES_EXT
@@ -1506,8 +1728,10 @@
 	struct compat_ica_rsa_modexpo __user *umex32 = compat_ptr(arg);
 	struct compat_ica_rsa_modexpo mex32;
 	struct ica_rsa_modexpo mex64;
+	struct zcrypt_track tr;
 	long rc;
 
+	memset(&tr, 0, sizeof(tr));
 	if (copy_from_user(&mex32, umex32, sizeof(mex32)))
 		return -EFAULT;
 	mex64.inputdata = compat_ptr(mex32.inputdata);
@@ -1517,13 +1741,19 @@
 	mex64.b_key = compat_ptr(mex32.b_key);
 	mex64.n_modulus = compat_ptr(mex32.n_modulus);
 	do {
-		rc = zcrypt_rsa_modexpo(perms, &mex64);
-	} while (rc == -EAGAIN);
+		rc = zcrypt_rsa_modexpo(perms, &tr, &mex64);
+		if (rc == -EAGAIN)
+			tr.again_counter++;
+	} while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX);
 	/* on failure: retry once again after a requested rescan */
 	if ((rc == -ENODEV) && (zcrypt_process_rescan()))
 		do {
-			rc = zcrypt_rsa_modexpo(perms, &mex64);
-		} while (rc == -EAGAIN);
+			rc = zcrypt_rsa_modexpo(perms, &tr, &mex64);
+			if (rc == -EAGAIN)
+				tr.again_counter++;
+		} while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX);
+	if (rc == -EAGAIN && tr.again_counter >= TRACK_AGAIN_MAX)
+		rc = -EIO;
 	if (rc)
 		return rc;
 	return put_user(mex64.outputdatalength,
@@ -1548,8 +1778,10 @@
 	struct compat_ica_rsa_modexpo_crt __user *ucrt32 = compat_ptr(arg);
 	struct compat_ica_rsa_modexpo_crt crt32;
 	struct ica_rsa_modexpo_crt crt64;
+	struct zcrypt_track tr;
 	long rc;
 
+	memset(&tr, 0, sizeof(tr));
 	if (copy_from_user(&crt32, ucrt32, sizeof(crt32)))
 		return -EFAULT;
 	crt64.inputdata = compat_ptr(crt32.inputdata);
@@ -1562,13 +1794,19 @@
 	crt64.nq_prime = compat_ptr(crt32.nq_prime);
 	crt64.u_mult_inv = compat_ptr(crt32.u_mult_inv);
 	do {
-		rc = zcrypt_rsa_crt(perms, &crt64);
-	} while (rc == -EAGAIN);
+		rc = zcrypt_rsa_crt(perms, &tr, &crt64);
+		if (rc == -EAGAIN)
+			tr.again_counter++;
+	} while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX);
 	/* on failure: retry once again after a requested rescan */
 	if ((rc == -ENODEV) && (zcrypt_process_rescan()))
 		do {
-			rc = zcrypt_rsa_crt(perms, &crt64);
-		} while (rc == -EAGAIN);
+			rc = zcrypt_rsa_crt(perms, &tr, &crt64);
+			if (rc == -EAGAIN)
+				tr.again_counter++;
+		} while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX);
+	if (rc == -EAGAIN && tr.again_counter >= TRACK_AGAIN_MAX)
+		rc = -EIO;
 	if (rc)
 		return rc;
 	return put_user(crt64.outputdatalength,
@@ -1600,9 +1838,11 @@
 {
 	struct compat_ica_xcRB __user *uxcRB32 = compat_ptr(arg);
 	struct compat_ica_xcRB xcRB32;
+	struct zcrypt_track tr;
 	struct ica_xcRB xcRB64;
 	long rc;
 
+	memset(&tr, 0, sizeof(tr));
 	if (copy_from_user(&xcRB32, uxcRB32, sizeof(xcRB32)))
 		return -EFAULT;
 	xcRB64.agent_ID = xcRB32.agent_ID;
@@ -1626,13 +1866,19 @@
 	xcRB64.priority_window = xcRB32.priority_window;
 	xcRB64.status = xcRB32.status;
 	do {
-		rc = _zcrypt_send_cprb(perms, &xcRB64);
-	} while (rc == -EAGAIN);
+		rc = _zcrypt_send_cprb(true, perms, &tr, &xcRB64);
+		if (rc == -EAGAIN)
+			tr.again_counter++;
+	} while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX);
 	/* on failure: retry once again after a requested rescan */
 	if ((rc == -ENODEV) && (zcrypt_process_rescan()))
 		do {
-			rc = _zcrypt_send_cprb(perms, &xcRB64);
-		} while (rc == -EAGAIN);
+			rc = _zcrypt_send_cprb(true, perms, &tr, &xcRB64);
+			if (rc == -EAGAIN)
+				tr.again_counter++;
+		} while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX);
+	if (rc == -EAGAIN && tr.again_counter >= TRACK_AGAIN_MAX)
+		rc = -EIO;
 	xcRB32.reply_control_blk_length = xcRB64.reply_control_blk_length;
 	xcRB32.reply_data_length = xcRB64.reply_data_length;
 	xcRB32.status = xcRB64.status;
@@ -1888,6 +2134,7 @@
 	zcrypt_msgtype6_exit();
 	zcrypt_msgtype50_exit();
 	zcrypt_ccamisc_exit();
+	zcrypt_ep11misc_exit();
 	zcrypt_debug_exit();
 }
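
The four ioctl hunks above (and their helpers earlier in this patch) all repeat
one bounded-retry pattern: retry on -EAGAIN at most TRACK_AGAIN_MAX times, then
turn a request that never stops bouncing into -EIO. A minimal self-contained
sketch of that pattern, with a hypothetical submit() callback standing in for
zcrypt_rsa_modexpo(), zcrypt_rsa_crt(), _zcrypt_send_cprb() and
zcrypt_send_ep11_cprb():

#include <errno.h>

#define TRACK_AGAIN_MAX 10		/* mirrors zcrypt_api.h below */

struct zcrypt_track {
	int again_counter;		/* retry attempts counter */
};

static long bounded_retry(long (*submit)(void *arg), void *arg)
{
	struct zcrypt_track tr = { 0 };
	long rc;

	do {
		rc = submit(arg);
		if (rc == -EAGAIN)
			tr.again_counter++;
	} while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX);

	/* a request that keeps bouncing becomes a hard I/O error */
	if (rc == -EAGAIN && tr.again_counter >= TRACK_AGAIN_MAX)
		rc = -EIO;
	return rc;
}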
 
diff --git a/drivers/s390/crypto/zcrypt_api.h b/drivers/s390/crypto/zcrypt_api.h
index d464618..51c0b8b 100644
--- a/drivers/s390/crypto/zcrypt_api.h
+++ b/drivers/s390/crypto/zcrypt_api.h
@@ -55,13 +55,30 @@
 
 struct zcrypt_queue;
 
+/* struct to hold tracking information for a userspace request/response */
+struct zcrypt_track {
+	int again_counter;		/* retry attempts counter */
+	int last_qid;			/* last qid used */
+	int last_rc;			/* last return code */
+#ifdef CONFIG_ZCRYPT_DEBUG
+	struct ap_fi fi;		/* failure injection cmd */
+#endif
+};
+
+/* defines related to message tracking */
+#define TRACK_AGAIN_MAX 10
+#define TRACK_AGAIN_CARD_WEIGHT_PENALTY  1000
+#define TRACK_AGAIN_QUEUE_WEIGHT_PENALTY 10000
+
 struct zcrypt_ops {
-	long (*rsa_modexpo)(struct zcrypt_queue *, struct ica_rsa_modexpo *);
+	long (*rsa_modexpo)(struct zcrypt_queue *, struct ica_rsa_modexpo *,
+			    struct ap_message *);
 	long (*rsa_modexpo_crt)(struct zcrypt_queue *,
-				struct ica_rsa_modexpo_crt *);
-	long (*send_cprb)(struct zcrypt_queue *, struct ica_xcRB *,
+				struct ica_rsa_modexpo_crt *,
+				struct ap_message *);
+	long (*send_cprb)(bool userspace, struct zcrypt_queue *, struct ica_xcRB *,
 			  struct ap_message *);
-	long (*send_ep11_cprb)(struct zcrypt_queue *, struct ep11_urb *,
+	long (*send_ep11_cprb)(bool userspace, struct zcrypt_queue *, struct ep11_urb *,
 			       struct ap_message *);
 	long (*rng)(struct zcrypt_queue *, char *, struct ap_message *);
 	struct list_head list;		/* zcrypt ops list. */
@@ -82,7 +99,7 @@
 	int min_mod_size;		/* Min number of bits. */
 	int max_mod_size;		/* Max number of bits. */
 	int max_exp_bit_length;
-	int speed_rating[NUM_OPS];	/* Speed idx of crypto ops. */
+	const int *speed_rating;	/* Speed idx of crypto ops. */
 	atomic_t load;			/* Utilization of the crypto device */
 
 	int request_count;		/* # current requests. */
@@ -140,8 +157,31 @@
 int zcrypt_api_init(void);
 void zcrypt_api_exit(void);
 long zcrypt_send_cprb(struct ica_xcRB *xcRB);
+long zcrypt_send_ep11_cprb(struct ep11_urb *urb);
 void zcrypt_device_status_mask_ext(struct zcrypt_device_status_ext *devstatus);
 int zcrypt_device_status_ext(int card, int queue,
 			     struct zcrypt_device_status_ext *devstatus);
 
+static inline unsigned long z_copy_from_user(bool userspace,
+					     void *to,
+					     const void __user *from,
+					     unsigned long n)
+{
+	if (likely(userspace))
+		return copy_from_user(to, from, n);
+	memcpy(to, (void __force *) from, n);
+	return 0;
+}
+
+static inline unsigned long z_copy_to_user(bool userspace,
+					   void __user *to,
+					   const void *from,
+					   unsigned long n)
+{
+	if (likely(userspace))
+		return copy_to_user(to, from, n);
+	memcpy((void __force *) to, from, n);
+	return 0;
+}
+
 #endif /* _ZCRYPT_API_H_ */
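
The z_copy_from_user()/z_copy_to_user() helpers above replace the
set_fs(KERNEL_DS) trick this patch removes from zcrypt_ccamisc.c: one request
path can now serve both the ioctl entry and in-kernel callers. A hedged
kernel-context sketch of the intended call shape; example_request() is
illustrative and not part of this patch:

static long example_request(bool userspace, struct ica_xcRB __user *uxcrb)
{
	struct ica_xcRB xcrb;

	/* userspace == true: uxcrb is a genuine __user pointer (ioctl path);
	 * userspace == false: uxcrb really points into kernel memory, as for
	 * in-kernel callers such as zcrypt_send_cprb() */
	if (z_copy_from_user(userspace, &xcrb, uxcrb, sizeof(xcrb)))
		return -EFAULT;
	/* ... hand the CPRB to the msgtype layer ... */
	if (z_copy_to_user(userspace, uxcrb, &xcrb, sizeof(xcrb)))
		return -EFAULT;
	return 0;
}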
diff --git a/drivers/s390/crypto/zcrypt_card.c b/drivers/s390/crypto/zcrypt_card.c
index d4f35a1..09fe6bb 100644
--- a/drivers/s390/crypto/zcrypt_card.c
+++ b/drivers/s390/crypto/zcrypt_card.c
@@ -41,7 +41,7 @@
 {
 	struct zcrypt_card *zc = to_ap_card(dev)->private;
 
-	return snprintf(buf, PAGE_SIZE, "%s\n", zc->type_string);
+	return scnprintf(buf, PAGE_SIZE, "%s\n", zc->type_string);
 }
 
 static DEVICE_ATTR_RO(type);
@@ -50,22 +50,28 @@
 			   struct device_attribute *attr,
 			   char *buf)
 {
-	struct zcrypt_card *zc = to_ap_card(dev)->private;
+	struct ap_card *ac = to_ap_card(dev);
+	struct zcrypt_card *zc = ac->private;
+	int online = ac->config && zc->online ? 1 : 0;
 
-	return snprintf(buf, PAGE_SIZE, "%d\n", zc->online);
+	return scnprintf(buf, PAGE_SIZE, "%d\n", online);
 }
 
 static ssize_t online_store(struct device *dev,
 			    struct device_attribute *attr,
 			    const char *buf, size_t count)
 {
-	struct zcrypt_card *zc = to_ap_card(dev)->private;
+	struct ap_card *ac = to_ap_card(dev);
+	struct zcrypt_card *zc = ac->private;
 	struct zcrypt_queue *zq;
 	int online, id;
 
 	if (sscanf(buf, "%d\n", &online) != 1 || online < 0 || online > 1)
 		return -EINVAL;
 
+	if (online && !ac->config)
+		return -ENODEV;
+
 	zc->online = online;
 	id = zc->card->id;
 
@@ -86,7 +92,7 @@
 {
 	struct zcrypt_card *zc = to_ap_card(dev)->private;
 
-	return snprintf(buf, PAGE_SIZE, "%d\n", atomic_read(&zc->load));
+	return scnprintf(buf, PAGE_SIZE, "%d\n", atomic_read(&zc->load));
 }
 
 static DEVICE_ATTR_RO(load);
@@ -151,11 +157,6 @@
 {
 	int rc;
 
-	rc = sysfs_create_group(&zc->card->ap_dev.device.kobj,
-				&zcrypt_card_attr_group);
-	if (rc)
-		return rc;
-
 	spin_lock(&zcrypt_list_lock);
 	list_add_tail(&zc->list, &zcrypt_card_list);
 	spin_unlock(&zcrypt_list_lock);
@@ -164,6 +165,14 @@
 
 	ZCRYPT_DBF(DBF_INFO, "card=%02x register online=1\n", zc->card->id);
 
+	rc = sysfs_create_group(&zc->card->ap_dev.device.kobj,
+				&zcrypt_card_attr_group);
+	if (rc) {
+		spin_lock(&zcrypt_list_lock);
+		list_del_init(&zc->list);
+		spin_unlock(&zcrypt_list_lock);
+	}
+
 	return rc;
 }
 EXPORT_SYMBOL(zcrypt_card_register);
@@ -183,5 +192,6 @@
 	spin_unlock(&zcrypt_list_lock);
 	sysfs_remove_group(&zc->card->ap_dev.device.kobj,
 			   &zcrypt_card_attr_group);
+	zcrypt_card_put(zc);
 }
 EXPORT_SYMBOL(zcrypt_card_unregister);
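
Condensed, the registration path above now follows a publish-then-unwind
shape: the card goes on the zcrypt list first, the fallible sysfs group
creation comes last, and a creation failure unwinds the list insertion;
zcrypt_card_unregister() mirrors this in reverse and finally drops the
registration reference via zcrypt_card_put(). The error path, names as in
the hunk above:

	rc = sysfs_create_group(&zc->card->ap_dev.device.kobj,
				&zcrypt_card_attr_group);
	if (rc) {
		/* undo the only step taken so far, in reverse order */
		spin_lock(&zcrypt_list_lock);
		list_del_init(&zc->list);
		spin_unlock(&zcrypt_list_lock);
	}
	return rc;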
diff --git a/drivers/s390/crypto/zcrypt_ccamisc.c b/drivers/s390/crypto/zcrypt_ccamisc.c
index 03999b0..ffab935 100644
--- a/drivers/s390/crypto/zcrypt_ccamisc.c
+++ b/drivers/s390/crypto/zcrypt_ccamisc.c
@@ -173,6 +173,49 @@
 EXPORT_SYMBOL(cca_check_secaescipherkey);
 
 /*
+ * Simple check if the token is a valid CCA secure ECC private
+ * key token. Returns 0 on success or errno value on failure.
+ */
+int cca_check_sececckeytoken(debug_info_t *dbg, int dbflvl,
+			     const u8 *token, size_t keysize,
+			     int checkcpacfexport)
+{
+	struct eccprivkeytoken *t = (struct eccprivkeytoken *) token;
+
+#define DBF(...) debug_sprintf_event(dbg, dbflvl, ##__VA_ARGS__)
+
+	if (t->type != TOKTYPE_CCA_INTERNAL_PKA) {
+		if (dbg)
+			DBF("%s token check failed, type 0x%02x != 0x%02x\n",
+			    __func__, (int) t->type, TOKTYPE_CCA_INTERNAL_PKA);
+		return -EINVAL;
+	}
+	if (t->len > keysize) {
+		if (dbg)
+			DBF("%s token check failed, len %d > keysize %zu\n",
+			    __func__, (int) t->len, keysize);
+		return -EINVAL;
+	}
+	if (t->secid != 0x20) {
+		if (dbg)
+			DBF("%s token check failed, secid 0x%02x != 0x20\n",
+			    __func__, (int) t->secid);
+		return -EINVAL;
+	}
+	if (checkcpacfexport && !(t->kutc & 0x01)) {
+		if (dbg)
+			DBF("%s token check failed, XPRTCPAC bit is 0\n",
+			    __func__);
+		return -EINVAL;
+	}
+
+#undef DBF
+
+	return 0;
+}
+EXPORT_SYMBOL(cca_check_sececckeytoken);
+
+/*
  * Allocate consecutive memory for request CPRB, request param
  * block, reply CPRB and reply param block and fill in values
  * for the common fields. Returns 0 on success or errno value
@@ -205,9 +248,9 @@
 	preqcblk->rpl_msgbl = cprbplusparamblen;
 	if (paramblen) {
 		preqcblk->req_parmb =
-			((u8 *) preqcblk) + sizeof(struct CPRBX);
+			((u8 __user *) preqcblk) + sizeof(struct CPRBX);
 		preqcblk->rpl_parmb =
-			((u8 *) prepcblk) + sizeof(struct CPRBX);
+			((u8 __user *) prepcblk) + sizeof(struct CPRBX);
 	}
 
 	*pcprbmem = cprbmem;
@@ -249,24 +292,6 @@
 }
 
 /*
- * Helper function which calls zcrypt_send_cprb with
- * memory management segment adjusted to kernel space
- * so that the copy_from_user called within this
- * function do in fact copy from kernel space.
- */
-static inline int _zcrypt_send_cprb(struct ica_xcRB *xcrb)
-{
-	int rc;
-	mm_segment_t old_fs = get_fs();
-
-	set_fs(KERNEL_DS);
-	rc = zcrypt_send_cprb(xcrb);
-	set_fs(old_fs);
-
-	return rc;
-}
-
-/*
  * Generate (random) CCA AES DATA secure key.
  */
 int cca_genseckey(u16 cardnr, u16 domain,
@@ -274,7 +299,7 @@
 {
 	int i, rc, keysize;
 	int seckeysize;
-	u8 *mem;
+	u8 *mem, *ptr;
 	struct CPRBX *preqcblk, *prepcblk;
 	struct ica_xcRB xcrb;
 	struct kgreqparm {
@@ -320,7 +345,7 @@
 	preqcblk->domain = domain;
 
 	/* fill request cprb param block with KG request */
-	preqparm = (struct kgreqparm *) preqcblk->req_parmb;
+	preqparm = (struct kgreqparm __force *) preqcblk->req_parmb;
 	memcpy(preqparm->subfunc_code, "KG", 2);
 	preqparm->rule_array_len = sizeof(preqparm->rule_array_len);
 	preqparm->lv1.len = sizeof(struct lv1);
@@ -359,7 +384,7 @@
 	prep_xcrb(&xcrb, cardnr, preqcblk, prepcblk);
 
 	/* forward xcrb with request CPRB and reply CPRB to zcrypt dd */
-	rc = _zcrypt_send_cprb(&xcrb);
+	rc = zcrypt_send_cprb(&xcrb);
 	if (rc) {
 		DEBUG_ERR("%s zcrypt_send_cprb (cardnr=%d domain=%d) failed, errno %d\n",
 			  __func__, (int) cardnr, (int) domain, rc);
@@ -377,8 +402,9 @@
 	}
 
 	/* process response cprb param block */
-	prepcblk->rpl_parmb = ((u8 *) prepcblk) + sizeof(struct CPRBX);
-	prepparm = (struct kgrepparm *) prepcblk->rpl_parmb;
+	ptr =  ((u8 *) prepcblk) + sizeof(struct CPRBX);
+	prepcblk->rpl_parmb = (u8 __user *) ptr;
+	prepparm = (struct kgrepparm *) ptr;
 
 	/* check length of the returned secure key token */
 	seckeysize = prepparm->lv3.keyblock.toklen
@@ -415,7 +441,7 @@
 		   const u8 *clrkey, u8 seckey[SECKEYBLOBSIZE])
 {
 	int rc, keysize, seckeysize;
-	u8 *mem;
+	u8 *mem, *ptr;
 	struct CPRBX *preqcblk, *prepcblk;
 	struct ica_xcRB xcrb;
 	struct cmreqparm {
@@ -460,7 +486,7 @@
 	preqcblk->domain = domain;
 
 	/* fill request cprb param block with CM request */
-	preqparm = (struct cmreqparm *) preqcblk->req_parmb;
+	preqparm = (struct cmreqparm __force *) preqcblk->req_parmb;
 	memcpy(preqparm->subfunc_code, "CM", 2);
 	memcpy(preqparm->rule_array, "AES     ", 8);
 	preqparm->rule_array_len =
@@ -496,7 +522,7 @@
 	prep_xcrb(&xcrb, cardnr, preqcblk, prepcblk);
 
 	/* forward xcrb with request CPRB and reply CPRB to zcrypt dd */
-	rc = _zcrypt_send_cprb(&xcrb);
+	rc = zcrypt_send_cprb(&xcrb);
 	if (rc) {
 		DEBUG_ERR("%s zcrypt_send_cprb (cardnr=%d domain=%d) failed, rc=%d\n",
 			  __func__, (int) cardnr, (int) domain, rc);
@@ -514,8 +540,9 @@
 	}
 
 	/* process response cprb param block */
-	prepcblk->rpl_parmb = ((u8 *) prepcblk) + sizeof(struct CPRBX);
-	prepparm = (struct cmrepparm *) prepcblk->rpl_parmb;
+	ptr = ((u8 *) prepcblk) + sizeof(struct CPRBX);
+	prepcblk->rpl_parmb = (u8 __user *) ptr;
+	prepparm = (struct cmrepparm *) ptr;
 
 	/* check length of the returned secure key token */
 	seckeysize = prepparm->lv3.keyblock.toklen
@@ -554,7 +581,7 @@
 		    u8 *protkey, u32 *protkeylen, u32 *protkeytype)
 {
 	int rc;
-	u8 *mem;
+	u8 *mem, *ptr;
 	struct CPRBX *preqcblk, *prepcblk;
 	struct ica_xcRB xcrb;
 	struct uskreqparm {
@@ -592,7 +619,7 @@
 				u8  pad2[1];
 				u8  vptype;
 				u8  vp[32];  /* verification pattern */
-			} keyblock;
+			} ckb;
 		} lv3;
 	} __packed * prepparm;
 
@@ -605,7 +632,7 @@
 	preqcblk->domain = domain;
 
 	/* fill request cprb param block with USK request */
-	preqparm = (struct uskreqparm *) preqcblk->req_parmb;
+	preqparm = (struct uskreqparm __force *) preqcblk->req_parmb;
 	memcpy(preqparm->subfunc_code, "US", 2);
 	preqparm->rule_array_len = sizeof(preqparm->rule_array_len);
 	preqparm->lv1.len = sizeof(struct lv1);
@@ -622,7 +649,7 @@
 	prep_xcrb(&xcrb, cardnr, preqcblk, prepcblk);
 
 	/* forward xcrb with request CPRB and reply CPRB to zcrypt dd */
-	rc = _zcrypt_send_cprb(&xcrb);
+	rc = zcrypt_send_cprb(&xcrb);
 	if (rc) {
 		DEBUG_ERR("%s zcrypt_send_cprb (cardnr=%d domain=%d) failed, rc=%d\n",
 			  __func__, (int) cardnr, (int) domain, rc);
@@ -646,19 +673,21 @@
 	}
 
 	/* process response cprb param block */
-	prepcblk->rpl_parmb = ((u8 *) prepcblk) + sizeof(struct CPRBX);
-	prepparm = (struct uskrepparm *) prepcblk->rpl_parmb;
+	ptr = ((u8 *) prepcblk) + sizeof(struct CPRBX);
+	prepcblk->rpl_parmb = (u8 __user *) ptr;
+	prepparm = (struct uskrepparm *) ptr;
 
 	/* check the returned keyblock */
-	if (prepparm->lv3.keyblock.version != 0x01) {
-		DEBUG_ERR("%s reply param keyblock version mismatch 0x%02x != 0x01\n",
-			  __func__, (int) prepparm->lv3.keyblock.version);
+	if (prepparm->lv3.ckb.version != 0x01 &&
+	    prepparm->lv3.ckb.version != 0x02) {
+		DEBUG_ERR("%s reply param keyblock version mismatch 0x%02x\n",
+			  __func__, (int) prepparm->lv3.ckb.version);
 		rc = -EIO;
 		goto out;
 	}
 
 	/* copy the translated protected key */
-	switch (prepparm->lv3.keyblock.len) {
+	switch (prepparm->lv3.ckb.len) {
 	case 16+32:
 		/* AES 128 protected key */
 		if (protkeytype)
@@ -676,13 +705,13 @@
 		break;
 	default:
 		DEBUG_ERR("%s unknown/unsupported keylen %d\n",
-			  __func__, prepparm->lv3.keyblock.len);
+			  __func__, prepparm->lv3.ckb.len);
 		rc = -EIO;
 		goto out;
 	}
-	memcpy(protkey, prepparm->lv3.keyblock.key, prepparm->lv3.keyblock.len);
+	memcpy(protkey, prepparm->lv3.ckb.key, prepparm->lv3.ckb.len);
 	if (protkeylen)
-		*protkeylen = prepparm->lv3.keyblock.len;
+		*protkeylen = prepparm->lv3.ckb.len;
 
 out:
 	free_cprbmem(mem, PARMBSIZE, 0);
@@ -713,7 +742,7 @@
 		     u8 *keybuf, size_t *keybufsize)
 {
 	int rc;
-	u8 *mem;
+	u8 *mem, *ptr;
 	struct CPRBX *preqcblk, *prepcblk;
 	struct ica_xcRB xcrb;
 	struct gkreqparm {
@@ -795,7 +824,7 @@
 	preqcblk->req_parml = sizeof(struct gkreqparm);
 
 	/* prepare request param block with GK request */
-	preqparm = (struct gkreqparm *) preqcblk->req_parmb;
+	preqparm = (struct gkreqparm __force *) preqcblk->req_parmb;
 	memcpy(preqparm->subfunc_code, "GK", 2);
 	preqparm->rule_array_len =  sizeof(uint16_t) + 2 * 8;
 	memcpy(preqparm->rule_array, "AES     OP      ", 2*8);
@@ -846,7 +875,7 @@
 	prep_xcrb(&xcrb, cardnr, preqcblk, prepcblk);
 
 	/* forward xcrb with request CPRB and reply CPRB to zcrypt dd */
-	rc = _zcrypt_send_cprb(&xcrb);
+	rc = zcrypt_send_cprb(&xcrb);
 	if (rc) {
 		DEBUG_ERR(
 			"%s zcrypt_send_cprb (cardnr=%d domain=%d) failed, rc=%d\n",
@@ -866,8 +895,9 @@
 	}
 
 	/* process response cprb param block */
-	prepcblk->rpl_parmb = ((u8 *) prepcblk) + sizeof(struct CPRBX);
-	prepparm = (struct gkrepparm *) prepcblk->rpl_parmb;
+	ptr = ((u8 *) prepcblk) + sizeof(struct CPRBX);
+	prepcblk->rpl_parmb = (u8 __user *) ptr;
+	prepparm = (struct gkrepparm *) ptr;
 
 	/* do some plausibility checks on the key block */
 	if (prepparm->kb.len < 120 + 5 * sizeof(uint16_t) ||
@@ -916,7 +946,7 @@
 			   int *key_token_size)
 {
 	int rc, n;
-	u8 *mem;
+	u8 *mem, *ptr;
 	struct CPRBX *preqcblk, *prepcblk;
 	struct ica_xcRB xcrb;
 	struct rule_array_block {
@@ -973,7 +1003,7 @@
 	preqcblk->req_parml = 0;
 
 	/* prepare request param block with IP request */
-	preq_ra_block = (struct rule_array_block *) preqcblk->req_parmb;
+	preq_ra_block = (struct rule_array_block __force *) preqcblk->req_parmb;
 	memcpy(preq_ra_block->subfunc_code, "IP", 2);
 	preq_ra_block->rule_array_len =  sizeof(uint16_t) + 2 * 8;
 	memcpy(preq_ra_block->rule_array, rule_array_1, 8);
@@ -986,7 +1016,7 @@
 	}
 
 	/* prepare vud block */
-	preq_vud_block = (struct vud_block *)
+	preq_vud_block = (struct vud_block __force *)
 		(preqcblk->req_parmb + preqcblk->req_parml);
 	n = complete ? 0 : (clr_key_bit_size + 7) / 8;
 	preq_vud_block->len = sizeof(struct vud_block) + n;
@@ -1000,7 +1030,7 @@
 	preqcblk->req_parml += preq_vud_block->len;
 
 	/* prepare key block */
-	preq_key_block = (struct key_block *)
+	preq_key_block = (struct key_block __force *)
 		(preqcblk->req_parmb + preqcblk->req_parml);
 	n = *key_token_size;
 	preq_key_block->len = sizeof(struct key_block) + n;
@@ -1013,7 +1043,7 @@
 	prep_xcrb(&xcrb, cardnr, preqcblk, prepcblk);
 
 	/* forward xcrb with request CPRB and reply CPRB to zcrypt dd */
-	rc = _zcrypt_send_cprb(&xcrb);
+	rc = zcrypt_send_cprb(&xcrb);
 	if (rc) {
 		DEBUG_ERR(
 			"%s zcrypt_send_cprb (cardnr=%d domain=%d) failed, rc=%d\n",
@@ -1033,8 +1063,9 @@
 	}
 
 	/* process response cprb param block */
-	prepcblk->rpl_parmb = ((u8 *) prepcblk) + sizeof(struct CPRBX);
-	prepparm = (struct iprepparm *) prepcblk->rpl_parmb;
+	ptr = ((u8 *) prepcblk) + sizeof(struct CPRBX);
+	prepcblk->rpl_parmb = (u8 __user *) ptr;
+	prepparm = (struct iprepparm *) ptr;
 
 	/* do some plausibility checks on the key block */
 	if (prepparm->kb.len < 120 + 3 * sizeof(uint16_t) ||
@@ -1150,7 +1181,7 @@
 		       u8 *protkey, u32 *protkeylen, u32 *protkeytype)
 {
 	int rc;
-	u8 *mem;
+	u8 *mem, *ptr;
 	struct CPRBX *preqcblk, *prepcblk;
 	struct ica_xcRB xcrb;
 	struct aureqparm {
@@ -1207,7 +1238,7 @@
 	preqcblk->domain = domain;
 
 	/* fill request cprb param block with AU request */
-	preqparm = (struct aureqparm *) preqcblk->req_parmb;
+	preqparm = (struct aureqparm __force *) preqcblk->req_parmb;
 	memcpy(preqparm->subfunc_code, "AU", 2);
 	preqparm->rule_array_len =
 		sizeof(preqparm->rule_array_len)
@@ -1229,7 +1260,7 @@
 	prep_xcrb(&xcrb, cardnr, preqcblk, prepcblk);
 
 	/* forward xcrb with request CPRB and reply CPRB to zcrypt dd */
-	rc = _zcrypt_send_cprb(&xcrb);
+	rc = zcrypt_send_cprb(&xcrb);
 	if (rc) {
 		DEBUG_ERR(
 			"%s zcrypt_send_cprb (cardnr=%d domain=%d) failed, rc=%d\n",
@@ -1256,14 +1287,15 @@
 	}
 
 	/* process response cprb param block */
-	prepcblk->rpl_parmb = ((u8 *) prepcblk) + sizeof(struct CPRBX);
-	prepparm = (struct aurepparm *) prepcblk->rpl_parmb;
+	ptr = ((u8 *) prepcblk) + sizeof(struct CPRBX);
+	prepcblk->rpl_parmb = (u8 __user *) ptr;
+	prepparm = (struct aurepparm *) ptr;
 
 	/* check the returned keyblock */
-	if (prepparm->vud.ckb.version != 0x01) {
-		DEBUG_ERR(
-			"%s reply param keyblock version mismatch 0x%02x != 0x01\n",
-			__func__, (int) prepparm->vud.ckb.version);
+	if (prepparm->vud.ckb.version != 0x01 &&
+	    prepparm->vud.ckb.version != 0x02) {
+		DEBUG_ERR("%s reply param keyblock version mismatch 0x%02x\n",
+			  __func__, (int) prepparm->vud.ckb.version);
 		rc = -EIO;
 		goto out;
 	}
@@ -1309,6 +1341,156 @@
 EXPORT_SYMBOL(cca_cipher2protkey);
 
 /*
+ * Derive protected key from CCA ECC secure private key.
+ */
+int cca_ecc2protkey(u16 cardnr, u16 domain, const u8 *key,
+		    u8 *protkey, u32 *protkeylen, u32 *protkeytype)
+{
+	int rc;
+	u8 *mem, *ptr;
+	struct CPRBX *preqcblk, *prepcblk;
+	struct ica_xcRB xcrb;
+	struct aureqparm {
+		u8  subfunc_code[2];
+		u16 rule_array_len;
+		u8  rule_array[8];
+		struct {
+			u16 len;
+			u16 tk_blob_len;
+			u16 tk_blob_tag;
+			u8  tk_blob[66];
+		} vud;
+		struct {
+			u16 len;
+			u16 cca_key_token_len;
+			u16 cca_key_token_flags;
+			u8  cca_key_token[0];
+		} kb;
+	} __packed * preqparm;
+	struct aurepparm {
+		u8  subfunc_code[2];
+		u16 rule_array_len;
+		struct {
+			u16 len;
+			u16 sublen;
+			u16 tag;
+			struct cpacfkeyblock {
+				u8  version;  /* version of this struct */
+				u8  flags[2];
+				u8  algo;
+				u8  form;
+				u8  pad1[3];
+				u16 keylen;
+				u8  key[0];  /* the key (keylen bytes) */
+				u16 keyattrlen;
+				u8  keyattr[32];
+				u8  pad2[1];
+				u8  vptype;
+				u8  vp[32];  /* verification pattern */
+			} ckb;
+		} vud;
+		struct {
+			u16 len;
+		} kb;
+	} __packed * prepparm;
+	int keylen = ((struct eccprivkeytoken *)key)->len;
+
+	/* get already prepared memory for 2 cprbs with param block each */
+	rc = alloc_and_prep_cprbmem(PARMBSIZE, &mem, &preqcblk, &prepcblk);
+	if (rc)
+		return rc;
+
+	/* fill request cprb struct */
+	preqcblk->domain = domain;
+
+	/* fill request cprb param block with AU request */
+	preqparm = (struct aureqparm __force *) preqcblk->req_parmb;
+	memcpy(preqparm->subfunc_code, "AU", 2);
+	preqparm->rule_array_len =
+		sizeof(preqparm->rule_array_len)
+		+ sizeof(preqparm->rule_array);
+	memcpy(preqparm->rule_array, "EXPT-SK ", 8);
+	/* vud, tk blob */
+	preqparm->vud.len = sizeof(preqparm->vud);
+	preqparm->vud.tk_blob_len = sizeof(preqparm->vud.tk_blob)
+		+ 2 * sizeof(uint16_t);
+	preqparm->vud.tk_blob_tag = 0x00C2;
+	/* kb, cca token */
+	preqparm->kb.len = keylen + 3 * sizeof(uint16_t);
+	preqparm->kb.cca_key_token_len = keylen + 2 * sizeof(uint16_t);
+	memcpy(preqparm->kb.cca_key_token, key, keylen);
+	/* now fill length of param block into cprb */
+	preqcblk->req_parml = sizeof(struct aureqparm) + keylen;
+
+	/* fill xcrb struct */
+	prep_xcrb(&xcrb, cardnr, preqcblk, prepcblk);
+
+	/* forward xcrb with request CPRB and reply CPRB to zcrypt dd */
+	rc = zcrypt_send_cprb(&xcrb);
+	if (rc) {
+		DEBUG_ERR(
+			"%s zcrypt_send_cprb (cardnr=%d domain=%d) failed, rc=%d\n",
+			__func__, (int) cardnr, (int) domain, rc);
+		goto out;
+	}
+
+	/* check response returncode and reasoncode */
+	if (prepcblk->ccp_rtcode != 0) {
+		DEBUG_ERR(
+			"%s unwrap secure key failure, card response %d/%d\n",
+			__func__,
+			(int) prepcblk->ccp_rtcode,
+			(int) prepcblk->ccp_rscode);
+		rc = -EIO;
+		goto out;
+	}
+	if (prepcblk->ccp_rscode != 0) {
+		DEBUG_WARN(
+			"%s unwrap secure key warning, card response %d/%d\n",
+			__func__,
+			(int) prepcblk->ccp_rtcode,
+			(int) prepcblk->ccp_rscode);
+	}
+
+	/* process response cprb param block */
+	ptr = ((u8 *) prepcblk) + sizeof(struct CPRBX);
+	prepcblk->rpl_parmb = (u8 __user *) ptr;
+	prepparm = (struct aurepparm *) ptr;
+
+	/* check the returned keyblock */
+	if (prepparm->vud.ckb.version != 0x02) {
+		DEBUG_ERR("%s reply param keyblock version mismatch 0x%02x != 0x02\n",
+			  __func__, (int) prepparm->vud.ckb.version);
+		rc = -EIO;
+		goto out;
+	}
+	if (prepparm->vud.ckb.algo != 0x81) {
+		DEBUG_ERR(
+			"%s reply param keyblock algo mismatch 0x%02x != 0x81\n",
+			__func__, (int) prepparm->vud.ckb.algo);
+		rc = -EIO;
+		goto out;
+	}
+
+	/* copy the translated protected key */
+	if (prepparm->vud.ckb.keylen > *protkeylen) {
+		DEBUG_ERR("%s prot keylen mismatch %d > buffersize %u\n",
+			  __func__, prepparm->vud.ckb.keylen, *protkeylen);
+		rc = -EIO;
+		goto out;
+	}
+	memcpy(protkey, prepparm->vud.ckb.key, prepparm->vud.ckb.keylen);
+	*protkeylen = prepparm->vud.ckb.keylen;
+	if (protkeytype)
+		*protkeytype = PKEY_KEYTYPE_ECC;
+
+out:
+	free_cprbmem(mem, PARMBSIZE, 0);
+	return rc;
+}
+EXPORT_SYMBOL(cca_ecc2protkey);
+
+/*
  * query cryptographic facility from CCA adapter
  */
 int cca_query_crypto_facility(u16 cardnr, u16 domain,
@@ -1346,7 +1528,7 @@
 	preqcblk->domain = domain;
 
 	/* fill request cprb param block with FQ request */
-	preqparm = (struct fqreqparm *) preqcblk->req_parmb;
+	preqparm = (struct fqreqparm __force *) preqcblk->req_parmb;
 	memcpy(preqparm->subfunc_code, "FQ", 2);
 	memcpy(preqparm->rule_array, keyword, sizeof(preqparm->rule_array));
 	preqparm->rule_array_len =
@@ -1359,7 +1541,7 @@
 	prep_xcrb(&xcrb, cardnr, preqcblk, prepcblk);
 
 	/* forward xcrb with request CPRB and reply CPRB to zcrypt dd */
-	rc = _zcrypt_send_cprb(&xcrb);
+	rc = zcrypt_send_cprb(&xcrb);
 	if (rc) {
 		DEBUG_ERR("%s zcrypt_send_cprb (cardnr=%d domain=%d) failed, rc=%d\n",
 			  __func__, (int) cardnr, (int) domain, rc);
@@ -1377,8 +1559,9 @@
 	}
 
 	/* process response cprb param block */
-	prepcblk->rpl_parmb = ((u8 *) prepcblk) + sizeof(struct CPRBX);
-	prepparm = (struct fqrepparm *) prepcblk->rpl_parmb;
+	ptr = ((u8 *) prepcblk) + sizeof(struct CPRBX);
+	prepcblk->rpl_parmb = (u8 __user *) ptr;
+	prepparm = (struct fqrepparm *) ptr;
 	ptr = prepparm->lvdata;
 
 	/* check and possibly copy reply rule array */
@@ -1516,21 +1699,38 @@
 				       rarray, &rlen, varray, &vlen);
 	if (rc == 0 && rlen >= 10*8 && vlen >= 204) {
 		memcpy(ci->serial, rarray, 8);
-		ci->new_mk_state = (char) rarray[7*8];
-		ci->cur_mk_state = (char) rarray[8*8];
-		ci->old_mk_state = (char) rarray[9*8];
-		if (ci->old_mk_state == '2')
-			memcpy(&ci->old_mkvp, varray + 172, 8);
-		if (ci->cur_mk_state == '2')
-			memcpy(&ci->cur_mkvp, varray + 184, 8);
-		if (ci->new_mk_state == '3')
-			memcpy(&ci->new_mkvp, varray + 196, 8);
-		found = 1;
+		ci->new_aes_mk_state = (char) rarray[7*8];
+		ci->cur_aes_mk_state = (char) rarray[8*8];
+		ci->old_aes_mk_state = (char) rarray[9*8];
+		if (ci->old_aes_mk_state == '2')
+			memcpy(&ci->old_aes_mkvp, varray + 172, 8);
+		if (ci->cur_aes_mk_state == '2')
+			memcpy(&ci->cur_aes_mkvp, varray + 184, 8);
+		if (ci->new_aes_mk_state == '3')
+			memcpy(&ci->new_aes_mkvp, varray + 196, 8);
+		found++;
+	}
+	if (!found)
+		goto out;
+	rlen = vlen = PAGE_SIZE/2;
+	rc = cca_query_crypto_facility(cardnr, domain, "STATICSB",
+				       rarray, &rlen, varray, &vlen);
+	if (rc == 0 && rlen >= 13*8 && vlen >= 240) {
+		ci->new_apka_mk_state = (char) rarray[10*8];
+		ci->cur_apka_mk_state = (char) rarray[11*8];
+		ci->old_apka_mk_state = (char) rarray[12*8];
+		if (ci->old_apka_mk_state == '2')
+			memcpy(&ci->old_apka_mkvp, varray + 208, 8);
+		if (ci->cur_apka_mk_state == '2')
+			memcpy(&ci->cur_apka_mkvp, varray + 220, 8);
+		if (ci->new_apka_mk_state == '3')
+			memcpy(&ci->new_apka_mkvp, varray + 232, 8);
+		found++;
 	}
 
+out:
 	free_page((unsigned long) pg);
-
-	return found ? 0 : -ENOENT;
+	return found == 2 ? 0 : -ENOENT;
 }
 
 /*
@@ -1568,9 +1768,9 @@
 		return -EINVAL;
 
 	/* fetch status of all crypto cards */
-	device_status = kmalloc_array(MAX_ZDEV_ENTRIES_EXT,
-				      sizeof(struct zcrypt_device_status_ext),
-				      GFP_KERNEL);
+	device_status = kvmalloc_array(MAX_ZDEV_ENTRIES_EXT,
+				       sizeof(struct zcrypt_device_status_ext),
+				       GFP_KERNEL);
 	if (!device_status)
 		return -ENOMEM;
 	zcrypt_device_status_mask_ext(device_status);
@@ -1584,16 +1784,16 @@
 			/* enabled CCA card, check current mkvp from cache */
 			if (cca_info_cache_fetch(card, dom, &ci) == 0 &&
 			    ci.hwtype >= minhwtype &&
-			    ci.cur_mk_state == '2' &&
-			    ci.cur_mkvp == mkvp) {
+			    ci.cur_aes_mk_state == '2' &&
+			    ci.cur_aes_mkvp == mkvp) {
 				if (!verify)
 					break;
 				/* verify: refresh card info */
 				if (fetch_cca_info(card, dom, &ci) == 0) {
 					cca_info_cache_update(card, dom, &ci);
 					if (ci.hwtype >= minhwtype &&
-					    ci.cur_mk_state == '2' &&
-					    ci.cur_mkvp == mkvp)
+					    ci.cur_aes_mk_state == '2' &&
+					    ci.cur_aes_mkvp == mkvp)
 						break;
 				}
 			}
@@ -1615,12 +1815,12 @@
 			if (fetch_cca_info(card, dom, &ci) == 0) {
 				cca_info_cache_update(card, dom, &ci);
 				if (ci.hwtype >= minhwtype &&
-				    ci.cur_mk_state == '2' &&
-				    ci.cur_mkvp == mkvp)
+				    ci.cur_aes_mk_state == '2' &&
+				    ci.cur_aes_mkvp == mkvp)
 					break;
 				if (ci.hwtype >= minhwtype &&
-				    ci.old_mk_state == '2' &&
-				    ci.old_mkvp == mkvp &&
+				    ci.old_aes_mk_state == '2' &&
+				    ci.old_aes_mkvp == mkvp &&
 				    oi < 0)
 					oi = i;
 			}
@@ -1640,7 +1840,7 @@
 	} else
 		rc = -ENODEV;
 
-	kfree(device_status);
+	kvfree(device_status);
 	return rc;
 }
 
@@ -1674,15 +1874,14 @@
 EXPORT_SYMBOL(cca_findcard);
 
 int cca_findcard2(u32 **apqns, u32 *nr_apqns, u16 cardnr, u16 domain,
-		  int minhwtype, u64 cur_mkvp, u64 old_mkvp, int verify)
+		  int minhwtype, int mktype, u64 cur_mkvp, u64 old_mkvp,
+		  int verify)
 {
 	struct zcrypt_device_status_ext *device_status;
-	int i, n, card, dom, curmatch, oldmatch, rc = 0;
+	u32 *_apqns = NULL, _nr_apqns = 0;
+	int i, card, dom, curmatch, oldmatch, rc = 0;
 	struct cca_info ci;
 
-	*apqns = NULL;
-	*nr_apqns = 0;
-
 	/* fetch status of all crypto cards */
 	device_status = kvmalloc_array(MAX_ZDEV_ENTRIES_EXT,
 				       sizeof(struct zcrypt_device_status_ext),
@@ -1691,67 +1890,73 @@
 		return -ENOMEM;
 	zcrypt_device_status_mask_ext(device_status);
 
-	/* loop two times: first gather eligible apqns, then store them */
-	while (1) {
-		n = 0;
-		/* walk through all the crypto cards */
-		for (i = 0; i < MAX_ZDEV_ENTRIES_EXT; i++) {
-			card = AP_QID_CARD(device_status[i].qid);
-			dom = AP_QID_QUEUE(device_status[i].qid);
-			/* check online state */
-			if (!device_status[i].online)
-				continue;
-			/* check for cca functions */
-			if (!(device_status[i].functions & 0x04))
-				continue;
-			/* check cardnr */
-			if (cardnr != 0xFFFF && card != cardnr)
-				continue;
-			/* check domain */
-			if (domain != 0xFFFF && dom != domain)
-				continue;
-			/* get cca info on this apqn */
-			if (cca_get_info(card, dom, &ci, verify))
-				continue;
-			/* current master key needs to be valid */
-			if (ci.cur_mk_state != '2')
-				continue;
-			/* check min hardware type */
-			if (minhwtype > 0 && minhwtype > ci.hwtype)
-				continue;
-			if (cur_mkvp || old_mkvp) {
-				/* check mkvps */
-				curmatch = oldmatch = 0;
-				if (cur_mkvp && cur_mkvp == ci.cur_mkvp)
+	/* allocate 1k space for up to 256 apqns */
+	_apqns = kmalloc_array(256, sizeof(u32), GFP_KERNEL);
+	if (!_apqns) {
+		kvfree(device_status);
+		return -ENOMEM;
+	}
+
+	/* walk through all the crypto apqns */
+	for (i = 0; i < MAX_ZDEV_ENTRIES_EXT; i++) {
+		card = AP_QID_CARD(device_status[i].qid);
+		dom = AP_QID_QUEUE(device_status[i].qid);
+		/* check online state */
+		if (!device_status[i].online)
+			continue;
+		/* check for cca functions */
+		if (!(device_status[i].functions & 0x04))
+			continue;
+		/* check cardnr */
+		if (cardnr != 0xFFFF && card != cardnr)
+			continue;
+		/* check domain */
+		if (domain != 0xFFFF && dom != domain)
+			continue;
+		/* get cca info on this apqn */
+		if (cca_get_info(card, dom, &ci, verify))
+			continue;
+		/* current master key needs to be valid */
+		if (mktype == AES_MK_SET && ci.cur_aes_mk_state != '2')
+			continue;
+		if (mktype == APKA_MK_SET && ci.cur_apka_mk_state != '2')
+			continue;
+		/* check min hardware type */
+		if (minhwtype > 0 && minhwtype > ci.hwtype)
+			continue;
+		if (cur_mkvp || old_mkvp) {
+			/* check mkvps */
+			curmatch = oldmatch = 0;
+			if (mktype == AES_MK_SET) {
+				if (cur_mkvp && cur_mkvp == ci.cur_aes_mkvp)
 					curmatch = 1;
-				if (old_mkvp && ci.old_mk_state == '2' &&
-				    old_mkvp == ci.old_mkvp)
+				if (old_mkvp && ci.old_aes_mk_state == '2' &&
+				    old_mkvp == ci.old_aes_mkvp)
 					oldmatch = 1;
-				if ((cur_mkvp || old_mkvp) &&
-				    (curmatch + oldmatch < 1))
-					continue;
+			} else {
+				if (cur_mkvp && cur_mkvp == ci.cur_apka_mkvp)
+					curmatch = 1;
+				if (old_mkvp && ci.old_apka_mk_state == '2' &&
+				    old_mkvp == ci.old_apka_mkvp)
+					oldmatch = 1;
 			}
-			/* apqn passed all filtering criterons */
-			if (*apqns && n < *nr_apqns)
-				(*apqns)[n] = (((u16)card) << 16) | ((u16) dom);
-			n++;
+			if (curmatch + oldmatch < 1)
+				continue;
 		}
-		/* loop 2nd time: array has been filled */
-		if (*apqns)
-			break;
-		/* loop 1st time: have # of eligible apqns in n */
-		if (!n) {
-			rc = -ENODEV; /* no eligible apqns found */
-			break;
-		}
-		*nr_apqns = n;
-		/* allocate array to store n apqns into */
-		*apqns = kmalloc_array(n, sizeof(u32), GFP_KERNEL);
-		if (!*apqns) {
-			rc = -ENOMEM;
-			break;
-		}
-		verify = 0;
+		/* apqn passed all filtering criteria, add to the array */
+		if (_nr_apqns < 256)
+			_apqns[_nr_apqns++] = (((u16)card) << 16) | ((u16) dom);
+	}
+
+	/* nothing found ? */
+	if (!_nr_apqns) {
+		kfree(_apqns);
+		rc = -ENODEV;
+	} else {
+		/* no re-allocation, simply return the _apqns array */
+		*apqns = _apqns;
+		*nr_apqns = _nr_apqns;
+		rc = 0;
 	}
 
 	kvfree(device_status);
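
A hedged usage sketch for the reworked cca_findcard2(); the signature and the
AES_MK_SET selector are declared in zcrypt_ccamisc.h below, while
try_aes_apqns() and its mkvp argument are illustrative only. 0xFFFF wildcards
match any card or domain, and each returned entry packs the card number into
the upper and the domain into the lower 16 bits:

static int try_aes_apqns(u64 mkvp)
{
	u32 *apqns, nr_apqns, i;
	int rc;

	/* any card, any domain, any hw type; current AES MK must match */
	rc = cca_findcard2(&apqns, &nr_apqns, 0xFFFF, 0xFFFF,
			   0, AES_MK_SET, mkvp, 0, 0);
	if (rc)
		return rc;
	for (i = 0; i < nr_apqns; i++) {
		u16 card = apqns[i] >> 16;
		u16 dom = apqns[i] & 0xFFFF;

		/* ... issue the request on (card, dom) ... */
	}
	kfree(apqns);
	return 0;
}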
diff --git a/drivers/s390/crypto/zcrypt_ccamisc.h b/drivers/s390/crypto/zcrypt_ccamisc.h
index 77b6cc7..e710544 100644
--- a/drivers/s390/crypto/zcrypt_ccamisc.h
+++ b/drivers/s390/crypto/zcrypt_ccamisc.h
@@ -14,11 +14,13 @@
 #include <asm/pkey.h>
 
 /* Key token types */
-#define TOKTYPE_NON_CCA		0x00 /* Non-CCA key token */
-#define TOKTYPE_CCA_INTERNAL	0x01 /* CCA internal key token */
+#define TOKTYPE_NON_CCA		 0x00 /* Non-CCA key token */
+#define TOKTYPE_CCA_INTERNAL	 0x01 /* CCA internal sym key token */
+#define TOKTYPE_CCA_INTERNAL_PKA 0x1f /* CCA internal asym key token */
 
 /* For TOKTYPE_NON_CCA: */
 #define TOKVER_PROTECTED_KEY	0x01 /* Protected key token */
+#define TOKVER_CLEAR_KEY	0x02 /* Clear key token */
 
 /* For TOKTYPE_CCA_INTERNAL: */
 #define TOKVER_CCA_AES		0x04 /* CCA AES key token */
@@ -89,7 +91,32 @@
 	u16 kmf1;     /* key management field 1 */
 	u16 kmf2;     /* key management field 2 */
 	u16 kmf3;     /* key management field 3 */
-	u8  vdata[0]; /* variable part data follows */
+	u8  vdata[]; /* variable part data follows */
+} __packed;
+
+/* inside view of an CCA secure ECC private key */
+struct eccprivkeytoken {
+	u8  type;     /* 0x1f for internal asym key token */
+	u8  version;  /* should be 0x00 */
+	u16 len;      /* total key token length in bytes */
+	u8  res1[4];
+	u8  secid;    /* 0x20 for ECC priv key section marker */
+	u8  secver;   /* section version */
+	u16 seclen;   /* section length */
+	u8  wtype;    /* wrapping method, 0x00 clear, 0x01 AES */
+	u8  htype;    /* hash method, 0x02 for SHA-256 */
+	u8  res2[2];
+	u8  kutc;     /* key usage and translation control */
+	u8  ctype;    /* curve type */
+	u8  kfs;      /* key format and security */
+	u8  ksrc;     /* key source */
+	u16 pbitlen;  /* length of prime p in bits */
+	u16 ibmadlen; /* IBM associated data length in bytes */
+	u64 mkvp;     /* master key verification pattern */
+	u8  opk[48];  /* encrypted object protection key data */
+	u16 adatalen; /* associated data length in bytes */
+	u16 fseclen;  /* formatted section length in bytes */
+	u8  more_data[]; /* more data follows */
 } __packed;
 
 /* Some defines for the CCA AES cipherkeytoken kmf1 field */
@@ -122,6 +149,14 @@
 			      int checkcpacfexport);
 
 /*
+ * Simple check if the token is a valid CCA secure ECC private
+ * key token. Returns 0 on success or errno value on failure.
+ */
+int cca_check_sececckeytoken(debug_info_t *dbg, int dbflvl,
+			     const u8 *token, size_t keysize,
+			     int checkcpacfexport);
+
+/*
  * Generate (random) CCA AES DATA secure key.
  */
 int cca_genseckey(u16 cardnr, u16 domain, u32 keybitsize, u8 *seckey);
@@ -158,6 +193,12 @@
 		      const u8 *clrkey, u8 *keybuf, size_t *keybufsize);
 
 /*
+ * Derive protected key from CCA ECC secure private key.
+ */
+int cca_ecc2protkey(u16 cardnr, u16 domain, const u8 *key,
+		    u8 *protkey, u32 *protkeylen, u32 *protkeytype);
+
+/*
  * Query cryptographic facility from CCA adapter
  */
 int cca_query_crypto_facility(u16 cardnr, u16 domain,
@@ -185,6 +226,8 @@
  * - if verify is enabled and a cur_mkvp and/or old_mkvp
  *   value is given, then refetch the cca_info and make sure the current
  *   cur_mkvp or old_mkvp values of the apqn are used.
+ * The mktype determines which set of master keys to use:
+ *   0 = AES_MK_SET - AES MK set, 1 = APKA_MK_SET - APKA MK set
  * The array of apqn entries is allocated with kmalloc and returned in *apqns;
  * the number of apqns stored into the list is returned in *nr_apqns. One apqn
  * entry is simply a 32 bit value with 16 bit cardnr and 16 bit domain nr and
@@ -193,18 +236,28 @@
  * -ENODEV is returned.
  */
 int cca_findcard2(u32 **apqns, u32 *nr_apqns, u16 cardnr, u16 domain,
-		  int minhwtype, u64 cur_mkvp, u64 old_mkvp, int verify);
+		  int minhwtype, int mktype, u64 cur_mkvp, u64 old_mkvp,
+		  int verify);
+
+#define AES_MK_SET  0
+#define APKA_MK_SET 1
 
 /* struct to hold info for each CCA queue */
 struct cca_info {
-	int  hwtype;	    /* one of the defined AP_DEVICE_TYPE_* */
-	char new_mk_state;  /* '1' empty, '2' partially full, '3' full */
-	char cur_mk_state;  /* '1' invalid, '2' valid */
-	char old_mk_state;  /* '1' invalid, '2' valid */
-	u64  new_mkvp;	    /* truncated sha256 hash of new master key */
-	u64  cur_mkvp;	    /* truncated sha256 hash of current master key */
-	u64  old_mkvp;	    /* truncated sha256 hash of old master key */
-	char serial[9];     /* serial number string (8 ascii numbers + 0x00) */
+	int  hwtype;		/* one of the defined AP_DEVICE_TYPE_* */
+	char new_aes_mk_state;	/* '1' empty, '2' partially full, '3' full */
+	char cur_aes_mk_state;	/* '1' invalid, '2' valid */
+	char old_aes_mk_state;	/* '1' invalid, '2' valid */
+	char new_apka_mk_state; /* '1' empty, '2' partially full, '3' full */
+	char cur_apka_mk_state; /* '1' invalid, '2' valid */
+	char old_apka_mk_state; /* '1' invalid, '2' valid */
+	u64  new_aes_mkvp;	/* truncated sha256 of new aes master key */
+	u64  cur_aes_mkvp;	/* truncated sha256 of current aes master key */
+	u64  old_aes_mkvp;	/* truncated sha256 of old aes master key */
+	u64  new_apka_mkvp;	/* truncated sha256 of new apka master key */
+	u64  cur_apka_mkvp;	/* truncated sha256 of current apka mk */
+	u64  old_apka_mkvp;	/* truncated sha256 of old apka mk */
+	char serial[9];		/* serial number (8 ascii numbers + 0x00) */
 };
 
 /*
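
A hedged sketch combining the two new ECC helpers declared above,
cca_check_sececckeytoken() and cca_ecc2protkey(); the wrapper function and
the 256-byte buffer size are illustrative assumptions, not part of the patch:

static int ecc_token_to_protkey(u16 cardnr, u16 domain,
				const u8 *token, size_t keysize)
{
	u8 protkey[256];	/* illustrative buffer size */
	u32 protkeylen = sizeof(protkey);
	u32 protkeytype;
	int rc;

	/* reject anything that is not a CCA ECC private key token with
	 * the XPRTCPAC (export to CPACF) usage bit set */
	rc = cca_check_sececckeytoken(NULL, 0, token, keysize, 1);
	if (rc)
		return rc;
	/* derive the CPACF protected key on the given adapter/domain;
	 * on success protkeytype is set to PKEY_KEYTYPE_ECC */
	return cca_ecc2protkey(cardnr, domain, token,
			       protkey, &protkeylen, &protkeytype);
}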
diff --git a/drivers/s390/crypto/zcrypt_cex2a.c b/drivers/s390/crypto/zcrypt_cex2a.c
index 7cbb384..226a561 100644
--- a/drivers/s390/crypto/zcrypt_cex2a.c
+++ b/drivers/s390/crypto/zcrypt_cex2a.c
@@ -94,8 +94,7 @@
 	if (ac->ap_dev.device_type == AP_DEVICE_TYPE_CEX2A) {
 		zc->min_mod_size = CEX2A_MIN_MOD_SIZE;
 		zc->max_mod_size = CEX2A_MAX_MOD_SIZE;
-		memcpy(zc->speed_rating, CEX2A_SPEED_IDX,
-		       sizeof(CEX2A_SPEED_IDX));
+		zc->speed_rating = CEX2A_SPEED_IDX;
 		zc->max_exp_bit_length = CEX2A_MAX_MOD_SIZE;
 		zc->type_string = "CEX2A";
 		zc->user_space_type = ZCRYPT_CEX2A;
@@ -108,8 +107,7 @@
 			zc->max_mod_size = CEX3A_MAX_MOD_SIZE;
 			zc->max_exp_bit_length = CEX3A_MAX_MOD_SIZE;
 		}
-		memcpy(zc->speed_rating, CEX3A_SPEED_IDX,
-		       sizeof(CEX3A_SPEED_IDX));
+		zc->speed_rating = CEX3A_SPEED_IDX;
 		zc->type_string = "CEX3A";
 		zc->user_space_type = ZCRYPT_CEX3A;
 	} else {
@@ -204,8 +202,6 @@
 static struct ap_driver zcrypt_cex2a_queue_driver = {
 	.probe = zcrypt_cex2a_queue_probe,
 	.remove = zcrypt_cex2a_queue_remove,
-	.suspend = ap_queue_suspend,
-	.resume = ap_queue_resume,
 	.ids = zcrypt_cex2a_queue_ids,
 	.flags = AP_DRIVER_FLAG_DEFAULT,
 };
diff --git a/drivers/s390/crypto/zcrypt_cex2c.c b/drivers/s390/crypto/zcrypt_cex2c.c
index c78c0d1..7a8cbdb 100644
--- a/drivers/s390/crypto/zcrypt_cex2c.c
+++ b/drivers/s390/crypto/zcrypt_cex2c.c
@@ -25,6 +25,7 @@
 #include "zcrypt_msgtype6.h"
 #include "zcrypt_cex2c.h"
 #include "zcrypt_cca_key.h"
+#include "zcrypt_ccamisc.h"
 
 #define CEX2C_MIN_MOD_SIZE	 16	/*  128 bits	*/
 #define CEX2C_MAX_MOD_SIZE	256	/* 2048 bits	*/
@@ -58,6 +59,118 @@
 
 MODULE_DEVICE_TABLE(ap, zcrypt_cex2c_queue_ids);
 
+/*
+ * CCA card additional device attributes
+ */
+static ssize_t cca_serialnr_show(struct device *dev,
+				 struct device_attribute *attr,
+				 char *buf)
+{
+	struct cca_info ci;
+	struct ap_card *ac = to_ap_card(dev);
+	struct zcrypt_card *zc = ac->private;
+
+	memset(&ci, 0, sizeof(ci));
+
+	if (ap_domain_index >= 0)
+		cca_get_info(ac->id, ap_domain_index, &ci, zc->online);
+
+	return scnprintf(buf, PAGE_SIZE, "%s\n", ci.serial);
+}
+
+static struct device_attribute dev_attr_cca_serialnr =
+	__ATTR(serialnr, 0444, cca_serialnr_show, NULL);
+
+static struct attribute *cca_card_attrs[] = {
+	&dev_attr_cca_serialnr.attr,
+	NULL,
+};
+
+static const struct attribute_group cca_card_attr_grp = {
+	.attrs = cca_card_attrs,
+};
+
+/*
+ * CCA queue additional device attributes
+ */
+static ssize_t cca_mkvps_show(struct device *dev,
+			      struct device_attribute *attr,
+			      char *buf)
+{
+	int n = 0;
+	struct cca_info ci;
+	struct zcrypt_queue *zq = to_ap_queue(dev)->private;
+	static const char * const cao_state[] = { "invalid", "valid" };
+	static const char * const new_state[] = { "empty", "partial", "full" };
+
+	memset(&ci, 0, sizeof(ci));
+
+	cca_get_info(AP_QID_CARD(zq->queue->qid),
+		     AP_QID_QUEUE(zq->queue->qid),
+		     &ci, zq->online);
+
+	if (ci.new_aes_mk_state >= '1' && ci.new_aes_mk_state <= '3')
+		n = scnprintf(buf, PAGE_SIZE, "AES NEW: %s 0x%016llx\n",
+			      new_state[ci.new_aes_mk_state - '1'],
+			      ci.new_aes_mkvp);
+	else
+		n = scnprintf(buf, PAGE_SIZE, "AES NEW: - -\n");
+
+	if (ci.cur_aes_mk_state >= '1' && ci.cur_aes_mk_state <= '2')
+		n += scnprintf(buf + n, PAGE_SIZE - n,
+			       "AES CUR: %s 0x%016llx\n",
+			       cao_state[ci.cur_aes_mk_state - '1'],
+			       ci.cur_aes_mkvp);
+	else
+		n += scnprintf(buf + n, PAGE_SIZE - n, "AES CUR: - -\n");
+
+	if (ci.old_aes_mk_state >= '1' && ci.old_aes_mk_state <= '2')
+		n += scnprintf(buf + n, PAGE_SIZE - n,
+			       "AES OLD: %s 0x%016llx\n",
+			       cao_state[ci.old_aes_mk_state - '1'],
+			       ci.old_aes_mkvp);
+	else
+		n += scnprintf(buf + n, PAGE_SIZE - n, "AES OLD: - -\n");
+
+	if (ci.new_apka_mk_state >= '1' && ci.new_apka_mk_state <= '3')
+		n += scnprintf(buf + n, PAGE_SIZE - n,
+			       "APKA NEW: %s 0x%016llx\n",
+			       new_state[ci.new_apka_mk_state - '1'],
+			       ci.new_apka_mkvp);
+	else
+		n += scnprintf(buf + n, PAGE_SIZE - n, "APKA NEW: - -\n");
+
+	if (ci.cur_apka_mk_state >= '1' && ci.cur_apka_mk_state <= '2')
+		n += scnprintf(buf + n, PAGE_SIZE - n,
+			       "APKA CUR: %s 0x%016llx\n",
+			       cao_state[ci.cur_apka_mk_state - '1'],
+			       ci.cur_apka_mkvp);
+	else
+		n += scnprintf(buf + n, PAGE_SIZE - n, "APKA CUR: - -\n");
+
+	if (ci.old_apka_mk_state >= '1' && ci.old_apka_mk_state <= '2')
+		n += scnprintf(buf + n, PAGE_SIZE - n,
+			       "APKA OLD: %s 0x%016llx\n",
+			       cao_state[ci.old_apka_mk_state - '1'],
+			       ci.old_apka_mkvp);
+	else
+		n += scnprintf(buf + n, PAGE_SIZE - n, "APKA OLD: - -\n");
+
+	return n;
+}
+
+static struct device_attribute dev_attr_cca_mkvps =
+	__ATTR(mkvps, 0444, cca_mkvps_show, NULL);
+
+static struct attribute *cca_queue_attrs[] = {
+	&dev_attr_cca_mkvps.attr,
+	NULL,
+};
+
+static const struct attribute_group cca_queue_attr_grp = {
+	.attrs = cca_queue_attrs,
+};
+
 /**
  * Large random number detection function. It sends a message to a CEX2C/CEX3C
  * card to find out if large random numbers are supported.
@@ -87,24 +200,23 @@
 	int rc, i;
 
 	ap_init_message(&ap_msg);
-	ap_msg.message = (void *) get_zeroed_page(GFP_KERNEL);
-	if (!ap_msg.message)
+	ap_msg.msg = (void *) get_zeroed_page(GFP_KERNEL);
+	if (!ap_msg.msg)
 		return -ENOMEM;
 
 	rng_type6CPRB_msgX(&ap_msg, 4, &domain);
 
-	msg = ap_msg.message;
+	msg = ap_msg.msg;
 	msg->cprbx.domain = AP_QID_QUEUE(aq->qid);
 
-	rc = ap_send(aq->qid, 0x0102030405060708ULL, ap_msg.message,
-		     ap_msg.length);
+	rc = ap_send(aq->qid, 0x0102030405060708ULL, ap_msg.msg, ap_msg.len);
 	if (rc)
 		goto out_free;
 
 	/* Wait for the test message to complete. */
 	for (i = 0; i < 2 * HZ; i++) {
 		msleep(1000 / HZ);
-		rc = ap_recv(aq->qid, &psmid, ap_msg.message, 4096);
+		rc = ap_recv(aq->qid, &psmid, ap_msg.msg, 4096);
 		if (rc == 0 && psmid == 0x0102030405060708ULL)
 			break;
 	}
@@ -115,13 +227,13 @@
 		goto out_free;
 	}
 
-	reply = ap_msg.message;
+	reply = ap_msg.msg;
 	if (reply->cprbx.ccp_rtcode == 0 && reply->cprbx.ccp_rscode == 0)
 		rc = 1;
 	else
 		rc = 0;
 out_free:
-	free_page((unsigned long) ap_msg.message);
+	free_page((unsigned long) ap_msg.msg);
 	return rc;
 }
 
@@ -154,8 +266,7 @@
 	case AP_DEVICE_TYPE_CEX2C:
 		zc->user_space_type = ZCRYPT_CEX2C;
 		zc->type_string = "CEX2C";
-		memcpy(zc->speed_rating, CEX2C_SPEED_IDX,
-		       sizeof(CEX2C_SPEED_IDX));
+		zc->speed_rating = CEX2C_SPEED_IDX;
 		zc->min_mod_size = CEX2C_MIN_MOD_SIZE;
 		zc->max_mod_size = CEX2C_MAX_MOD_SIZE;
 		zc->max_exp_bit_length = CEX2C_MAX_MOD_SIZE;
@@ -163,8 +274,7 @@
 	case AP_DEVICE_TYPE_CEX3C:
 		zc->user_space_type = ZCRYPT_CEX3C;
 		zc->type_string = "CEX3C";
-		memcpy(zc->speed_rating, CEX3C_SPEED_IDX,
-		       sizeof(CEX3C_SPEED_IDX));
+		zc->speed_rating = CEX3C_SPEED_IDX;
 		zc->min_mod_size = CEX3C_MIN_MOD_SIZE;
 		zc->max_mod_size = CEX3C_MAX_MOD_SIZE;
 		zc->max_exp_bit_length = CEX3C_MAX_MOD_SIZE;
@@ -179,6 +289,17 @@
 	if (rc) {
 		ac->private = NULL;
 		zcrypt_card_free(zc);
+		return rc;
+	}
+
+	if (ap_test_bit(&ac->functions, AP_FUNC_COPRO)) {
+		rc = sysfs_create_group(&ap_dev->device.kobj,
+					&cca_card_attr_grp);
+		if (rc) {
+			zcrypt_card_unregister(zc);
+			ac->private = NULL;
+			zcrypt_card_free(zc);
+		}
 	}
 
 	return rc;
@@ -190,8 +311,11 @@
  */
 static void zcrypt_cex2c_card_remove(struct ap_device *ap_dev)
 {
+	struct ap_card *ac = to_ap_card(&ap_dev->device);
 	struct zcrypt_card *zc = to_ap_card(&ap_dev->device)->private;
 
+	if (ap_test_bit(&ac->functions, AP_FUNC_COPRO))
+		sysfs_remove_group(&ap_dev->device.kobj, &cca_card_attr_grp);
 	if (zc)
 		zcrypt_card_unregister(zc);
 }
@@ -240,7 +364,19 @@
 	if (rc) {
 		aq->private = NULL;
 		zcrypt_queue_free(zq);
+		return rc;
 	}
+
+	if (ap_test_bit(&aq->card->functions, AP_FUNC_COPRO)) {
+		rc = sysfs_create_group(&ap_dev->device.kobj,
+					&cca_queue_attr_grp);
+		if (rc) {
+			zcrypt_queue_unregister(zq);
+			aq->private = NULL;
+			zcrypt_queue_free(zq);
+		}
+	}
+
 	return rc;
 }
 
@@ -253,6 +389,8 @@
 	struct ap_queue *aq = to_ap_queue(&ap_dev->device);
 	struct zcrypt_queue *zq = aq->private;
 
+	if (ap_test_bit(&aq->card->functions, AP_FUNC_COPRO))
+		sysfs_remove_group(&ap_dev->device.kobj, &cca_queue_attr_grp);
 	if (zq)
 		zcrypt_queue_unregister(zq);
 }
@@ -260,8 +398,6 @@
 static struct ap_driver zcrypt_cex2c_queue_driver = {
 	.probe = zcrypt_cex2c_queue_probe,
 	.remove = zcrypt_cex2c_queue_remove,
-	.suspend = ap_queue_suspend,
-	.resume = ap_queue_resume,
 	.ids = zcrypt_cex2c_queue_ids,
 	.flags = AP_DRIVER_FLAG_DEFAULT,
 };
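
The new per-queue mkvps attribute (added here for CEX2C/CEX3C and, below, for
CEX4+ CCA queues) exposes the AES and APKA master key states and verification
patterns through sysfs. A hedged userspace sketch; the device path is
illustrative and depends on the card and domain ids of the machine:

#include <stdio.h>

int main(void)
{
	char line[128];
	FILE *f = fopen("/sys/bus/ap/devices/01.0011/mkvps", "r");

	if (!f)
		return 1;
	/* expect lines like "AES CUR: valid 0x..." or "APKA OLD: - -" */
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);
	fclose(f);
	return 0;
}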
diff --git a/drivers/s390/crypto/zcrypt_cex4.c b/drivers/s390/crypto/zcrypt_cex4.c
index 6fabc90..f5195bc 100644
--- a/drivers/s390/crypto/zcrypt_cex4.c
+++ b/drivers/s390/crypto/zcrypt_cex4.c
@@ -19,6 +19,7 @@
 #include "zcrypt_error.h"
 #include "zcrypt_cex4.h"
 #include "zcrypt_ccamisc.h"
+#include "zcrypt_ep11misc.h"
 
 #define CEX4A_MIN_MOD_SIZE	  1	/*    8 bits	*/
 #define CEX4A_MAX_MOD_SIZE_2K	256	/* 2048 bits	*/
@@ -71,11 +72,11 @@
 MODULE_DEVICE_TABLE(ap, zcrypt_cex4_queue_ids);
 
 /*
- * CCA card addditional device attributes
+ * CCA card additional device attributes
  */
-static ssize_t serialnr_show(struct device *dev,
-			     struct device_attribute *attr,
-			     char *buf)
+static ssize_t cca_serialnr_show(struct device *dev,
+				 struct device_attribute *attr,
+				 char *buf)
 {
 	struct cca_info ci;
 	struct ap_card *ac = to_ap_card(dev);
@@ -86,25 +87,27 @@
 	if (ap_domain_index >= 0)
 		cca_get_info(ac->id, ap_domain_index, &ci, zc->online);
 
-	return snprintf(buf, PAGE_SIZE, "%s\n", ci.serial);
+	return scnprintf(buf, PAGE_SIZE, "%s\n", ci.serial);
 }
-static DEVICE_ATTR_RO(serialnr);
+
+static struct device_attribute dev_attr_cca_serialnr =
+	__ATTR(serialnr, 0444, cca_serialnr_show, NULL);
 
 static struct attribute *cca_card_attrs[] = {
-	&dev_attr_serialnr.attr,
+	&dev_attr_cca_serialnr.attr,
 	NULL,
 };
 
-static const struct attribute_group cca_card_attr_group = {
+static const struct attribute_group cca_card_attr_grp = {
 	.attrs = cca_card_attrs,
 };
 
-/*
- * CCA queue addditional device attributes
- */
-static ssize_t mkvps_show(struct device *dev,
-			  struct device_attribute *attr,
-			  char *buf)
+/*
+ * CCA queue additional device attributes
+ */
+static ssize_t cca_mkvps_show(struct device *dev,
+			      struct device_attribute *attr,
+			      char *buf)
 {
 	int n = 0;
 	struct cca_info ci;
@@ -118,37 +121,282 @@
 		     AP_QID_QUEUE(zq->queue->qid),
 		     &ci, zq->online);
 
-	if (ci.new_mk_state >= '1' && ci.new_mk_state <= '3')
-		n = snprintf(buf, PAGE_SIZE, "AES NEW: %s 0x%016llx\n",
-			     new_state[ci.new_mk_state - '1'], ci.new_mkvp);
+	if (ci.new_aes_mk_state >= '1' && ci.new_aes_mk_state <= '3')
+		n = scnprintf(buf, PAGE_SIZE, "AES NEW: %s 0x%016llx\n",
+			      new_state[ci.new_aes_mk_state - '1'],
+			      ci.new_aes_mkvp);
 	else
-		n = snprintf(buf, PAGE_SIZE, "AES NEW: - -\n");
+		n = scnprintf(buf, PAGE_SIZE, "AES NEW: - -\n");
 
-	if (ci.cur_mk_state >= '1' && ci.cur_mk_state <= '2')
-		n += snprintf(buf + n, PAGE_SIZE - n, "AES CUR: %s 0x%016llx\n",
-			      cao_state[ci.cur_mk_state - '1'], ci.cur_mkvp);
+	if (ci.cur_aes_mk_state >= '1' && ci.cur_aes_mk_state <= '2')
+		n += scnprintf(buf + n, PAGE_SIZE - n,
+			       "AES CUR: %s 0x%016llx\n",
+			       cao_state[ci.cur_aes_mk_state - '1'],
+			       ci.cur_aes_mkvp);
 	else
-		n += snprintf(buf + n, PAGE_SIZE - n, "AES CUR: - -\n");
+		n += scnprintf(buf + n, PAGE_SIZE - n, "AES CUR: - -\n");
 
-	if (ci.old_mk_state >= '1' && ci.old_mk_state <= '2')
-		n += snprintf(buf + n, PAGE_SIZE - n, "AES OLD: %s 0x%016llx\n",
-			      cao_state[ci.old_mk_state - '1'], ci.old_mkvp);
+	if (ci.old_aes_mk_state >= '1' && ci.old_aes_mk_state <= '2')
+		n += scnprintf(buf + n, PAGE_SIZE - n,
+			       "AES OLD: %s 0x%016llx\n",
+			       cao_state[ci.old_aes_mk_state - '1'],
+			       ci.old_aes_mkvp);
 	else
-		n += snprintf(buf + n, PAGE_SIZE - n, "AES OLD: - -\n");
+		n += scnprintf(buf + n, PAGE_SIZE - n, "AES OLD: - -\n");
+
+	if (ci.new_apka_mk_state >= '1' && ci.new_apka_mk_state <= '3')
+		n += scnprintf(buf + n, PAGE_SIZE - n,
+			       "APKA NEW: %s 0x%016llx\n",
+			       new_state[ci.new_apka_mk_state - '1'],
+			       ci.new_apka_mkvp);
+	else
+		n += scnprintf(buf + n, PAGE_SIZE - n, "APKA NEW: - -\n");
+
+	if (ci.cur_apka_mk_state >= '1' && ci.cur_apka_mk_state <= '2')
+		n += scnprintf(buf + n, PAGE_SIZE - n,
+			       "APKA CUR: %s 0x%016llx\n",
+			       cao_state[ci.cur_apka_mk_state - '1'],
+			       ci.cur_apka_mkvp);
+	else
+		n += scnprintf(buf + n, PAGE_SIZE - n, "APKA CUR: - -\n");
+
+	if (ci.old_apka_mk_state >= '1' && ci.old_apka_mk_state <= '2')
+		n += scnprintf(buf + n, PAGE_SIZE - n,
+			       "APKA OLD: %s 0x%016llx\n",
+			       cao_state[ci.old_apka_mk_state - '1'],
+			       ci.old_apka_mkvp);
+	else
+		n += scnprintf(buf + n, PAGE_SIZE - n, "APKA OLD: - -\n");
 
 	return n;
 }
-static DEVICE_ATTR_RO(mkvps);
+
+static struct device_attribute dev_attr_cca_mkvps =
+	__ATTR(mkvps, 0444, cca_mkvps_show, NULL);
 
 static struct attribute *cca_queue_attrs[] = {
-	&dev_attr_mkvps.attr,
+	&dev_attr_cca_mkvps.attr,
 	NULL,
 };
 
-static const struct attribute_group cca_queue_attr_group = {
+static const struct attribute_group cca_queue_attr_grp = {
 	.attrs = cca_queue_attrs,
 };
 
+/*
+ * EP11 card additional device attributes
+ */
+static ssize_t ep11_api_ordinalnr_show(struct device *dev,
+				       struct device_attribute *attr,
+				       char *buf)
+{
+	struct ep11_card_info ci;
+	struct ap_card *ac = to_ap_card(dev);
+	struct zcrypt_card *zc = ac->private;
+
+	memset(&ci, 0, sizeof(ci));
+
+	ep11_get_card_info(ac->id, &ci, zc->online);
+
+	if (ci.API_ord_nr > 0)
+		return scnprintf(buf, PAGE_SIZE, "%u\n", ci.API_ord_nr);
+	else
+		return scnprintf(buf, PAGE_SIZE, "\n");
+}
+
+static struct device_attribute dev_attr_ep11_api_ordinalnr =
+	__ATTR(API_ordinalnr, 0444, ep11_api_ordinalnr_show, NULL);
+
+static ssize_t ep11_fw_version_show(struct device *dev,
+				    struct device_attribute *attr,
+				    char *buf)
+{
+	struct ep11_card_info ci;
+	struct ap_card *ac = to_ap_card(dev);
+	struct zcrypt_card *zc = ac->private;
+
+	memset(&ci, 0, sizeof(ci));
+
+	ep11_get_card_info(ac->id, &ci, zc->online);
+
+	if (ci.FW_version > 0)
+		return scnprintf(buf, PAGE_SIZE, "%d.%d\n",
+				 (int)(ci.FW_version >> 8),
+				 (int)(ci.FW_version & 0xFF));
+	else
+		return scnprintf(buf, PAGE_SIZE, "\n");
+}
+
+static struct device_attribute dev_attr_ep11_fw_version =
+	__ATTR(FW_version, 0444, ep11_fw_version_show, NULL);
+
+static ssize_t ep11_serialnr_show(struct device *dev,
+				  struct device_attribute *attr,
+				  char *buf)
+{
+	struct ep11_card_info ci;
+	struct ap_card *ac = to_ap_card(dev);
+	struct zcrypt_card *zc = ac->private;
+
+	memset(&ci, 0, sizeof(ci));
+
+	ep11_get_card_info(ac->id, &ci, zc->online);
+
+	if (ci.serial[0])
+		return scnprintf(buf, PAGE_SIZE, "%16.16s\n", ci.serial);
+	else
+		return scnprintf(buf, PAGE_SIZE, "\n");
+}
+
+static struct device_attribute dev_attr_ep11_serialnr =
+	__ATTR(serialnr, 0444, ep11_serialnr_show, NULL);
+
+static const struct {
+	int	    mode_bit;
+	const char *mode_txt;
+} ep11_op_modes[] = {
+	{ 0, "FIPS2009" },
+	{ 1, "BSI2009" },
+	{ 2, "FIPS2011" },
+	{ 3, "BSI2011" },
+	{ 6, "BSICC2017" },
+	{ 0, NULL }
+};
+
+static ssize_t ep11_card_op_modes_show(struct device *dev,
+				       struct device_attribute *attr,
+				       char *buf)
+{
+	int i, n = 0;
+	struct ep11_card_info ci;
+	struct ap_card *ac = to_ap_card(dev);
+	struct zcrypt_card *zc = ac->private;
+
+	memset(&ci, 0, sizeof(ci));
+
+	ep11_get_card_info(ac->id, &ci, zc->online);
+
+	for (i = 0; ep11_op_modes[i].mode_txt; i++) {
+		if (ci.op_mode & (1ULL << ep11_op_modes[i].mode_bit)) {
+			if (n > 0)
+				buf[n++] = ' ';
+			n += scnprintf(buf + n, PAGE_SIZE - n,
+				       "%s", ep11_op_modes[i].mode_txt);
+		}
+	}
+	n += scnprintf(buf + n, PAGE_SIZE - n, "\n");
+
+	return n;
+}
+
+static struct device_attribute dev_attr_ep11_card_op_modes =
+	__ATTR(op_modes, 0444, ep11_card_op_modes_show, NULL);
+
+static struct attribute *ep11_card_attrs[] = {
+	&dev_attr_ep11_api_ordinalnr.attr,
+	&dev_attr_ep11_fw_version.attr,
+	&dev_attr_ep11_serialnr.attr,
+	&dev_attr_ep11_card_op_modes.attr,
+	NULL,
+};
+
+static const struct attribute_group ep11_card_attr_grp = {
+	.attrs = ep11_card_attrs,
+};
+
+/*
+ * EP11 queue additional device attributes
+ */
+
+static ssize_t ep11_mkvps_show(struct device *dev,
+			       struct device_attribute *attr,
+			       char *buf)
+{
+	int n = 0;
+	struct ep11_domain_info di;
+	struct zcrypt_queue *zq = to_ap_queue(dev)->private;
+	static const char * const cwk_state[] = { "invalid", "valid" };
+	static const char * const nwk_state[] = { "empty", "uncommitted",
+						  "committed" };
+
+	memset(&di, 0, sizeof(di));
+
+	if (zq->online)
+		ep11_get_domain_info(AP_QID_CARD(zq->queue->qid),
+				     AP_QID_QUEUE(zq->queue->qid),
+				     &di);
+
+	if (di.cur_wk_state == '0') {
+		n = scnprintf(buf, PAGE_SIZE, "WK CUR: %s -\n",
+			      cwk_state[di.cur_wk_state - '0']);
+	} else if (di.cur_wk_state == '1') {
+		n = scnprintf(buf, PAGE_SIZE, "WK CUR: %s 0x",
+			      cwk_state[di.cur_wk_state - '0']);
+		bin2hex(buf + n, di.cur_wkvp, sizeof(di.cur_wkvp));
+		n += 2 * sizeof(di.cur_wkvp);
+		n += scnprintf(buf + n, PAGE_SIZE - n, "\n");
+	} else
+		n = scnprintf(buf, PAGE_SIZE, "WK CUR: - -\n");
+
+	if (di.new_wk_state == '0') {
+		n += scnprintf(buf + n, PAGE_SIZE - n, "WK NEW: %s -\n",
+			       nwk_state[di.new_wk_state - '0']);
+	} else if (di.new_wk_state >= '1' && di.new_wk_state <= '2') {
+		n += scnprintf(buf + n, PAGE_SIZE - n, "WK NEW: %s 0x",
+			       nwk_state[di.new_wk_state - '0']);
+		bin2hex(buf + n, di.new_wkvp, sizeof(di.new_wkvp));
+		n += 2 * sizeof(di.new_wkvp);
+		n += scnprintf(buf + n, PAGE_SIZE - n, "\n");
+	} else
+		n += scnprintf(buf + n, PAGE_SIZE - n, "WK NEW: - -\n");
+
+	return n;
+}
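+
+/*
+ * Example output sketch (hypothetical domain state, hex abbreviated):
+ *   WK CUR: valid 0x<64 hex digits of the current wkvp>
+ *   WK NEW: empty -
+ */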
+
+static struct device_attribute dev_attr_ep11_mkvps =
+	__ATTR(mkvps, 0444, ep11_mkvps_show, NULL);
+
+static ssize_t ep11_queue_op_modes_show(struct device *dev,
+					struct device_attribute *attr,
+					char *buf)
+{
+	int i, n = 0;
+	struct ep11_domain_info di;
+	struct zcrypt_queue *zq = to_ap_queue(dev)->private;
+
+	memset(&di, 0, sizeof(di));
+
+	if (zq->online)
+		ep11_get_domain_info(AP_QID_CARD(zq->queue->qid),
+				     AP_QID_QUEUE(zq->queue->qid),
+				     &di);
+
+	for (i = 0; ep11_op_modes[i].mode_txt; i++) {
+		if (di.op_mode & (1ULL << ep11_op_modes[i].mode_bit)) {
+			if (n > 0)
+				buf[n++] = ' ';
+			n += scnprintf(buf + n, PAGE_SIZE - n,
+				       "%s", ep11_op_modes[i].mode_txt);
+		}
+	}
+	n += scnprintf(buf + n, PAGE_SIZE - n, "\n");
+
+	return n;
+}
+
+static struct device_attribute dev_attr_ep11_queue_op_modes =
+	__ATTR(op_modes, 0444, ep11_queue_op_modes_show, NULL);
+
+static struct attribute *ep11_queue_attrs[] = {
+	&dev_attr_ep11_mkvps.attr,
+	&dev_attr_ep11_queue_op_modes.attr,
+	NULL,
+};
+
+static const struct attribute_group ep11_queue_attr_grp = {
+	.attrs = ep11_queue_attrs,
+};
+
 /**
  * Probe function for CEX4/CEX5/CEX6/CEX7 card device. It always
  * accepts the AP device since the bus_match already checked
@@ -161,31 +409,31 @@
 	 * Normalized speed ratings per crypto adapter
 	 * MEX_1k, MEX_2k, MEX_4k, CRT_1k, CRT_2k, CRT_4k, RNG, SECKEY
 	 */
-	static const int CEX4A_SPEED_IDX[] = {
+	static const int CEX4A_SPEED_IDX[NUM_OPS] = {
 		 14,  19, 249, 42, 228, 1458, 0, 0};
-	static const int CEX5A_SPEED_IDX[] = {
+	static const int CEX5A_SPEED_IDX[NUM_OPS] = {
 		  8,   9,  20, 18,  66,	 458, 0, 0};
-	static const int CEX6A_SPEED_IDX[] = {
+	static const int CEX6A_SPEED_IDX[NUM_OPS] = {
 		  6,   9,  20, 17,  65,	 438, 0, 0};
-	static const int CEX7A_SPEED_IDX[] = {
+	static const int CEX7A_SPEED_IDX[NUM_OPS] = {
 		  6,   8,  17, 15,  54,	 362, 0, 0};
 
-	static const int CEX4C_SPEED_IDX[] = {
+	static const int CEX4C_SPEED_IDX[NUM_OPS] = {
 		 59,  69, 308, 83, 278, 2204, 209, 40};
 	static const int CEX5C_SPEED_IDX[] = {
 		 24,  31,  50, 37,  90,	 479,  27, 10};
-	static const int CEX6C_SPEED_IDX[] = {
+	static const int CEX6C_SPEED_IDX[NUM_OPS] = {
 		 16,  20,  32, 27,  77,	 455,  24,  9};
-	static const int CEX7C_SPEED_IDX[] = {
+	static const int CEX7C_SPEED_IDX[NUM_OPS] = {
 		 14,  16,  26, 23,  64,	 376,  23,  8};
 
-	static const int CEX4P_SPEED_IDX[] = {
+	static const int CEX4P_SPEED_IDX[NUM_OPS] = {
 		  0,   0,   0,	 0,   0,   0,	0,  50};
-	static const int CEX5P_SPEED_IDX[] = {
+	static const int CEX5P_SPEED_IDX[NUM_OPS] = {
 		  0,   0,   0,	 0,   0,   0,	0,  10};
-	static const int CEX6P_SPEED_IDX[] = {
+	static const int CEX6P_SPEED_IDX[NUM_OPS] = {
 		  0,   0,   0,	 0,   0,   0,	0,   9};
-	static const int CEX7P_SPEED_IDX[] = {
+	static const int CEX7P_SPEED_IDX[NUM_OPS] = {
 		  0,   0,   0,	 0,   0,   0,	0,   8};
 
 	struct ap_card *ac = to_ap_card(&ap_dev->device);
@@ -201,26 +449,22 @@
 		if (ac->ap_dev.device_type == AP_DEVICE_TYPE_CEX4) {
 			zc->type_string = "CEX4A";
 			zc->user_space_type = ZCRYPT_CEX4;
-			memcpy(zc->speed_rating, CEX4A_SPEED_IDX,
-			       sizeof(CEX4A_SPEED_IDX));
+			zc->speed_rating = CEX4A_SPEED_IDX;
 		} else if (ac->ap_dev.device_type == AP_DEVICE_TYPE_CEX5) {
 			zc->type_string = "CEX5A";
 			zc->user_space_type = ZCRYPT_CEX5;
-			memcpy(zc->speed_rating, CEX5A_SPEED_IDX,
-			       sizeof(CEX5A_SPEED_IDX));
+			zc->speed_rating = CEX5A_SPEED_IDX;
 		} else if (ac->ap_dev.device_type == AP_DEVICE_TYPE_CEX6) {
 			zc->type_string = "CEX6A";
 			zc->user_space_type = ZCRYPT_CEX6;
-			memcpy(zc->speed_rating, CEX6A_SPEED_IDX,
-			       sizeof(CEX6A_SPEED_IDX));
+			zc->speed_rating = CEX6A_SPEED_IDX;
 		} else {
 			zc->type_string = "CEX7A";
 			/* wrong user space type, just for compatibility
 			 * with the ZCRYPT_STATUS_MASK ioctl.
 			 */
 			zc->user_space_type = ZCRYPT_CEX6;
-			memcpy(zc->speed_rating, CEX7A_SPEED_IDX,
-			       sizeof(CEX7A_SPEED_IDX));
+			zc->speed_rating = CEX7A_SPEED_IDX;
 		}
 		zc->min_mod_size = CEX4A_MIN_MOD_SIZE;
 		if (ap_test_bit(&ac->functions, AP_FUNC_MEX4K) &&
@@ -240,32 +484,28 @@
 			 * just keep it for cca compatibility
 			 */
 			zc->user_space_type = ZCRYPT_CEX3C;
-			memcpy(zc->speed_rating, CEX4C_SPEED_IDX,
-			       sizeof(CEX4C_SPEED_IDX));
+			zc->speed_rating = CEX4C_SPEED_IDX;
 		} else if (ac->ap_dev.device_type == AP_DEVICE_TYPE_CEX5) {
 			zc->type_string = "CEX5C";
 			/* wrong user space type, must be CEX5
 			 * just keep it for cca compatibility
 			 */
 			zc->user_space_type = ZCRYPT_CEX3C;
-			memcpy(zc->speed_rating, CEX5C_SPEED_IDX,
-			       sizeof(CEX5C_SPEED_IDX));
+			zc->speed_rating = CEX5C_SPEED_IDX;
 		} else if (ac->ap_dev.device_type == AP_DEVICE_TYPE_CEX6) {
 			zc->type_string = "CEX6C";
 			/* wrong user space type, must be CEX6
 			 * just keep it for cca compatibility
 			 */
 			zc->user_space_type = ZCRYPT_CEX3C;
-			memcpy(zc->speed_rating, CEX6C_SPEED_IDX,
-			       sizeof(CEX6C_SPEED_IDX));
+			zc->speed_rating = CEX6C_SPEED_IDX;
 		} else {
 			zc->type_string = "CEX7C";
 			/* wrong user space type, must be CEX7
 			 * just keep it for cca compatibility
 			 */
 			zc->user_space_type = ZCRYPT_CEX3C;
-			memcpy(zc->speed_rating, CEX7C_SPEED_IDX,
-			       sizeof(CEX7C_SPEED_IDX));
+			zc->speed_rating = CEX7C_SPEED_IDX;
 		}
 		zc->min_mod_size = CEX4C_MIN_MOD_SIZE;
 		zc->max_mod_size = CEX4C_MAX_MOD_SIZE;
@@ -274,26 +514,22 @@
 		if (ac->ap_dev.device_type == AP_DEVICE_TYPE_CEX4) {
 			zc->type_string = "CEX4P";
 			zc->user_space_type = ZCRYPT_CEX4;
-			memcpy(zc->speed_rating, CEX4P_SPEED_IDX,
-			       sizeof(CEX4P_SPEED_IDX));
+			zc->speed_rating = CEX4P_SPEED_IDX;
 		} else if (ac->ap_dev.device_type == AP_DEVICE_TYPE_CEX5) {
 			zc->type_string = "CEX5P";
 			zc->user_space_type = ZCRYPT_CEX5;
-			memcpy(zc->speed_rating, CEX5P_SPEED_IDX,
-			       sizeof(CEX5P_SPEED_IDX));
+			zc->speed_rating = CEX5P_SPEED_IDX;
 		} else if (ac->ap_dev.device_type == AP_DEVICE_TYPE_CEX6) {
 			zc->type_string = "CEX6P";
 			zc->user_space_type = ZCRYPT_CEX6;
-			memcpy(zc->speed_rating, CEX6P_SPEED_IDX,
-			       sizeof(CEX6P_SPEED_IDX));
+			zc->speed_rating = CEX6P_SPEED_IDX;
 		} else {
 			zc->type_string = "CEX7P";
 			/* wrong user space type, just for compatibility
 			 * with the ZCRYPT_STATUS_MASK ioctl.
 			 */
 			zc->user_space_type = ZCRYPT_CEX6;
-			memcpy(zc->speed_rating, CEX7P_SPEED_IDX,
-			       sizeof(CEX7P_SPEED_IDX));
+			zc->speed_rating = CEX7P_SPEED_IDX;
 		}
 		zc->min_mod_size = CEX4C_MIN_MOD_SIZE;
 		zc->max_mod_size = CEX4C_MAX_MOD_SIZE;
@@ -308,17 +544,27 @@
 	if (rc) {
 		ac->private = NULL;
 		zcrypt_card_free(zc);
-		goto out;
+		return rc;
 	}
 
 	if (ap_test_bit(&ac->functions, AP_FUNC_COPRO)) {
 		rc = sysfs_create_group(&ap_dev->device.kobj,
-					&cca_card_attr_group);
-		if (rc)
+					&cca_card_attr_grp);
+		if (rc) {
 			zcrypt_card_unregister(zc);
+			ac->private = NULL;
+			zcrypt_card_free(zc);
+		}
+	} else if (ap_test_bit(&ac->functions, AP_FUNC_EP11)) {
+		rc = sysfs_create_group(&ap_dev->device.kobj,
+					&ep11_card_attr_grp);
+		if (rc) {
+			zcrypt_card_unregister(zc);
+			ac->private = NULL;
+			zcrypt_card_free(zc);
+		}
 	}
 
-out:
 	return rc;
 }
 
@@ -332,7 +578,9 @@
 	struct zcrypt_card *zc = ac->private;
 
 	if (ap_test_bit(&ac->functions, AP_FUNC_COPRO))
-		sysfs_remove_group(&ap_dev->device.kobj, &cca_card_attr_group);
+		sysfs_remove_group(&ap_dev->device.kobj, &cca_card_attr_grp);
+	else if (ap_test_bit(&ac->functions, AP_FUNC_EP11))
+		sysfs_remove_group(&ap_dev->device.kobj, &ep11_card_attr_grp);
 	if (zc)
 		zcrypt_card_unregister(zc);
 }
@@ -389,17 +637,27 @@
 	if (rc) {
 		aq->private = NULL;
 		zcrypt_queue_free(zq);
-		goto out;
+		return rc;
 	}
 
 	if (ap_test_bit(&aq->card->functions, AP_FUNC_COPRO)) {
 		rc = sysfs_create_group(&ap_dev->device.kobj,
-					&cca_queue_attr_group);
-		if (rc)
+					&cca_queue_attr_grp);
+		if (rc) {
 			zcrypt_queue_unregister(zq);
+			aq->private = NULL;
+			zcrypt_queue_free(zq);
+		}
+	} else if (ap_test_bit(&aq->card->functions, AP_FUNC_EP11)) {
+		rc = sysfs_create_group(&ap_dev->device.kobj,
+					&ep11_queue_attr_grp);
+		if (rc) {
+			zcrypt_queue_unregister(zq);
+			aq->private = NULL;
+			zcrypt_queue_free(zq);
+		}
 	}
 
-out:
 	return rc;
 }
 
@@ -413,7 +671,9 @@
 	struct zcrypt_queue *zq = aq->private;
 
 	if (ap_test_bit(&aq->card->functions, AP_FUNC_COPRO))
-		sysfs_remove_group(&ap_dev->device.kobj, &cca_queue_attr_group);
+		sysfs_remove_group(&ap_dev->device.kobj, &cca_queue_attr_grp);
+	else if (ap_test_bit(&aq->card->functions, AP_FUNC_EP11))
+		sysfs_remove_group(&ap_dev->device.kobj, &ep11_queue_attr_grp);
 	if (zq)
 		zcrypt_queue_unregister(zq);
 }
@@ -421,8 +681,6 @@
 static struct ap_driver zcrypt_cex4_queue_driver = {
 	.probe = zcrypt_cex4_queue_probe,
 	.remove = zcrypt_cex4_queue_remove,
-	.suspend = ap_queue_suspend,
-	.resume = ap_queue_resume,
 	.ids = zcrypt_cex4_queue_ids,
 	.flags = AP_DRIVER_FLAG_DEFAULT,
 };
diff --git a/drivers/s390/crypto/zcrypt_debug.h b/drivers/s390/crypto/zcrypt_debug.h
index 241dbb5..3225489 100644
--- a/drivers/s390/crypto/zcrypt_debug.h
+++ b/drivers/s390/crypto/zcrypt_debug.h
@@ -21,6 +21,14 @@
 
 #define ZCRYPT_DBF(...)					\
 	debug_sprintf_event(zcrypt_dbf_info, ##__VA_ARGS__)
+#define ZCRYPT_DBF_ERR(...)					\
+	debug_sprintf_event(zcrypt_dbf_info, DBF_ERR, ##__VA_ARGS__)
+#define ZCRYPT_DBF_WARN(...)					\
+	debug_sprintf_event(zcrypt_dbf_info, DBF_WARN, ##__VA_ARGS__)
+#define ZCRYPT_DBF_INFO(...)					\
+	debug_sprintf_event(zcrypt_dbf_info, DBF_INFO, ##__VA_ARGS__)
+#define ZCRYPT_DBF_DBG(...)					\
+	debug_sprintf_event(zcrypt_dbf_info, DBF_DEBUG, ##__VA_ARGS__)
 
 extern debug_info_t *zcrypt_dbf_info;
 
diff --git a/drivers/s390/crypto/zcrypt_ep11misc.c b/drivers/s390/crypto/zcrypt_ep11misc.c
new file mode 100644
index 0000000..9ce5a71
--- /dev/null
+++ b/drivers/s390/crypto/zcrypt_ep11misc.c
@@ -0,0 +1,1470 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ *  Copyright IBM Corp. 2019
+ *  Author(s): Harald Freudenberger <freude@linux.ibm.com>
+ *
+ *  Collection of EP11 misc functions used by zcrypt and pkey
+ */
+
+#define KMSG_COMPONENT "zcrypt"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/random.h>
+#include <asm/zcrypt.h>
+#include <asm/pkey.h>
+#include <crypto/aes.h>
+
+#include "ap_bus.h"
+#include "zcrypt_api.h"
+#include "zcrypt_debug.h"
+#include "zcrypt_msgtype6.h"
+#include "zcrypt_ep11misc.h"
+#include "zcrypt_ccamisc.h"
+
+#define DEBUG_DBG(...)	ZCRYPT_DBF(DBF_DEBUG, ##__VA_ARGS__)
+#define DEBUG_INFO(...) ZCRYPT_DBF(DBF_INFO, ##__VA_ARGS__)
+#define DEBUG_WARN(...) ZCRYPT_DBF(DBF_WARN, ##__VA_ARGS__)
+#define DEBUG_ERR(...)	ZCRYPT_DBF(DBF_ERR, ##__VA_ARGS__)
+
+/* default iv used here */
+static const u8 def_iv[16] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77,
+			       0x88, 0x99, 0xaa, 0xbb, 0xcc, 0xdd, 0xee, 0xff };
+
+/* ep11 card info cache */
+struct card_list_entry {
+	struct list_head list;
+	u16 cardnr;
+	struct ep11_card_info info;
+};
+static LIST_HEAD(card_list);
+static DEFINE_SPINLOCK(card_list_lock);
+
+static int card_cache_fetch(u16 cardnr, struct ep11_card_info *ci)
+{
+	int rc = -ENOENT;
+	struct card_list_entry *ptr;
+
+	spin_lock_bh(&card_list_lock);
+	list_for_each_entry(ptr, &card_list, list) {
+		if (ptr->cardnr == cardnr) {
+			memcpy(ci, &ptr->info, sizeof(*ci));
+			rc = 0;
+			break;
+		}
+	}
+	spin_unlock_bh(&card_list_lock);
+
+	return rc;
+}
+
+static void card_cache_update(u16 cardnr, const struct ep11_card_info *ci)
+{
+	int found = 0;
+	struct card_list_entry *ptr;
+
+	spin_lock_bh(&card_list_lock);
+	list_for_each_entry(ptr, &card_list, list) {
+		if (ptr->cardnr == cardnr) {
+			memcpy(&ptr->info, ci, sizeof(*ci));
+			found = 1;
+			break;
+		}
+	}
+	if (!found) {
+		ptr = kmalloc(sizeof(*ptr), GFP_ATOMIC);
+		if (!ptr) {
+			spin_unlock_bh(&card_list_lock);
+			return;
+		}
+		ptr->cardnr = cardnr;
+		memcpy(&ptr->info, ci, sizeof(*ci));
+		list_add(&ptr->list, &card_list);
+	}
+	spin_unlock_bh(&card_list_lock);
+}
+
+static void card_cache_scrub(u16 cardnr)
+{
+	struct card_list_entry *ptr;
+
+	spin_lock_bh(&card_list_lock);
+	list_for_each_entry(ptr, &card_list, list) {
+		if (ptr->cardnr == cardnr) {
+			list_del(&ptr->list);
+			kfree(ptr);
+			break;
+		}
+	}
+	spin_unlock_bh(&card_list_lock);
+}
+
+static void __exit card_cache_free(void)
+{
+	struct card_list_entry *ptr, *pnext;
+
+	spin_lock_bh(&card_list_lock);
+	list_for_each_entry_safe(ptr, pnext, &card_list, list) {
+		list_del(&ptr->list);
+		kfree(ptr);
+	}
+	spin_unlock_bh(&card_list_lock);
+}
+
+/*
+ * Simple check if the key blob is a valid EP11 AES key blob with header.
+ */
+int ep11_check_aes_key_with_hdr(debug_info_t *dbg, int dbflvl,
+				const u8 *key, size_t keylen, int checkcpacfexp)
+{
+	struct ep11kblob_header *hdr = (struct ep11kblob_header *) key;
+	struct ep11keyblob *kb = (struct ep11keyblob *) (key + sizeof(*hdr));
+
+#define DBF(...) debug_sprintf_event(dbg, dbflvl, ##__VA_ARGS__)
+
+	if (keylen < sizeof(*hdr) + sizeof(*kb)) {
+		DBF("%s key check failed, keylen %zu < %zu\n",
+		    __func__, keylen, sizeof(*hdr) + sizeof(*kb));
+		return -EINVAL;
+	}
+
+	if (hdr->type != TOKTYPE_NON_CCA) {
+		if (dbg)
+			DBF("%s key check failed, type 0x%02x != 0x%02x\n",
+			    __func__, (int) hdr->type, TOKTYPE_NON_CCA);
+		return -EINVAL;
+	}
+	if (hdr->hver != 0x00) {
+		if (dbg)
+			DBF("%s key check failed, header version 0x%02x != 0x00\n",
+			    __func__, (int) hdr->hver);
+		return -EINVAL;
+	}
+	if (hdr->version != TOKVER_EP11_AES_WITH_HEADER) {
+		if (dbg)
+			DBF("%s key check failed, version 0x%02x != 0x%02x\n",
+			    __func__, (int) hdr->version, TOKVER_EP11_AES_WITH_HEADER);
+		return -EINVAL;
+	}
+	if (hdr->len > keylen) {
+		if (dbg)
+			DBF("%s key check failed, header len %d keylen %zu mismatch\n",
+			    __func__, (int) hdr->len, keylen);
+		return -EINVAL;
+	}
+	if (hdr->len < sizeof(*hdr) + sizeof(*kb)) {
+		if (dbg)
+			DBF("%s key check failed, header len %d < %zu\n",
+			    __func__, (int) hdr->len, sizeof(*hdr) + sizeof(*kb));
+		return -EINVAL;
+	}
+
+	if (kb->version != EP11_STRUCT_MAGIC) {
+		if (dbg)
+			DBF("%s key check failed, blob magic 0x%04x != 0x%04x\n",
+			    __func__, (int) kb->version, EP11_STRUCT_MAGIC);
+		return -EINVAL;
+	}
+	if (checkcpacfexp && !(kb->attr & EP11_BLOB_PKEY_EXTRACTABLE)) {
+		if (dbg)
+			DBF("%s key check failed, PKEY_EXTRACTABLE is off\n",
+			    __func__);
+		return -EINVAL;
+	}
+
+#undef DBF
+
+	return 0;
+}
+EXPORT_SYMBOL(ep11_check_aes_key_with_hdr);
+
+/*
+ * Simple check if the key blob is a valid EP11 ECC key blob with header.
+ */
+int ep11_check_ecc_key_with_hdr(debug_info_t *dbg, int dbflvl,
+				const u8 *key, size_t keylen, int checkcpacfexp)
+{
+	struct ep11kblob_header *hdr = (struct ep11kblob_header *) key;
+	struct ep11keyblob *kb = (struct ep11keyblob *) (key + sizeof(*hdr));
+
+#define DBF(...) debug_sprintf_event(dbg, dbflvl, ##__VA_ARGS__)
+
+	if (keylen < sizeof(*hdr) + sizeof(*kb)) {
+		DBF("%s key check failed, keylen %zu < %zu\n",
+		    __func__, keylen, sizeof(*hdr) + sizeof(*kb));
+		return -EINVAL;
+	}
+
+	if (hdr->type != TOKTYPE_NON_CCA) {
+		if (dbg)
+			DBF("%s key check failed, type 0x%02x != 0x%02x\n",
+			    __func__, (int) hdr->type, TOKTYPE_NON_CCA);
+		return -EINVAL;
+	}
+	if (hdr->hver != 0x00) {
+		if (dbg)
+			DBF("%s key check failed, header version 0x%02x != 0x00\n",
+			    __func__, (int) hdr->hver);
+		return -EINVAL;
+	}
+	if (hdr->version != TOKVER_EP11_ECC_WITH_HEADER) {
+		if (dbg)
+			DBF("%s key check failed, version 0x%02x != 0x%02x\n",
+			    __func__, (int) hdr->version, TOKVER_EP11_ECC_WITH_HEADER);
+		return -EINVAL;
+	}
+	if (hdr->len > keylen) {
+		if (dbg)
+			DBF("%s key check failed, header len %d keylen %zu mismatch\n",
+			    __func__, (int) hdr->len, keylen);
+		return -EINVAL;
+	}
+	if (hdr->len < sizeof(*hdr) + sizeof(*kb)) {
+		if (dbg)
+			DBF("%s key check failed, header len %d < %zu\n",
+			    __func__, (int) hdr->len, sizeof(*hdr) + sizeof(*kb));
+		return -EINVAL;
+	}
+
+	if (kb->version != EP11_STRUCT_MAGIC) {
+		if (dbg)
+			DBF("%s key check failed, blob magic 0x%04x != 0x%04x\n",
+			    __func__, (int) kb->version, EP11_STRUCT_MAGIC);
+		return -EINVAL;
+	}
+	if (checkcpacfexp && !(kb->attr & EP11_BLOB_PKEY_EXTRACTABLE)) {
+		if (dbg)
+			DBF("%s key check failed, PKEY_EXTRACTABLE is off\n",
+			    __func__);
+		return -EINVAL;
+	}
+
+#undef DBF
+
+	return 0;
+}
+EXPORT_SYMBOL(ep11_check_ecc_key_with_hdr);
+
+/*
+ * Simple check if the key blob is a valid EP11 AES key blob with
+ * the header in the session field (old style EP11 AES key).
+ */
+int ep11_check_aes_key(debug_info_t *dbg, int dbflvl,
+		       const u8 *key, size_t keylen, int checkcpacfexp)
+{
+	struct ep11keyblob *kb = (struct ep11keyblob *) key;
+
+#define DBF(...) debug_sprintf_event(dbg, dbflvl, ##__VA_ARGS__)
+
+	if (keylen < sizeof(*kb)) {
+		DBF("%s key check failed, keylen %zu < %zu\n",
+		    __func__, keylen, sizeof(*kb));
+		return -EINVAL;
+	}
+
+	if (kb->head.type != TOKTYPE_NON_CCA) {
+		if (dbg)
+			DBF("%s key check failed, type 0x%02x != 0x%02x\n",
+			    __func__, (int) kb->head.type, TOKTYPE_NON_CCA);
+		return -EINVAL;
+	}
+	if (kb->head.version != TOKVER_EP11_AES) {
+		if (dbg)
+			DBF("%s key check failed, version 0x%02x != 0x%02x\n",
+			    __func__, (int) kb->head.version, TOKVER_EP11_AES);
+		return -EINVAL;
+	}
+	if (kb->head.len > keylen) {
+		if (dbg)
+			DBF("%s key check failed, header len %d keylen %zu mismatch\n",
+			    __func__, (int) kb->head.len, keylen);
+		return -EINVAL;
+	}
+	if (kb->head.len < sizeof(*kb)) {
+		if (dbg)
+			DBF("%s key check failed, header len %d < %zu\n",
+			    __func__, (int) kb->head.len, sizeof(*kb));
+		return -EINVAL;
+	}
+
+	if (kb->version != EP11_STRUCT_MAGIC) {
+		if (dbg)
+			DBF("%s key check failed, blob magic 0x%04x != 0x%04x\n",
+			    __func__, (int) kb->version, EP11_STRUCT_MAGIC);
+		return -EINVAL;
+	}
+	if (checkcpacfexp && !(kb->attr & EP11_BLOB_PKEY_EXTRACTABLE)) {
+		if (dbg)
+			DBF("%s key check failed, PKEY_EXTRACTABLE is off\n",
+			    __func__);
+		return -EINVAL;
+	}
+
+#undef DBF
+
+	return 0;
+}
+EXPORT_SYMBOL(ep11_check_aes_key);
+
+/*
+ * Allocate and prepare ep11 cprb plus additional payload.
+ */
+static inline struct ep11_cprb *alloc_cprb(size_t payload_len)
+{
+	size_t len = sizeof(struct ep11_cprb) + payload_len;
+	struct ep11_cprb *cprb;
+
+	cprb = kzalloc(len, GFP_KERNEL);
+	if (!cprb)
+		return NULL;
+
+	cprb->cprb_len = sizeof(struct ep11_cprb);
+	cprb->cprb_ver_id = 0x04;
+	memcpy(cprb->func_id, "T4", 2);
+	cprb->ret_code = 0xFFFFFFFF;
+	cprb->payload_len = payload_len;
+
+	return cprb;
+}
+
+/*
+ * Some helper functions related to ASN1 encoding.
+ * Limited to length info <= 2 bytes.
+ */
+
+#define ASN1TAGLEN(x) (2 + (x) + ((x) > 127 ? 1 : 0) + ((x) > 255 ? 1 : 0))
+
+static int asn1tag_write(u8 *ptr, u8 tag, const u8 *pvalue, u16 valuelen)
+{
+	ptr[0] = tag;
+	if (valuelen > 255) {
+		ptr[1] = 0x82;
+		*((u16 *)(ptr + 2)) = valuelen;
+		memcpy(ptr + 4, pvalue, valuelen);
+		return 4 + valuelen;
+	}
+	if (valuelen > 127) {
+		ptr[1] = 0x81;
+		ptr[2] = (u8) valuelen;
+		memcpy(ptr + 3, pvalue, valuelen);
+		return 3 + valuelen;
+	}
+	ptr[1] = (u8) valuelen;
+	memcpy(ptr + 2, pvalue, valuelen);
+	return 2 + valuelen;
+}
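+
+/*
+ * Encoding sketch for the helper above (illustrative byte sequences,
+ * length info stored big-endian as on s390):
+ *   asn1tag_write(p, 0x04, v, 5)   -> 04 05 <5 value bytes>
+ *   asn1tag_write(p, 0x04, v, 200) -> 04 81 C8 <200 value bytes>
+ *   asn1tag_write(p, 0x04, v, 300) -> 04 82 01 2C <300 value bytes>
+ */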
+
+/* EP11 payload > 127 bytes starts with this struct */
+struct pl_head {
+	u8  tag;
+	u8  lenfmt;
+	u16 len;
+	u8  func_tag;
+	u8  func_len;
+	u32 func;
+	u8  dom_tag;
+	u8  dom_len;
+	u32 dom;
+} __packed;
+
+/* prep ep11 payload head helper function */
+static inline void prep_head(struct pl_head *h,
+			     size_t pl_size, int api, int func)
+{
+	h->tag = 0x30;
+	h->lenfmt = 0x82;
+	h->len = pl_size - 4;
+	h->func_tag = 0x04;
+	h->func_len = sizeof(u32);
+	h->func = (api << 16) + func;
+	h->dom_tag = 0x04;
+	h->dom_len = sizeof(u32);
+}
+
+/* prep urb helper function */
+static inline void prep_urb(struct ep11_urb *u,
+			    struct ep11_target_dev *t, int nt,
+			    struct ep11_cprb *req, size_t req_len,
+			    struct ep11_cprb *rep, size_t rep_len)
+{
+	u->targets = (u8 __user *) t;
+	u->targets_num = nt;
+	u->req = (u8 __user *) req;
+	u->req_len = req_len;
+	u->resp = (u8 __user *) rep;
+	u->resp_len = rep_len;
+}
+
+/* Check ep11 reply payload, return 0 or suggested errno value. */
+static int check_reply_pl(const u8 *pl, const char *func)
+{
+	int len;
+	u32 ret;
+
+	/* start tag */
+	if (*pl++ != 0x30) {
+		DEBUG_ERR("%s reply start tag mismatch\n", func);
+		return -EIO;
+	}
+
+	/* payload length format */
+	if (*pl < 127) {
+		len = *pl;
+		pl++;
+	} else if (*pl == 0x81) {
+		pl++;
+		len = *pl;
+		pl++;
+	} else if (*pl == 0x82) {
+		pl++;
+		len = *((u16 *)pl);
+		pl += 2;
+	} else {
+		DEBUG_ERR("%s reply start tag lenfmt mismatch 0x%02hhx\n",
+			  func, *pl);
+		return -EIO;
+	}
+
+	/* len should cover at least 3 fields with 32 bit value each */
+	if (len < 3 * 6) {
+		DEBUG_ERR("%s reply length %d too small\n", func, len);
+		return -EIO;
+	}
+
+	/* function tag, length and value */
+	if (pl[0] != 0x04 || pl[1] != 0x04) {
+		DEBUG_ERR("%s function tag or length mismatch\n", func);
+		return -EIO;
+	}
+	pl += 6;
+
+	/* dom tag, length and value */
+	if (pl[0] != 0x04 || pl[1] != 0x04) {
+		DEBUG_ERR("%s dom tag or length mismatch\n", func);
+		return -EIO;
+	}
+	pl += 6;
+
+	/* return value tag, length and value */
+	if (pl[0] != 0x04 || pl[1] != 0x04) {
+		DEBUG_ERR("%s return value tag or length mismatch\n", func);
+		return -EIO;
+	}
+	pl += 2;
+	ret = *((u32 *)pl);
+	if (ret != 0) {
+		DEBUG_ERR("%s return value 0x%04x != 0\n", func, ret);
+		return -EIO;
+	}
+
+	return 0;
+}
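+
+/*
+ * Reply payload layout the checks above expect (sketch):
+ *   30 <len>        start tag plus 1, 2 or 3 bytes of length info
+ *   04 04 <u32>     function field
+ *   04 04 <u32>     domain field
+ *   04 04 <u32>     return value, must be 0 for success
+ */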
+
+
+/*
+ * Helper function which does an ep11 query with given query type.
+ */
+static int ep11_query_info(u16 cardnr, u16 domain, u32 query_type,
+			   size_t buflen, u8 *buf)
+{
+	struct ep11_info_req_pl {
+		struct pl_head head;
+		u8  query_type_tag;
+		u8  query_type_len;
+		u32 query_type;
+		u8  query_subtype_tag;
+		u8  query_subtype_len;
+		u32 query_subtype;
+	} __packed * req_pl;
+	struct ep11_info_rep_pl {
+		struct pl_head head;
+		u8  rc_tag;
+		u8  rc_len;
+		u32 rc;
+		u8  data_tag;
+		u8  data_lenfmt;
+		u16 data_len;
+	} __packed * rep_pl;
+	struct ep11_cprb *req = NULL, *rep = NULL;
+	struct ep11_target_dev target;
+	struct ep11_urb *urb = NULL;
+	int api = 1, rc = -ENOMEM;
+
+	/* request cprb and payload */
+	req = alloc_cprb(sizeof(struct ep11_info_req_pl));
+	if (!req)
+		goto out;
+	req_pl = (struct ep11_info_req_pl *) (((u8 *) req) + sizeof(*req));
+	prep_head(&req_pl->head, sizeof(*req_pl), api, 38); /* get xcp info */
+	req_pl->query_type_tag = 0x04;
+	req_pl->query_type_len = sizeof(u32);
+	req_pl->query_type = query_type;
+	req_pl->query_subtype_tag = 0x04;
+	req_pl->query_subtype_len = sizeof(u32);
+
+	/* reply cprb and payload */
+	rep = alloc_cprb(sizeof(struct ep11_info_rep_pl) + buflen);
+	if (!rep)
+		goto out;
+	rep_pl = (struct ep11_info_rep_pl *) (((u8 *) rep) + sizeof(*rep));
+
+	/* urb and target */
+	urb = kmalloc(sizeof(struct ep11_urb), GFP_KERNEL);
+	if (!urb)
+		goto out;
+	target.ap_id = cardnr;
+	target.dom_id = domain;
+	prep_urb(urb, &target, 1,
+		 req, sizeof(*req) + sizeof(*req_pl),
+		 rep, sizeof(*rep) + sizeof(*rep_pl) + buflen);
+
+	rc = zcrypt_send_ep11_cprb(urb);
+	if (rc) {
+		DEBUG_ERR(
+			"%s zcrypt_send_ep11_cprb(card=%d dom=%d) failed, rc=%d\n",
+			__func__, (int) cardnr, (int) domain, rc);
+		goto out;
+	}
+
+	rc = check_reply_pl((u8 *)rep_pl, __func__);
+	if (rc)
+		goto out;
+	if (rep_pl->data_tag != 0x04 || rep_pl->data_lenfmt != 0x82) {
+		DEBUG_ERR("%s unknown reply data format\n", __func__);
+		rc = -EIO;
+		goto out;
+	}
+	if (rep_pl->data_len > buflen) {
+		DEBUG_ERR("%s mismatch between reply data len and buffer len\n",
+			  __func__);
+		rc = -ENOSPC;
+		goto out;
+	}
+
+	memcpy(buf, ((u8 *) rep_pl) + sizeof(*rep_pl), rep_pl->data_len);
+
+out:
+	kfree(req);
+	kfree(rep);
+	kfree(urb);
+	return rc;
+}
+
+/*
+ * Provide information about an EP11 card.
+ */
+int ep11_get_card_info(u16 card, struct ep11_card_info *info, int verify)
+{
+	int rc;
+	struct ep11_module_query_info {
+		u32 API_ord_nr;
+		u32 firmware_id;
+		u8  FW_major_vers;
+		u8  FW_minor_vers;
+		u8  CSP_major_vers;
+		u8  CSP_minor_vers;
+		u8  fwid[32];
+		u8  xcp_config_hash[32];
+		u8  CSP_config_hash[32];
+		u8  serial[16];
+		u8  module_date_time[16];
+		u64 op_mode;
+		u32 PKCS11_flags;
+		u32 ext_flags;
+		u32 domains;
+		u32 sym_state_bytes;
+		u32 digest_state_bytes;
+		u32 pin_blob_bytes;
+		u32 SPKI_bytes;
+		u32 priv_key_blob_bytes;
+		u32 sym_blob_bytes;
+		u32 max_payload_bytes;
+		u32 CP_profile_bytes;
+		u32 max_CP_index;
+	} __packed * pmqi = NULL;
+
+	rc = card_cache_fetch(card, info);
+	if (rc || verify) {
+		pmqi = kmalloc(sizeof(*pmqi), GFP_KERNEL);
+		if (!pmqi)
+			return -ENOMEM;
+		rc = ep11_query_info(card, AUTOSEL_DOM,
+				     0x01 /* module info query */,
+				     sizeof(*pmqi), (u8 *) pmqi);
+		if (rc) {
+			if (rc == -ENODEV)
+				card_cache_scrub(card);
+			goto out;
+		}
+		memset(info, 0, sizeof(*info));
+		info->API_ord_nr = pmqi->API_ord_nr;
+		info->FW_version =
+			(pmqi->FW_major_vers << 8) + pmqi->FW_minor_vers;
+		memcpy(info->serial, pmqi->serial, sizeof(info->serial));
+		info->op_mode = pmqi->op_mode;
+		card_cache_update(card, info);
+	}
+
+out:
+	kfree(pmqi);
+	return rc;
+}
+EXPORT_SYMBOL(ep11_get_card_info);
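+
+/*
+ * Usage sketch (hypothetical caller, error handling trimmed):
+ *   struct ep11_card_info ci;
+ *   if (!ep11_get_card_info(card, &ci, 0))
+ *           pr_debug("EP11 API ordinal %u\n", ci.API_ord_nr);
+ */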
+
+/*
+ * Provide information about a domain within an EP11 card.
+ */
+int ep11_get_domain_info(u16 card, u16 domain, struct ep11_domain_info *info)
+{
+	int rc;
+	struct ep11_domain_query_info {
+		u32 dom_index;
+		u8  cur_WK_VP[32];
+		u8  new_WK_VP[32];
+		u32 dom_flags;
+		u64 op_mode;
+	} __packed * p_dom_info;
+
+	p_dom_info = kmalloc(sizeof(*p_dom_info), GFP_KERNEL);
+	if (!p_dom_info)
+		return -ENOMEM;
+
+	rc = ep11_query_info(card, domain, 0x03 /* domain info query */,
+			     sizeof(*p_dom_info), (u8 *) p_dom_info);
+	if (rc)
+		goto out;
+
+	memset(info, 0, sizeof(*info));
+	info->cur_wk_state = '0';
+	info->new_wk_state = '0';
+	if (p_dom_info->dom_flags & 0x10 /* left imprint mode */) {
+		if (p_dom_info->dom_flags & 0x02 /* cur wk valid */) {
+			info->cur_wk_state = '1';
+			memcpy(info->cur_wkvp, p_dom_info->cur_WK_VP, 32);
+		}
+		if (p_dom_info->dom_flags & 0x04 /* new wk present */
+		    || p_dom_info->dom_flags & 0x08 /* new wk committed */) {
+			info->new_wk_state =
+				p_dom_info->dom_flags & 0x08 ? '2' : '1';
+			memcpy(info->new_wkvp, p_dom_info->new_WK_VP, 32);
+		}
+	}
+	info->op_mode = p_dom_info->op_mode;
+
+out:
+	kfree(p_dom_info);
+	return rc;
+}
+EXPORT_SYMBOL(ep11_get_domain_info);
+
+/*
+ * Default EP11 AES key generate attributes, used when no keygenflags given:
+ * XCP_BLOB_ENCRYPT | XCP_BLOB_DECRYPT | XCP_BLOB_PROTKEY_EXTRACTABLE
+ */
+#define KEY_ATTR_DEFAULTS 0x00200c00
+
+int ep11_genaeskey(u16 card, u16 domain, u32 keybitsize, u32 keygenflags,
+		   u8 *keybuf, size_t *keybufsize)
+{
+	struct keygen_req_pl {
+		struct pl_head head;
+		u8  var_tag;
+		u8  var_len;
+		u32 var;
+		u8  keybytes_tag;
+		u8  keybytes_len;
+		u32 keybytes;
+		u8  mech_tag;
+		u8  mech_len;
+		u32 mech;
+		u8  attr_tag;
+		u8  attr_len;
+		u32 attr_header;
+		u32 attr_bool_mask;
+		u32 attr_bool_bits;
+		u32 attr_val_len_type;
+		u32 attr_val_len_value;
+		u8  pin_tag;
+		u8  pin_len;
+	} __packed * req_pl;
+	struct keygen_rep_pl {
+		struct pl_head head;
+		u8  rc_tag;
+		u8  rc_len;
+		u32 rc;
+		u8  data_tag;
+		u8  data_lenfmt;
+		u16 data_len;
+		u8  data[512];
+	} __packed * rep_pl;
+	struct ep11_cprb *req = NULL, *rep = NULL;
+	struct ep11_target_dev target;
+	struct ep11_urb *urb = NULL;
+	struct ep11keyblob *kb;
+	int api, rc = -ENOMEM;
+
+	switch (keybitsize) {
+	case 128:
+	case 192:
+	case 256:
+		break;
+	default:
+		DEBUG_ERR(
+			"%s unknown/unsupported keybitsize %d\n",
+			__func__, keybitsize);
+		rc = -EINVAL;
+		goto out;
+	}
+
+	/* request cprb and payload */
+	req = alloc_cprb(sizeof(struct keygen_req_pl));
+	if (!req)
+		goto out;
+	req_pl = (struct keygen_req_pl *) (((u8 *) req) + sizeof(*req));
+	api = (!keygenflags || keygenflags & 0x00200000) ? 4 : 1;
+	prep_head(&req_pl->head, sizeof(*req_pl), api, 21); /* GenerateKey */
+	req_pl->var_tag = 0x04;
+	req_pl->var_len = sizeof(u32);
+	req_pl->keybytes_tag = 0x04;
+	req_pl->keybytes_len = sizeof(u32);
+	req_pl->keybytes = keybitsize / 8;
+	req_pl->mech_tag = 0x04;
+	req_pl->mech_len = sizeof(u32);
+	req_pl->mech = 0x00001080; /* CKM_AES_KEY_GEN */
+	req_pl->attr_tag = 0x04;
+	req_pl->attr_len = 5 * sizeof(u32);
+	req_pl->attr_header = 0x10010000;
+	req_pl->attr_bool_mask = keygenflags ? keygenflags : KEY_ATTR_DEFAULTS;
+	req_pl->attr_bool_bits = keygenflags ? keygenflags : KEY_ATTR_DEFAULTS;
+	req_pl->attr_val_len_type = 0x00000161; /* CKA_VALUE_LEN */
+	req_pl->attr_val_len_value = keybitsize / 8;
+	req_pl->pin_tag = 0x04;
+
+	/* reply cprb and payload */
+	rep = alloc_cprb(sizeof(struct keygen_rep_pl));
+	if (!rep)
+		goto out;
+	rep_pl = (struct keygen_rep_pl *) (((u8 *) rep) + sizeof(*rep));
+
+	/* urb and target */
+	urb = kmalloc(sizeof(struct ep11_urb), GFP_KERNEL);
+	if (!urb)
+		goto out;
+	target.ap_id = card;
+	target.dom_id = domain;
+	prep_urb(urb, &target, 1,
+		 req, sizeof(*req) + sizeof(*req_pl),
+		 rep, sizeof(*rep) + sizeof(*rep_pl));
+
+	rc = zcrypt_send_ep11_cprb(urb);
+	if (rc) {
+		DEBUG_ERR(
+			"%s zcrypt_send_ep11_cprb(card=%d dom=%d) failed, rc=%d\n",
+			__func__, (int) card, (int) domain, rc);
+		goto out;
+	}
+
+	rc = check_reply_pl((u8 *)rep_pl, __func__);
+	if (rc)
+		goto out;
+	if (rep_pl->data_tag != 0x04 || rep_pl->data_lenfmt != 0x82) {
+		DEBUG_ERR("%s unknown reply data format\n", __func__);
+		rc = -EIO;
+		goto out;
+	}
+	if (rep_pl->data_len > *keybufsize) {
+		DEBUG_ERR("%s mismatch reply data len / key buffer len\n",
+			  __func__);
+		rc = -ENOSPC;
+		goto out;
+	}
+
+	/* copy key blob and set header values */
+	memcpy(keybuf, rep_pl->data, rep_pl->data_len);
+	*keybufsize = rep_pl->data_len;
+	kb = (struct ep11keyblob *) keybuf;
+	kb->head.type = TOKTYPE_NON_CCA;
+	kb->head.len = rep_pl->data_len;
+	kb->head.version = TOKVER_EP11_AES;
+	kb->head.keybitlen = keybitsize;
+
+out:
+	kfree(req);
+	kfree(rep);
+	kfree(urb);
+	return rc;
+}
+EXPORT_SYMBOL(ep11_genaeskey);
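+
+/*
+ * Usage sketch (hypothetical caller): generate a random 256 bit
+ * AES secure key into a caller provided buffer:
+ *   u8 blob[MAXEP11AESKEYBLOBSIZE];
+ *   size_t bloblen = sizeof(blob);
+ *   rc = ep11_genaeskey(card, domain, 256, 0, blob, &bloblen);
+ */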
+
+static int ep11_cryptsingle(u16 card, u16 domain,
+			    u16 mode, u32 mech, const u8 *iv,
+			    const u8 *key, size_t keysize,
+			    const u8 *inbuf, size_t inbufsize,
+			    u8 *outbuf, size_t *outbufsize)
+{
+	struct crypt_req_pl {
+		struct pl_head head;
+		u8  var_tag;
+		u8  var_len;
+		u32 var;
+		u8  mech_tag;
+		u8  mech_len;
+		u32 mech;
+		/*
+		 * maybe followed by iv data
+		 * followed by key tag + key blob
+		 * followed by plaintext tag + plaintext
+		 */
+	} __packed * req_pl;
+	struct crypt_rep_pl {
+		struct pl_head head;
+		u8  rc_tag;
+		u8  rc_len;
+		u32 rc;
+		u8  data_tag;
+		u8  data_lenfmt;
+		/* data follows */
+	} __packed * rep_pl;
+	struct ep11_cprb *req = NULL, *rep = NULL;
+	struct ep11_target_dev target;
+	struct ep11_urb *urb = NULL;
+	size_t req_pl_size, rep_pl_size;
+	int n, api = 1, rc = -ENOMEM;
+	u8 *p;
+
+	/* the simple asn1 coding used has length limits */
+	if (keysize > 0xFFFF || inbufsize > 0xFFFF)
+		return -EINVAL;
+
+	/* request cprb and payload */
+	req_pl_size = sizeof(struct crypt_req_pl) + (iv ? 16 : 0)
+		+ ASN1TAGLEN(keysize) + ASN1TAGLEN(inbufsize);
+	req = alloc_cprb(req_pl_size);
+	if (!req)
+		goto out;
+	req_pl = (struct crypt_req_pl *) (((u8 *) req) + sizeof(*req));
+	prep_head(&req_pl->head, req_pl_size, api, (mode ? 20 : 19));
+	req_pl->var_tag = 0x04;
+	req_pl->var_len = sizeof(u32);
+	/* mech is mech + mech params (iv here) */
+	req_pl->mech_tag = 0x04;
+	req_pl->mech_len = sizeof(u32) + (iv ? 16 : 0);
+	req_pl->mech = (mech ? mech : 0x00001085); /* CKM_AES_CBC_PAD */
+	p = ((u8 *) req_pl) + sizeof(*req_pl);
+	if (iv) {
+		memcpy(p, iv, 16);
+		p += 16;
+	}
+	/* key and input data */
+	p += asn1tag_write(p, 0x04, key, keysize);
+	p += asn1tag_write(p, 0x04, inbuf, inbufsize);
+
+	/* reply cprb and payload, assume out data size <= in data size + 32 */
+	rep_pl_size = sizeof(struct crypt_rep_pl) + ASN1TAGLEN(inbufsize + 32);
+	rep = alloc_cprb(rep_pl_size);
+	if (!rep)
+		goto out;
+	rep_pl = (struct crypt_rep_pl *) (((u8 *) rep) + sizeof(*rep));
+
+	/* urb and target */
+	urb = kmalloc(sizeof(struct ep11_urb), GFP_KERNEL);
+	if (!urb)
+		goto out;
+	target.ap_id = card;
+	target.dom_id = domain;
+	prep_urb(urb, &target, 1,
+		 req, sizeof(*req) + req_pl_size,
+		 rep, sizeof(*rep) + rep_pl_size);
+
+	rc = zcrypt_send_ep11_cprb(urb);
+	if (rc) {
+		DEBUG_ERR(
+			"%s zcrypt_send_ep11_cprb(card=%d dom=%d) failed, rc=%d\n",
+			__func__, (int) card, (int) domain, rc);
+		goto out;
+	}
+
+	rc = check_reply_pl((u8 *)rep_pl, __func__);
+	if (rc)
+		goto out;
+	if (rep_pl->data_tag != 0x04) {
+		DEBUG_ERR("%s unknown reply data format\n", __func__);
+		rc = -EIO;
+		goto out;
+	}
+	p = ((u8 *) rep_pl) + sizeof(*rep_pl);
+	if (rep_pl->data_lenfmt <= 127)
+		n = rep_pl->data_lenfmt;
+	else if (rep_pl->data_lenfmt == 0x81)
+		n = *p++;
+	else if (rep_pl->data_lenfmt == 0x82) {
+		n = *((u16 *) p);
+		p += 2;
+	} else {
+		DEBUG_ERR("%s unknown reply data length format 0x%02hhx\n",
+			  __func__, rep_pl->data_lenfmt);
+		rc = -EIO;
+		goto out;
+	}
+	if (n > *outbufsize) {
+		DEBUG_ERR("%s mismatch reply data len %d / output buffer %zu\n",
+			  __func__, n, *outbufsize);
+		rc = -ENOSPC;
+		goto out;
+	}
+
+	memcpy(outbuf, p, n);
+	*outbufsize = n;
+
+out:
+	kfree(req);
+	kfree(rep);
+	kfree(urb);
+	return rc;
+}
+
+static int ep11_unwrapkey(u16 card, u16 domain,
+			  const u8 *kek, size_t keksize,
+			  const u8 *enckey, size_t enckeysize,
+			  u32 mech, const u8 *iv,
+			  u32 keybitsize, u32 keygenflags,
+			  u8 *keybuf, size_t *keybufsize)
+{
+	struct uw_req_pl {
+		struct pl_head head;
+		u8  attr_tag;
+		u8  attr_len;
+		u32 attr_header;
+		u32 attr_bool_mask;
+		u32 attr_bool_bits;
+		u32 attr_key_type;
+		u32 attr_key_type_value;
+		u32 attr_val_len;
+		u32 attr_val_len_value;
+		u8  mech_tag;
+		u8  mech_len;
+		u32 mech;
+		/*
+		 * maybe followed by iv data
+		 * followed by kek tag + kek blob
+		 * followed by empty mac tag
+		 * followed by empty pin tag
+		 * followed by encrypted key tag + bytes
+		 */
+	} __packed * req_pl;
+	struct uw_rep_pl {
+		struct pl_head head;
+		u8  rc_tag;
+		u8  rc_len;
+		u32 rc;
+		u8  data_tag;
+		u8  data_lenfmt;
+		u16 data_len;
+		u8  data[512];
+	} __packed * rep_pl;
+	struct ep11_cprb *req = NULL, *rep = NULL;
+	struct ep11_target_dev target;
+	struct ep11_urb *urb = NULL;
+	struct ep11keyblob *kb;
+	size_t req_pl_size;
+	int api, rc = -ENOMEM;
+	u8 *p;
+
+	/* request cprb and payload */
+	req_pl_size = sizeof(struct uw_req_pl) + (iv ? 16 : 0)
+		+ ASN1TAGLEN(keksize) + 4 + ASN1TAGLEN(enckeysize);
+	req = alloc_cprb(req_pl_size);
+	if (!req)
+		goto out;
+	req_pl = (struct uw_req_pl *) (((u8 *) req) + sizeof(*req));
+	api = (!keygenflags || keygenflags & 0x00200000) ? 4 : 1;
+	prep_head(&req_pl->head, req_pl_size, api, 34); /* UnwrapKey */
+	req_pl->attr_tag = 0x04;
+	req_pl->attr_len = 7 * sizeof(u32);
+	req_pl->attr_header = 0x10020000;
+	req_pl->attr_bool_mask = keygenflags ? keygenflags : KEY_ATTR_DEFAULTS;
+	req_pl->attr_bool_bits = keygenflags ? keygenflags : KEY_ATTR_DEFAULTS;
+	req_pl->attr_key_type = 0x00000100; /* CKA_KEY_TYPE */
+	req_pl->attr_key_type_value = 0x0000001f; /* CKK_AES */
+	req_pl->attr_val_len = 0x00000161; /* CKA_VALUE_LEN */
+	req_pl->attr_val_len_value = keybitsize / 8;
+	/* mech is mech + mech params (iv here) */
+	req_pl->mech_tag = 0x04;
+	req_pl->mech_len = sizeof(u32) + (iv ? 16 : 0);
+	req_pl->mech = (mech ? mech : 0x00001085); /* CKM_AES_CBC_PAD */
+	p = ((u8 *) req_pl) + sizeof(*req_pl);
+	if (iv) {
+		memcpy(p, iv, 16);
+		p += 16;
+	}
+	/* kek */
+	p += asn1tag_write(p, 0x04, kek, keksize);
+	/* empty mac key tag */
+	*p++ = 0x04;
+	*p++ = 0;
+	/* empty pin tag */
+	*p++ = 0x04;
+	*p++ = 0;
+	/* encrypted key value tag and bytes */
+	p += asn1tag_write(p, 0x04, enckey, enckeysize);
+
+	/* reply cprb and payload */
+	rep = alloc_cprb(sizeof(struct uw_rep_pl));
+	if (!rep)
+		goto out;
+	rep_pl = (struct uw_rep_pl *) (((u8 *) rep) + sizeof(*rep));
+
+	/* urb and target */
+	urb = kmalloc(sizeof(struct ep11_urb), GFP_KERNEL);
+	if (!urb)
+		goto out;
+	target.ap_id = card;
+	target.dom_id = domain;
+	prep_urb(urb, &target, 1,
+		 req, sizeof(*req) + req_pl_size,
+		 rep, sizeof(*rep) + sizeof(*rep_pl));
+
+	rc = zcrypt_send_ep11_cprb(urb);
+	if (rc) {
+		DEBUG_ERR(
+			"%s zcrypt_send_ep11_cprb(card=%d dom=%d) failed, rc=%d\n",
+			__func__, (int) card, (int) domain, rc);
+		goto out;
+	}
+
+	rc = check_reply_pl((u8 *)rep_pl, __func__);
+	if (rc)
+		goto out;
+	if (rep_pl->data_tag != 0x04 || rep_pl->data_lenfmt != 0x82) {
+		DEBUG_ERR("%s unknown reply data format\n", __func__);
+		rc = -EIO;
+		goto out;
+	}
+	if (rep_pl->data_len > *keybufsize) {
+		DEBUG_ERR("%s mismatch reply data len / key buffer len\n",
+			  __func__);
+		rc = -ENOSPC;
+		goto out;
+	}
+
+	/* copy key blob and set header values */
+	memcpy(keybuf, rep_pl->data, rep_pl->data_len);
+	*keybufsize = rep_pl->data_len;
+	kb = (struct ep11keyblob *) keybuf;
+	kb->head.type = TOKTYPE_NON_CCA;
+	kb->head.len = rep_pl->data_len;
+	kb->head.version = TOKVER_EP11_AES;
+	kb->head.keybitlen = keybitsize;
+
+out:
+	kfree(req);
+	kfree(rep);
+	kfree(urb);
+	return rc;
+}
+
+static int ep11_wrapkey(u16 card, u16 domain,
+			const u8 *key, size_t keysize,
+			u32 mech, const u8 *iv,
+			u8 *databuf, size_t *datasize)
+{
+	struct wk_req_pl {
+		struct pl_head head;
+		u8  var_tag;
+		u8  var_len;
+		u32 var;
+		u8  mech_tag;
+		u8  mech_len;
+		u32 mech;
+		/*
+		 * followed by iv data
+		 * followed by key tag + key blob
+		 * followed by dummy kek param
+		 * followed by dummy mac param
+		 */
+	} __packed * req_pl;
+	struct wk_rep_pl {
+		struct pl_head head;
+		u8  rc_tag;
+		u8  rc_len;
+		u32 rc;
+		u8  data_tag;
+		u8  data_lenfmt;
+		u16 data_len;
+		u8  data[1024];
+	} __packed * rep_pl;
+	struct ep11_cprb *req = NULL, *rep = NULL;
+	struct ep11_target_dev target;
+	struct ep11_urb *urb = NULL;
+	struct ep11keyblob *kb;
+	size_t req_pl_size;
+	int api, rc = -ENOMEM;
+	bool has_header = false;
+	u8 *p;
+
+	/* maybe the session field holds a header with key info */
+	kb = (struct ep11keyblob *) key;
+	if (kb->head.type == TOKTYPE_NON_CCA &&
+	    kb->head.version == TOKVER_EP11_AES) {
+		has_header = true;
+		keysize = kb->head.len < keysize ? kb->head.len : keysize;
+	}
+
+	/* request cprb and payload */
+	req_pl_size = sizeof(struct wk_req_pl) + (iv ? 16 : 0)
+		+ ASN1TAGLEN(keysize) + 4;
+	req = alloc_cprb(req_pl_size);
+	if (!req)
+		goto out;
+	if (!mech || mech == 0x80060001)
+		req->flags |= 0x20; /* CPACF_WRAP needs special bit */
+	req_pl = (struct wk_req_pl *) (((u8 *) req) + sizeof(*req));
+	api = (!mech || mech == 0x80060001) ? 4 : 1; /* CKM_IBM_CPACF_WRAP */
+	prep_head(&req_pl->head, req_pl_size, api, 33); /* WrapKey */
+	req_pl->var_tag = 0x04;
+	req_pl->var_len = sizeof(u32);
+	/* mech is mech + mech params (iv here) */
+	req_pl->mech_tag = 0x04;
+	req_pl->mech_len = sizeof(u32) + (iv ? 16 : 0);
+	req_pl->mech = (mech ? mech : 0x80060001); /* CKM_IBM_CPACF_WRAP */
+	p = ((u8 *) req_pl) + sizeof(*req_pl);
+	if (iv) {
+		memcpy(p, iv, 16);
+		p += 16;
+	}
+	/* key blob */
+	p += asn1tag_write(p, 0x04, key, keysize);
+	/* maybe the key argument needs the head data cleaned out */
+	if (has_header) {
+		kb = (struct ep11keyblob *)(p - keysize);
+		memset(&kb->head, 0, sizeof(kb->head));
+	}
+	/* empty kek tag */
+	*p++ = 0x04;
+	*p++ = 0;
+	/* empty mac tag */
+	*p++ = 0x04;
+	*p++ = 0;
+
+	/* reply cprb and payload */
+	rep = alloc_cprb(sizeof(struct wk_rep_pl));
+	if (!rep)
+		goto out;
+	rep_pl = (struct wk_rep_pl *) (((u8 *) rep) + sizeof(*rep));
+
+	/* urb and target */
+	urb = kmalloc(sizeof(struct ep11_urb), GFP_KERNEL);
+	if (!urb)
+		goto out;
+	target.ap_id = card;
+	target.dom_id = domain;
+	prep_urb(urb, &target, 1,
+		 req, sizeof(*req) + req_pl_size,
+		 rep, sizeof(*rep) + sizeof(*rep_pl));
+
+	rc = zcrypt_send_ep11_cprb(urb);
+	if (rc) {
+		DEBUG_ERR(
+			"%s zcrypt_send_ep11_cprb(card=%d dom=%d) failed, rc=%d\n",
+			__func__, (int) card, (int) domain, rc);
+		goto out;
+	}
+
+	rc = check_reply_pl((u8 *)rep_pl, __func__);
+	if (rc)
+		goto out;
+	if (rep_pl->data_tag != 0x04 || rep_pl->data_lenfmt != 0x82) {
+		DEBUG_ERR("%s unknown reply data format\n", __func__);
+		rc = -EIO;
+		goto out;
+	}
+	if (rep_pl->data_len > *datasize) {
+		DEBUG_ERR("%s mismatch reply data len / data buffer len\n",
+			  __func__);
+		rc = -ENOSPC;
+		goto out;
+	}
+
+	/* copy the data from the cprb to the data buffer */
+	memcpy(databuf, rep_pl->data, rep_pl->data_len);
+	*datasize = rep_pl->data_len;
+
+out:
+	kfree(req);
+	kfree(rep);
+	kfree(urb);
+	return rc;
+}
+
+int ep11_clr2keyblob(u16 card, u16 domain, u32 keybitsize, u32 keygenflags,
+		     const u8 *clrkey, u8 *keybuf, size_t *keybufsize)
+{
+	int rc;
+	struct ep11keyblob *kb;
+	u8 encbuf[64], *kek = NULL;
+	size_t clrkeylen, keklen, encbuflen = sizeof(encbuf);
+
+	if (keybitsize == 128 || keybitsize == 192 || keybitsize == 256)
+		clrkeylen = keybitsize / 8;
+	else {
+		DEBUG_ERR(
+			"%s unknown/unsupported keybitsize %d\n",
+			__func__, keybitsize);
+		return -EINVAL;
+	}
+
+	/* allocate memory for the temp kek */
+	keklen = MAXEP11AESKEYBLOBSIZE;
+	kek = kmalloc(keklen, GFP_ATOMIC);
+	if (!kek) {
+		rc = -ENOMEM;
+		goto out;
+	}
+
+	/* Step 1: generate AES 256 bit random kek key */
+	rc = ep11_genaeskey(card, domain, 256,
+			    0x00006c00, /* EN/DECRYPT, WRAP/UNWRAP */
+			    kek, &keklen);
+	if (rc) {
+		DEBUG_ERR(
+			"%s generate kek key failed, rc=%d\n",
+			__func__, rc);
+		goto out;
+	}
+	kb = (struct ep11keyblob *) kek;
+	memset(&kb->head, 0, sizeof(kb->head));
+
+	/* Step 2: encrypt clear key value with the kek key */
+	rc = ep11_cryptsingle(card, domain, 0, 0, def_iv, kek, keklen,
+			      clrkey, clrkeylen, encbuf, &encbuflen);
+	if (rc) {
+		DEBUG_ERR(
+			"%s encrypting key value with kek key failed, rc=%d\n",
+			__func__, rc);
+		goto out;
+	}
+
+	/* Step 3: import the encrypted key value as a new key */
+	rc = ep11_unwrapkey(card, domain, kek, keklen,
+			    encbuf, encbuflen, 0, def_iv,
+			    keybitsize, 0, keybuf, keybufsize);
+	if (rc) {
+		DEBUG_ERR(
+			"%s importing key value as new key failed,, rc=%d\n",
+			__func__, rc);
+		goto out;
+	}
+
+out:
+	kfree(kek);
+	return rc;
+}
+EXPORT_SYMBOL(ep11_clr2keyblob);
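+
+/*
+ * Usage sketch (hypothetical caller): wrap a 32 byte clear AES key
+ * into an EP11 secure key blob via the three steps above:
+ *   u8 blob[MAXEP11AESKEYBLOBSIZE];
+ *   size_t bloblen = sizeof(blob);
+ *   rc = ep11_clr2keyblob(card, domain, 256, 0, clrkey, blob, &bloblen);
+ */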
+
+int ep11_kblob2protkey(u16 card, u16 dom, const u8 *keyblob, size_t keybloblen,
+		       u8 *protkey, u32 *protkeylen, u32 *protkeytype)
+{
+	int rc = -EIO;
+	u8 *wkbuf = NULL;
+	size_t wkbuflen, keylen;
+	struct wk_info {
+		u16 version;
+		u8  res1[16];
+		u32 pkeytype;
+		u32 pkeybitsize;
+		u64 pkeysize;
+		u8  res2[8];
+		u8  pkey[0];
+	} __packed * wki;
+	const u8 *key;
+	struct ep11kblob_header *hdr;
+
+	/* key with or without header? */
+	hdr = (struct ep11kblob_header *) keyblob;
+	if (hdr->type == TOKTYPE_NON_CCA
+	    && (hdr->version == TOKVER_EP11_AES_WITH_HEADER
+		|| hdr->version == TOKVER_EP11_ECC_WITH_HEADER)
+	    && is_ep11_keyblob(keyblob + sizeof(struct ep11kblob_header))) {
+		/* EP11 AES or ECC key with header */
+		key = keyblob + sizeof(struct ep11kblob_header);
+		keylen = hdr->len - sizeof(struct ep11kblob_header);
+	} else if (hdr->type == TOKTYPE_NON_CCA
+		   && hdr->version == TOKVER_EP11_AES
+		   && is_ep11_keyblob(keyblob)) {
+		/* EP11 AES key (old style) */
+		key = keyblob;
+		keylen = hdr->len;
+	} else if (is_ep11_keyblob(keyblob)) {
+		/* raw EP11 key blob */
+		key = keyblob;
+		keylen = keybloblen;
+	} else
+		return -EINVAL;
+
+	/* alloc temp working buffer */
+	wkbuflen = (keylen + AES_BLOCK_SIZE) & (~(AES_BLOCK_SIZE - 1));
+	wkbuf = kmalloc(wkbuflen, GFP_ATOMIC);
+	if (!wkbuf)
+		return -ENOMEM;
+
+	/* ep11 secure key -> protected key + info */
+	rc = ep11_wrapkey(card, dom, key, keylen,
+			  0, def_iv, wkbuf, &wkbuflen);
+	if (rc) {
+		DEBUG_ERR(
+			"%s rewrapping ep11 key to pkey failed, rc=%d\n",
+			__func__, rc);
+		goto out;
+	}
+	wki = (struct wk_info *) wkbuf;
+
+	/* check struct version and pkey type */
+	if (wki->version != 1 || wki->pkeytype < 1 || wki->pkeytype > 5) {
+		DEBUG_ERR("%s wk info version %d or pkeytype %d mismatch.\n",
+			  __func__, (int) wki->version, (int) wki->pkeytype);
+		rc = -EIO;
+		goto out;
+	}
+
+	/* check protected key type field */
+	switch (wki->pkeytype) {
+	case 1: /* AES */
+		switch (wki->pkeysize) {
+		case 16+32:
+			/* AES 128 protected key */
+			if (protkeytype)
+				*protkeytype = PKEY_KEYTYPE_AES_128;
+			break;
+		case 24+32:
+			/* AES 192 protected key */
+			if (protkeytype)
+				*protkeytype = PKEY_KEYTYPE_AES_192;
+			break;
+		case 32+32:
+			/* AES 256 protected key */
+			if (protkeytype)
+				*protkeytype = PKEY_KEYTYPE_AES_256;
+			break;
+		default:
+			DEBUG_ERR("%s unknown/unsupported AES pkeysize %d\n",
+				  __func__, (int) wki->pkeysize);
+			rc = -EIO;
+			goto out;
+		}
+		break;
+	case 3: /* EC-P */
+	case 4: /* EC-ED */
+	case 5: /* EC-BP */
+		if (protkeytype)
+			*protkeytype = PKEY_KEYTYPE_ECC;
+		break;
+	case 2: /* TDES */
+	default:
+		DEBUG_ERR("%s unknown/unsupported key type %d\n",
+			  __func__, (int) wki->pkeytype);
+		rc = -EIO;
+		goto out;
+	}
+
+	/* copy the translated protected key */
+	if (wki->pkeysize > *protkeylen) {
+		DEBUG_ERR("%s wk info pkeysize %llu > protkeysize %u\n",
+			  __func__, wki->pkeysize, *protkeylen);
+		rc = -EINVAL;
+		goto out;
+	}
+	memcpy(protkey, wki->pkey, wki->pkeysize);
+	*protkeylen = wki->pkeysize;
+
+out:
+	kfree(wkbuf);
+	return rc;
+}
+EXPORT_SYMBOL(ep11_kblob2protkey);
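+
+/*
+ * Usage sketch (hypothetical caller; MAXPROTKEYSIZE as defined in pkey.h):
+ *   u8 pkey[MAXPROTKEYSIZE];
+ *   u32 pkeylen = sizeof(pkey), pkeytype;
+ *   rc = ep11_kblob2protkey(card, dom, blob, bloblen,
+ *                           pkey, &pkeylen, &pkeytype);
+ */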
+
+int ep11_findcard2(u32 **apqns, u32 *nr_apqns, u16 cardnr, u16 domain,
+		   int minhwtype, int minapi, const u8 *wkvp)
+{
+	struct zcrypt_device_status_ext *device_status;
+	u32 *_apqns = NULL, _nr_apqns = 0;
+	int i, card, dom, rc = -ENOMEM;
+	struct ep11_domain_info edi;
+	struct ep11_card_info eci;
+
+	/* fetch status of all crypto cards */
+	device_status = kvmalloc_array(MAX_ZDEV_ENTRIES_EXT,
+				       sizeof(struct zcrypt_device_status_ext),
+				       GFP_KERNEL);
+	if (!device_status)
+		return -ENOMEM;
+	zcrypt_device_status_mask_ext(device_status);
+
+	/* allocate 1k space for up to 256 apqns */
+	_apqns = kmalloc_array(256, sizeof(u32), GFP_KERNEL);
+	if (!_apqns) {
+		kvfree(device_status);
+		return -ENOMEM;
+	}
+
+	/* walk through all the crypto apqns */
+	for (i = 0; i < MAX_ZDEV_ENTRIES_EXT; i++) {
+		card = AP_QID_CARD(device_status[i].qid);
+		dom = AP_QID_QUEUE(device_status[i].qid);
+		/* check online state */
+		if (!device_status[i].online)
+			continue;
+		/* check for ep11 functions */
+		if (!(device_status[i].functions & 0x01))
+			continue;
+		/* check cardnr */
+		if (cardnr != 0xFFFF && card != cardnr)
+			continue;
+		/* check domain */
+		if (domain != 0xFFFF && dom != domain)
+			continue;
+		/* check min hardware type */
+		if (minhwtype && device_status[i].hwtype < minhwtype)
+			continue;
+		/* check min api version if given */
+		if (minapi > 0) {
+			if (ep11_get_card_info(card, &eci, 0))
+				continue;
+			if (minapi > eci.API_ord_nr)
+				continue;
+		}
+		/* check wkvp if given */
+		if (wkvp) {
+			if (ep11_get_domain_info(card, dom, &edi))
+				continue;
+			if (edi.cur_wk_state != '1')
+				continue;
+			if (memcmp(wkvp, edi.cur_wkvp, 16))
+				continue;
+		}
+		/* apqn passed all filtering criteria, add to the array */
+		if (_nr_apqns < 256)
+			_apqns[_nr_apqns++] = (((u16)card) << 16) | ((u16) dom);
+	}
+
+	/* nothing found? */
+	if (!_nr_apqns) {
+		kfree(_apqns);
+		rc = -ENODEV;
+	} else {
+		/* no re-allocation, simply return the _apqns array */
+		*apqns = _apqns;
+		*nr_apqns = _nr_apqns;
+		rc = 0;
+	}
+
+	kvfree(device_status);
+	return rc;
+}
+EXPORT_SYMBOL(ep11_findcard2);
+
+void __exit zcrypt_ep11misc_exit(void)
+{
+	card_cache_free();
+}
diff --git a/drivers/s390/crypto/zcrypt_ep11misc.h b/drivers/s390/crypto/zcrypt_ep11misc.h
new file mode 100644
index 0000000..1e02b19
--- /dev/null
+++ b/drivers/s390/crypto/zcrypt_ep11misc.h
@@ -0,0 +1,155 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ *  Copyright IBM Corp. 2019
+ *  Author(s): Harald Freudenberger <freude@linux.ibm.com>
+ *
+ *  Collection of EP11 misc functions used by zcrypt and pkey
+ */
+
+#ifndef _ZCRYPT_EP11MISC_H_
+#define _ZCRYPT_EP11MISC_H_
+
+#include <asm/zcrypt.h>
+#include <asm/pkey.h>
+
+#define EP11_API_V 4  /* highest known and supported EP11 API version */
+#define EP11_STRUCT_MAGIC 0x1234
+#define EP11_BLOB_PKEY_EXTRACTABLE 0x00200000
+
+/*
+ * Internal used values for the version field of the key header.
+ * Should match to the enum pkey_key_type in pkey.h.
+ */
+#define TOKVER_EP11_AES  0x03  /* EP11 AES key blob (old style) */
+#define TOKVER_EP11_AES_WITH_HEADER 0x06 /* EP11 AES key blob with header */
+#define TOKVER_EP11_ECC_WITH_HEADER 0x07 /* EP11 ECC key blob with header */
+
+/* inside view of an EP11 secure key blob */
+struct ep11keyblob {
+	union {
+		u8 session[32];
+		/* only used for PKEY_TYPE_EP11: */
+		struct {
+			u8  type;      /* 0x00 (TOKTYPE_NON_CCA) */
+			u8  res0;      /* unused */
+			u16 len;       /* total length in bytes of this blob */
+			u8  version;   /* 0x03 (TOKVER_EP11_AES) */
+			u8  res1;      /* unused */
+			u16 keybitlen; /* clear key bit len, 0 for unknown */
+		} head;
+	};
+	u8  wkvp[16];  /* wrapping key verification pattern */
+	u64 attr;      /* boolean key attributes */
+	u64 mode;      /* mode bits */
+	u16 version;   /* 0x1234, EP11_STRUCT_MAGIC */
+	u8  iv[14];
+	u8  encrypted_key_data[144];
+	u8  mac[32];
+} __packed;
+
+/* check ep11 key magic to find out if this is an ep11 key blob */
+static inline bool is_ep11_keyblob(const u8 *key)
+{
+	struct ep11keyblob *kb = (struct ep11keyblob *) key;
+
+	return (kb->version == EP11_STRUCT_MAGIC);
+}
+
+/*
+ * Simple check if the key blob is a valid EP11 AES key blob with header.
+ * If checkcpacfexport is enabled, the key is also checked for the
+ * attributes needed to export this key for CPACF use.
+ * Returns 0 on success or errno value on failure.
+ */
+int ep11_check_aes_key_with_hdr(debug_info_t *dbg, int dbflvl,
+				const u8 *key, size_t keylen, int checkcpacfexp);
+
+/*
+ * Simple check if the key blob is a valid EP11 ECC key blob with header.
+ * If checkcpacfexport is enabled, the key is also checked for the
+ * attributes needed to export this key for CPACF use.
+ * Returns 0 on success or errno value on failure.
+ */
+int ep11_check_ecc_key_with_hdr(debug_info_t *dbg, int dbflvl,
+				const u8 *key, size_t keylen, int checkcpacfexp);
+
+/*
+ * Simple check if the key blob is a valid EP11 AES key blob with
+ * the header in the session field (old style EP11 AES key).
+ * If checkcpacfexport is enabled, the key is also checked for the
+ * attributes needed to export this key for CPACF use.
+ * Returns 0 on success or errno value on failure.
+ */
+int ep11_check_aes_key(debug_info_t *dbg, int dbflvl,
+		       const u8 *key, size_t keylen, int checkcpacfexp);
+
+/* EP11 card info struct */
+struct ep11_card_info {
+	u32  API_ord_nr;    /* API ordinal number */
+	u16  FW_version;    /* Firmware major and minor version */
+	char serial[16];    /* serial number string (16 ascii, not 0x00 terminated) */
+	u64  op_mode;	    /* card operational mode(s) */
+};
+
+/* EP11 domain info struct */
+struct ep11_domain_info {
+	char cur_wk_state;  /* '0' invalid, '1' valid */
+	char new_wk_state;  /* '0' empty, '1' uncommitted, '2' committed */
+	u8   cur_wkvp[32];  /* current wrapping key verification pattern */
+	u8   new_wkvp[32];  /* new wrapping key verification pattern */
+	u64  op_mode;	    /* domain operational mode(s) */
+};
+
+/*
+ * Provide information about an EP11 card.
+ */
+int ep11_get_card_info(u16 card, struct ep11_card_info *info, int verify);
+
+/*
+ * Provide information about a domain within an EP11 card.
+ */
+int ep11_get_domain_info(u16 card, u16 domain, struct ep11_domain_info *info);
+
+/*
+ * Generate (random) EP11 AES secure key.
+ */
+int ep11_genaeskey(u16 card, u16 domain, u32 keybitsize, u32 keygenflags,
+		   u8 *keybuf, size_t *keybufsize);
+
+/*
+ * Generate EP11 AES secure key with given clear key value.
+ */
+int ep11_clr2keyblob(u16 cardnr, u16 domain, u32 keybitsize, u32 keygenflags,
+		     const u8 *clrkey, u8 *keybuf, size_t *keybufsize);
+
+/*
+ * Build a list of ep11 apqns meeting the following constraints:
+ * - apqn is online and is in fact an EP11 apqn
+ * - if cardnr is not FFFF only apqns with this cardnr
+ * - if domain is not FFFF only apqns with this domainnr
+ * - if minhwtype > 0 only apqns with hwtype >= minhwtype
+ * - if minapi > 0 only apqns with API_ord_nr >= minapi
+ * - if wkvp != NULL only apqns where the given wkvp (EP11_WKVPLEN bytes)
+ *   matches the first EP11_WKVPLEN bytes of the wkvp of the current wrapping
+ *   key for this domain. When a wkvp is given there will always be a re-fetch
+ *   of the domain info for the potential apqn - so this triggers a request
+ *   and reply round trip to each eligible apqn.
+ * The array of apqn entries is allocated with kmalloc and returned in *apqns;
+ * the number of apqns stored into the list is returned in *nr_apqns. One apqn
+ * entry is simply a 32 bit value with 16 bit cardnr and 16 bit domain nr and
+ * may be cast to struct pkey_apqn. The return value is either 0 for success
+ * or a negative errno value. If no apqn meeting the criteria is found,
+ * -ENODEV is returned.
+ */
+int ep11_findcard2(u32 **apqns, u32 *nr_apqns, u16 cardnr, u16 domain,
+		   int minhwtype, int minapi, const u8 *wkvp);
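+
+/*
+ * Usage sketch (hypothetical caller): list all online EP11 apqns with
+ * no further filtering; on success the array needs to be kfree'd:
+ *   u32 *apqns, nr_apqns;
+ *   rc = ep11_findcard2(&apqns, &nr_apqns, 0xFFFF, 0xFFFF, 0, 0, NULL);
+ *   ...
+ *   kfree(apqns);
+ */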
+
+/*
+ * Derive protected key from EP11 key blob (AES and ECC keys).
+ */
+int ep11_kblob2protkey(u16 card, u16 dom, const u8 *key, size_t keylen,
+		       u8 *protkey, u32 *protkeylen, u32 *protkeytype);
+
+void zcrypt_ep11misc_exit(void);
+
+#endif /* _ZCRYPT_EP11MISC_H_ */
diff --git a/drivers/s390/crypto/zcrypt_error.h b/drivers/s390/crypto/zcrypt_error.h
index 4f4dd9d..39e626e 100644
--- a/drivers/s390/crypto/zcrypt_error.h
+++ b/drivers/s390/crypto/zcrypt_error.h
@@ -52,7 +52,6 @@
 #define REP82_ERROR_INVALID_COMMAND	    0x30
 #define REP82_ERROR_MALFORMED_MSG	    0x40
 #define REP82_ERROR_INVALID_SPECIAL_CMD	    0x41
-#define REP82_ERROR_INVALID_DOMAIN_PRECHECK 0x42
 #define REP82_ERROR_RESERVED_FIELDO	    0x50 /* old value	*/
 #define REP82_ERROR_WORD_ALIGNMENT	    0x60
 #define REP82_ERROR_MESSAGE_LENGTH	    0x80
@@ -67,7 +66,6 @@
 #define REP82_ERROR_ZERO_BUFFER_LEN	    0xB0
 
 #define REP88_ERROR_MODULE_FAILURE	    0x10
-
 #define REP88_ERROR_MESSAGE_TYPE	    0x20
 #define REP88_ERROR_MESSAGE_MALFORMD	    0x22
 #define REP88_ERROR_MESSAGE_LENGTH	    0x23
@@ -80,83 +78,61 @@
 static inline int convert_error(struct zcrypt_queue *zq,
 				struct ap_message *reply)
 {
-	struct error_hdr *ehdr = reply->message;
+	struct error_hdr *ehdr = reply->msg;
 	int card = AP_QID_CARD(zq->queue->qid);
 	int queue = AP_QID_QUEUE(zq->queue->qid);
 
 	switch (ehdr->reply_code) {
-	case REP82_ERROR_OPERAND_INVALID:
-	case REP82_ERROR_OPERAND_SIZE:
-	case REP82_ERROR_EVEN_MOD_IN_OPND:
-	case REP88_ERROR_MESSAGE_MALFORMD:
-	case REP82_ERROR_INVALID_DOMAIN_PRECHECK:
-	case REP82_ERROR_INVALID_DOMAIN_PENDING:
-	case REP82_ERROR_INVALID_SPECIAL_CMD:
-	case REP82_ERROR_FILTERED_BY_HYPERVISOR:
-	//   REP88_ERROR_INVALID_KEY		// '82' CEX2A
-	//   REP88_ERROR_OPERAND		// '84' CEX2A
-	//   REP88_ERROR_OPERAND_EVEN_MOD	// '85' CEX2A
-		/* Invalid input data. */
+	case REP82_ERROR_INVALID_MSG_LEN:	 /* 0x23 */
+	case REP82_ERROR_RESERVD_FIELD:		 /* 0x24 */
+	case REP82_ERROR_FORMAT_FIELD:		 /* 0x29 */
+	case REP82_ERROR_MALFORMED_MSG:		 /* 0x40 */
+	case REP82_ERROR_INVALID_SPECIAL_CMD:	 /* 0x41 */
+	case REP82_ERROR_MESSAGE_LENGTH:	 /* 0x80 */
+	case REP82_ERROR_OPERAND_INVALID:	 /* 0x82 */
+	case REP82_ERROR_OPERAND_SIZE:		 /* 0x84 */
+	case REP82_ERROR_EVEN_MOD_IN_OPND:	 /* 0x85 */
+	case REP82_ERROR_INVALID_DOMAIN_PENDING: /* 0x8A */
+	case REP82_ERROR_FILTERED_BY_HYPERVISOR: /* 0x8B */
+	case REP82_ERROR_PACKET_TRUNCATED:	 /* 0xA0 */
+	case REP88_ERROR_MESSAGE_MALFORMD:	 /* 0x22 */
+	case REP88_ERROR_KEY_TYPE:		 /* 0x34 */
+		/* RY indicates malformed request */
 		ZCRYPT_DBF(DBF_WARN,
-			   "device=%02x.%04x reply=0x%02x => rc=EINVAL\n",
+			   "dev=%02x.%04x RY=0x%02x => rc=EINVAL\n",
 			   card, queue, ehdr->reply_code);
 		return -EINVAL;
-	case REP82_ERROR_MESSAGE_TYPE:
-	//   REP88_ERROR_MESSAGE_TYPE		// '20' CEX2A
+	case REP82_ERROR_MACHINE_FAILURE:	 /* 0x10 */
+	case REP82_ERROR_MESSAGE_TYPE:		 /* 0x20 */
+	case REP82_ERROR_TRANSPORT_FAIL:	 /* 0x90 */
 		/*
-		 * To sent a message of the wrong type is a bug in the
-		 * device driver. Send error msg, disable the device
-		 * and then repeat the request.
+		 * Msg of wrong type or card/infrastructure failure.
+		 * Trigger rescan of the ap bus, trigger retry request.
 		 */
 		atomic_set(&zcrypt_rescan_req, 1);
-		zq->online = 0;
-		pr_err("Cryptographic device %02x.%04x failed and was set offline\n",
-		       card, queue);
-		ZCRYPT_DBF(DBF_ERR,
-			   "device=%02x.%04x reply=0x%02x => online=0 rc=EAGAIN\n",
-			   card, queue, ehdr->reply_code);
-		return -EAGAIN;
-	case REP82_ERROR_TRANSPORT_FAIL:
-		/* Card or infrastructure failure, disable card */
-		atomic_set(&zcrypt_rescan_req, 1);
-		zq->online = 0;
-		pr_err("Cryptographic device %02x.%04x failed and was set offline\n",
-		       card, queue);
 		/* For type 86 response show the apfs value (failure reason) */
-		if (ehdr->type == TYPE86_RSP_CODE) {
+		if (ehdr->reply_code == REP82_ERROR_TRANSPORT_FAIL &&
+		    ehdr->type == TYPE86_RSP_CODE) {
 			struct {
 				struct type86_hdr hdr;
 				struct type86_fmt2_ext fmt2;
-			} __packed * head = reply->message;
+			} __packed * head = reply->msg;
 			unsigned int apfs = *((u32 *)head->fmt2.apfs);
 
-			ZCRYPT_DBF(DBF_ERR,
-				   "device=%02x.%04x reply=0x%02x apfs=0x%x => online=0 rc=EAGAIN\n",
-				   card, queue, apfs, ehdr->reply_code);
+			ZCRYPT_DBF(DBF_WARN,
+				   "dev=%02x.%04x RY=0x%02x apfs=0x%x => bus rescan, rc=EAGAIN\n",
+				   card, queue, ehdr->reply_code, apfs);
 		} else
-			ZCRYPT_DBF(DBF_ERR,
-				   "device=%02x.%04x reply=0x%02x => online=0 rc=EAGAIN\n",
+			ZCRYPT_DBF(DBF_WARN,
+				   "dev=%02x.%04x RY=0x%02x => bus rescan, rc=EAGAIN\n",
 				   card, queue, ehdr->reply_code);
 		return -EAGAIN;
-	case REP82_ERROR_MACHINE_FAILURE:
-	//   REP88_ERROR_MODULE_FAILURE		// '10' CEX2A
-		/* If a card fails disable it and repeat the request. */
-		atomic_set(&zcrypt_rescan_req, 1);
-		zq->online = 0;
-		pr_err("Cryptographic device %02x.%04x failed and was set offline\n",
-		       card, queue);
-		ZCRYPT_DBF(DBF_ERR,
-			   "device=%02x.%04x reply=0x%02x => online=0 rc=EAGAIN\n",
+	default:
+		/* Assume request is valid and a retry will be worth it */
+		ZCRYPT_DBF(DBF_WARN,
+			   "dev=%02x.%04x RY=0x%02x => rc=EAGAIN\n",
 			   card, queue, ehdr->reply_code);
 		return -EAGAIN;
-	default:
-		zq->online = 0;
-		pr_err("Cryptographic device %02x.%04x failed and was set offline\n",
-		       card, queue);
-		ZCRYPT_DBF(DBF_ERR,
-			   "device=%02x.%04x reply=0x%02x => online=0 rc=EAGAIN\n",
-			   card, queue, ehdr->reply_code);
-		return -EAGAIN;	/* repeat the request on a different device. */
 	}
 }
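With this rework convert_error() collapses to three outcomes. A caller-side
sketch of how a request distributor would typically consume them
(illustrative, not part of the patch):

	int rc = convert_error(zq, reply);

	switch (rc) {
	case -EINVAL:
		/* malformed request - report to the caller, do not retry */
		break;
	case -EAGAIN:
		/*
		 * Card/transport trouble or an unknown reply code. Where
		 * needed an AP bus rescan has been triggered above, so the
		 * request may be retried, possibly on another device.
		 */
		break;
	}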
 
diff --git a/drivers/s390/crypto/zcrypt_msgtype50.c b/drivers/s390/crypto/zcrypt_msgtype50.c
index fc4295b..bf14ee4 100644
--- a/drivers/s390/crypto/zcrypt_msgtype50.c
+++ b/drivers/s390/crypto/zcrypt_msgtype50.c
@@ -207,10 +207,10 @@
 	mod_len = mex->inputdatalength;
 
 	if (mod_len <= 128) {
-		struct type50_meb1_msg *meb1 = ap_msg->message;
+		struct type50_meb1_msg *meb1 = ap_msg->msg;
 
 		memset(meb1, 0, sizeof(*meb1));
-		ap_msg->length = sizeof(*meb1);
+		ap_msg->len = sizeof(*meb1);
 		meb1->header.msg_type_code = TYPE50_TYPE_CODE;
 		meb1->header.msg_len = sizeof(*meb1);
 		meb1->keyblock_type = TYPE50_MEB1_FMT;
@@ -218,10 +218,10 @@
 		exp = meb1->exponent + sizeof(meb1->exponent) - mod_len;
 		inp = meb1->message + sizeof(meb1->message) - mod_len;
 	} else if (mod_len <= 256) {
-		struct type50_meb2_msg *meb2 = ap_msg->message;
+		struct type50_meb2_msg *meb2 = ap_msg->msg;
 
 		memset(meb2, 0, sizeof(*meb2));
-		ap_msg->length = sizeof(*meb2);
+		ap_msg->len = sizeof(*meb2);
 		meb2->header.msg_type_code = TYPE50_TYPE_CODE;
 		meb2->header.msg_len = sizeof(*meb2);
 		meb2->keyblock_type = TYPE50_MEB2_FMT;
@@ -229,10 +229,10 @@
 		exp = meb2->exponent + sizeof(meb2->exponent) - mod_len;
 		inp = meb2->message + sizeof(meb2->message) - mod_len;
 	} else if (mod_len <= 512) {
-		struct type50_meb3_msg *meb3 = ap_msg->message;
+		struct type50_meb3_msg *meb3 = ap_msg->msg;
 
 		memset(meb3, 0, sizeof(*meb3));
-		ap_msg->length = sizeof(*meb3);
+		ap_msg->len = sizeof(*meb3);
 		meb3->header.msg_type_code = TYPE50_TYPE_CODE;
 		meb3->header.msg_len = sizeof(*meb3);
 		meb3->keyblock_type = TYPE50_MEB3_FMT;
@@ -246,6 +246,12 @@
 	    copy_from_user(exp, mex->b_key, mod_len) ||
 	    copy_from_user(inp, mex->inputdata, mod_len))
 		return -EFAULT;
+
+#ifdef CONFIG_ZCRYPT_DEBUG
+	if (ap_msg->fi.flags & AP_FI_FLAG_TOGGLE_SPECIAL)
+		ap_msg->flags ^= AP_MSG_FLAG_SPECIAL;
+#endif
+
 	return 0;
 }
 
@@ -275,10 +281,10 @@
 	 * 512 byte modulus (4k keys).
 	 */
 	if (mod_len <= 128) {		/* up to 1024 bit key size */
-		struct type50_crb1_msg *crb1 = ap_msg->message;
+		struct type50_crb1_msg *crb1 = ap_msg->msg;
 
 		memset(crb1, 0, sizeof(*crb1));
-		ap_msg->length = sizeof(*crb1);
+		ap_msg->len = sizeof(*crb1);
 		crb1->header.msg_type_code = TYPE50_TYPE_CODE;
 		crb1->header.msg_len = sizeof(*crb1);
 		crb1->keyblock_type = TYPE50_CRB1_FMT;
@@ -289,10 +295,10 @@
 		u = crb1->u + sizeof(crb1->u) - short_len;
 		inp = crb1->message + sizeof(crb1->message) - mod_len;
 	} else if (mod_len <= 256) {	/* up to 2048 bit key size */
-		struct type50_crb2_msg *crb2 = ap_msg->message;
+		struct type50_crb2_msg *crb2 = ap_msg->msg;
 
 		memset(crb2, 0, sizeof(*crb2));
-		ap_msg->length = sizeof(*crb2);
+		ap_msg->len = sizeof(*crb2);
 		crb2->header.msg_type_code = TYPE50_TYPE_CODE;
 		crb2->header.msg_len = sizeof(*crb2);
 		crb2->keyblock_type = TYPE50_CRB2_FMT;
@@ -304,10 +310,10 @@
 		inp = crb2->message + sizeof(crb2->message) - mod_len;
 	} else if ((mod_len <= 512) &&	/* up to 4096 bit key size */
 		   (zq->zcard->max_mod_size == CEX3A_MAX_MOD_SIZE)) {
-		struct type50_crb3_msg *crb3 = ap_msg->message;
+		struct type50_crb3_msg *crb3 = ap_msg->msg;
 
 		memset(crb3, 0, sizeof(*crb3));
-		ap_msg->length = sizeof(*crb3);
+		ap_msg->len = sizeof(*crb3);
 		crb3->header.msg_type_code = TYPE50_TYPE_CODE;
 		crb3->header.msg_len = sizeof(*crb3);
 		crb3->keyblock_type = TYPE50_CRB3_FMT;
@@ -332,6 +338,11 @@
 	    copy_from_user(inp, crt->inputdata, mod_len))
 		return -EFAULT;
 
+#ifdef CONFIG_ZCRYPT_DEBUG
+	if (ap_msg->fi.flags & AP_FI_FLAG_TOGGLE_SPECIAL)
+		ap_msg->flags ^= AP_MSG_FLAG_SPECIAL;
+#endif
+
 	return 0;
 }
 
@@ -350,39 +361,39 @@
 			  char __user *outputdata,
 			  unsigned int outputdatalength)
 {
-	struct type80_hdr *t80h = reply->message;
+	struct type80_hdr *t80h = reply->msg;
 	unsigned char *data;
 
 	if (t80h->len < sizeof(*t80h) + outputdatalength) {
 		/* The result is too short, the CEXxA card may not do that.. */
 		zq->online = 0;
-		pr_err("Cryptographic device %02x.%04x failed and was set offline\n",
+		pr_err("Crypto dev=%02x.%04x code=0x%02x => online=0 rc=EAGAIN\n",
 		       AP_QID_CARD(zq->queue->qid),
-		       AP_QID_QUEUE(zq->queue->qid));
-		ZCRYPT_DBF(DBF_ERR,
-			   "device=%02x.%04x code=0x%02x => online=0 rc=EAGAIN\n",
-			   AP_QID_CARD(zq->queue->qid),
-			   AP_QID_QUEUE(zq->queue->qid),
-			   t80h->code);
-		return -EAGAIN;	/* repeat the request on a different device. */
+		       AP_QID_QUEUE(zq->queue->qid),
+		       t80h->code);
+		ZCRYPT_DBF_ERR("dev=%02x.%04x code=0x%02x => online=0 rc=EAGAIN\n",
+			       AP_QID_CARD(zq->queue->qid),
+			       AP_QID_QUEUE(zq->queue->qid),
+			       t80h->code);
+		return -EAGAIN;
 	}
 	if (zq->zcard->user_space_type == ZCRYPT_CEX2A)
 		BUG_ON(t80h->len > CEX2A_MAX_RESPONSE_SIZE);
 	else
 		BUG_ON(t80h->len > CEX3A_MAX_RESPONSE_SIZE);
-	data = reply->message + t80h->len - outputdatalength;
+	data = reply->msg + t80h->len - outputdatalength;
 	if (copy_to_user(outputdata, data, outputdatalength))
 		return -EFAULT;
 	return 0;
 }
 
-static int convert_response(struct zcrypt_queue *zq,
-			    struct ap_message *reply,
-			    char __user *outputdata,
-			    unsigned int outputdatalength)
+static int convert_response_cex2a(struct zcrypt_queue *zq,
+				  struct ap_message *reply,
+				  char __user *outputdata,
+				  unsigned int outputdatalength)
 {
 	/* Response type byte is the second byte in the response. */
-	unsigned char rtype = ((unsigned char *) reply->message)[1];
+	unsigned char rtype = ((unsigned char *) reply->msg)[1];
 
 	switch (rtype) {
 	case TYPE82_RSP_CODE:
@@ -393,15 +404,15 @@
 				      outputdata, outputdatalength);
 	default: /* Unknown response type, this should NEVER EVER happen */
 		zq->online = 0;
-		pr_err("Cryptographic device %02x.%04x failed and was set offline\n",
+		pr_err("Crypto dev=%02x.%04x unknown response type 0x%02x => online=0 rc=EAGAIN\n",
 		       AP_QID_CARD(zq->queue->qid),
-		       AP_QID_QUEUE(zq->queue->qid));
-		ZCRYPT_DBF(DBF_ERR,
-			   "device=%02x.%04x rtype=0x%02x => online=0 rc=EAGAIN\n",
-			   AP_QID_CARD(zq->queue->qid),
-			   AP_QID_QUEUE(zq->queue->qid),
-			   (unsigned int) rtype);
-		return -EAGAIN;	/* repeat the request on a different device. */
+		       AP_QID_QUEUE(zq->queue->qid),
+		       (int) rtype);
+		ZCRYPT_DBF_ERR("dev=%02x.%04x unknown response type 0x%02x => online=0 rc=EAGAIN\n",
+			       AP_QID_CARD(zq->queue->qid),
+			       AP_QID_QUEUE(zq->queue->qid),
+			       (int) rtype);
+		return -EAGAIN;
 	}
 }
 
@@ -422,22 +433,20 @@
 		.reply_code = REP82_ERROR_MACHINE_FAILURE,
 	};
 	struct type80_hdr *t80h;
-	int length;
+	int len;
 
 	/* Copy the reply message to the request message buffer. */
 	if (!reply)
 		goto out;	/* ap_msg->rc indicates the error */
-	t80h = reply->message;
+	t80h = reply->msg;
 	if (t80h->type == TYPE80_RSP_CODE) {
 		if (aq->ap_dev.device_type == AP_DEVICE_TYPE_CEX2A)
-			length = min_t(int,
-				       CEX2A_MAX_RESPONSE_SIZE, t80h->len);
+			len = min_t(int, CEX2A_MAX_RESPONSE_SIZE, t80h->len);
 		else
-			length = min_t(int,
-				       CEX3A_MAX_RESPONSE_SIZE, t80h->len);
-		memcpy(msg->message, reply->message, length);
+			len = min_t(int, CEX3A_MAX_RESPONSE_SIZE, t80h->len);
+		memcpy(msg->msg, reply->msg, len);
 	} else
-		memcpy(msg->message, reply->message, sizeof(error_reply));
+		memcpy(msg->msg, reply->msg, sizeof(error_reply));
 out:
 	complete((struct completion *) msg->private);
 }
@@ -452,41 +461,41 @@
  * @mex: pointer to the modexpo request buffer
  */
 static long zcrypt_cex2a_modexpo(struct zcrypt_queue *zq,
-				 struct ica_rsa_modexpo *mex)
+				 struct ica_rsa_modexpo *mex,
+				 struct ap_message *ap_msg)
 {
-	struct ap_message ap_msg;
 	struct completion work;
 	int rc;
 
-	ap_init_message(&ap_msg);
 	if (zq->zcard->user_space_type == ZCRYPT_CEX2A)
-		ap_msg.message = kmalloc(MSGTYPE50_CRB2_MAX_MSG_SIZE,
-					 GFP_KERNEL);
+		ap_msg->msg = kmalloc(MSGTYPE50_CRB2_MAX_MSG_SIZE, GFP_KERNEL);
 	else
-		ap_msg.message = kmalloc(MSGTYPE50_CRB3_MAX_MSG_SIZE,
-					 GFP_KERNEL);
-	if (!ap_msg.message)
+		ap_msg->msg = kmalloc(MSGTYPE50_CRB3_MAX_MSG_SIZE, GFP_KERNEL);
+	if (!ap_msg->msg)
 		return -ENOMEM;
-	ap_msg.receive = zcrypt_cex2a_receive;
-	ap_msg.psmid = (((unsigned long long) current->pid) << 32) +
-				atomic_inc_return(&zcrypt_step);
-	ap_msg.private = &work;
-	rc = ICAMEX_msg_to_type50MEX_msg(zq, &ap_msg, mex);
+	ap_msg->receive = zcrypt_cex2a_receive;
+	ap_msg->psmid = (((unsigned long long) current->pid) << 32) +
+		atomic_inc_return(&zcrypt_step);
+	ap_msg->private = &work;
+	rc = ICAMEX_msg_to_type50MEX_msg(zq, ap_msg, mex);
 	if (rc)
-		goto out_free;
+		goto out;
 	init_completion(&work);
-	ap_queue_message(zq->queue, &ap_msg);
+	rc = ap_queue_message(zq->queue, ap_msg);
+	if (rc)
+		goto out;
 	rc = wait_for_completion_interruptible(&work);
 	if (rc == 0) {
-		rc = ap_msg.rc;
+		rc = ap_msg->rc;
 		if (rc == 0)
-			rc = convert_response(zq, &ap_msg, mex->outputdata,
-					      mex->outputdatalength);
+			rc = convert_response_cex2a(zq, ap_msg,
+						    mex->outputdata,
+						    mex->outputdatalength);
 	} else
 		/* Signal pending. */
-		ap_cancel_message(zq->queue, &ap_msg);
-out_free:
-	kfree(ap_msg.message);
+		ap_cancel_message(zq->queue, ap_msg);
+out:
+	ap_msg->private = NULL;
 	return rc;
 }
 
@@ -498,41 +507,41 @@
  * @crt: pointer to the modexpoc_crt request buffer
  */
 static long zcrypt_cex2a_modexpo_crt(struct zcrypt_queue *zq,
-				     struct ica_rsa_modexpo_crt *crt)
+				     struct ica_rsa_modexpo_crt *crt,
+				     struct ap_message *ap_msg)
 {
-	struct ap_message ap_msg;
 	struct completion work;
 	int rc;
 
-	ap_init_message(&ap_msg);
 	if (zq->zcard->user_space_type == ZCRYPT_CEX2A)
-		ap_msg.message = kmalloc(MSGTYPE50_CRB2_MAX_MSG_SIZE,
-					 GFP_KERNEL);
+		ap_msg->msg = kmalloc(MSGTYPE50_CRB2_MAX_MSG_SIZE, GFP_KERNEL);
 	else
-		ap_msg.message = kmalloc(MSGTYPE50_CRB3_MAX_MSG_SIZE,
-					 GFP_KERNEL);
-	if (!ap_msg.message)
+		ap_msg->msg = kmalloc(MSGTYPE50_CRB3_MAX_MSG_SIZE, GFP_KERNEL);
+	if (!ap_msg->msg)
 		return -ENOMEM;
-	ap_msg.receive = zcrypt_cex2a_receive;
-	ap_msg.psmid = (((unsigned long long) current->pid) << 32) +
-				atomic_inc_return(&zcrypt_step);
-	ap_msg.private = &work;
-	rc = ICACRT_msg_to_type50CRT_msg(zq, &ap_msg, crt);
+	ap_msg->receive = zcrypt_cex2a_receive;
+	ap_msg->psmid = (((unsigned long long) current->pid) << 32) +
+		atomic_inc_return(&zcrypt_step);
+	ap_msg->private = &work;
+	rc = ICACRT_msg_to_type50CRT_msg(zq, ap_msg, crt);
 	if (rc)
-		goto out_free;
+		goto out;
 	init_completion(&work);
-	ap_queue_message(zq->queue, &ap_msg);
+	rc = ap_queue_message(zq->queue, ap_msg);
+	if (rc)
+		goto out;
 	rc = wait_for_completion_interruptible(&work);
 	if (rc == 0) {
-		rc = ap_msg.rc;
+		rc = ap_msg->rc;
 		if (rc == 0)
-			rc = convert_response(zq, &ap_msg, crt->outputdata,
-					      crt->outputdatalength);
+			rc = convert_response_cex2a(zq, ap_msg,
+						    crt->outputdata,
+						    crt->outputdatalength);
 	} else
 		/* Signal pending. */
-		ap_cancel_message(zq->queue, &ap_msg);
-out_free:
-	kfree(ap_msg.message);
+		ap_cancel_message(zq->queue, ap_msg);
+out:
+	ap_msg->private = NULL;
 	return rc;
 }
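Both functions now expect an ap_message owned by the caller instead of a
stack-local one. A sketch of the intended lifecycle (illustrative; it mirrors
the rule spelled out for get_cprb_fc() in zcrypt_msgtype6.c below and assumes
the ap bus helper ap_release_message()):

	struct ap_message ap_msg;
	long rc;

	ap_init_message(&ap_msg);
	rc = zcrypt_cex2a_modexpo(zq, mex, &ap_msg);
	/* the msg buffer was kmalloc'ed inside, so always release it */
	ap_release_message(&ap_msg);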
 
diff --git a/drivers/s390/crypto/zcrypt_msgtype6.c b/drivers/s390/crypto/zcrypt_msgtype6.c
index a36251d..307f906 100644
--- a/drivers/s390/crypto/zcrypt_msgtype6.c
+++ b/drivers/s390/crypto/zcrypt_msgtype6.c
@@ -266,7 +266,7 @@
 		struct function_and_rules_block fr;
 		unsigned short length;
 		char text[0];
-	} __packed * msg = ap_msg->message;
+	} __packed * msg = ap_msg->msg;
 	int size;
 
 	/*
@@ -301,7 +301,7 @@
 
 	msg->cprbx.req_parml = size - sizeof(msg->hdr) - sizeof(msg->cprbx);
 
-	ap_msg->length = size;
+	ap_msg->len = size;
 	return 0;
 }
 
@@ -336,7 +336,7 @@
 		struct function_and_rules_block fr;
 		unsigned short length;
 		char text[0];
-	} __packed * msg = ap_msg->message;
+	} __packed * msg = ap_msg->msg;
 	int size;
 
 	/*
@@ -370,7 +370,7 @@
 
 	msg->fr = static_pkd_fnr;
 
-	ap_msg->length = size;
+	ap_msg->len = size;
 	return 0;
 }
 
@@ -388,7 +388,7 @@
 	struct type86_fmt2_ext fmt2;
 } __packed;
 
-static int XCRB_msg_to_type6CPRB_msgX(struct ap_message *ap_msg,
+static int XCRB_msg_to_type6CPRB_msgX(bool userspace, struct ap_message *ap_msg,
 				      struct ica_xcRB *xcRB,
 				      unsigned int *fcode,
 				      unsigned short **dom)
@@ -400,11 +400,11 @@
 	struct {
 		struct type6_hdr hdr;
 		struct CPRBX cprbx;
-	} __packed * msg = ap_msg->message;
+	} __packed * msg = ap_msg->msg;
 
 	int rcblen = CEIL4(xcRB->request_control_blk_length);
 	int replylen, req_sumlen, resp_sumlen;
-	char *req_data = ap_msg->message + sizeof(struct type6_hdr) + rcblen;
+	char *req_data = ap_msg->msg + sizeof(struct type6_hdr) + rcblen;
 	char *function_code;
 
 	if (CEIL4(xcRB->request_control_blk_length) <
@@ -412,10 +412,10 @@
 		return -EINVAL; /* overflow after alignment*/
 
 	/* length checks */
-	ap_msg->length = sizeof(struct type6_hdr) +
+	ap_msg->len = sizeof(struct type6_hdr) +
 		CEIL4(xcRB->request_control_blk_length) +
 		xcRB->request_data_length;
-	if (ap_msg->length > MSGTYPE06_MAX_MSG_SIZE)
+	if (ap_msg->len > MSGTYPE06_MAX_MSG_SIZE)
 		return -EINVAL;
 
 	/*
@@ -465,8 +465,8 @@
 	msg->hdr.FromCardLen2 = xcRB->reply_data_length;
 
 	/* prepare CPRB */
-	if (copy_from_user(&(msg->cprbx), xcRB->request_control_blk_addr,
-		    xcRB->request_control_blk_length))
+	if (z_copy_from_user(userspace, &(msg->cprbx), xcRB->request_control_blk_addr,
+			     xcRB->request_control_blk_length))
 		return -EFAULT;
 	if (msg->cprbx.cprb_len + sizeof(msg->hdr.function_code) >
 	    xcRB->request_control_blk_length)
@@ -480,22 +480,25 @@
 
 	if (memcmp(function_code, "US", 2) == 0
 	    || memcmp(function_code, "AU", 2) == 0)
-		ap_msg->special = 1;
-	else
-		ap_msg->special = 0;
+		ap_msg->flags |= AP_MSG_FLAG_SPECIAL;
+
+#ifdef CONFIG_ZCRYPT_DEBUG
+	if (ap_msg->fi.flags & AP_FI_FLAG_TOGGLE_SPECIAL)
+		ap_msg->flags ^= AP_MSG_FLAG_SPECIAL;
+#endif
 
 	/* copy data block */
 	if (xcRB->request_data_length &&
-	    copy_from_user(req_data, xcRB->request_data_address,
-		xcRB->request_data_length))
+	    z_copy_from_user(userspace, req_data, xcRB->request_data_address,
+			     xcRB->request_data_length))
 		return -EFAULT;
 
 	return 0;
 }
 
-static int xcrb_msg_to_type6_ep11cprb_msgx(struct ap_message *ap_msg,
-				       struct ep11_urb *xcRB,
-				       unsigned int *fcode)
+static int xcrb_msg_to_type6_ep11cprb_msgx(bool userspace, struct ap_message *ap_msg,
+					   struct ep11_urb *xcRB,
+					   unsigned int *fcode)
 {
 	unsigned int lfmt;
 	static struct type6_hdr static_type6_ep11_hdr = {
@@ -512,7 +515,7 @@
 		struct ep11_cprb cprbx;
 		unsigned char	pld_tag;	/* fixed value 0x30 */
 		unsigned char	pld_lenfmt;	/* payload length format */
-	} __packed * msg = ap_msg->message;
+	} __packed * msg = ap_msg->msg;
 
 	struct pld_hdr {
 		unsigned char	func_tag;	/* fixed value 0x4 */
@@ -527,7 +530,7 @@
 		return -EINVAL; /* overflow after alignment*/
 
 	/* length checks */
-	ap_msg->length = sizeof(struct type6_hdr) + xcRB->req_len;
+	ap_msg->len = sizeof(struct type6_hdr) + xcRB->req_len;
 	if (CEIL4(xcRB->req_len) > MSGTYPE06_MAX_MSG_SIZE -
 				   (sizeof(struct type6_hdr)))
 		return -EINVAL;
@@ -545,8 +548,8 @@
 	msg->hdr.FromCardLen1 = xcRB->resp_len;
 
 	/* Import CPRB data from the ioctl input parameter */
-	if (copy_from_user(&(msg->cprbx.cprb_len),
-			   (char __force __user *)xcRB->req, xcRB->req_len)) {
+	if (z_copy_from_user(userspace, &(msg->cprbx.cprb_len),
+			     (char __force __user *)xcRB->req, xcRB->req_len)) {
 		return -EFAULT;
 	}
 
@@ -569,7 +572,12 @@
 
 	/* enable special processing based on the cprbs flags special bit */
 	if (msg->cprbx.flags & 0x20)
-		ap_msg->special = 1;
+		ap_msg->flags |= AP_MSG_FLAG_SPECIAL;
+
+#ifdef CONFIG_ZCRYPT_DEBUG
+	if (ap_msg->fi.flags & AP_FI_FLAG_TOGGLE_SPECIAL)
+		ap_msg->flags ^= AP_MSG_FLAG_SPECIAL;
+#endif
 
 	return 0;
 }
@@ -590,7 +598,7 @@
 	struct CPRBX cprbx;
 	unsigned char pad[4];	/* 4 byte function code/rules block ? */
 	unsigned short length;
-	char text[0];
+	char text[];
 } __packed;
 
 struct type86_ep11_reply {
@@ -639,7 +647,7 @@
 		0x35, 0x9D, 0xD3, 0xD3, 0xA7, 0x9D, 0x5D, 0x41,
 		0x6F, 0x65, 0x1B, 0xCF, 0xA9, 0x87, 0x91, 0x09
 	};
-	struct type86x_reply *msg = reply->message;
+	struct type86x_reply *msg = reply->msg;
 	unsigned short service_rc, service_rs;
 	unsigned int reply_len, pad_len;
 	char *data;
@@ -652,23 +660,22 @@
 		    (service_rc == 8 && service_rs == 72) ||
 		    (service_rc == 8 && service_rs == 770) ||
 		    (service_rc == 12 && service_rs == 769)) {
-			ZCRYPT_DBF(DBF_DEBUG,
-				   "device=%02x.%04x rc/rs=%d/%d => rc=EINVAL\n",
-				   AP_QID_CARD(zq->queue->qid),
-				   AP_QID_QUEUE(zq->queue->qid),
-				   (int) service_rc, (int) service_rs);
+			ZCRYPT_DBF_WARN("dev=%02x.%04x rc/rs=%d/%d => rc=EINVAL\n",
+					AP_QID_CARD(zq->queue->qid),
+					AP_QID_QUEUE(zq->queue->qid),
+					(int) service_rc, (int) service_rs);
 			return -EINVAL;
 		}
 		zq->online = 0;
-		pr_err("Cryptographic device %02x.%04x failed and was set offline\n",
+		pr_err("Crypto dev=%02x.%04x rc/rs=%d/%d online=0 rc=EAGAIN\n",
 		       AP_QID_CARD(zq->queue->qid),
-		       AP_QID_QUEUE(zq->queue->qid));
-		ZCRYPT_DBF(DBF_ERR,
-			   "device=%02x.%04x rc/rs=%d/%d => online=0 rc=EAGAIN\n",
-			   AP_QID_CARD(zq->queue->qid),
-			   AP_QID_QUEUE(zq->queue->qid),
-			   (int) service_rc, (int) service_rs);
-		return -EAGAIN;	/* repeat the request on a different device. */
+		       AP_QID_QUEUE(zq->queue->qid),
+		       (int) service_rc, (int) service_rs);
+		ZCRYPT_DBF_ERR("dev=%02x.%04x rc/rs=%d/%d => online=0 rc=EAGAIN\n",
+			       AP_QID_CARD(zq->queue->qid),
+			       AP_QID_QUEUE(zq->queue->qid),
+			       (int) service_rc, (int) service_rs);
+		return -EAGAIN;
 	}
 	data = msg->text;
 	reply_len = msg->length - 2;
@@ -709,23 +716,23 @@
  *
  * Returns 0 on success or -EINVAL, -EFAULT, -EAGAIN in case of an error.
  */
-static int convert_type86_xcrb(struct zcrypt_queue *zq,
+static int convert_type86_xcrb(bool userspace, struct zcrypt_queue *zq,
 			       struct ap_message *reply,
 			       struct ica_xcRB *xcRB)
 {
-	struct type86_fmt2_msg *msg = reply->message;
-	char *data = reply->message;
+	struct type86_fmt2_msg *msg = reply->msg;
+	char *data = reply->msg;
 
 	/* Copy CPRB to user */
-	if (copy_to_user(xcRB->reply_control_blk_addr,
-		data + msg->fmt2.offset1, msg->fmt2.count1))
+	if (z_copy_to_user(userspace, xcRB->reply_control_blk_addr,
+			   data + msg->fmt2.offset1, msg->fmt2.count1))
 		return -EFAULT;
 	xcRB->reply_control_blk_length = msg->fmt2.count1;
 
 	/* Copy data buffer to user */
 	if (msg->fmt2.count2)
-		if (copy_to_user(xcRB->reply_data_addr,
-			data + msg->fmt2.offset2, msg->fmt2.count2))
+		if (z_copy_to_user(userspace, xcRB->reply_data_addr,
+				   data + msg->fmt2.offset2, msg->fmt2.count2))
 			return -EFAULT;
 	xcRB->reply_data_length = msg->fmt2.count2;
 	return 0;
@@ -740,19 +747,19 @@
  *
  * Returns 0 on success or -EINVAL, -EFAULT, -EAGAIN in case of an error.
  */
-static int convert_type86_ep11_xcrb(struct zcrypt_queue *zq,
+static int convert_type86_ep11_xcrb(bool userspace, struct zcrypt_queue *zq,
 				    struct ap_message *reply,
 				    struct ep11_urb *xcRB)
 {
-	struct type86_fmt2_msg *msg = reply->message;
-	char *data = reply->message;
+	struct type86_fmt2_msg *msg = reply->msg;
+	char *data = reply->msg;
 
 	if (xcRB->resp_len < msg->fmt2.count1)
 		return -EINVAL;
 
 	/* Copy response CPRB to user */
-	if (copy_to_user((char __force __user *)xcRB->resp,
-			 data + msg->fmt2.offset1, msg->fmt2.count1))
+	if (z_copy_to_user(userspace, (char __force __user *)xcRB->resp,
+			   data + msg->fmt2.offset1, msg->fmt2.count1))
 		return -EFAULT;
 	xcRB->resp_len = msg->fmt2.count1;
 	return 0;
@@ -766,8 +773,8 @@
 		struct type86_hdr hdr;
 		struct type86_fmt2_ext fmt2;
 		struct CPRBX cprbx;
-	} __packed * msg = reply->message;
-	char *data = reply->message;
+	} __packed * msg = reply->msg;
+	char *data = reply->msg;
 
 	if (msg->cprbx.ccp_rtcode != 0 || msg->cprbx.ccp_rscode != 0)
 		return -EINVAL;
@@ -780,7 +787,7 @@
 			    char __user *outputdata,
 			    unsigned int outputdatalength)
 {
-	struct type86x_reply *msg = reply->message;
+	struct type86x_reply *msg = reply->msg;
 
 	switch (msg->hdr.type) {
 	case TYPE82_RSP_CODE:
@@ -801,26 +808,27 @@
 		if (msg->cprbx.cprb_ver_id == 0x02)
 			return convert_type86_ica(zq, reply,
 						  outputdata, outputdatalength);
-		/* fall through - wrong cprb version is an unknown response */
-	default: /* Unknown response type, this should NEVER EVER happen */
+		fallthrough;	/* wrong cprb version is an unknown response */
+	default:
+		/* Unknown response type, this should NEVER EVER happen */
 		zq->online = 0;
-		pr_err("Cryptographic device %02x.%04x failed and was set offline\n",
+		pr_err("Crypto dev=%02x.%04x unknown response type 0x%02x => online=0 rc=EAGAIN\n",
 		       AP_QID_CARD(zq->queue->qid),
-		       AP_QID_QUEUE(zq->queue->qid));
-		ZCRYPT_DBF(DBF_ERR,
-			   "device=%02x.%04x rtype=0x%02x => online=0 rc=EAGAIN\n",
-			   AP_QID_CARD(zq->queue->qid),
-			   AP_QID_QUEUE(zq->queue->qid),
-			   (int) msg->hdr.type);
-		return -EAGAIN;	/* repeat the request on a different device. */
+		       AP_QID_QUEUE(zq->queue->qid),
+		       (int) msg->hdr.type);
+		ZCRYPT_DBF_ERR("dev=%02x.%04x unknown response type 0x%02x => online=0 rc=EAGAIN\n",
+			       AP_QID_CARD(zq->queue->qid),
+			       AP_QID_QUEUE(zq->queue->qid),
+			       (int) msg->hdr.type);
+		return -EAGAIN;
 	}
 }
 
-static int convert_response_xcrb(struct zcrypt_queue *zq,
-			    struct ap_message *reply,
-			    struct ica_xcRB *xcRB)
+static int convert_response_xcrb(bool userspace, struct zcrypt_queue *zq,
+				 struct ap_message *reply,
+				 struct ica_xcRB *xcRB)
 {
-	struct type86x_reply *msg = reply->message;
+	struct type86x_reply *msg = reply->msg;
 
 	switch (msg->hdr.type) {
 	case TYPE82_RSP_CODE:
@@ -833,27 +841,27 @@
 			return convert_error(zq, reply);
 		}
 		if (msg->cprbx.cprb_ver_id == 0x02)
-			return convert_type86_xcrb(zq, reply, xcRB);
-		/* fall through - wrong cprb version is an unknown response */
+			return convert_type86_xcrb(userspace, zq, reply, xcRB);
+		fallthrough;	/* wrong cprb version is an unknown response */
 	default: /* Unknown response type, this should NEVER EVER happen */
 		xcRB->status = 0x0008044DL; /* HDD_InvalidParm */
 		zq->online = 0;
-		pr_err("Cryptographic device %02x.%04x failed and was set offline\n",
+		pr_err("Crypto dev=%02x.%04x unknown response type 0x%02x => online=0 rc=EAGAIN\n",
 		       AP_QID_CARD(zq->queue->qid),
-		       AP_QID_QUEUE(zq->queue->qid));
-		ZCRYPT_DBF(DBF_ERR,
-			   "device=%02x.%04x rtype=0x%02x => online=0 rc=EAGAIN\n",
-			   AP_QID_CARD(zq->queue->qid),
-			   AP_QID_QUEUE(zq->queue->qid),
-			   (int) msg->hdr.type);
-		return -EAGAIN;	/* repeat the request on a different device. */
+		       AP_QID_QUEUE(zq->queue->qid),
+		       (int) msg->hdr.type);
+		ZCRYPT_DBF_ERR("dev=%02x.%04x unknown response type 0x%02x => online=0 rc=EAGAIN\n",
+			       AP_QID_CARD(zq->queue->qid),
+			       AP_QID_QUEUE(zq->queue->qid),
+			       (int) msg->hdr.type);
+		return -EAGAIN;
 	}
 }
 
-static int convert_response_ep11_xcrb(struct zcrypt_queue *zq,
-	struct ap_message *reply, struct ep11_urb *xcRB)
+static int convert_response_ep11_xcrb(bool userspace, struct zcrypt_queue *zq,
+				      struct ap_message *reply, struct ep11_urb *xcRB)
 {
-	struct type86_ep11_reply *msg = reply->message;
+	struct type86_ep11_reply *msg = reply->msg;
 
 	switch (msg->hdr.type) {
 	case TYPE82_RSP_CODE:
@@ -863,19 +871,19 @@
 		if (msg->hdr.reply_code)
 			return convert_error(zq, reply);
 		if (msg->cprbx.cprb_ver_id == 0x04)
-			return convert_type86_ep11_xcrb(zq, reply, xcRB);
-		/* fall through - wrong cprb version is an unknown resp */
+			return convert_type86_ep11_xcrb(userspace, zq, reply, xcRB);
+		fallthrough;	/* wrong cprb version is an unknown resp */
 	default: /* Unknown response type, this should NEVER EVER happen */
 		zq->online = 0;
-		pr_err("Cryptographic device %02x.%04x failed and was set offline\n",
+		pr_err("Crypto dev=%02x.%04x unknown response type 0x%02x => online=0 rc=EAGAIN\n",
 		       AP_QID_CARD(zq->queue->qid),
-		       AP_QID_QUEUE(zq->queue->qid));
-		ZCRYPT_DBF(DBF_ERR,
-			   "device=%02x.%04x rtype=0x%02x => online=0 rc=EAGAIN\n",
-			   AP_QID_CARD(zq->queue->qid),
-			   AP_QID_QUEUE(zq->queue->qid),
-			   (int) msg->hdr.type);
-		return -EAGAIN; /* repeat the request on a different device. */
+		       AP_QID_QUEUE(zq->queue->qid),
+		       (int) msg->hdr.type);
+		ZCRYPT_DBF_ERR("dev=%02x.%04x unknown response type 0x%02x => online=0 rc=EAGAIN\n",
+			       AP_QID_CARD(zq->queue->qid),
+			       AP_QID_QUEUE(zq->queue->qid),
+			       (int) msg->hdr.type);
+		return -EAGAIN;
 	}
 }
 
@@ -883,7 +891,7 @@
 				 struct ap_message *reply,
 				 char *data)
 {
-	struct type86x_reply *msg = reply->message;
+	struct type86x_reply *msg = reply->msg;
 
 	switch (msg->hdr.type) {
 	case TYPE82_RSP_CODE:
@@ -894,18 +902,18 @@
 			return -EINVAL;
 		if (msg->cprbx.cprb_ver_id == 0x02)
 			return convert_type86_rng(zq, reply, data);
-		/* fall through - wrong cprb version is an unknown response */
+		fallthrough;	/* wrong cprb version is an unknown response */
 	default: /* Unknown response type, this should NEVER EVER happen */
 		zq->online = 0;
-		pr_err("Cryptographic device %02x.%04x failed and was set offline\n",
+		pr_err("Crypto dev=%02x.%04x unknown response type 0x%02x => online=0 rc=EAGAIN\n",
 		       AP_QID_CARD(zq->queue->qid),
-		       AP_QID_QUEUE(zq->queue->qid));
-		ZCRYPT_DBF(DBF_ERR,
-			   "device=%02x.%04x rtype=0x%02x => online=0 rc=EAGAIN\n",
-			   AP_QID_CARD(zq->queue->qid),
-			   AP_QID_QUEUE(zq->queue->qid),
-			   (int) msg->hdr.type);
-		return -EAGAIN;	/* repeat the request on a different device. */
+		       AP_QID_QUEUE(zq->queue->qid),
+		       (int) msg->hdr.type);
+		ZCRYPT_DBF_ERR("dev=%02x.%04x unknown response type 0x%02x => online=0 rc=EAGAIN\n",
+			       AP_QID_CARD(zq->queue->qid),
+			       AP_QID_QUEUE(zq->queue->qid),
+			       (int) msg->hdr.type);
+		return -EAGAIN;
 	}
 }
 
@@ -928,32 +936,30 @@
 	struct response_type *resp_type =
 		(struct response_type *) msg->private;
 	struct type86x_reply *t86r;
-	int length;
+	int len;
 
 	/* Copy the reply message to the request message buffer. */
 	if (!reply)
 		goto out;	/* ap_msg->rc indicates the error */
-	t86r = reply->message;
+	t86r = reply->msg;
 	if (t86r->hdr.type == TYPE86_RSP_CODE &&
 		 t86r->cprbx.cprb_ver_id == 0x02) {
 		switch (resp_type->type) {
 		case CEXXC_RESPONSE_TYPE_ICA:
-			length = sizeof(struct type86x_reply)
-				+ t86r->length - 2;
-			length = min(CEXXC_MAX_ICA_RESPONSE_SIZE, length);
-			memcpy(msg->message, reply->message, length);
+			len = sizeof(struct type86x_reply) + t86r->length - 2;
+			len = min_t(int, CEXXC_MAX_ICA_RESPONSE_SIZE, len);
+			memcpy(msg->msg, reply->msg, len);
 			break;
 		case CEXXC_RESPONSE_TYPE_XCRB:
-			length = t86r->fmt2.offset2 + t86r->fmt2.count2;
-			length = min(MSGTYPE06_MAX_MSG_SIZE, length);
-			memcpy(msg->message, reply->message, length);
+			len = t86r->fmt2.offset2 + t86r->fmt2.count2;
+			len = min_t(int, MSGTYPE06_MAX_MSG_SIZE, len);
+			memcpy(msg->msg, reply->msg, len);
 			break;
 		default:
-			memcpy(msg->message, &error_reply,
-			       sizeof(error_reply));
+			memcpy(msg->msg, &error_reply, sizeof(error_reply));
 		}
 	} else
-		memcpy(msg->message, reply->message, sizeof(error_reply));
+		memcpy(msg->msg, reply->msg, sizeof(error_reply));
 out:
 	complete(&(resp_type->work));
 }
@@ -977,25 +983,25 @@
 	struct response_type *resp_type =
 		(struct response_type *)msg->private;
 	struct type86_ep11_reply *t86r;
-	int length;
+	int len;
 
 	/* Copy the reply message to the request message buffer. */
 	if (!reply)
 		goto out;	/* ap_msg->rc indicates the error */
-	t86r = reply->message;
+	t86r = reply->msg;
 	if (t86r->hdr.type == TYPE86_RSP_CODE &&
 	    t86r->cprbx.cprb_ver_id == 0x04) {
 		switch (resp_type->type) {
 		case CEXXC_RESPONSE_TYPE_EP11:
-			length = t86r->fmt2.offset1 + t86r->fmt2.count1;
-			length = min(MSGTYPE06_MAX_MSG_SIZE, length);
-			memcpy(msg->message, reply->message, length);
+			len = t86r->fmt2.offset1 + t86r->fmt2.count1;
+			len = min_t(int, MSGTYPE06_MAX_MSG_SIZE, len);
+			memcpy(msg->msg, reply->msg, len);
 			break;
 		default:
-			memcpy(msg->message, &error_reply, sizeof(error_reply));
+			memcpy(msg->msg, &error_reply, sizeof(error_reply));
 		}
 	} else {
-		memcpy(msg->message, reply->message, sizeof(error_reply));
+		memcpy(msg->msg, reply->msg, sizeof(error_reply));
 	}
 out:
 	complete(&(resp_type->work));
@@ -1011,39 +1017,42 @@
  * @mex: pointer to the modexpo request buffer
  */
 static long zcrypt_msgtype6_modexpo(struct zcrypt_queue *zq,
-				  struct ica_rsa_modexpo *mex)
+				    struct ica_rsa_modexpo *mex,
+				    struct ap_message *ap_msg)
 {
-	struct ap_message ap_msg;
 	struct response_type resp_type = {
 		.type = CEXXC_RESPONSE_TYPE_ICA,
 	};
 	int rc;
 
-	ap_init_message(&ap_msg);
-	ap_msg.message = (void *) get_zeroed_page(GFP_KERNEL);
-	if (!ap_msg.message)
+	ap_msg->msg = (void *) get_zeroed_page(GFP_KERNEL);
+	if (!ap_msg->msg)
 		return -ENOMEM;
-	ap_msg.receive = zcrypt_msgtype6_receive;
-	ap_msg.psmid = (((unsigned long long) current->pid) << 32) +
-				atomic_inc_return(&zcrypt_step);
-	ap_msg.private = &resp_type;
-	rc = ICAMEX_msg_to_type6MEX_msgX(zq, &ap_msg, mex);
+	ap_msg->receive = zcrypt_msgtype6_receive;
+	ap_msg->psmid = (((unsigned long long) current->pid) << 32) +
+		atomic_inc_return(&zcrypt_step);
+	ap_msg->private = &resp_type;
+	rc = ICAMEX_msg_to_type6MEX_msgX(zq, ap_msg, mex);
 	if (rc)
 		goto out_free;
 	init_completion(&resp_type.work);
-	ap_queue_message(zq->queue, &ap_msg);
+	rc = ap_queue_message(zq->queue, ap_msg);
+	if (rc)
+		goto out_free;
 	rc = wait_for_completion_interruptible(&resp_type.work);
 	if (rc == 0) {
-		rc = ap_msg.rc;
+		rc = ap_msg->rc;
 		if (rc == 0)
-			rc = convert_response_ica(zq, &ap_msg,
+			rc = convert_response_ica(zq, ap_msg,
 						  mex->outputdata,
 						  mex->outputdatalength);
 	} else
 		/* Signal pending. */
-		ap_cancel_message(zq->queue, &ap_msg);
+		ap_cancel_message(zq->queue, ap_msg);
 out_free:
-	free_page((unsigned long) ap_msg.message);
+	free_page((unsigned long) ap_msg->msg);
+	ap_msg->private = NULL;
+	ap_msg->msg = NULL;
 	return rc;
 }
 
@@ -1055,40 +1064,43 @@
  * @crt: pointer to the modexpoc_crt request buffer
  */
 static long zcrypt_msgtype6_modexpo_crt(struct zcrypt_queue *zq,
-				      struct ica_rsa_modexpo_crt *crt)
+					struct ica_rsa_modexpo_crt *crt,
+					struct ap_message *ap_msg)
 {
-	struct ap_message ap_msg;
 	struct response_type resp_type = {
 		.type = CEXXC_RESPONSE_TYPE_ICA,
 	};
 	int rc;
 
-	ap_init_message(&ap_msg);
-	ap_msg.message = (void *) get_zeroed_page(GFP_KERNEL);
-	if (!ap_msg.message)
+	ap_msg->msg = (void *) get_zeroed_page(GFP_KERNEL);
+	if (!ap_msg->msg)
 		return -ENOMEM;
-	ap_msg.receive = zcrypt_msgtype6_receive;
-	ap_msg.psmid = (((unsigned long long) current->pid) << 32) +
-				atomic_inc_return(&zcrypt_step);
-	ap_msg.private = &resp_type;
-	rc = ICACRT_msg_to_type6CRT_msgX(zq, &ap_msg, crt);
+	ap_msg->receive = zcrypt_msgtype6_receive;
+	ap_msg->psmid = (((unsigned long long) current->pid) << 32) +
+		atomic_inc_return(&zcrypt_step);
+	ap_msg->private = &resp_type;
+	rc = ICACRT_msg_to_type6CRT_msgX(zq, ap_msg, crt);
 	if (rc)
 		goto out_free;
 	init_completion(&resp_type.work);
-	ap_queue_message(zq->queue, &ap_msg);
+	rc = ap_queue_message(zq->queue, ap_msg);
+	if (rc)
+		goto out_free;
 	rc = wait_for_completion_interruptible(&resp_type.work);
 	if (rc == 0) {
-		rc = ap_msg.rc;
+		rc = ap_msg->rc;
 		if (rc == 0)
-			rc = convert_response_ica(zq, &ap_msg,
+			rc = convert_response_ica(zq, ap_msg,
 						  crt->outputdata,
 						  crt->outputdatalength);
 	} else {
 		/* Signal pending. */
-		ap_cancel_message(zq->queue, &ap_msg);
+		ap_cancel_message(zq->queue, ap_msg);
 	}
 out_free:
-	free_page((unsigned long) ap_msg.message);
+	free_page((unsigned long) ap_msg->msg);
+	ap_msg->private = NULL;
+	ap_msg->msg = NULL;
 	return rc;
 }
 
@@ -1099,16 +1111,16 @@
  * by the caller with ap_init_message(). Also the caller has to
  * make sure ap_release_message() is always called even on failure.
  */
-unsigned int get_cprb_fc(struct ica_xcRB *xcRB,
-				struct ap_message *ap_msg,
-				unsigned int *func_code, unsigned short **dom)
+unsigned int get_cprb_fc(bool userspace, struct ica_xcRB *xcRB,
+			 struct ap_message *ap_msg,
+			 unsigned int *func_code, unsigned short **dom)
 {
 	struct response_type resp_type = {
 		.type = CEXXC_RESPONSE_TYPE_XCRB,
 	};
 
-	ap_msg->message = kmalloc(MSGTYPE06_MAX_MSG_SIZE, GFP_KERNEL);
-	if (!ap_msg->message)
+	ap_msg->msg = kmalloc(MSGTYPE06_MAX_MSG_SIZE, GFP_KERNEL);
+	if (!ap_msg->msg)
 		return -ENOMEM;
 	ap_msg->receive = zcrypt_msgtype6_receive;
 	ap_msg->psmid = (((unsigned long long) current->pid) << 32) +
@@ -1116,7 +1128,7 @@
 	ap_msg->private = kmemdup(&resp_type, sizeof(resp_type), GFP_KERNEL);
 	if (!ap_msg->private)
 		return -ENOMEM;
-	return XCRB_msg_to_type6CPRB_msgX(ap_msg, xcRB, func_code, dom);
+	return XCRB_msg_to_type6CPRB_msgX(userspace, ap_msg, xcRB, func_code, dom);
 }
 
 /**
@@ -1126,24 +1138,26 @@
  *	CEXxC device to the request distributor
  * @xcRB: pointer to the send_cprb request buffer
  */
-static long zcrypt_msgtype6_send_cprb(struct zcrypt_queue *zq,
-				    struct ica_xcRB *xcRB,
-				    struct ap_message *ap_msg)
+static long zcrypt_msgtype6_send_cprb(bool userspace, struct zcrypt_queue *zq,
+				      struct ica_xcRB *xcRB,
+				      struct ap_message *ap_msg)
 {
 	int rc;
 	struct response_type *rtype = (struct response_type *)(ap_msg->private);
 
 	init_completion(&rtype->work);
-	ap_queue_message(zq->queue, ap_msg);
+	rc = ap_queue_message(zq->queue, ap_msg);
+	if (rc)
+		goto out;
 	rc = wait_for_completion_interruptible(&rtype->work);
 	if (rc == 0) {
 		rc = ap_msg->rc;
 		if (rc == 0)
-			rc = convert_response_xcrb(zq, ap_msg, xcRB);
+			rc = convert_response_xcrb(userspace, zq, ap_msg, xcRB);
 	} else
 		/* Signal pending. */
 		ap_cancel_message(zq->queue, ap_msg);
-
+out:
 	return rc;
 }
 
@@ -1154,16 +1168,16 @@
  * by the caller with ap_init_message(). Also the caller has to
  * make sure ap_release_message() is always called even on failure.
  */
-unsigned int get_ep11cprb_fc(struct ep11_urb *xcrb,
-				    struct ap_message *ap_msg,
-				    unsigned int *func_code)
+unsigned int get_ep11cprb_fc(bool userspace, struct ep11_urb *xcrb,
+			     struct ap_message *ap_msg,
+			     unsigned int *func_code)
 {
 	struct response_type resp_type = {
 		.type = CEXXC_RESPONSE_TYPE_EP11,
 	};
 
-	ap_msg->message = kmalloc(MSGTYPE06_MAX_MSG_SIZE, GFP_KERNEL);
-	if (!ap_msg->message)
+	ap_msg->msg = kmalloc(MSGTYPE06_MAX_MSG_SIZE, GFP_KERNEL);
+	if (!ap_msg->msg)
 		return -ENOMEM;
 	ap_msg->receive = zcrypt_msgtype6_receive_ep11;
 	ap_msg->psmid = (((unsigned long long) current->pid) << 32) +
@@ -1171,7 +1185,7 @@
 	ap_msg->private = kmemdup(&resp_type, sizeof(resp_type), GFP_KERNEL);
 	if (!ap_msg->private)
 		return -ENOMEM;
-	return xcrb_msg_to_type6_ep11cprb_msgx(ap_msg, xcrb, func_code);
+	return xcrb_msg_to_type6_ep11cprb_msgx(userspace, ap_msg, xcrb, func_code);
 }
 
 /**
@@ -1181,7 +1195,7 @@
  *	  CEX4P device to the request distributor
  * @xcRB: pointer to the ep11 user request block
  */
-static long zcrypt_msgtype6_send_ep11_cprb(struct zcrypt_queue *zq,
+static long zcrypt_msgtype6_send_ep11_cprb(bool userspace, struct zcrypt_queue *zq,
 					   struct ep11_urb *xcrb,
 					   struct ap_message *ap_msg)
 {
@@ -1193,7 +1207,7 @@
 		struct ep11_cprb cprbx;
 		unsigned char	pld_tag;	/* fixed value 0x30 */
 		unsigned char	pld_lenfmt;	/* payload length format */
-	} __packed * msg = ap_msg->message;
+	} __packed * msg = ap_msg->msg;
 	struct pld_hdr {
 		unsigned char	func_tag;	/* fixed value 0x4 */
 		unsigned char	func_len;	/* fixed value 0x4 */
@@ -1236,16 +1250,18 @@
 	}
 
 	init_completion(&rtype->work);
-	ap_queue_message(zq->queue, ap_msg);
+	rc = ap_queue_message(zq->queue, ap_msg);
+	if (rc)
+		goto out;
 	rc = wait_for_completion_interruptible(&rtype->work);
 	if (rc == 0) {
 		rc = ap_msg->rc;
 		if (rc == 0)
-			rc = convert_response_ep11_xcrb(zq, ap_msg, xcrb);
+			rc = convert_response_ep11_xcrb(userspace, zq, ap_msg, xcrb);
 	} else
 		/* Signal pending. */
 		ap_cancel_message(zq->queue, ap_msg);
-
+out:
 	return rc;
 }
 
@@ -1256,8 +1272,8 @@
 		.type = CEXXC_RESPONSE_TYPE_XCRB,
 	};
 
-	ap_msg->message = kmalloc(MSGTYPE06_MAX_MSG_SIZE, GFP_KERNEL);
-	if (!ap_msg->message)
+	ap_msg->msg = kmalloc(MSGTYPE06_MAX_MSG_SIZE, GFP_KERNEL);
+	if (!ap_msg->msg)
 		return -ENOMEM;
 	ap_msg->receive = zcrypt_msgtype6_receive;
 	ap_msg->psmid = (((unsigned long long) current->pid) << 32) +
@@ -1290,14 +1306,16 @@
 		char rule[8];
 		short int verb_length;
 		short int key_length;
-	} __packed * msg = ap_msg->message;
+	} __packed * msg = ap_msg->msg;
 	struct response_type *rtype = (struct response_type *)(ap_msg->private);
 	int rc;
 
 	msg->cprbx.domain = AP_QID_QUEUE(zq->queue->qid);
 
 	init_completion(&rtype->work);
-	ap_queue_message(zq->queue, ap_msg);
+	rc = ap_queue_message(zq->queue, ap_msg);
+	if (rc)
+		goto out;
 	rc = wait_for_completion_interruptible(&rtype->work);
 	if (rc == 0) {
 		rc = ap_msg->rc;
@@ -1306,7 +1324,7 @@
 	} else
 		/* Signal pending. */
 		ap_cancel_message(zq->queue, ap_msg);
-
+out:
 	return rc;
 }
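The new bool userspace parameter threads down to
z_copy_from_user()/z_copy_to_user(), so the same msgtype code can serve
user-space ioctl buffers as well as in-kernel callers. A sketch of the likely
shape of one of these helpers (the authoritative definitions live in
zcrypt_api.h elsewhere in this update):

	static inline unsigned long z_copy_from_user(bool userspace,
						     void *to,
						     const void __user *from,
						     unsigned long n)
	{
		if (likely(userspace))
			return copy_from_user(to, from, n);
		memcpy(to, (void __force *)from, n);
		return 0;
	}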
 
diff --git a/drivers/s390/crypto/zcrypt_msgtype6.h b/drivers/s390/crypto/zcrypt_msgtype6.h
index 41a0df5..0a0bf07 100644
--- a/drivers/s390/crypto/zcrypt_msgtype6.h
+++ b/drivers/s390/crypto/zcrypt_msgtype6.h
@@ -96,9 +96,9 @@
 	unsigned int	  offset4;	/* 0x00000000			*/
 } __packed;
 
-unsigned int get_cprb_fc(struct ica_xcRB *, struct ap_message *,
+unsigned int get_cprb_fc(bool userspace, struct ica_xcRB *, struct ap_message *,
 			 unsigned int *, unsigned short **);
-unsigned int get_ep11cprb_fc(struct ep11_urb *, struct ap_message *,
+unsigned int get_ep11cprb_fc(bool userspace, struct ep11_urb *, struct ap_message *,
 			     unsigned int *);
 unsigned int get_rng_fc(struct ap_message *, int *, unsigned int *);
 
@@ -127,7 +127,7 @@
 		char rule[8];
 		short int verb_length;
 		short int key_length;
-	} __packed * msg = ap_msg->message;
+	} __packed * msg = ap_msg->msg;
 	static struct type6_hdr static_type6_hdrX = {
 		.type		= 0x06,
 		.offset1	= 0x00000058,
@@ -154,7 +154,7 @@
 	memcpy(msg->rule, "RANDOM  ", 8);
 	msg->verb_length = 0x02;
 	msg->key_length = 0x02;
-	ap_msg->length = sizeof(*msg);
+	ap_msg->len = sizeof(*msg);
 	*domain = (unsigned short)msg->cprbx.domain;
 }
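rng_type6CPRB_msgX() only fills in the static "RANDOM" rule CPRB; buffer
allocation and transport stay with the caller. A sketch of how the pieces
combine on the RNG fetch path (illustrative; only the get_rng_fc() prototype
above is taken as given):

	struct ap_message ap_msg;
	unsigned int domain;
	int func_code, rc;

	ap_init_message(&ap_msg);
	rc = get_rng_fc(&ap_msg, &func_code, &domain); /* allocs ap_msg.msg */
	if (rc == 0) {
		/* ... route by func_code, send, collect 8 bytes entropy ... */
	}
	ap_release_message(&ap_msg);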
 
diff --git a/drivers/s390/crypto/zcrypt_queue.c b/drivers/s390/crypto/zcrypt_queue.c
index 522c4bc..c3ffbd2 100644
--- a/drivers/s390/crypto/zcrypt_queue.c
+++ b/drivers/s390/crypto/zcrypt_queue.c
@@ -40,22 +40,27 @@
 			   struct device_attribute *attr,
 			   char *buf)
 {
-	struct zcrypt_queue *zq = to_ap_queue(dev)->private;
+	struct ap_queue *aq = to_ap_queue(dev);
+	struct zcrypt_queue *zq = aq->private;
+	int online = aq->config && zq->online ? 1 : 0;
 
-	return snprintf(buf, PAGE_SIZE, "%d\n", zq->online);
+	return scnprintf(buf, PAGE_SIZE, "%d\n", online);
 }
 
 static ssize_t online_store(struct device *dev,
 			    struct device_attribute *attr,
 			    const char *buf, size_t count)
 {
-	struct zcrypt_queue *zq = to_ap_queue(dev)->private;
+	struct ap_queue *aq = to_ap_queue(dev);
+	struct zcrypt_queue *zq = aq->private;
 	struct zcrypt_card *zc = zq->zcard;
 	int online;
 
 	if (sscanf(buf, "%d\n", &online) != 1 || online < 0 || online > 1)
 		return -EINVAL;
 
+	if (online && (!aq->config || !aq->card->config))
+		return -ENODEV;
 	if (online && !zc->online)
 		return -EINVAL;
 	zq->online = online;
@@ -78,7 +83,7 @@
 {
 	struct zcrypt_queue *zq = to_ap_queue(dev)->private;
 
-	return snprintf(buf, PAGE_SIZE, "%d\n", atomic_read(&zq->load));
+	return scnprintf(buf, PAGE_SIZE, "%d\n", atomic_read(&zq->load));
 }
 
 static DEVICE_ATTR_RO(load);
@@ -107,10 +112,10 @@
 	zq = kzalloc(sizeof(struct zcrypt_queue), GFP_KERNEL);
 	if (!zq)
 		return NULL;
-	zq->reply.message = kmalloc(max_response_size, GFP_KERNEL);
-	if (!zq->reply.message)
+	zq->reply.msg = kmalloc(max_response_size, GFP_KERNEL);
+	if (!zq->reply.msg)
 		goto out_free;
-	zq->reply.length = max_response_size;
+	zq->reply.len = max_response_size;
 	INIT_LIST_HEAD(&zq->list);
 	kref_init(&zq->refcount);
 	return zq;
@@ -123,7 +128,7 @@
 
 void zcrypt_queue_free(struct zcrypt_queue *zq)
 {
-	kfree(zq->reply.message);
+	kfree(zq->reply.msg);
 	kfree(zq);
 }
 EXPORT_SYMBOL(zcrypt_queue_free);
@@ -175,7 +180,6 @@
 				&zcrypt_queue_attr_group);
 	if (rc)
 		goto out;
-	get_device(&zq->queue->ap_dev.device);
 
 	if (zq->ops->rng) {
 		rc = zcrypt_rng_device_add();
@@ -187,7 +191,6 @@
 out_unregister:
 	sysfs_remove_group(&zq->queue->ap_dev.device.kobj,
 			   &zcrypt_queue_attr_group);
-	put_device(&zq->queue->ap_dev.device);
 out:
 	spin_lock(&zcrypt_list_lock);
 	list_del_init(&zq->list);
@@ -215,12 +218,11 @@
 	list_del_init(&zq->list);
 	zcrypt_device_count--;
 	spin_unlock(&zcrypt_list_lock);
-	zcrypt_card_put(zc);
 	if (zq->ops->rng)
 		zcrypt_rng_device_remove();
 	sysfs_remove_group(&zq->queue->ap_dev.device.kobj,
 			   &zcrypt_queue_attr_group);
-	put_device(&zq->queue->ap_dev.device);
+	zcrypt_card_put(zc);
 	zcrypt_queue_put(zq);
 }
 EXPORT_SYMBOL(zcrypt_queue_unregister);
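The switch from snprintf() to scnprintf() in the show routines above is
deliberate: snprintf() returns the length the output would have needed, while
scnprintf() returns the bytes actually written, which is what a sysfs show
function must report. A tiny illustration with hypothetical values:

	char buf[4];
	int a, b;

	a = snprintf(buf, sizeof(buf), "%s", "12345");	/* a == 5 */
	b = scnprintf(buf, sizeof(buf), "%s", "12345");	/* b == 3 */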
diff --git a/drivers/s390/net/Kconfig b/drivers/s390/net/Kconfig
index ced896d..bf236d4 100644
--- a/drivers/s390/net/Kconfig
+++ b/drivers/s390/net/Kconfig
@@ -63,12 +63,9 @@
 	prompt "Gigabit Ethernet device support"
 	depends on CCW && NETDEVICES && IP_MULTICAST && QDIO && ETHERNET
 	help
-	  This driver supports the IBM System z OSA Express adapters
-	  in QDIO mode (all media types), HiperSockets interfaces and z/VM
-	  virtual NICs for Guest LAN and VSWITCH.
-	
-	  For details please refer to the documentation provided by IBM at
-	  <http://www.ibm.com/developerworks/linux/linux390>
+	  This driver supports IBM's OSA Express network adapters in QDIO mode,
+	  HiperSockets interfaces and z/VM virtual NICs for Guest LAN and
+	  VSWITCH.
 
 	  To compile this driver as a module, choose M.
 	  The module name is qeth.
@@ -91,9 +88,26 @@
 	  To compile as a module choose M. The module name is qeth_l3.
 	  If unsure, choose Y.
 
+config QETH_OSN
+	def_bool !HAVE_MARCH_Z14_FEATURES
+	prompt "qeth OSN device support"
+	depends on QETH
+	help
+	  This enables the qeth driver to support devices in OSN mode.
+	  This feature will be removed in 2021.
+	  If unsure, choose N.
+
+config QETH_OSX
+	def_bool !HAVE_MARCH_Z15_FEATURES
+	prompt "qeth OSX device support"
+	depends on QETH
+	help
+	  This enables the qeth driver to support devices in OSX mode.
+	  If unsure, choose N.
+
 config CCWGROUP
 	tristate
-	default (LCS || CTCM || QETH)
+	default (LCS || CTCM || QETH || SMC)
 
 config ISM
 	tristate "Support for ISM vPCI Adapter"
diff --git a/drivers/s390/net/ctcm_fsms.c b/drivers/s390/net/ctcm_fsms.c
index 3ce99e4..661d2a4 100644
--- a/drivers/s390/net/ctcm_fsms.c
+++ b/drivers/s390/net/ctcm_fsms.c
@@ -1695,7 +1695,7 @@
 			grp->changed_side = 2;
 			break;
 		}
-		/* Else, fall through */
+		fallthrough;
 	case MPCG_STATE_XID0IOWAIX:
 	case MPCG_STATE_XID7INITW:
 	case MPCG_STATE_XID7INITX:
diff --git a/drivers/s390/net/ctcm_fsms.h b/drivers/s390/net/ctcm_fsms.h
index 2257372..d98c486 100644
--- a/drivers/s390/net/ctcm_fsms.h
+++ b/drivers/s390/net/ctcm_fsms.h
@@ -159,7 +159,6 @@
 
 void ctcm_ccw_check_rc(struct channel *ch, int rc, char *msg);
 void ctcm_purge_skb_queue(struct sk_buff_head *q);
-void fsm_action_nop(fsm_instance *fi, int event, void *arg);
 
 /*
  * ----- non-static actions for ctcm channel statemachine -----
diff --git a/drivers/s390/net/ctcm_main.c b/drivers/s390/net/ctcm_main.c
index 437a6d8..d06809e 100644
--- a/drivers/s390/net/ctcm_main.c
+++ b/drivers/s390/net/ctcm_main.c
@@ -1698,43 +1698,6 @@
 	put_device(&cgdev->dev);
 }
 
-static int ctcm_pm_suspend(struct ccwgroup_device *gdev)
-{
-	struct ctcm_priv *priv = dev_get_drvdata(&gdev->dev);
-
-	if (gdev->state == CCWGROUP_OFFLINE)
-		return 0;
-	netif_device_detach(priv->channel[CTCM_READ]->netdev);
-	ctcm_close(priv->channel[CTCM_READ]->netdev);
-	if (!wait_event_timeout(priv->fsm->wait_q,
-	    fsm_getstate(priv->fsm) == DEV_STATE_STOPPED, CTCM_TIME_5_SEC)) {
-		netif_device_attach(priv->channel[CTCM_READ]->netdev);
-		return -EBUSY;
-	}
-	ccw_device_set_offline(gdev->cdev[1]);
-	ccw_device_set_offline(gdev->cdev[0]);
-	return 0;
-}
-
-static int ctcm_pm_resume(struct ccwgroup_device *gdev)
-{
-	struct ctcm_priv *priv = dev_get_drvdata(&gdev->dev);
-	int rc;
-
-	if (gdev->state == CCWGROUP_OFFLINE)
-		return 0;
-	rc = ccw_device_set_online(gdev->cdev[1]);
-	if (rc)
-		goto err_out;
-	rc = ccw_device_set_online(gdev->cdev[0]);
-	if (rc)
-		goto err_out;
-	ctcm_open(priv->channel[CTCM_READ]->netdev);
-err_out:
-	netif_device_attach(priv->channel[CTCM_READ]->netdev);
-	return rc;
-}
-
 static struct ccw_device_id ctcm_ids[] = {
 	{CCW_DEVICE(0x3088, 0x08), .driver_info = ctcm_channel_type_parallel},
 	{CCW_DEVICE(0x3088, 0x1e), .driver_info = ctcm_channel_type_ficon},
@@ -1764,9 +1727,6 @@
 	.remove      = ctcm_remove_device,
 	.set_online  = ctcm_new_device,
 	.set_offline = ctcm_shutdown_device,
-	.freeze	     = ctcm_pm_suspend,
-	.thaw	     = ctcm_pm_resume,
-	.restore     = ctcm_pm_resume,
 };
 
 static ssize_t group_store(struct device_driver *ddrv, const char *buf,
diff --git a/drivers/s390/net/ctcm_mpc.c b/drivers/s390/net/ctcm_mpc.c
index ab316ba..85a1a45 100644
--- a/drivers/s390/net/ctcm_mpc.c
+++ b/drivers/s390/net/ctcm_mpc.c
@@ -357,7 +357,7 @@
 		/*fsm_newstate(grp->fsm, MPCG_STATE_XID2INITW);*/
 		if (callback)
 			grp->send_qllc_disc = 1;
-		/* Else, fall through */
+		fallthrough;
 	case MPCG_STATE_XID0IOWAIT:
 		fsm_deltimer(&grp->timer);
 		grp->outstanding_xid2 = 0;
@@ -1470,7 +1470,7 @@
 		if ((fsm_getstate(rch->fsm) == CH_XID0_PENDING) &&
 		   (fsm_getstate(wch->fsm) == CH_XID0_PENDING))
 			break;
-		/* Else, fall through */
+		fallthrough;
 	default:
 		fsm_event(grp->fsm, MPCG_EVENT_INOP, dev);
 	}
@@ -2089,7 +2089,7 @@
 			grp->estconnfunc = NULL;
 			break;
 		}
-		/* Else, fall through */
+		fallthrough;
 	case MPCG_STATE_FLOWC:
 	case MPCG_STATE_READY:
 		grp->send_qllc_disc = 2;
diff --git a/drivers/s390/net/ctcm_mpc.h b/drivers/s390/net/ctcm_mpc.h
index 441d7b2..da41b26 100644
--- a/drivers/s390/net/ctcm_mpc.h
+++ b/drivers/s390/net/ctcm_mpc.h
@@ -228,7 +228,6 @@
 		ctcmpc_dumpit(buf, 32);
 }
 
-int ctcmpc_open(struct net_device *);
 void ctcm_ccw_check_rc(struct channel *, int, char *);
 void mpc_group_ready(unsigned long adev);
 void mpc_channel_action(struct channel *ch, int direction, int action);
diff --git a/drivers/s390/net/ism.h b/drivers/s390/net/ism.h
index 66eac2b..38fe90c 100644
--- a/drivers/s390/net/ism.h
+++ b/drivers/s390/net/ism.h
@@ -16,6 +16,7 @@
 #define ISM_DMB_WORD_OFFSET	1
 #define ISM_DMB_BIT_OFFSET	(ISM_DMB_WORD_OFFSET * 32)
 #define ISM_NR_DMBS		1920
+#define ISM_IDENT_MASK		0x00FFFF
 
 #define ISM_REG_SBA	0x1
 #define ISM_REG_IEQ	0x2
@@ -32,8 +33,6 @@
 #define ISM_UNREG_SBA	0x11
 #define ISM_UNREG_IEQ	0x12
 
-#define ISM_ERROR	0xFFFF
-
 struct ism_req_hdr {
 	u32 cmd;
 	u16 : 16;
@@ -208,6 +207,12 @@
 #define ISM_CREATE_REQ(dmb, idx, sf, offset)		\
 	((dmb) | (idx) << 24 | (sf) << 23 | (offset))
 
+struct ism_systemeid {
+	u8	seid_string[24];
+	u8	serial_number[4];
+	u8	type[4];
+};
+
 static inline void __ism_read_cmd(struct ism_dev *ism, void *data,
 				  unsigned long offset, unsigned long len)
 {
diff --git a/drivers/s390/net/ism_drv.c b/drivers/s390/net/ism_drv.c
index e615dc2..26cc943 100644
--- a/drivers/s390/net/ism_drv.c
+++ b/drivers/s390/net/ism_drv.c
@@ -13,6 +13,8 @@
 #include <linux/device.h>
 #include <linux/pci.h>
 #include <linux/err.h>
+#include <linux/ctype.h>
+#include <linux/processor.h>
 #include <net/smc.h>
 
 #include <asm/debug.h>
@@ -231,7 +233,7 @@
 		bit = find_next_zero_bit(ism->sba_bitmap, ISM_NR_DMBS,
 					 ISM_DMB_BIT_OFFSET);
 		if (bit == ISM_NR_DMBS)
-			return -ENOMEM;
+			return -ENOSPC;
 
 		dmb->sba_idx = bit;
 	}
@@ -387,6 +389,42 @@
 	return 0;
 }
 
+static struct ism_systemeid SYSTEM_EID = {
+	.seid_string = "IBM-SYSZ-ISMSEID00000000",
+	.serial_number = "0000",
+	.type = "0000",
+};
+
+static void ism_create_system_eid(void)
+{
+	struct cpuid id;
+	u16 ident_tail;
+	char tmp[5];
+
+	get_cpu_id(&id);
+	ident_tail = (u16)(id.ident & ISM_IDENT_MASK);
+	snprintf(tmp, 5, "%04X", ident_tail);
+	memcpy(&SYSTEM_EID.serial_number, tmp, 4);
+	snprintf(tmp, 5, "%04X", id.machine);
+	memcpy(&SYSTEM_EID.type, tmp, 4);
+}
+
+static void ism_get_system_eid(struct smcd_dev *smcd, u8 **eid)
+{
+	*eid = &SYSTEM_EID.seid_string[0];
+}
+
+static u16 ism_get_chid(struct smcd_dev *smcd)
+{
+	struct ism_dev *ismdev;
+
+	ismdev = (struct ism_dev *)smcd->priv;
+	if (!ismdev || !ismdev->pdev)
+		return 0;
+
+	return to_zpci(ismdev->pdev)->pchid;
+}
+
 static void ism_handle_event(struct ism_dev *ism)
 {
 	struct smcd_event *entry;
@@ -443,6 +481,8 @@
 	.reset_vlan_required = ism_reset_vlan_required,
 	.signal_event = ism_signal_ieq,
 	.move_data = ism_move,
+	.get_system_eid = ism_get_system_eid,
+	.get_chid = ism_get_chid,
 };
 
 static int ism_dev_init(struct ism_dev *ism)
@@ -471,6 +511,10 @@
 	if (ret)
 		goto unreg_ieq;
 
+	if (!ism_add_vlan_id(ism->smcd, ISM_RESERVED_VLANID))
+		/* hardware is V2 capable */
+		ism_create_system_eid();
+
 	ret = smcd_register_dev(ism->smcd);
 	if (ret)
 		goto unreg_ieq;
@@ -550,6 +594,9 @@
 	struct pci_dev *pdev = ism->pdev;
 
 	smcd_unregister_dev(ism->smcd);
+	if (SYSTEM_EID.serial_number[0] != '0' ||
+	    SYSTEM_EID.type[0] != '0')
+		ism_del_vlan_id(ism->smcd, ISM_RESERVED_VLANID);
 	unregister_ieq(ism);
 	unregister_sba(ism);
 	free_irq(pci_irq_vector(pdev, 0), ism);
@@ -569,31 +616,11 @@
 	kfree(ism);
 }
 
-static int ism_suspend(struct device *dev)
-{
-	struct ism_dev *ism = dev_get_drvdata(dev);
-
-	ism_dev_exit(ism);
-	return 0;
-}
-
-static int ism_resume(struct device *dev)
-{
-	struct ism_dev *ism = dev_get_drvdata(dev);
-
-	return ism_dev_init(ism);
-}
-
-static SIMPLE_DEV_PM_OPS(ism_pm_ops, ism_suspend, ism_resume);
-
 static struct pci_driver ism_driver = {
 	.name	  = DRV_NAME,
 	.id_table = ism_device_table,
 	.probe	  = ism_probe,
 	.remove	  = ism_remove,
-	.driver	  = {
-		.pm = &ism_pm_ops,
-	},
 };
 
 static int __init ism_init(void)
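A worked example of the SEID construction above, with hypothetical CPU id
values: serial_number and type directly follow the 24-byte seid_string in
struct ism_systemeid, so the three fields read back-to-back as one 32-byte
identifier:

	char tmp[5];

	/* assume id.ident & ISM_IDENT_MASK == 0x1234, id.machine == 0x8561 */
	snprintf(tmp, 5, "%04X", 0x1234);
	memcpy(&SYSTEM_EID.serial_number, tmp, 4);
	snprintf(tmp, 5, "%04X", 0x8561);
	memcpy(&SYSTEM_EID.type, tmp, 4);
	/* SYSTEM_EID now reads "IBM-SYSZ-ISMSEID00000000" "1234" "8561" */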
diff --git a/drivers/s390/net/lcs.c b/drivers/s390/net/lcs.c
index 8f08b0a..440219b 100644
--- a/drivers/s390/net/lcs.c
+++ b/drivers/s390/net/lcs.c
@@ -2296,60 +2296,6 @@
 	put_device(&ccwgdev->dev);
 }
 
-static int lcs_pm_suspend(struct lcs_card *card)
-{
-	if (card->dev)
-		netif_device_detach(card->dev);
-	lcs_set_allowed_threads(card, 0);
-	lcs_wait_for_threads(card, 0xffffffff);
-	if (card->state != DEV_STATE_DOWN)
-		__lcs_shutdown_device(card->gdev, 1);
-	return 0;
-}
-
-static int lcs_pm_resume(struct lcs_card *card)
-{
-	int rc = 0;
-
-	if (card->state == DEV_STATE_RECOVER)
-		rc = lcs_new_device(card->gdev);
-	if (card->dev)
-		netif_device_attach(card->dev);
-	if (rc) {
-		dev_warn(&card->gdev->dev, "The lcs device driver "
-			"failed to recover the device\n");
-	}
-	return rc;
-}
-
-static int lcs_prepare(struct ccwgroup_device *gdev)
-{
-	return 0;
-}
-
-static void lcs_complete(struct ccwgroup_device *gdev)
-{
-	return;
-}
-
-static int lcs_freeze(struct ccwgroup_device *gdev)
-{
-	struct lcs_card *card = dev_get_drvdata(&gdev->dev);
-	return lcs_pm_suspend(card);
-}
-
-static int lcs_thaw(struct ccwgroup_device *gdev)
-{
-	struct lcs_card *card = dev_get_drvdata(&gdev->dev);
-	return lcs_pm_resume(card);
-}
-
-static int lcs_restore(struct ccwgroup_device *gdev)
-{
-	struct lcs_card *card = dev_get_drvdata(&gdev->dev);
-	return lcs_pm_resume(card);
-}
-
 static struct ccw_device_id lcs_ids[] = {
 	{CCW_DEVICE(0x3088, 0x08), .driver_info = lcs_channel_type_parallel},
 	{CCW_DEVICE(0x3088, 0x1f), .driver_info = lcs_channel_type_2216},
@@ -2382,11 +2328,6 @@
 	.remove      = lcs_remove_device,
 	.set_online  = lcs_new_device,
 	.set_offline = lcs_shutdown_device,
-	.prepare     = lcs_prepare,
-	.complete    = lcs_complete,
-	.freeze	     = lcs_freeze,
-	.thaw	     = lcs_thaw,
-	.restore     = lcs_restore,
 };
 
 static ssize_t group_store(struct device_driver *ddrv, const char *buf,
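
Note on the lcs hunks above: with s390 having dropped hibernation support, the five ccwgroup PM callbacks are dead code and are removed wholesale. The deleted lcs_pm_suspend()/lcs_pm_resume() pair followed the standard netdev quiesce pattern, reproduced here as a driver-agnostic sketch (function names hypothetical):

#include <linux/netdevice.h>

static int example_freeze(struct net_device *ndev)
{
	netif_device_detach(ndev);	/* stop the stack from queueing TX */
	/* ... quiesce the hardware, wait for in-flight work ... */
	return 0;
}

static int example_restore(struct net_device *ndev)
{
	/* ... re-initialize the hardware ... */
	netif_device_attach(ndev);	/* let TX resume */
	return 0;
}
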
diff --git a/drivers/s390/net/netiucv.c b/drivers/s390/net/netiucv.c
index 5ce2424..260860c 100644
--- a/drivers/s390/net/netiucv.c
+++ b/drivers/s390/net/netiucv.c
@@ -112,27 +112,10 @@
  */
 #define PRINTK_HEADER " iucv: "       /* for debugging */
 
-/* dummy device to make sure netiucv_pm functions are called */
-static struct device *netiucv_dev;
-
-static int netiucv_pm_prepare(struct device *);
-static void netiucv_pm_complete(struct device *);
-static int netiucv_pm_freeze(struct device *);
-static int netiucv_pm_restore_thaw(struct device *);
-
-static const struct dev_pm_ops netiucv_pm_ops = {
-	.prepare = netiucv_pm_prepare,
-	.complete = netiucv_pm_complete,
-	.freeze = netiucv_pm_freeze,
-	.thaw = netiucv_pm_restore_thaw,
-	.restore = netiucv_pm_restore_thaw,
-};
-
 static struct device_driver netiucv_driver = {
 	.owner = THIS_MODULE,
 	.name = "netiucv",
 	.bus  = &iucv_bus,
-	.pm = &netiucv_pm_ops,
 };
 
 static int netiucv_callback_connreq(struct iucv_path *, u8 *, u8 *);
@@ -213,7 +196,6 @@
 	fsm_instance            *fsm;
         struct iucv_connection  *conn;
 	struct device           *dev;
-	int			 pm_state;
 };
 
 /**
@@ -1275,72 +1257,6 @@
 	return 0;
 }
 
-static int netiucv_pm_prepare(struct device *dev)
-{
-	IUCV_DBF_TEXT(trace, 3, __func__);
-	return 0;
-}
-
-static void netiucv_pm_complete(struct device *dev)
-{
-	IUCV_DBF_TEXT(trace, 3, __func__);
-	return;
-}
-
-/**
- * netiucv_pm_freeze() - Freeze PM callback
- * @dev:	netiucv device
- *
- * close open netiucv interfaces
- */
-static int netiucv_pm_freeze(struct device *dev)
-{
-	struct netiucv_priv *priv = dev_get_drvdata(dev);
-	struct net_device *ndev = NULL;
-	int rc = 0;
-
-	IUCV_DBF_TEXT(trace, 3, __func__);
-	if (priv && priv->conn)
-		ndev = priv->conn->netdev;
-	if (!ndev)
-		goto out;
-	netif_device_detach(ndev);
-	priv->pm_state = fsm_getstate(priv->fsm);
-	rc = netiucv_close(ndev);
-out:
-	return rc;
-}
-
-/**
- * netiucv_pm_restore_thaw() - Thaw and restore PM callback
- * @dev:	netiucv device
- *
- * re-open netiucv interfaces closed during freeze
- */
-static int netiucv_pm_restore_thaw(struct device *dev)
-{
-	struct netiucv_priv *priv = dev_get_drvdata(dev);
-	struct net_device *ndev = NULL;
-	int rc = 0;
-
-	IUCV_DBF_TEXT(trace, 3, __func__);
-	if (priv && priv->conn)
-		ndev = priv->conn->netdev;
-	if (!ndev)
-		goto out;
-	switch (priv->pm_state) {
-	case DEV_STATE_RUNNING:
-	case DEV_STATE_STARTWAIT:
-		rc = netiucv_open(ndev);
-		break;
-	default:
-		break;
-	}
-	netif_device_attach(ndev);
-out:
-	return rc;
-}
-
 /**
  * Start transmission of a packet.
  * Called from generic network device layer.
@@ -2156,7 +2072,6 @@
 		netiucv_unregister_device(dev);
 	}
 
-	device_unregister(netiucv_dev);
 	driver_unregister(&netiucv_driver);
 	iucv_unregister(&netiucv_handler, 1);
 	iucv_unregister_dbf_views();
@@ -2182,27 +2097,10 @@
 		IUCV_DBF_TEXT_(setup, 2, "ret %d from driver_register\n", rc);
 		goto out_iucv;
 	}
-	/* establish dummy device */
-	netiucv_dev = kzalloc(sizeof(struct device), GFP_KERNEL);
-	if (!netiucv_dev) {
-		rc = -ENOMEM;
-		goto out_driver;
-	}
-	dev_set_name(netiucv_dev, "netiucv");
-	netiucv_dev->bus = &iucv_bus;
-	netiucv_dev->parent = iucv_root;
-	netiucv_dev->release = (void (*)(struct device *))kfree;
-	netiucv_dev->driver = &netiucv_driver;
-	rc = device_register(netiucv_dev);
-	if (rc) {
-		put_device(netiucv_dev);
-		goto out_driver;
-	}
+
 	netiucv_banner();
 	return rc;
 
-out_driver:
-	driver_unregister(&netiucv_driver);
 out_iucv:
 	iucv_unregister(&netiucv_handler, 1);
 out_dbf:
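
Note on the netiucv hunks above: the driver registered a dummy device solely so its dev_pm_ops would run; with the PM callbacks gone, the device, its kzalloc/dev_set_name boilerplate, and the matching out_driver unwind label all disappear. The surviving init path keeps the usual goto-unwind shape, condensed here as a sketch (step bodies elided; only the labels shown in the hunk are real):

static int __init netiucv_init_sketch(void)
{
	int rc;

	/* earlier: register dbf views, then the IUCV handler */
	rc = iucv_register(&netiucv_handler, 1);
	if (rc)
		goto out_dbf;
	rc = driver_register(&netiucv_driver);
	if (rc)
		goto out_iucv;

	netiucv_banner();
	return 0;

out_iucv:
	iucv_unregister(&netiucv_handler, 1);
out_dbf:
	/* unregister dbf views */
	return rc;
}
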
diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h
index 93b4cb1..bf8404b 100644
--- a/drivers/s390/net/qeth_core.h
+++ b/drivers/s390/net/qeth_core.h
@@ -11,6 +11,7 @@
 #define __QETH_CORE_H__
 
 #include <linux/completion.h>
+#include <linux/debugfs.h>
 #include <linux/if.h>
 #include <linux/if_arp.h>
 #include <linux/etherdevice.h>
@@ -21,8 +22,10 @@
 #include <linux/seq_file.h>
 #include <linux/hashtable.h>
 #include <linux/ip.h>
+#include <linux/rcupdate.h>
 #include <linux/refcount.h>
 #include <linux/timer.h>
+#include <linux/types.h>
 #include <linux/wait.h>
 #include <linux/workqueue.h>
 
@@ -31,6 +34,7 @@
 #include <net/ipv6.h>
 #include <net/if_inet6.h>
 #include <net/addrconf.h>
+#include <net/route.h>
 #include <net/sch_generic.h>
 #include <net/tcp.h>
 
@@ -125,12 +129,6 @@
 	enum qeth_routing_types type;
 };
 
-/* IPA stuff */
-struct qeth_ipa_info {
-	__u32 supported_funcs;
-	__u32 enabled_funcs;
-};
-
 /* SETBRIDGEPORT stuff */
 enum qeth_sbp_roles {
 	QETH_SBP_ROLE_NONE	= 0,
@@ -169,41 +167,6 @@
 	bool rx_bcast_enabled;
 };
 
-static inline int qeth_is_adp_supported(struct qeth_ipa_info *ipa,
-		enum qeth_ipa_setadp_cmd func)
-{
-	return (ipa->supported_funcs & func);
-}
-
-static inline int qeth_is_ipa_supported(struct qeth_ipa_info *ipa,
-		enum qeth_ipa_funcs func)
-{
-	return (ipa->supported_funcs & func);
-}
-
-static inline int qeth_is_ipa_enabled(struct qeth_ipa_info *ipa,
-		enum qeth_ipa_funcs func)
-{
-	return (ipa->supported_funcs & ipa->enabled_funcs & func);
-}
-
-#define qeth_adp_supported(c, f) \
-	qeth_is_adp_supported(&c->options.adp, f)
-#define qeth_is_supported(c, f) \
-	qeth_is_ipa_supported(&c->options.ipa4, f)
-#define qeth_is_enabled(c, f) \
-	qeth_is_ipa_enabled(&c->options.ipa4, f)
-#define qeth_is_supported6(c, f) \
-	qeth_is_ipa_supported(&c->options.ipa6, f)
-#define qeth_is_enabled6(c, f) \
-	qeth_is_ipa_enabled(&c->options.ipa6, f)
-#define qeth_is_ipafunc_supported(c, prot, f) \
-	 ((prot == QETH_PROT_IPV6) ? \
-		qeth_is_supported6(c, f) : qeth_is_supported(c, f))
-#define qeth_is_ipafunc_enabled(c, prot, f) \
-	 ((prot == QETH_PROT_IPV6) ? \
-		qeth_is_enabled6(c, f) : qeth_is_enabled(c, f))
-
 #define QETH_IDX_FUNC_LEVEL_OSD		 0x0101
 #define QETH_IDX_FUNC_LEVEL_IQD		 0x4108
 
@@ -214,27 +177,26 @@
 /**
  * some more defs
  */
-#define QETH_TX_TIMEOUT		100 * HZ
-#define QETH_RCD_TIMEOUT	60 * HZ
+#define QETH_TX_TIMEOUT		(100 * HZ)
+#define QETH_RCD_TIMEOUT	(60 * HZ)
 #define QETH_RECLAIM_WORK_TIME	HZ
 #define QETH_MAX_PORTNO		15
 
-/*IPv6 address autoconfiguration stuff*/
-#define UNIQUE_ID_IF_CREATE_ADDR_FAILED 0xfffe
-#define UNIQUE_ID_NOT_BY_CARD		0x10000
-
 /*****************************************************************************/
 /* QDIO queue and buffer handling                                            */
 /*****************************************************************************/
-#define QETH_MAX_QUEUES 4
+#define QETH_MAX_OUT_QUEUES	4
 #define QETH_IQD_MIN_TXQ	2	/* One for ucast, one for mcast. */
 #define QETH_IQD_MCAST_TXQ	0
 #define QETH_IQD_MIN_UCAST_TXQ	1
+
+#define QETH_MAX_IN_QUEUES	2
+#define QETH_RX_COPYBREAK      (PAGE_SIZE >> 1)
 #define QETH_IN_BUF_SIZE_DEFAULT 65536
 #define QETH_IN_BUF_COUNT_DEFAULT 64
 #define QETH_IN_BUF_COUNT_HSDEFAULT 128
-#define QETH_IN_BUF_COUNT_MIN 8
-#define QETH_IN_BUF_COUNT_MAX 128
+#define QETH_IN_BUF_COUNT_MIN	8U
+#define QETH_IN_BUF_COUNT_MAX	128U
 #define QETH_MAX_BUFFER_ELEMENTS(card) ((card)->qdio.in_buf_size >> 12)
 #define QETH_IN_BUF_REQUEUE_THRESHOLD(card) \
 		 ((card)->qdio.in_buf_pool.buf_count / 2)
@@ -254,16 +216,13 @@
 #define QETH_PRIO_Q_ING_TOS   2
 #define QETH_PRIO_Q_ING_SKB   3
 #define QETH_PRIO_Q_ING_VLAN  4
+#define QETH_PRIO_Q_ING_FIXED 5
 
 /* Packing */
 #define QETH_LOW_WATERMARK_PACK  2
 #define QETH_HIGH_WATERMARK_PACK 5
 #define QETH_WATERMARK_PACK_FUZZ 1
 
-/* large receive scatter gather copy break */
-#define QETH_RX_SG_CB (PAGE_SIZE >> 1)
-#define QETH_RX_PULL_LEN 256
-
 struct qeth_hdr_layer3 {
 	__u8  id;
 	__u8  flags;
@@ -276,11 +235,7 @@
 	__u16 frame_offset;
 	union {
 		/* TX: */
-		struct in6_addr ipv6_addr;
-		struct ipv4 {
-			u8 res[12];
-			u32 addr;
-		} ipv4;
+		struct in6_addr addr;
 		/* RX: */
 		struct rx {
 			u8 res1[2];
@@ -323,6 +278,26 @@
 	} hdr;
 } __attribute__ ((packed));
 
+#define QETH_QIB_PQUE_ORDER_RR		0
+#define QETH_QIB_PQUE_UNITS_SBAL	2
+#define QETH_QIB_PQUE_PRIO_DEFAULT	4
+
+struct qeth_qib_parms {
+	char pcit_magic[4];
+	u32 pcit_a;
+	u32 pcit_b;
+	u32 pcit_c;
+	char blkt_magic[4];
+	u32 blkt_total;
+	u32 blkt_inter_packet;
+	u32 blkt_inter_packet_jumbo;
+	char pque_magic[4];
+	u8 pque_order;
+	u8 pque_units;
+	u16 reserved;
+	u32 pque_priority[4];
+};
+
 /*TCP Segmentation Offload header*/
 struct qeth_hdr_ext_tso {
 	__u16 hdr_tot_len;
@@ -368,6 +343,7 @@
 	QETH_HEADER_TYPE_L3_TSO	= 0x03,
 	QETH_HEADER_TYPE_OSN    = 0x04,
 	QETH_HEADER_TYPE_L2_TSO	= 0x06,
+	QETH_HEADER_MASK_INVAL	= 0x80,
 };
 /* flags for qeth_hdr.ext_flags */
 #define QETH_HDR_EXT_VLAN_FRAME       0x01
@@ -396,10 +372,15 @@
 					 struct qeth_hdr_layer3 *h2)
 {
 	return !((h1->flags ^ h2->flags) & QETH_HDR_IPV6) &&
-	       ipv6_addr_equal(&h1->next_hop.ipv6_addr,
-			       &h2->next_hop.ipv6_addr);
+	       ipv6_addr_equal(&h1->next_hop.addr, &h2->next_hop.addr);
 }
 
+struct qeth_local_addr {
+	struct hlist_node hnode;
+	struct rcu_head rcu;
+	struct in6_addr addr;
+};
+
 enum qeth_qdio_info_states {
 	QETH_QDIO_UNINITIALIZED,
 	QETH_QDIO_ALLOCATED,
@@ -410,7 +391,7 @@
 struct qeth_buffer_pool_entry {
 	struct list_head list;
 	struct list_head init_list;
-	void *elements[QDIO_MAX_ELEMENTS_PER_BUFFER];
+	struct page *elements[QDIO_MAX_ELEMENTS_PER_BUFFER];
 };
 
 struct qeth_qdio_buffer_pool {
@@ -443,30 +424,23 @@
 	/* Received QAOB notification on CQ: */
 	QETH_QDIO_BUF_QAOB_OK,
 	QETH_QDIO_BUF_QAOB_ERROR,
-	/* Handled via transfer pending / completion queue. */
-	QETH_QDIO_BUF_HANDLED_DELAYED,
 };
 
 struct qeth_qdio_out_buffer {
 	struct qdio_buffer *buffer;
 	atomic_t state;
 	int next_element_to_fill;
+	unsigned int frames;
 	unsigned int bytes;
 	struct sk_buff_head skb_list;
 	int is_header[QDIO_MAX_ELEMENTS_PER_BUFFER];
 
 	struct qeth_qdio_out_q *q;
-	struct qeth_qdio_out_buffer *next_pending;
+	struct list_head list_entry;
 };
 
 struct qeth_card;
 
-enum qeth_out_q_states {
-       QETH_OUT_Q_UNLOCKED,
-       QETH_OUT_Q_LOCKED,
-       QETH_OUT_Q_LOCKED_FLUSH,
-};
-
 #define QETH_CARD_STAT_ADD(_c, _stat, _val)	((_c)->stats._stat += (_val))
 #define QETH_CARD_STAT_INC(_c, _stat)		QETH_CARD_STAT_ADD(_c, _stat, 1)
 
@@ -480,12 +454,17 @@
 	u64 rx_sg_frags;
 	u64 rx_sg_alloc_page;
 
+	u64 rx_dropped_nomem;
+	u64 rx_dropped_notsupp;
+	u64 rx_dropped_runt;
+
 	/* rtnl_link_stats64 */
 	u64 rx_packets;
 	u64 rx_bytes;
-	u64 rx_errors;
-	u64 rx_dropped;
 	u64 rx_multicast;
+	u64 rx_length_errors;
+	u64 rx_frame_errors;
+	u64 rx_fifo_errors;
 };
 
 struct qeth_out_q_stats {
@@ -501,6 +480,8 @@
 	u64 tso_bytes;
 	u64 packing_mode_switch;
 	u64 stopped;
+	u64 doorbell;
+	u64 coal_frames;
 	u64 completion_yield;
 	u64 completion_timer;
 
@@ -511,19 +492,23 @@
 	u64 tx_dropped;
 };
 
+#define QETH_TX_MAX_COALESCED_FRAMES	1
+#define QETH_TX_COALESCE_USECS		25
 #define QETH_TX_TIMER_USECS		500
 
 struct qeth_qdio_out_q {
 	struct qdio_buffer *qdio_bufs[QDIO_MAX_BUFFERS_PER_Q];
 	struct qeth_qdio_out_buffer *bufs[QDIO_MAX_BUFFERS_PER_Q];
 	struct qdio_outbuf_state *bufstates; /* convenience pointer */
+	struct list_head pending_bufs;
 	struct qeth_out_q_stats stats;
+	spinlock_t lock;
+	unsigned int priority;
 	u8 next_buf_to_fill;
 	u8 max_elements;
 	u8 queue_no;
 	u8 do_pack;
 	struct qeth_card *card;
-	atomic_t state;
 	/*
 	 * number of buffers that are currently filled (PRIMED)
 	 * -> these buffers are hardware-owned
@@ -534,7 +519,13 @@
 	struct napi_struct napi;
 	struct timer_list timer;
 	struct qeth_hdr *prev_hdr;
+	unsigned int coalesced_frames;
 	u8 bulk_start;
+	u8 bulk_count;
+	u8 bulk_max;
+
+	unsigned int coalesce_usecs;
+	unsigned int max_coalesced_frames;
 };
 
 #define qeth_for_each_output_queue(card, q, i)		\
@@ -543,12 +534,10 @@
 
 #define	qeth_napi_to_out_queue(n) container_of(n, struct qeth_qdio_out_q, napi)
 
-static inline void qeth_tx_arm_timer(struct qeth_qdio_out_q *queue)
+static inline void qeth_tx_arm_timer(struct qeth_qdio_out_q *queue,
+				     unsigned long usecs)
 {
-	if (timer_pending(&queue->timer))
-		return;
-	mod_timer(&queue->timer, usecs_to_jiffies(QETH_TX_TIMER_USECS) +
-				 jiffies);
+	timer_reduce(&queue->timer, usecs_to_jiffies(usecs) + jiffies);
 }
 
 static inline bool qeth_out_queue_is_full(struct qeth_qdio_out_q *queue)
@@ -572,8 +561,8 @@
 	int in_buf_size;
 
 	/* output */
-	int no_out_queues;
-	struct qeth_qdio_out_q *out_qs[QETH_MAX_QUEUES];
+	unsigned int no_out_queues;
+	struct qeth_qdio_out_q *out_qs[QETH_MAX_OUT_QUEUES];
 	struct qdio_outbuf_state *out_bufstates;
 
 	/* priority queueing */
@@ -595,7 +584,6 @@
  */
 enum qeth_card_states {
 	CARD_STATE_DOWN,
-	CARD_STATE_HARDSETUP,
 	CARD_STATE_SOFTSETUP,
 };
 
@@ -645,6 +633,8 @@
 	long timeout;
 	unsigned char *data;
 	void (*finalize)(struct qeth_card *card, struct qeth_cmd_buffer *iob);
+	bool (*match)(struct qeth_cmd_buffer *iob,
+		      struct qeth_cmd_buffer *reply);
 	void (*callback)(struct qeth_card *card, struct qeth_cmd_buffer *iob,
 			 unsigned int data_length);
 	int rc;
@@ -655,6 +645,14 @@
 	refcount_inc(&iob->ref_count);
 }
 
+static inline struct qeth_ipa_cmd *__ipa_reply(struct qeth_cmd_buffer *iob)
+{
+	if (!IS_IPA(iob->data))
+		return NULL;
+
+	return (struct qeth_ipa_cmd *) PDU_ENCAPSULATION(iob->data);
+}
+
 static inline struct qeth_ipa_cmd *__ipa_cmd(struct qeth_cmd_buffer *iob)
 {
 	return (struct qeth_ipa_cmd *)(iob->data + IPA_PDU_HEADER_SIZE);
@@ -699,24 +697,39 @@
 	int inter_packet_jumbo;
 };
 
+enum qeth_pnso_mode {
+	QETH_PNSO_NONE,
+	QETH_PNSO_BRIDGEPORT,
+	QETH_PNSO_ADDR_INFO,
+};
+
 #define QETH_BROADCAST_WITH_ECHO    0x01
 #define QETH_BROADCAST_WITHOUT_ECHO 0x02
-#define QETH_LAYER2_MAC_REGISTERED  0x02
 struct qeth_card_info {
 	unsigned short unit_addr2;
 	unsigned short cula;
-	u8 chpid;
 	__u16 func_level;
 	char mcl_level[QETH_MCL_LENGTH + 1];
+	/* doubleword below corresponds to net_if_token */
+	u16 ddev_devno;
+	u8 cssid;
+	u8 iid;
+	u8 ssid;
+	u8 chpid;
+	u16 chid;
+	u8 ids_valid:1; /* cssid,iid,chid */
+	u8 dev_addr_is_registered:1;
 	u8 open_when_online:1;
 	u8 promisc_mode:1;
 	u8 use_v1_blkt:1;
 	u8 is_vm_nic:1;
-	int mac_bits;
+	/* no bitfield, we take a pointer to these two: */
+	u8 has_lp2lp_cso_v6;
+	u8 has_lp2lp_cso_v4;
+	enum qeth_pnso_mode pnso_mode;
 	enum qeth_card_types type;
 	enum qeth_link_types link_type;
 	int broadcast_capable;
-	int unique_id;
 	bool layer_enforced;
 	struct qeth_card_blkt blkt;
 	__u32 diagass_support;
@@ -730,18 +743,15 @@
 };
 
 struct qeth_card_options {
+	struct qeth_ipa_caps ipa4;
+	struct qeth_ipa_caps ipa6;
 	struct qeth_routing_info route4;
-	struct qeth_ipa_info ipa4;
-	struct qeth_ipa_info adp; /*Adapter parameters*/
 	struct qeth_routing_info route6;
-	struct qeth_ipa_info ipa6;
+	struct qeth_ipa_caps adp; /* Adapter parameters */
 	struct qeth_sbp_info sbp; /* SETBRIDGEPORT options */
 	struct qeth_vnicc_info vnicc; /* VNICC options */
-	int fake_broadcast;
 	enum qeth_discipline_id layer;
-	int rx_sg_cb;
 	enum qeth_ipa_isolation_modes isolation;
-	enum qeth_ipa_isolation_modes prev_isolation;
 	int sniffer;
 	enum qeth_cq cq;
 	char hsuid[9];
@@ -764,12 +774,10 @@
 
 struct qeth_discipline {
 	const struct device_type *devtype;
-	int (*process_rx_buffer)(struct qeth_card *card, int budget, int *done);
-	int (*recover)(void *ptr);
 	int (*setup) (struct ccwgroup_device *);
 	void (*remove) (struct ccwgroup_device *);
-	int (*set_online) (struct ccwgroup_device *);
-	int (*set_offline) (struct ccwgroup_device *);
+	int (*set_online)(struct qeth_card *card, bool carrier_ok);
+	void (*set_offline)(struct qeth_card *card);
 	int (*do_ioctl)(struct net_device *dev, struct ifreq *rq, int cmd);
 	int (*control_event_handler)(struct qeth_card *card,
 					struct qeth_ipa_cmd *cmd);
@@ -784,9 +792,10 @@
 struct qeth_rx {
 	int b_count;
 	int b_index;
-	struct qdio_buffer_element *b_element;
+	u8 buf_element;
 	int e_offset;
 	int qdio_err;
+	u8 bufs_refill;
 };
 
 struct carrier_info {
@@ -800,6 +809,13 @@
 	__u32 settings;
 };
 
+struct qeth_priv {
+	unsigned int rx_copybreak;
+	unsigned int tx_wanted_queues;
+	u32 brport_hw_features;
+	u32 brport_features;
+};
+
 #define QETH_NAPI_WEIGHT NAPI_POLL_WEIGHT
 
 struct qeth_card {
@@ -812,6 +828,7 @@
 	struct qeth_channel data;
 
 	struct net_device *dev;
+	struct dentry *debugfs;
 	struct qeth_card_stats stats;
 	struct qeth_card_info info;
 	struct qeth_token token;
@@ -821,24 +838,29 @@
 	struct workqueue_struct *event_wq;
 	struct workqueue_struct *cmd_wq;
 	wait_queue_head_t wait_q;
-	unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
-	DECLARE_HASHTABLE(mac_htable, 4);
-	DECLARE_HASHTABLE(ip_htable, 4);
+
 	struct mutex ip_lock;
-	DECLARE_HASHTABLE(ip_mc_htable, 4);
+	/* protected by ip_lock: */
+	DECLARE_HASHTABLE(ip_htable, 4);
+	struct qeth_ipato ipato;
+
+	DECLARE_HASHTABLE(local_addrs4, 4);
+	DECLARE_HASHTABLE(local_addrs6, 4);
+	spinlock_t local_addrs4_lock;
+	spinlock_t local_addrs6_lock;
+	DECLARE_HASHTABLE(rx_mode_addrs, 4);
 	struct work_struct rx_mode_work;
 	struct work_struct kernel_thread_starter;
 	spinlock_t thread_mask_lock;
 	unsigned long thread_start_mask;
 	unsigned long thread_allowed_mask;
 	unsigned long thread_running_mask;
-	struct qeth_ipato ipato;
 	struct list_head cmd_waiter_list;
 	/* QDIO buffer handling */
 	struct qeth_qdio_info qdio;
 	int read_or_write_problem;
 	struct qeth_osn_info osn_info;
-	struct qeth_discipline *discipline;
+	const struct qeth_discipline *discipline;
 	atomic_t force_alloc_skb;
 	struct service_level qeth_service_level;
 	struct qdio_ssqd_desc ssqd;
@@ -849,7 +871,6 @@
 	struct napi_struct napi;
 	struct qeth_rx rx;
 	struct delayed_work buffer_reclaim_work;
-	int reclaim_index;
 	struct work_struct close_dev_work;
 };
 
@@ -858,6 +879,13 @@
 	return card->state == CARD_STATE_SOFTSETUP;
 }
 
+static inline void qeth_unlock_channel(struct qeth_card *card,
+				       struct qeth_channel *channel)
+{
+	atomic_set(&channel->irq_pending, 0);
+	wake_up(&card->wait_q);
+}
+
 struct qeth_trap_id {
 	__u16 lparnr;
 	char vmname[8];
@@ -866,12 +894,19 @@
 	__u16 devno;
 } __packed;
 
-/*some helper functions*/
-#define QETH_CARD_IFNAME(card) (((card)->dev)? (card)->dev->name : "")
-
-static inline bool qeth_netdev_is_registered(struct net_device *dev)
+static inline bool qeth_uses_tx_prio_queueing(struct qeth_card *card)
 {
-	return dev->netdev_ops != NULL;
+	return card->qdio.do_prio_queueing != QETH_NO_PRIO_QUEUEING;
+}
+
+static inline unsigned int qeth_tx_actual_queues(struct qeth_card *card)
+{
+	struct qeth_priv *priv = netdev_priv(card->dev);
+
+	if (qeth_uses_tx_prio_queueing(card))
+		return min(card->dev->num_tx_queues, card->qdio.no_out_queues);
+
+	return min(priv->tx_wanted_queues, card->qdio.no_out_queues);
 }
 
 static inline u16 qeth_iqd_translate_txq(struct net_device *dev, u16 txq)
@@ -883,6 +918,13 @@
 	return txq;
 }
 
+static inline bool qeth_iqd_is_mcast_queue(struct qeth_card *card,
+					   struct qeth_qdio_out_q *queue)
+{
+	return qeth_iqd_translate_txq(card->dev, queue->queue_no) ==
+	       QETH_IQD_MCAST_TXQ;
+}
+
 static inline void qeth_scrub_qdio_buffer(struct qdio_buffer *buf,
 					  unsigned int elements)
 {
@@ -946,16 +988,23 @@
 	return dst;
 }
 
-static inline void qeth_rx_csum(struct qeth_card *card, struct sk_buff *skb,
-				u8 flags)
+static inline __be32 qeth_next_hop_v4_rcu(struct sk_buff *skb,
+					  struct dst_entry *dst)
 {
-	if ((card->dev->features & NETIF_F_RXCSUM) &&
-	    (flags & QETH_HDR_EXT_CSUM_TRANSP_REQ)) {
-		skb->ip_summed = CHECKSUM_UNNECESSARY;
-		QETH_CARD_STAT_INC(card, rx_skb_csum);
-	} else {
-		skb->ip_summed = CHECKSUM_NONE;
-	}
+	struct rtable *rt = (struct rtable *) dst;
+
+	return (rt) ? rt_nexthop(rt, ip_hdr(skb)->daddr) : ip_hdr(skb)->daddr;
+}
+
+static inline struct in6_addr *qeth_next_hop_v6_rcu(struct sk_buff *skb,
+						    struct dst_entry *dst)
+{
+	struct rt6_info *rt = (struct rt6_info *) dst;
+
+	if (rt && !ipv6_addr_any(&rt->rt6i_gateway))
+		return &rt->rt6i_gateway;
+	else
+		return &ipv6_hdr(skb)->daddr;
 }
 
 static inline void qeth_tx_csum(struct sk_buff *skb, u8 *flags, int ipv)
@@ -1001,8 +1050,8 @@
 
 int qeth_get_priority_queue(struct qeth_card *card, struct sk_buff *skb);
 
-extern struct qeth_discipline qeth_l2_discipline;
-extern struct qeth_discipline qeth_l3_discipline;
+extern const struct qeth_discipline qeth_l2_discipline;
+extern const struct qeth_discipline qeth_l3_discipline;
 extern const struct ethtool_ops qeth_ethtool_ops;
 extern const struct ethtool_ops qeth_osn_ethtool_ops;
 extern const struct attribute_group *qeth_generic_attr_groups[];
@@ -1012,7 +1061,7 @@
 extern const struct device_type qeth_generic_devtype;
 
 const char *qeth_get_cardname_short(struct qeth_card *);
-int qeth_realloc_buffer_pool(struct qeth_card *, int);
+int qeth_resize_buffer_pool(struct qeth_card *card, unsigned int count);
 int qeth_core_load_discipline(struct qeth_card *, enum qeth_discipline_id);
 void qeth_core_free_discipline(struct qeth_card *);
 
@@ -1022,16 +1071,12 @@
 
 struct net_device *qeth_clone_netdev(struct net_device *orig);
 struct qeth_card *qeth_get_card_by_busid(char *bus_id);
-void qeth_set_allowed_threads(struct qeth_card *, unsigned long , int);
+void qeth_set_allowed_threads(struct qeth_card *card, unsigned long threads,
+			      int clear_start_mask);
 int qeth_threads_running(struct qeth_card *, unsigned long);
-int qeth_do_run_thread(struct qeth_card *, unsigned long);
-void qeth_clear_thread_start_bit(struct qeth_card *, unsigned long);
-void qeth_clear_thread_running_bit(struct qeth_card *, unsigned long);
-int qeth_core_hardsetup_card(struct qeth_card *card, bool *carrier_ok);
-int qeth_stop_channel(struct qeth_channel *channel);
+int qeth_set_offline(struct qeth_card *card, const struct qeth_discipline *disc,
+		     bool resetting);
 
-void qeth_print_status_message(struct qeth_card *);
-int qeth_init_qdio_queues(struct qeth_card *);
 int qeth_send_ipa_cmd(struct qeth_card *, struct qeth_cmd_buffer *,
 		  int (*reply_cb)
 		  (struct qeth_card *, struct qeth_reply *, unsigned long),
@@ -1054,24 +1099,22 @@
 void qeth_notify_cmd(struct qeth_cmd_buffer *iob, int reason);
 void qeth_put_cmd(struct qeth_cmd_buffer *iob);
 
-struct sk_buff *qeth_core_get_next_skb(struct qeth_card *,
-		struct qeth_qdio_buffer *, struct qdio_buffer_element **, int *,
-		struct qeth_hdr **);
-void qeth_schedule_recovery(struct qeth_card *);
+int qeth_schedule_recovery(struct qeth_card *card);
 int qeth_poll(struct napi_struct *napi, int budget);
-void qeth_clear_ipacmd_list(struct qeth_card *);
-int qeth_qdio_clear_card(struct qeth_card *, int);
-void qeth_clear_working_pool_list(struct qeth_card *);
-void qeth_drain_output_queues(struct qeth_card *card);
 void qeth_setadp_promisc_mode(struct qeth_card *card, bool enable);
 int qeth_setadpparms_change_macaddr(struct qeth_card *);
-void qeth_tx_timeout(struct net_device *);
+void qeth_tx_timeout(struct net_device *, unsigned int txqueue);
 void qeth_prepare_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob,
-			  u16 cmd_length);
+			  u16 cmd_length,
+			  bool (*match)(struct qeth_cmd_buffer *iob,
+					struct qeth_cmd_buffer *reply));
 int qeth_query_switch_attributes(struct qeth_card *card,
 				  struct qeth_switch_info *sw_info);
 int qeth_query_card_info(struct qeth_card *card,
 			 struct carrier_info *carrier_info);
+int qeth_setadpparms_set_access_ctrl(struct qeth_card *card,
+				     enum qeth_ipa_isolation_modes mode);
+
 unsigned int qeth_count_elements(struct sk_buff *skb, unsigned int data_offset);
 int qeth_do_send_packet(struct qeth_card *card, struct qeth_qdio_out_q *queue,
 			struct sk_buff *skb, struct qeth_hdr *hdr,
@@ -1079,10 +1122,8 @@
 			int elements_needed);
 int qeth_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
 void qeth_dbf_longtext(debug_info_t *id, int level, char *text, ...);
-int qeth_set_access_ctrl_online(struct qeth_card *card, int fallback);
 int qeth_configure_cq(struct qeth_card *, enum qeth_cq);
 int qeth_hw_trap(struct qeth_card *, enum qeth_diags_trap_action);
-void qeth_trace_features(struct qeth_card *);
 int qeth_setassparms_cb(struct qeth_card *, struct qeth_reply *, unsigned long);
 int qeth_set_features(struct net_device *, netdev_features_t);
 void qeth_enable_hw_features(struct net_device *dev);
@@ -1091,6 +1132,7 @@
 				      struct net_device *dev,
 				      netdev_features_t features);
 void qeth_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats);
+int qeth_set_real_num_tx_queues(struct qeth_card *card, unsigned int count);
 u16 qeth_iqd_select_queue(struct net_device *dev, struct sk_buff *skb,
 			  u8 cast_type, struct net_device *sb_dev);
 int qeth_open(struct net_device *dev);
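
Note on the qeth_core.h hunks above: qeth_tx_arm_timer() trades the racy timer_pending()+mod_timer() sequence for a single timer_reduce() call, which arms an inactive timer or pulls a pending one forward, but never pushes the deadline back — exactly the semantics TX coalescing wants. A standalone sketch of the idiom (helper name hypothetical):

#include <linux/timer.h>
#include <linux/jiffies.h>

/* Arm @t to fire in @usecs, unless it already fires sooner. */
static void arm_coalesce_deadline(struct timer_list *t, unsigned long usecs)
{
	timer_reduce(t, jiffies + usecs_to_jiffies(usecs));
}
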
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index fad1c46..7b0155b 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -17,6 +17,7 @@
 #include <linux/errno.h>
 #include <linux/kernel.h>
 #include <linux/log2.h>
+#include <linux/io.h>
 #include <linux/ip.h>
 #include <linux/tcp.h>
 #include <linux/mii.h>
@@ -26,6 +27,7 @@
 #include <linux/if_vlan.h>
 #include <linux/netdevice.h>
 #include <linux/netdev_features.h>
+#include <linux/rcutree.h>
 #include <linux/skbuff.h>
 #include <linux/vmalloc.h>
 
@@ -35,7 +37,6 @@
 
 #include <asm/ebcdic.h>
 #include <asm/chpid.h>
-#include <asm/io.h>
 #include <asm/sysinfo.h>
 #include <asm/diag.h>
 #include <asm/cio.h>
@@ -61,20 +62,17 @@
 static struct kmem_cache *qeth_qdio_outbuf_cache;
 
 static struct device *qeth_core_root_dev;
+static struct dentry *qeth_debugfs_root;
 static struct lock_class_key qdio_out_skb_queue_key;
 
 static void qeth_issue_next_read_cb(struct qeth_card *card,
 				    struct qeth_cmd_buffer *iob,
 				    unsigned int data_length);
-static void qeth_free_buffer_pool(struct qeth_card *);
 static int qeth_qdio_establish(struct qeth_card *);
 static void qeth_free_qdio_queues(struct qeth_card *card);
 static void qeth_notify_skbs(struct qeth_qdio_out_q *queue,
 		struct qeth_qdio_out_buffer *buf,
 		enum iucv_tx_notify notification);
-static void qeth_tx_complete_buf(struct qeth_qdio_out_buffer *buf, bool error,
-				 int budget);
-static int qeth_init_qdio_out_buf(struct qeth_qdio_out_q *, int);
 
 static void qeth_close_dev_handler(struct work_struct *work)
 {
@@ -201,64 +199,139 @@
 }
 EXPORT_SYMBOL_GPL(qeth_threads_running);
 
-void qeth_clear_working_pool_list(struct qeth_card *card)
+static void qeth_clear_working_pool_list(struct qeth_card *card)
 {
 	struct qeth_buffer_pool_entry *pool_entry, *tmp;
+	struct qeth_qdio_q *queue = card->qdio.in_q;
+	unsigned int i;
 
 	QETH_CARD_TEXT(card, 5, "clwrklst");
 	list_for_each_entry_safe(pool_entry, tmp,
-			    &card->qdio.in_buf_pool.entry_list, list){
-			list_del(&pool_entry->list);
+				 &card->qdio.in_buf_pool.entry_list, list)
+		list_del(&pool_entry->list);
+
+	if (!queue)
+		return;
+
+	for (i = 0; i < ARRAY_SIZE(queue->bufs); i++)
+		queue->bufs[i].pool_entry = NULL;
+}
+
+static void qeth_free_pool_entry(struct qeth_buffer_pool_entry *entry)
+{
+	unsigned int i;
+
+	for (i = 0; i < ARRAY_SIZE(entry->elements); i++) {
+		if (entry->elements[i])
+			__free_page(entry->elements[i]);
+	}
+
+	kfree(entry);
+}
+
+static void qeth_free_buffer_pool(struct qeth_card *card)
+{
+	struct qeth_buffer_pool_entry *entry, *tmp;
+
+	list_for_each_entry_safe(entry, tmp, &card->qdio.init_pool.entry_list,
+				 init_list) {
+		list_del(&entry->init_list);
+		qeth_free_pool_entry(entry);
 	}
 }
-EXPORT_SYMBOL_GPL(qeth_clear_working_pool_list);
+
+static struct qeth_buffer_pool_entry *qeth_alloc_pool_entry(unsigned int pages)
+{
+	struct qeth_buffer_pool_entry *entry;
+	unsigned int i;
+
+	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+	if (!entry)
+		return NULL;
+
+	for (i = 0; i < pages; i++) {
+		entry->elements[i] = __dev_alloc_page(GFP_KERNEL);
+
+		if (!entry->elements[i]) {
+			qeth_free_pool_entry(entry);
+			return NULL;
+		}
+	}
+
+	return entry;
+}
 
 static int qeth_alloc_buffer_pool(struct qeth_card *card)
 {
-	struct qeth_buffer_pool_entry *pool_entry;
-	void *ptr;
-	int i, j;
+	unsigned int buf_elements = QETH_MAX_BUFFER_ELEMENTS(card);
+	unsigned int i;
 
 	QETH_CARD_TEXT(card, 5, "alocpool");
 	for (i = 0; i < card->qdio.init_pool.buf_count; ++i) {
-		pool_entry = kzalloc(sizeof(*pool_entry), GFP_KERNEL);
-		if (!pool_entry) {
+		struct qeth_buffer_pool_entry *entry;
+
+		entry = qeth_alloc_pool_entry(buf_elements);
+		if (!entry) {
 			qeth_free_buffer_pool(card);
 			return -ENOMEM;
 		}
-		for (j = 0; j < QETH_MAX_BUFFER_ELEMENTS(card); ++j) {
-			ptr = (void *) __get_free_page(GFP_KERNEL);
-			if (!ptr) {
-				while (j > 0)
-					free_page((unsigned long)
-						  pool_entry->elements[--j]);
-				kfree(pool_entry);
-				qeth_free_buffer_pool(card);
-				return -ENOMEM;
-			}
-			pool_entry->elements[j] = ptr;
-		}
-		list_add(&pool_entry->init_list,
-			 &card->qdio.init_pool.entry_list);
+
+		list_add(&entry->init_list, &card->qdio.init_pool.entry_list);
 	}
 	return 0;
 }
 
-int qeth_realloc_buffer_pool(struct qeth_card *card, int bufcnt)
+int qeth_resize_buffer_pool(struct qeth_card *card, unsigned int count)
 {
+	unsigned int buf_elements = QETH_MAX_BUFFER_ELEMENTS(card);
+	struct qeth_qdio_buffer_pool *pool = &card->qdio.init_pool;
+	struct qeth_buffer_pool_entry *entry, *tmp;
+	int delta = count - pool->buf_count;
+	LIST_HEAD(entries);
+
 	QETH_CARD_TEXT(card, 2, "realcbp");
 
-	if (card->state != CARD_STATE_DOWN)
-		return -EPERM;
+	/* Defer until queue is allocated: */
+	if (!card->qdio.in_q)
+		goto out;
 
-	/* TODO: steel/add buffers from/to a running card's buffer pool (?) */
-	qeth_clear_working_pool_list(card);
-	qeth_free_buffer_pool(card);
-	card->qdio.in_buf_pool.buf_count = bufcnt;
-	card->qdio.init_pool.buf_count = bufcnt;
-	return qeth_alloc_buffer_pool(card);
+	/* Remove entries from the pool: */
+	while (delta < 0) {
+		entry = list_first_entry(&pool->entry_list,
+					 struct qeth_buffer_pool_entry,
+					 init_list);
+		list_del(&entry->init_list);
+		qeth_free_pool_entry(entry);
+
+		delta++;
+	}
+
+	/* Allocate additional entries: */
+	while (delta > 0) {
+		entry = qeth_alloc_pool_entry(buf_elements);
+		if (!entry) {
+			list_for_each_entry_safe(entry, tmp, &entries,
+						 init_list) {
+				list_del(&entry->init_list);
+				qeth_free_pool_entry(entry);
+			}
+
+			return -ENOMEM;
+		}
+
+		list_add(&entry->init_list, &entries);
+
+		delta--;
+	}
+
+	list_splice(&entries, &pool->entry_list);
+
+out:
+	card->qdio.in_buf_pool.buf_count = count;
+	pool->buf_count = count;
+	return 0;
 }
-EXPORT_SYMBOL_GPL(qeth_realloc_buffer_pool);
+EXPORT_SYMBOL_GPL(qeth_resize_buffer_pool);
 
 static void qeth_free_qdio_queue(struct qeth_qdio_q *q)
 {
@@ -393,42 +466,6 @@
 	return n;
 }
 
-static void qeth_cleanup_handled_pending(struct qeth_qdio_out_q *q, int bidx,
-					 int forced_cleanup)
-{
-	if (q->card->options.cq != QETH_CQ_ENABLED)
-		return;
-
-	if (q->bufs[bidx]->next_pending != NULL) {
-		struct qeth_qdio_out_buffer *head = q->bufs[bidx];
-		struct qeth_qdio_out_buffer *c = q->bufs[bidx]->next_pending;
-
-		while (c) {
-			if (forced_cleanup ||
-			    atomic_read(&c->state) ==
-			      QETH_QDIO_BUF_HANDLED_DELAYED) {
-				struct qeth_qdio_out_buffer *f = c;
-				QETH_CARD_TEXT(f->q->card, 5, "fp");
-				QETH_CARD_TEXT_(f->q->card, 5, "%lx", (long) f);
-				/* release here to avoid interleaving between
-				   outbound tasklet and inbound tasklet
-				   regarding notifications and lifecycle */
-				qeth_tx_complete_buf(c, forced_cleanup, 0);
-
-				c = f->next_pending;
-				WARN_ON_ONCE(head->next_pending != f);
-				head->next_pending = c;
-				kmem_cache_free(qeth_qdio_outbuf_cache, f);
-			} else {
-				head = c;
-				c = c->next_pending;
-			}
-
-		}
-	}
-}
-
-
 static void qeth_qdio_handle_aob(struct qeth_card *card,
 				 unsigned long phys_aob_addr)
 {
@@ -436,6 +473,7 @@
 	struct qaob *aob;
 	struct qeth_qdio_out_buffer *buffer;
 	enum iucv_tx_notify notification;
+	struct qeth_qdio_out_q *queue;
 	unsigned int i;
 
 	aob = (struct qaob *) phys_to_virt(phys_aob_addr);
@@ -444,17 +482,6 @@
 	buffer = (struct qeth_qdio_out_buffer *) aob->user1;
 	QETH_CARD_TEXT_(card, 5, "%lx", aob->user1);
 
-	/* Free dangling allocations. The attached skbs are handled by
-	 * qeth_cleanup_handled_pending().
-	 */
-	for (i = 0;
-	     i < aob->sb_count && i < QETH_MAX_BUFFER_ELEMENTS(card);
-	     i++) {
-		if (aob->sba[i] && buffer->is_header[i])
-			kmem_cache_free(qeth_core_header_cache,
-					(void *) aob->sba[i]);
-	}
-
 	if (aob->aorc) {
 		QETH_CARD_TEXT_(card, 2, "aorc%02X", aob->aorc);
 		new_state = QETH_QDIO_BUF_QAOB_ERROR;
@@ -462,10 +489,9 @@
 
 	switch (atomic_xchg(&buffer->state, new_state)) {
 	case QETH_QDIO_BUF_PRIMED:
-		/* Faster than TX completion code. */
-		notification = qeth_compute_cq_notification(aob->aorc, 0);
-		qeth_notify_skbs(buffer->q, buffer, notification);
-		atomic_set(&buffer->state, QETH_QDIO_BUF_HANDLED_DELAYED);
+		/* Faster than TX completion code, let it handle the async
+		 * completion for us.
+		 */
 		break;
 	case QETH_QDIO_BUF_PENDING:
 		/* TX completion code is active and will handle the async
@@ -476,7 +502,22 @@
 		/* TX completion code is already finished. */
 		notification = qeth_compute_cq_notification(aob->aorc, 1);
 		qeth_notify_skbs(buffer->q, buffer, notification);
-		atomic_set(&buffer->state, QETH_QDIO_BUF_HANDLED_DELAYED);
+
+		/* Free dangling allocations. The attached skbs are handled by
+		 * qeth_tx_complete_pending_bufs().
+		 */
+		for (i = 0;
+		     i < aob->sb_count && i < QETH_MAX_BUFFER_ELEMENTS(card);
+		     i++) {
+			void *data = phys_to_virt(aob->sba[i]);
+
+			if (data && buffer->is_header[i])
+				kmem_cache_free(qeth_core_header_cache, data);
+		}
+
+		queue = buffer->q;
+		atomic_set(&buffer->state, QETH_QDIO_BUF_EMPTY);
+		napi_schedule(&queue->napi);
 		break;
 	default:
 		WARN_ON_ONCE(1);
@@ -485,14 +526,6 @@
 	qdio_release_aob(aob);
 }
 
-static inline int qeth_is_cq(struct qeth_card *card, unsigned int queue)
-{
-	return card->options.cq == QETH_CQ_ENABLED &&
-	    card->qdio.c_q != NULL &&
-	    queue != 0 &&
-	    queue == card->qdio.no_in_queues - 1;
-}
-
 static void qeth_setup_ccw(struct ccw1 *ccw, u8 cmd_code, u8 flags, u32 len,
 			   void *data)
 {
@@ -526,11 +559,10 @@
 	} else {
 		QETH_DBF_MESSAGE(2, "error %i on device %x when starting next read ccw!\n",
 				 rc, CARD_DEVID(card));
-		atomic_set(&channel->irq_pending, 0);
+		qeth_unlock_channel(card, channel);
 		qeth_put_cmd(iob);
 		card->read_or_write_problem = 1;
 		qeth_schedule_recovery(card);
-		wake_up(&card->wait_q);
 	}
 	return rc;
 }
@@ -569,11 +601,262 @@
 }
 EXPORT_SYMBOL_GPL(qeth_notify_cmd);
 
+static void qeth_flush_local_addrs4(struct qeth_card *card)
+{
+	struct qeth_local_addr *addr;
+	struct hlist_node *tmp;
+	unsigned int i;
+
+	spin_lock_irq(&card->local_addrs4_lock);
+	hash_for_each_safe(card->local_addrs4, i, tmp, addr, hnode) {
+		hash_del_rcu(&addr->hnode);
+		kfree_rcu(addr, rcu);
+	}
+	spin_unlock_irq(&card->local_addrs4_lock);
+}
+
+static void qeth_flush_local_addrs6(struct qeth_card *card)
+{
+	struct qeth_local_addr *addr;
+	struct hlist_node *tmp;
+	unsigned int i;
+
+	spin_lock_irq(&card->local_addrs6_lock);
+	hash_for_each_safe(card->local_addrs6, i, tmp, addr, hnode) {
+		hash_del_rcu(&addr->hnode);
+		kfree_rcu(addr, rcu);
+	}
+	spin_unlock_irq(&card->local_addrs6_lock);
+}
+
+static void qeth_flush_local_addrs(struct qeth_card *card)
+{
+	qeth_flush_local_addrs4(card);
+	qeth_flush_local_addrs6(card);
+}
+
+static void qeth_add_local_addrs4(struct qeth_card *card,
+				  struct qeth_ipacmd_local_addrs4 *cmd)
+{
+	unsigned int i;
+
+	if (cmd->addr_length !=
+	    sizeof_field(struct qeth_ipacmd_local_addr4, addr)) {
+		dev_err_ratelimited(&card->gdev->dev,
+				    "Dropped IPv4 ADD LOCAL ADDR event with bad length %u\n",
+				    cmd->addr_length);
+		return;
+	}
+
+	spin_lock(&card->local_addrs4_lock);
+	for (i = 0; i < cmd->count; i++) {
+		unsigned int key = ipv4_addr_hash(cmd->addrs[i].addr);
+		struct qeth_local_addr *addr;
+		bool duplicate = false;
+
+		hash_for_each_possible(card->local_addrs4, addr, hnode, key) {
+			if (addr->addr.s6_addr32[3] == cmd->addrs[i].addr) {
+				duplicate = true;
+				break;
+			}
+		}
+
+		if (duplicate)
+			continue;
+
+		addr = kmalloc(sizeof(*addr), GFP_ATOMIC);
+		if (!addr) {
+			dev_err(&card->gdev->dev,
+				"Failed to allocate local addr object. Traffic to %pI4 might suffer.\n",
+				&cmd->addrs[i].addr);
+			continue;
+		}
+
+		ipv6_addr_set(&addr->addr, 0, 0, 0, cmd->addrs[i].addr);
+		hash_add_rcu(card->local_addrs4, &addr->hnode, key);
+	}
+	spin_unlock(&card->local_addrs4_lock);
+}
+
+static void qeth_add_local_addrs6(struct qeth_card *card,
+				  struct qeth_ipacmd_local_addrs6 *cmd)
+{
+	unsigned int i;
+
+	if (cmd->addr_length !=
+	    sizeof_field(struct qeth_ipacmd_local_addr6, addr)) {
+		dev_err_ratelimited(&card->gdev->dev,
+				    "Dropped IPv6 ADD LOCAL ADDR event with bad length %u\n",
+				    cmd->addr_length);
+		return;
+	}
+
+	spin_lock(&card->local_addrs6_lock);
+	for (i = 0; i < cmd->count; i++) {
+		u32 key = ipv6_addr_hash(&cmd->addrs[i].addr);
+		struct qeth_local_addr *addr;
+		bool duplicate = false;
+
+		hash_for_each_possible(card->local_addrs6, addr, hnode, key) {
+			if (ipv6_addr_equal(&addr->addr, &cmd->addrs[i].addr)) {
+				duplicate = true;
+				break;
+			}
+		}
+
+		if (duplicate)
+			continue;
+
+		addr = kmalloc(sizeof(*addr), GFP_ATOMIC);
+		if (!addr) {
+			dev_err(&card->gdev->dev,
+				"Failed to allocate local addr object. Traffic to %pI6c might suffer.\n",
+				&cmd->addrs[i].addr);
+			continue;
+		}
+
+		addr->addr = cmd->addrs[i].addr;
+		hash_add_rcu(card->local_addrs6, &addr->hnode, key);
+	}
+	spin_unlock(&card->local_addrs6_lock);
+}
+
+static void qeth_del_local_addrs4(struct qeth_card *card,
+				  struct qeth_ipacmd_local_addrs4 *cmd)
+{
+	unsigned int i;
+
+	if (cmd->addr_length !=
+	    sizeof_field(struct qeth_ipacmd_local_addr4, addr)) {
+		dev_err_ratelimited(&card->gdev->dev,
+				    "Dropped IPv4 DEL LOCAL ADDR event with bad length %u\n",
+				    cmd->addr_length);
+		return;
+	}
+
+	spin_lock(&card->local_addrs4_lock);
+	for (i = 0; i < cmd->count; i++) {
+		struct qeth_ipacmd_local_addr4 *addr = &cmd->addrs[i];
+		unsigned int key = ipv4_addr_hash(addr->addr);
+		struct qeth_local_addr *tmp;
+
+		hash_for_each_possible(card->local_addrs4, tmp, hnode, key) {
+			if (tmp->addr.s6_addr32[3] == addr->addr) {
+				hash_del_rcu(&tmp->hnode);
+				kfree_rcu(tmp, rcu);
+				break;
+			}
+		}
+	}
+	spin_unlock(&card->local_addrs4_lock);
+}
+
+static void qeth_del_local_addrs6(struct qeth_card *card,
+				  struct qeth_ipacmd_local_addrs6 *cmd)
+{
+	unsigned int i;
+
+	if (cmd->addr_length !=
+	    sizeof_field(struct qeth_ipacmd_local_addr6, addr)) {
+		dev_err_ratelimited(&card->gdev->dev,
+				    "Dropped IPv6 DEL LOCAL ADDR event with bad length %u\n",
+				    cmd->addr_length);
+		return;
+	}
+
+	spin_lock(&card->local_addrs6_lock);
+	for (i = 0; i < cmd->count; i++) {
+		struct qeth_ipacmd_local_addr6 *addr = &cmd->addrs[i];
+		u32 key = ipv6_addr_hash(&addr->addr);
+		struct qeth_local_addr *tmp;
+
+		hash_for_each_possible(card->local_addrs6, tmp, hnode, key) {
+			if (ipv6_addr_equal(&tmp->addr, &addr->addr)) {
+				hash_del_rcu(&tmp->hnode);
+				kfree_rcu(tmp, rcu);
+				break;
+			}
+		}
+	}
+	spin_unlock(&card->local_addrs6_lock);
+}
+
+static bool qeth_next_hop_is_local_v4(struct qeth_card *card,
+				      struct sk_buff *skb)
+{
+	struct qeth_local_addr *tmp;
+	bool is_local = false;
+	unsigned int key;
+	__be32 next_hop;
+
+	if (hash_empty(card->local_addrs4))
+		return false;
+
+	rcu_read_lock();
+	next_hop = qeth_next_hop_v4_rcu(skb, qeth_dst_check_rcu(skb, 4));
+	key = ipv4_addr_hash(next_hop);
+
+	hash_for_each_possible_rcu(card->local_addrs4, tmp, hnode, key) {
+		if (tmp->addr.s6_addr32[3] == next_hop) {
+			is_local = true;
+			break;
+		}
+	}
+	rcu_read_unlock();
+
+	return is_local;
+}
+
+static bool qeth_next_hop_is_local_v6(struct qeth_card *card,
+				      struct sk_buff *skb)
+{
+	struct qeth_local_addr *tmp;
+	struct in6_addr *next_hop;
+	bool is_local = false;
+	u32 key;
+
+	if (hash_empty(card->local_addrs6))
+		return false;
+
+	rcu_read_lock();
+	next_hop = qeth_next_hop_v6_rcu(skb, qeth_dst_check_rcu(skb, 6));
+	key = ipv6_addr_hash(next_hop);
+
+	hash_for_each_possible_rcu(card->local_addrs6, tmp, hnode, key) {
+		if (ipv6_addr_equal(&tmp->addr, next_hop)) {
+			is_local = true;
+			break;
+		}
+	}
+	rcu_read_unlock();
+
+	return is_local;
+}
+
+static int qeth_debugfs_local_addr_show(struct seq_file *m, void *v)
+{
+	struct qeth_card *card = m->private;
+	struct qeth_local_addr *tmp;
+	unsigned int i;
+
+	rcu_read_lock();
+	hash_for_each_rcu(card->local_addrs4, i, tmp, hnode)
+		seq_printf(m, "%pI4\n", &tmp->addr.s6_addr32[3]);
+	hash_for_each_rcu(card->local_addrs6, i, tmp, hnode)
+		seq_printf(m, "%pI6c\n", &tmp->addr);
+	rcu_read_unlock();
+
+	return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(qeth_debugfs_local_addr);
+
 static void qeth_issue_ipa_msg(struct qeth_ipa_cmd *cmd, int rc,
 		struct qeth_card *card)
 {
 	const char *ipa_name;
 	int com = cmd->hdr.command;
+
 	ipa_name = qeth_get_ipa_cmd_name(com);
 
 	if (rc)
@@ -605,12 +888,12 @@
 		if (cmd->hdr.return_code == IPA_RC_VEPA_TO_VEB_TRANSITION) {
 			dev_err(&card->gdev->dev,
 				"Interface %s is down because the adjacent port is no longer in reflective relay mode\n",
-				QETH_CARD_IFNAME(card));
+				netdev_name(card->dev));
 			schedule_work(&card->close_dev_work);
 		} else {
 			dev_warn(&card->gdev->dev,
 				 "The link for interface %s on CHPID 0x%X failed\n",
-				 QETH_CARD_IFNAME(card), card->info.chpid);
+				 netdev_name(card->dev), card->info.chpid);
 			qeth_issue_ipa_msg(cmd, cmd->hdr.return_code, card);
 			netif_carrier_off(card->dev);
 		}
@@ -618,7 +901,7 @@
 	case IPA_CMD_STARTLAN:
 		dev_info(&card->gdev->dev,
 			 "The link for %s on CHPID 0x%X has been restored\n",
-			 QETH_CARD_IFNAME(card), card->info.chpid);
+			 netdev_name(card->dev), card->info.chpid);
 		if (card->info.hwtrap)
 			card->info.hwtrap = 2;
 		qeth_schedule_recovery(card);
@@ -632,9 +915,19 @@
 	case IPA_CMD_MODCCID:
 		return cmd;
 	case IPA_CMD_REGISTER_LOCAL_ADDR:
+		if (cmd->hdr.prot_version == QETH_PROT_IPV4)
+			qeth_add_local_addrs4(card, &cmd->data.local_addrs4);
+		else if (cmd->hdr.prot_version == QETH_PROT_IPV6)
+			qeth_add_local_addrs6(card, &cmd->data.local_addrs6);
+
 		QETH_CARD_TEXT(card, 3, "irla");
 		return NULL;
 	case IPA_CMD_UNREGISTER_LOCAL_ADDR:
+		if (cmd->hdr.prot_version == QETH_PROT_IPV4)
+			qeth_del_local_addrs4(card, &cmd->data.local_addrs4);
+		else if (cmd->hdr.prot_version == QETH_PROT_IPV6)
+			qeth_del_local_addrs6(card, &cmd->data.local_addrs6);
+
 		QETH_CARD_TEXT(card, 3, "urla");
 		return NULL;
 	default:
@@ -643,7 +936,7 @@
 	}
 }
 
-void qeth_clear_ipacmd_list(struct qeth_card *card)
+static void qeth_clear_ipacmd_list(struct qeth_card *card)
 {
 	struct qeth_cmd_buffer *iob;
 	unsigned long flags;
@@ -652,10 +945,9 @@
 
 	spin_lock_irqsave(&card->lock, flags);
 	list_for_each_entry(iob, &card->cmd_waiter_list, list)
-		qeth_notify_cmd(iob, -EIO);
+		qeth_notify_cmd(iob, -ECANCELED);
 	spin_unlock_irqrestore(&card->lock, flags);
 }
-EXPORT_SYMBOL_GPL(qeth_clear_ipacmd_list);
 
 static int qeth_check_idx_response(struct qeth_card *card,
 	unsigned char *buffer)
@@ -749,14 +1041,14 @@
 		break;
 	case -EIO:
 		qeth_schedule_recovery(card);
-		/* fall through */
+		fallthrough;
 	default:
 		qeth_clear_ipacmd_list(card);
-		goto out;
+		goto err_idx;
 	}
 
-	if (IS_IPA(iob->data)) {
-		cmd = (struct qeth_ipa_cmd *) PDU_ENCAPSULATION(iob->data);
+	cmd = __ipa_reply(iob);
+	if (cmd) {
 		cmd = qeth_check_ipa_data(card, cmd);
 		if (!cmd)
 			goto out;
@@ -765,17 +1057,12 @@
 			card->osn_info.assist_cb(card->dev, cmd);
 			goto out;
 		}
-	} else {
-		/* non-IPA commands should only flow during initialization */
-		if (card->state != CARD_STATE_DOWN)
-			goto out;
 	}
 
 	/* match against pending cmd requests */
 	spin_lock_irqsave(&card->lock, flags);
 	list_for_each_entry(tmp, &card->cmd_waiter_list, list) {
-		if (!IS_IPA(tmp->data) ||
-		    __ipa_cmd(tmp)->hdr.seqno == cmd->hdr.seqno) {
+		if (tmp->match && tmp->match(tmp, iob)) {
 			request = tmp;
 			/* take the object outside the lock */
 			qeth_get_cmd(request);
@@ -810,27 +1097,31 @@
 	memcpy(&card->seqno.pdu_hdr_ack,
 		QETH_PDU_HEADER_SEQ_NO(iob->data),
 		QETH_SEQ_NO_LENGTH);
-	qeth_put_cmd(iob);
 	__qeth_issue_next_read(card);
+err_idx:
+	qeth_put_cmd(iob);
 }
 
 static int qeth_set_thread_start_bit(struct qeth_card *card,
 		unsigned long thread)
 {
 	unsigned long flags;
+	int rc = 0;
 
 	spin_lock_irqsave(&card->thread_mask_lock, flags);
-	if (!(card->thread_allowed_mask & thread) ||
-	      (card->thread_start_mask & thread)) {
-		spin_unlock_irqrestore(&card->thread_mask_lock, flags);
-		return -EPERM;
-	}
-	card->thread_start_mask |= thread;
+	if (!(card->thread_allowed_mask & thread))
+		rc = -EPERM;
+	else if (card->thread_start_mask & thread)
+		rc = -EBUSY;
+	else
+		card->thread_start_mask |= thread;
 	spin_unlock_irqrestore(&card->thread_mask_lock, flags);
-	return 0;
+
+	return rc;
 }
 
-void qeth_clear_thread_start_bit(struct qeth_card *card, unsigned long thread)
+static void qeth_clear_thread_start_bit(struct qeth_card *card,
+					unsigned long thread)
 {
 	unsigned long flags;
 
@@ -839,9 +1130,9 @@
 	spin_unlock_irqrestore(&card->thread_mask_lock, flags);
 	wake_up(&card->wait_q);
 }
-EXPORT_SYMBOL_GPL(qeth_clear_thread_start_bit);
 
-void qeth_clear_thread_running_bit(struct qeth_card *card, unsigned long thread)
+static void qeth_clear_thread_running_bit(struct qeth_card *card,
+					  unsigned long thread)
 {
 	unsigned long flags;
 
@@ -850,7 +1141,6 @@
 	spin_unlock_irqrestore(&card->thread_mask_lock, flags);
 	wake_up_all(&card->wait_q);
 }
-EXPORT_SYMBOL_GPL(qeth_clear_thread_running_bit);
 
 static int __qeth_do_run_thread(struct qeth_card *card, unsigned long thread)
 {
@@ -871,7 +1161,7 @@
 	return rc;
 }
 
-int qeth_do_run_thread(struct qeth_card *card, unsigned long thread)
+static int qeth_do_run_thread(struct qeth_card *card, unsigned long thread)
 {
 	int rc = 0;
 
@@ -879,15 +1169,19 @@
 		   (rc = __qeth_do_run_thread(card, thread)) >= 0);
 	return rc;
 }
-EXPORT_SYMBOL_GPL(qeth_do_run_thread);
 
-void qeth_schedule_recovery(struct qeth_card *card)
+int qeth_schedule_recovery(struct qeth_card *card)
 {
+	int rc;
+
 	QETH_CARD_TEXT(card, 2, "startrec");
-	if (qeth_set_thread_start_bit(card, QETH_RECOVER_THREAD) == 0)
+
+	rc = qeth_set_thread_start_bit(card, QETH_RECOVER_THREAD);
+	if (!rc)
 		schedule_work(&card->kernel_thread_starter);
+
+	return rc;
 }
-EXPORT_SYMBOL_GPL(qeth_schedule_recovery);
 
 static int qeth_get_problem(struct qeth_card *card, struct ccw_device *cdev,
 			    struct irb *irb)
@@ -932,7 +1226,7 @@
 			return 0;
 		}
 		QETH_CARD_TEXT(card, 2, "DGENCHK");
-			return -EIO;
+		return -EIO;
 	}
 	return 0;
 }
@@ -978,8 +1272,6 @@
 	/* while we hold the ccwdev lock, this stays valid: */
 	gdev = dev_get_drvdata(&cdev->dev);
 	card = dev_get_drvdata(&gdev->dev);
-	if (!card)
-		return;
 
 	QETH_CARD_TEXT(card, 5, "irq");
 
@@ -1009,24 +1301,25 @@
 	}
 
 	channel->active_cmd = NULL;
+	qeth_unlock_channel(card, channel);
 
 	rc = qeth_check_irb_error(card, cdev, irb);
 	if (rc) {
 		/* IO was terminated, free its resources. */
 		if (iob)
 			qeth_cancel_cmd(iob, rc);
-		atomic_set(&channel->irq_pending, 0);
-		wake_up(&card->wait_q);
 		return;
 	}
 
-	atomic_set(&channel->irq_pending, 0);
-
-	if (irb->scsw.cmd.fctl & (SCSW_FCTL_CLEAR_FUNC))
+	if (irb->scsw.cmd.fctl & SCSW_FCTL_CLEAR_FUNC) {
 		channel->state = CH_STATE_STOPPED;
+		wake_up(&card->wait_q);
+	}
 
-	if (irb->scsw.cmd.fctl & (SCSW_FCTL_HALT_FUNC))
+	if (irb->scsw.cmd.fctl & SCSW_FCTL_HALT_FUNC) {
 		channel->state = CH_STATE_HALTED;
+		wake_up(&card->wait_q);
+	}
 
 	if (iob && (irb->scsw.cmd.fctl & (SCSW_FCTL_CLEAR_FUNC |
 					  SCSW_FCTL_HALT_FUNC))) {
@@ -1060,7 +1353,7 @@
 				qeth_cancel_cmd(iob, rc);
 			qeth_clear_ipacmd_list(card);
 			qeth_schedule_recovery(card);
-			goto out;
+			return;
 		}
 	}
 
@@ -1068,16 +1361,12 @@
 		/* sanity check: */
 		if (irb->scsw.cmd.count > iob->length) {
 			qeth_cancel_cmd(iob, -EIO);
-			goto out;
+			return;
 		}
 		if (iob->callback)
 			iob->callback(card, iob,
 				      iob->length - irb->scsw.cmd.count);
 	}
-
-out:
-	wake_up(&card->wait_q);
-	return;
 }
 
 static void qeth_notify_skbs(struct qeth_qdio_out_q *q,
@@ -1100,26 +1389,26 @@
 	struct qeth_qdio_out_q *queue = buf->q;
 	struct sk_buff *skb;
 
-	if (atomic_read(&buf->state) == QETH_QDIO_BUF_PENDING)
-		qeth_notify_skbs(queue, buf, TX_NOTIFY_GENERALERROR);
-
 	/* Empty buffer? */
 	if (buf->next_element_to_fill == 0)
 		return;
 
 	QETH_TXQ_STAT_INC(queue, bufs);
 	QETH_TXQ_STAT_ADD(queue, buf_elements, buf->next_element_to_fill);
+	if (error) {
+		QETH_TXQ_STAT_ADD(queue, tx_errors, buf->frames);
+	} else {
+		QETH_TXQ_STAT_ADD(queue, tx_packets, buf->frames);
+		QETH_TXQ_STAT_ADD(queue, tx_bytes, buf->bytes);
+	}
+
 	while ((skb = __skb_dequeue(&buf->skb_list)) != NULL) {
 		unsigned int bytes = qdisc_pkt_len(skb);
 		bool is_tso = skb_is_gso(skb);
 		unsigned int packets;
 
 		packets = is_tso ? skb_shinfo(skb)->gso_segs : 1;
-		if (error) {
-			QETH_TXQ_STAT_ADD(queue, tx_errors, packets);
-		} else {
-			QETH_TXQ_STAT_ADD(queue, tx_packets, packets);
-			QETH_TXQ_STAT_ADD(queue, tx_bytes, bytes);
+		if (!error) {
 			if (skb->ip_summed == CHECKSUM_PARTIAL)
 				QETH_TXQ_STAT_ADD(queue, skbs_csum, packets);
 			if (skb_is_nonlinear(skb))
@@ -1147,26 +1436,52 @@
 	qeth_tx_complete_buf(buf, error, budget);
 
 	for (i = 0; i < queue->max_elements; ++i) {
-		if (buf->buffer->element[i].addr && buf->is_header[i])
-			kmem_cache_free(qeth_core_header_cache,
-				buf->buffer->element[i].addr);
+		void *data = phys_to_virt(buf->buffer->element[i].addr);
+
+		if (data && buf->is_header[i])
+			kmem_cache_free(qeth_core_header_cache, data);
 		buf->is_header[i] = 0;
 	}
 
 	qeth_scrub_qdio_buffer(buf->buffer, queue->max_elements);
 	buf->next_element_to_fill = 0;
+	buf->frames = 0;
 	buf->bytes = 0;
 	atomic_set(&buf->state, QETH_QDIO_BUF_EMPTY);
 }
 
+static void qeth_tx_complete_pending_bufs(struct qeth_card *card,
+					  struct qeth_qdio_out_q *queue,
+					  bool drain)
+{
+	struct qeth_qdio_out_buffer *buf, *tmp;
+
+	list_for_each_entry_safe(buf, tmp, &queue->pending_bufs, list_entry) {
+		if (drain || atomic_read(&buf->state) == QETH_QDIO_BUF_EMPTY) {
+			QETH_CARD_TEXT(card, 5, "fp");
+			QETH_CARD_TEXT_(card, 5, "%lx", (long) buf);
+
+			if (drain)
+				qeth_notify_skbs(queue, buf,
+						 TX_NOTIFY_GENERALERROR);
+			qeth_tx_complete_buf(buf, drain, 0);
+
+			list_del(&buf->list_entry);
+			kmem_cache_free(qeth_qdio_outbuf_cache, buf);
+		}
+	}
+}
+
 static void qeth_drain_output_queue(struct qeth_qdio_out_q *q, bool free)
 {
 	int j;
 
+	qeth_tx_complete_pending_bufs(q->card, q, true);
+
 	for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j) {
 		if (!q->bufs[j])
 			continue;
-		qeth_cleanup_handled_pending(q, j, 1);
+
 		qeth_clear_output_buffer(q, q->bufs[j], true, 0);
 		if (free) {
 			kmem_cache_free(qeth_qdio_outbuf_cache, q->bufs[j]);
@@ -1175,7 +1490,7 @@
 	}
 }
 
-void qeth_drain_output_queues(struct qeth_card *card)
+static void qeth_drain_output_queues(struct qeth_card *card)
 {
 	int i;
 
@@ -1186,76 +1501,27 @@
 			qeth_drain_output_queue(card->qdio.out_qs[i], false);
 	}
 }
-EXPORT_SYMBOL_GPL(qeth_drain_output_queues);
 
-static void qeth_free_buffer_pool(struct qeth_card *card)
+static void qeth_osa_set_output_queues(struct qeth_card *card, bool single)
 {
-	struct qeth_buffer_pool_entry *pool_entry, *tmp;
-	int i = 0;
-	list_for_each_entry_safe(pool_entry, tmp,
-				 &card->qdio.init_pool.entry_list, init_list){
-		for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i)
-			free_page((unsigned long)pool_entry->elements[i]);
-		list_del(&pool_entry->init_list);
-		kfree(pool_entry);
-	}
-}
+	unsigned int max = single ? 1 : card->dev->num_tx_queues;
 
-static void qeth_clean_channel(struct qeth_channel *channel)
-{
-	struct ccw_device *cdev = channel->ccwdev;
-
-	QETH_DBF_TEXT(SETUP, 2, "freech");
-
-	spin_lock_irq(get_ccwdev_lock(cdev));
-	cdev->handler = NULL;
-	spin_unlock_irq(get_ccwdev_lock(cdev));
-}
-
-static void qeth_setup_channel(struct qeth_channel *channel)
-{
-	struct ccw_device *cdev = channel->ccwdev;
-
-	QETH_DBF_TEXT(SETUP, 2, "setupch");
-
-	channel->state = CH_STATE_DOWN;
-	atomic_set(&channel->irq_pending, 0);
-
-	spin_lock_irq(get_ccwdev_lock(cdev));
-	cdev->handler = qeth_irq;
-	spin_unlock_irq(get_ccwdev_lock(cdev));
-}
-
-static int qeth_osa_set_output_queues(struct qeth_card *card, bool single)
-{
-	unsigned int count = single ? 1 : card->dev->num_tx_queues;
-	int rc;
-
-	rtnl_lock();
-	rc = netif_set_real_num_tx_queues(card->dev, count);
-	rtnl_unlock();
-
-	if (rc)
-		return rc;
-
-	if (card->qdio.no_out_queues == count)
-		return 0;
+	if (card->qdio.no_out_queues == max)
+		return;
 
 	if (atomic_read(&card->qdio.state) != QETH_QDIO_UNINITIALIZED)
 		qeth_free_qdio_queues(card);
 
-	if (count == 1)
+	if (max == 1 && card->qdio.do_prio_queueing != QETH_PRIOQ_DEFAULT)
 		dev_info(&card->gdev->dev, "Priority Queueing not supported\n");
 
-	card->qdio.no_out_queues = count;
-	return 0;
+	card->qdio.no_out_queues = max;
 }
 
 static int qeth_update_from_chp_desc(struct qeth_card *card)
 {
 	struct ccw_device *ccwdev;
 	struct channel_path_desc_fmt0 *chp_dsc;
-	int rc = 0;
 
 	QETH_CARD_TEXT(card, 2, "chp_desc");
 
@@ -1268,12 +1534,12 @@
 
 	if (IS_OSD(card) || IS_OSX(card))
 		/* CHPP field bit 6 == 1 -> single queue */
-		rc = qeth_osa_set_output_queues(card, chp_dsc->chpp & 0x02);
+		qeth_osa_set_output_queues(card, chp_dsc->chpp & 0x02);
 
 	kfree(chp_dsc);
 	QETH_CARD_TEXT_(card, 2, "nr:%x", card->qdio.no_out_queues);
 	QETH_CARD_TEXT_(card, 2, "lvl:%02x", card->info.func_level);
-	return rc;
+	return 0;
 }
 
 static void qeth_init_qdio_info(struct qeth_card *card)
@@ -1299,7 +1565,6 @@
 {
 	card->options.route4.type = NO_ROUTER;
 	card->options.route6.type = NO_ROUTER;
-	card->options.rx_sg_cb = QETH_RX_SG_CB;
 	card->options.isolation = ISOLATION_MODE_NONE;
 	card->options.cq = QETH_CQ_DISABLED;
 	card->options.layer = QETH_DISCIPLINE_UNDETERMINED;
@@ -1320,19 +1585,19 @@
 	return rc;
 }
 
+static int qeth_do_reset(void *data);
 static void qeth_start_kernel_thread(struct work_struct *work)
 {
 	struct task_struct *ts;
 	struct qeth_card *card = container_of(work, struct qeth_card,
 					kernel_thread_starter);
-	QETH_CARD_TEXT(card , 2, "strthrd");
+	QETH_CARD_TEXT(card, 2, "strthrd");
 
 	if (card->read.state != CH_STATE_UP &&
 	    card->write.state != CH_STATE_UP)
 		return;
 	if (qeth_do_start_thread(card, QETH_RECOVER_THREAD)) {
-		ts = kthread_run(card->discipline->recover, (void *)card,
-				"qeth_recover");
+		ts = kthread_run(qeth_do_reset, card, "qeth_recover");
 		if (IS_ERR(ts)) {
 			qeth_clear_thread_start_bit(card, QETH_RECOVER_THREAD);
 			qeth_clear_thread_running_bit(card,
@@ -1361,6 +1626,11 @@
 	qeth_init_qdio_info(card);
 	INIT_DELAYED_WORK(&card->buffer_reclaim_work, qeth_buffer_reclaim_work);
 	INIT_WORK(&card->close_dev_work, qeth_close_dev_handler);
+	hash_init(card->rx_mode_addrs);
+	hash_init(card->local_addrs4);
+	hash_init(card->local_addrs6);
+	spin_lock_init(&card->local_addrs4_lock);
+	spin_lock_init(&card->local_addrs6_lock);
 }
 
 static void qeth_core_sl_print(struct seq_file *m, struct service_level *slr)
@@ -1397,9 +1667,11 @@
 	if (!card->read_cmd)
 		goto out_read_cmd;
 
-	qeth_setup_channel(&card->read);
-	qeth_setup_channel(&card->write);
-	qeth_setup_channel(&card->data);
+	card->debugfs = debugfs_create_dir(dev_name(&gdev->dev),
+					   qeth_debugfs_root);
+	debugfs_create_file("local_addrs", 0400, card->debugfs, card,
+			    &qeth_debugfs_local_addr_fops);
+
 	card->qeth_service_level.seq_print = qeth_core_sl_print;
 	register_service_level(&card->qeth_service_level);
 	return card;
@@ -1456,7 +1728,7 @@
 	return 0;
 }
 
-int qeth_stop_channel(struct qeth_channel *channel)
+static int qeth_stop_channel(struct qeth_channel *channel)
 {
 	struct ccw_device *cdev = channel->ccwdev;
 	int rc;
@@ -1469,11 +1741,36 @@
 			channel->active_cmd);
 		channel->active_cmd = NULL;
 	}
+	cdev->handler = NULL;
 	spin_unlock_irq(get_ccwdev_lock(cdev));
 
 	return rc;
 }
-EXPORT_SYMBOL_GPL(qeth_stop_channel);
+
+static int qeth_start_channel(struct qeth_channel *channel)
+{
+	struct ccw_device *cdev = channel->ccwdev;
+	int rc;
+
+	channel->state = CH_STATE_DOWN;
+	atomic_set(&channel->irq_pending, 0);
+
+	spin_lock_irq(get_ccwdev_lock(cdev));
+	cdev->handler = qeth_irq;
+	spin_unlock_irq(get_ccwdev_lock(cdev));
+
+	rc = ccw_device_set_online(cdev);
+	if (rc)
+		goto err;
+
+	return 0;
+
+err:
+	spin_lock_irq(get_ccwdev_lock(cdev));
+	cdev->handler = NULL;
+	spin_unlock_irq(get_ccwdev_lock(cdev));
+	return rc;
+}
 
 static int qeth_halt_channels(struct qeth_card *card)
 {
@@ -1518,7 +1815,7 @@
 	return qeth_clear_channels(card);
 }
 
-int qeth_qdio_clear_card(struct qeth_card *card, int use_halt)
+static int qeth_qdio_clear_card(struct qeth_card *card, int use_halt)
 {
 	int rc = 0;
 
@@ -1544,10 +1841,8 @@
 	rc = qeth_clear_halt_card(card, use_halt);
 	if (rc)
 		QETH_CARD_TEXT_(card, 3, "2err%d", rc);
-	card->state = CARD_STATE_DOWN;
 	return rc;
 }
-EXPORT_SYMBOL_GPL(qeth_qdio_clear_card);
 
 static enum qeth_discipline_id qeth_vm_detect_layer(struct qeth_card *card)
 {
@@ -1645,17 +1940,16 @@
 	}
 }
 
-static void qeth_init_tokens(struct qeth_card *card)
+static void qeth_idx_init(struct qeth_card *card)
 {
+	memset(&card->seqno, 0, sizeof(card->seqno));
+
 	card->token.issuer_rm_w = 0x00010103UL;
 	card->token.cm_filter_w = 0x00010108UL;
 	card->token.cm_connection_w = 0x0001010aUL;
 	card->token.ulp_filter_w = 0x0001010bUL;
 	card->token.ulp_connection_w = 0x0001010dUL;
-}
 
-static void qeth_init_func_level(struct qeth_card *card)
-{
 	switch (card->info.type) {
 	case QETH_CARD_TYPE_IQD:
 		card->info.func_level =	QETH_IDX_FUNC_LEVEL_IQD;
@@ -1701,8 +1995,15 @@
 	iob->callback = qeth_release_buffer_cb;
 }
 
+static bool qeth_mpc_match_reply(struct qeth_cmd_buffer *iob,
+				 struct qeth_cmd_buffer *reply)
+{
+	/* MPC cmds are issued strictly in sequence. */
+	return !IS_IPA(reply->data);
+}
+
 static struct qeth_cmd_buffer *qeth_mpc_alloc_cmd(struct qeth_card *card,
-						  void *data,
+						  const void *data,
 						  unsigned int data_length)
 {
 	struct qeth_cmd_buffer *iob;
@@ -1715,6 +2016,7 @@
 	qeth_setup_ccw(__ccw_from_cmd(iob), CCW_CMD_WRITE, 0, data_length,
 		       iob->data);
 	iob->finalize = qeth_mpc_finalize_cmd;
+	iob->match = qeth_mpc_match_reply;
 	return iob;
 }
 
@@ -1787,8 +2089,7 @@
 		QETH_CARD_TEXT_(card, 2, " err%d", rc);
 		qeth_dequeue_cmd(card, iob);
 		qeth_put_cmd(iob);
-		atomic_set(&channel->irq_pending, 0);
-		wake_up(&card->wait_q);
+		qeth_unlock_channel(card, channel);
 		goto out;
 	}
 
@@ -1982,20 +2283,19 @@
 	u16 addr = (card->info.cula << 8) + card->info.unit_addr2;
 	u8 port = ((u8)card->dev->dev_port) | 0x80;
 	struct ccw1 *ccw = __ccw_from_cmd(iob);
-	struct ccw_dev_id dev_id;
 
 	qeth_setup_ccw(&ccw[0], CCW_CMD_WRITE, CCW_FLAG_CC, IDX_ACTIVATE_SIZE,
 		       iob->data);
 	qeth_setup_ccw(&ccw[1], CCW_CMD_READ, 0, iob->length, iob->data);
-	ccw_device_get_id(CARD_DDEV(card), &dev_id);
 	iob->finalize = qeth_idx_finalize_cmd;
 
+	port |= QETH_IDX_ACT_INVAL_FRAME;
 	memcpy(QETH_IDX_ACT_PNO(iob->data), &port, 1);
 	memcpy(QETH_IDX_ACT_ISSUER_RM_TOKEN(iob->data),
 	       &card->token.issuer_rm_w, QETH_MPC_TOKEN_LENGTH);
 	memcpy(QETH_IDX_ACT_FUNC_LEVEL(iob->data),
 	       &card->info.func_level, 2);
-	memcpy(QETH_IDX_ACT_QDIO_DEV_CUA(iob->data), &dev_id.devno, 2);
+	memcpy(QETH_IDX_ACT_QDIO_DEV_CUA(iob->data), &card->info.ddev_devno, 2);
 	memcpy(QETH_IDX_ACT_QDIO_DEV_REALADDR(iob->data), &addr, 2);
 }
 
@@ -2112,6 +2412,17 @@
 	return qeth_send_control_data(card, iob, qeth_cm_setup_cb, NULL);
 }
 
+static bool qeth_is_supported_link_type(struct qeth_card *card, u8 link_type)
+{
+	if (link_type == QETH_LINK_TYPE_LANE_TR ||
+	    link_type == QETH_LINK_TYPE_HSTR) {
+		dev_err(&card->gdev->dev, "Unsupported Token Ring device\n");
+		return false;
+	}
+
+	return true;
+}
+
 static int qeth_update_max_mtu(struct qeth_card *card, unsigned int max_mtu)
 {
 	struct net_device *dev = card->dev;
@@ -2171,8 +2482,8 @@
 {
 	__u16 mtu, framesize;
 	__u16 len;
-	__u8 link_type;
 	struct qeth_cmd_buffer *iob;
+	u8 link_type = 0;
 
 	QETH_CARD_TEXT(card, 2, "ulpenacb");
 
@@ -2192,9 +2503,11 @@
 	if (len >= QETH_MPC_DIFINFO_LEN_INDICATES_LINK_TYPE) {
 		memcpy(&link_type,
 		       QETH_ULP_ENABLE_RESP_LINK_TYPE(iob->data), 1);
-		card->info.link_type = link_type;
-	} else
-		card->info.link_type = 0;
+		if (!qeth_is_supported_link_type(card, link_type))
+			return -EPROTONOSUPPORT;
+	}
+
+	card->info.link_type = link_type;
 	QETH_CARD_TEXT_(card, 2, "link%d", card->info.link_type);
 	return 0;
 }
@@ -2256,7 +2569,6 @@
 {
 	__u16 temp;
 	struct qeth_cmd_buffer *iob;
-	struct ccw_dev_id dev_id;
 
 	QETH_CARD_TEXT(card, 2, "ulpsetup");
 
@@ -2271,8 +2583,7 @@
 	memcpy(QETH_ULP_SETUP_FILTER_TOKEN(iob->data),
 	       &card->token.ulp_filter_r, QETH_MPC_TOKEN_LENGTH);
 
-	ccw_device_get_id(CARD_DDEV(card), &dev_id);
-	memcpy(QETH_ULP_SETUP_CUA(iob->data), &dev_id.devno, 2);
+	memcpy(QETH_ULP_SETUP_CUA(iob->data), &card->info.ddev_devno, 2);
 	temp = (card->info.cula << 8) + card->info.unit_addr2;
 	memcpy(QETH_ULP_SETUP_REAL_DEVADDR(iob->data), &temp, 2);
 	return qeth_send_control_data(card, iob, qeth_ulp_setup_cb, NULL);
@@ -2290,7 +2601,6 @@
 	skb_queue_head_init(&newbuf->skb_list);
 	lockdep_set_class(&newbuf->skb_list.lock, &qdio_out_skb_queue_key);
 	newbuf->q = q;
-	newbuf->next_pending = q->bufs[bidx];
 	atomic_set(&newbuf->state, QETH_QDIO_BUF_EMPTY);
 	q->bufs[bidx] = newbuf;
 	return 0;
@@ -2309,15 +2619,28 @@
 static struct qeth_qdio_out_q *qeth_alloc_output_queue(void)
 {
 	struct qeth_qdio_out_q *q = kzalloc(sizeof(*q), GFP_KERNEL);
+	unsigned int i;
 
 	if (!q)
 		return NULL;
 
-	if (qdio_alloc_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q)) {
-		kfree(q);
-		return NULL;
+	if (qdio_alloc_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q))
+		goto err_qdio_bufs;
+
+	for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; i++) {
+		if (qeth_init_qdio_out_buf(q, i))
+			goto err_out_bufs;
 	}
+
 	return q;
+
+err_out_bufs:
+	while (i > 0)
+		kmem_cache_free(qeth_qdio_outbuf_cache, q->bufs[--i]);
+	qdio_free_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q);
+err_qdio_bufs:
+	kfree(q);
+	return NULL;
 }
 
 static void qeth_tx_completion_timer(struct timer_list *timer)
@@ -2330,7 +2653,7 @@
 
 static int qeth_alloc_qdio_queues(struct qeth_card *card)
 {
-	int i, j;
+	unsigned int i;
 
 	QETH_CARD_TEXT(card, 2, "allcqdbf");
 
@@ -2359,14 +2682,12 @@
 		card->qdio.out_qs[i] = queue;
 		queue->card = card;
 		queue->queue_no = i;
+		INIT_LIST_HEAD(&queue->pending_bufs);
+		spin_lock_init(&queue->lock);
 		timer_setup(&queue->timer, qeth_tx_completion_timer, 0);
-
-		/* give outbound qeth_qdio_buffers their qdio_buffers */
-		for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j) {
-			WARN_ON(queue->bufs[j]);
-			if (qeth_init_qdio_out_buf(queue, j))
-				goto out_freeoutqbufs;
-		}
+		queue->coalesce_usecs = QETH_TX_COALESCE_USECS;
+		queue->max_coalesced_frames = QETH_TX_MAX_COALESCED_FRAMES;
+		queue->priority = QETH_QIB_PQUE_PRIO_DEFAULT;
 	}
 
 	/* completion */
@@ -2375,13 +2696,6 @@
 
 	return 0;
 
-out_freeoutqbufs:
-	while (j > 0) {
-		--j;
-		kmem_cache_free(qeth_qdio_outbuf_cache,
-				card->qdio.out_qs[i]->bufs[j]);
-		card->qdio.out_qs[i]->bufs[j] = NULL;
-	}
 out_freeoutq:
 	while (i > 0) {
 		qeth_free_output_queue(card->qdio.out_qs[--i]);
@@ -2405,7 +2719,6 @@
 		return;
 
 	qeth_free_cq(card);
-	cancel_delayed_work_sync(&card->buffer_reclaim_work);
 	for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j) {
 		if (card->qdio.in_q->bufs[j].rx_skb)
 			dev_kfree_skb_any(card->qdio.in_q->bufs[j].rx_skb);
@@ -2421,30 +2734,44 @@
 	}
 }
 
-static void qeth_create_qib_param_field(struct qeth_card *card,
-		char *param_field)
+static void qeth_fill_qib_parms(struct qeth_card *card,
+				struct qeth_qib_parms *parms)
 {
+	struct qeth_qdio_out_q *queue;
+	unsigned int i;
 
-	param_field[0] = _ascebc['P'];
-	param_field[1] = _ascebc['C'];
-	param_field[2] = _ascebc['I'];
-	param_field[3] = _ascebc['T'];
-	*((unsigned int *) (&param_field[4])) = QETH_PCI_THRESHOLD_A(card);
-	*((unsigned int *) (&param_field[8])) = QETH_PCI_THRESHOLD_B(card);
-	*((unsigned int *) (&param_field[12])) = QETH_PCI_TIMER_VALUE(card);
-}
+	parms->pcit_magic[0] = 'P';
+	parms->pcit_magic[1] = 'C';
+	parms->pcit_magic[2] = 'I';
+	parms->pcit_magic[3] = 'T';
+	ASCEBC(parms->pcit_magic, sizeof(parms->pcit_magic));
+	parms->pcit_a = QETH_PCI_THRESHOLD_A(card);
+	parms->pcit_b = QETH_PCI_THRESHOLD_B(card);
+	parms->pcit_c = QETH_PCI_TIMER_VALUE(card);
 
-static void qeth_create_qib_param_field_blkt(struct qeth_card *card,
-		char *param_field)
-{
-	param_field[16] = _ascebc['B'];
-	param_field[17] = _ascebc['L'];
-	param_field[18] = _ascebc['K'];
-	param_field[19] = _ascebc['T'];
-	*((unsigned int *) (&param_field[20])) = card->info.blkt.time_total;
-	*((unsigned int *) (&param_field[24])) = card->info.blkt.inter_packet;
-	*((unsigned int *) (&param_field[28])) =
-		card->info.blkt.inter_packet_jumbo;
+	parms->blkt_magic[0] = 'B';
+	parms->blkt_magic[1] = 'L';
+	parms->blkt_magic[2] = 'K';
+	parms->blkt_magic[3] = 'T';
+	ASCEBC(parms->blkt_magic, sizeof(parms->blkt_magic));
+	parms->blkt_total = card->info.blkt.time_total;
+	parms->blkt_inter_packet = card->info.blkt.inter_packet;
+	parms->blkt_inter_packet_jumbo = card->info.blkt.inter_packet_jumbo;
+
+	/* Prio-queueing implicitly uses the default priorities: */
+	if (qeth_uses_tx_prio_queueing(card) || card->qdio.no_out_queues == 1)
+		return;
+
+	parms->pque_magic[0] = 'P';
+	parms->pque_magic[1] = 'Q';
+	parms->pque_magic[2] = 'U';
+	parms->pque_magic[3] = 'E';
+	ASCEBC(parms->pque_magic, sizeof(parms->pque_magic));
+	parms->pque_order = QETH_QIB_PQUE_ORDER_RR;
+	parms->pque_units = QETH_QIB_PQUE_UNITS_SBAL;
+
+	qeth_for_each_output_queue(card, queue, i)
+		parms->pque_priority[i] = queue->priority;
 }
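
/*
 * Editor's sketch (not part of this patch): the hunk above replaces raw
 * byte-offset pokes into the QIB parameter field with a typed struct.
 * Below is a minimal, standalone illustration of the same idea; the
 * demo_qib_parms struct and its field names are hypothetical stand-ins
 * for qeth_qib_parms, and the values are made up.
 */
#include <stdio.h>
#include <string.h>

struct demo_qib_parms {
	char pcit_magic[4];		/* "PCIT"; ASCEBC() converts on s390 */
	unsigned int pcit_a;
	unsigned int pcit_b;
	unsigned int pcit_c;
} __attribute__((packed));

int main(void)
{
	struct demo_qib_parms parms;

	/* typed fields replace *((unsigned int *)(&param_field[4])) etc. */
	memcpy(parms.pcit_magic, "PCIT", sizeof(parms.pcit_magic));
	parms.pcit_a = 1;
	parms.pcit_b = 2;
	parms.pcit_c = 3;

	printf("%.4s: %u %u %u\n", parms.pcit_magic,
	       parms.pcit_a, parms.pcit_b, parms.pcit_c);
	return 0;
}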
 
 static int qeth_qdio_activate(struct qeth_card *card)
@@ -2526,7 +2853,7 @@
 	return 0;
 }
 
-void qeth_print_status_message(struct qeth_card *card)
+static void qeth_print_status_message(struct qeth_card *card)
 {
 	switch (card->info.type) {
 	case QETH_CARD_TYPE_OSD:
@@ -2542,7 +2869,7 @@
 				card->info.mcl_level[3]);
 			break;
 		}
-		/* fallthrough */
+		fallthrough;
 	case QETH_CARD_TYPE_IQD:
 		if (IS_VM_NIC(card) || (card->info.mcl_level[0] & 0x80)) {
 			card->info.mcl_level[0] = (char) _ebcasc[(__u8)
@@ -2567,7 +2894,6 @@
 		 (card->info.mcl_level[0]) ? ")" : "",
 		 qeth_get_cardname_short(card));
 }
-EXPORT_SYMBOL_GPL(qeth_print_status_message);
 
 static void qeth_initialize_working_pool_list(struct qeth_card *card)
 {
@@ -2584,19 +2910,16 @@
 static struct qeth_buffer_pool_entry *qeth_find_free_buffer_pool_entry(
 					struct qeth_card *card)
 {
-	struct list_head *plh;
 	struct qeth_buffer_pool_entry *entry;
 	int i, free;
-	struct page *page;
 
 	if (list_empty(&card->qdio.in_buf_pool.entry_list))
 		return NULL;
 
-	list_for_each(plh, &card->qdio.in_buf_pool.entry_list) {
-		entry = list_entry(plh, struct qeth_buffer_pool_entry, list);
+	list_for_each_entry(entry, &card->qdio.in_buf_pool.entry_list, list) {
 		free = 1;
 		for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i) {
-			if (page_count(virt_to_page(entry->elements[i])) > 1) {
+			if (page_count(entry->elements[i]) > 1) {
 				free = 0;
 				break;
 			}
@@ -2608,18 +2931,18 @@
 	}
 
 	/* no free buffer in pool so take first one and swap pages */
-	entry = list_entry(card->qdio.in_buf_pool.entry_list.next,
-			struct qeth_buffer_pool_entry, list);
+	entry = list_first_entry(&card->qdio.in_buf_pool.entry_list,
+				 struct qeth_buffer_pool_entry, list);
 	for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i) {
-		if (page_count(virt_to_page(entry->elements[i])) > 1) {
-			page = alloc_page(GFP_ATOMIC);
-			if (!page) {
+		if (page_count(entry->elements[i]) > 1) {
+			struct page *page = dev_alloc_page();
+
+			if (!page)
 				return NULL;
-			} else {
-				free_page((unsigned long)entry->elements[i]);
-				entry->elements[i] = page_address(page);
-				QETH_CARD_STAT_INC(card, rx_sg_alloc_page);
-			}
+
+			__free_page(entry->elements[i]);
+			entry->elements[i] = page;
+			QETH_CARD_STAT_INC(card, rx_sg_alloc_page);
 		}
 	}
 	list_del_init(&entry->list);
@@ -2629,19 +2952,24 @@
 static int qeth_init_input_buffer(struct qeth_card *card,
 		struct qeth_qdio_buffer *buf)
 {
-	struct qeth_buffer_pool_entry *pool_entry;
+	struct qeth_buffer_pool_entry *pool_entry = buf->pool_entry;
 	int i;
 
 	if ((card->options.cq == QETH_CQ_ENABLED) && (!buf->rx_skb)) {
 		buf->rx_skb = netdev_alloc_skb(card->dev,
-					       QETH_RX_PULL_LEN + ETH_HLEN);
+					       ETH_HLEN +
+					       sizeof(struct ipv6hdr));
 		if (!buf->rx_skb)
 			return -ENOMEM;
 	}
 
-	pool_entry = qeth_find_free_buffer_pool_entry(card);
-	if (!pool_entry)
-		return -ENOBUFS;
+	if (!pool_entry) {
+		pool_entry = qeth_find_free_buffer_pool_entry(card);
+		if (!pool_entry)
+			return -ENOBUFS;
+
+		buf->pool_entry = pool_entry;
+	}
 
 	/*
 	 * since the buffer is accessed only from the input_tasklet
@@ -2649,11 +2977,10 @@
 	 * the QETH_IN_BUF_REQUEUE_THRESHOLD we should never run out of
 	 * buffers
 	 */
-
-	buf->pool_entry = pool_entry;
 	for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i) {
 		buf->buffer->element[i].length = PAGE_SIZE;
-		buf->buffer->element[i].addr =  pool_entry->elements[i];
+		buf->buffer->element[i].addr =
+			page_to_phys(pool_entry->elements[i]);
 		if (i == QETH_MAX_BUFFER_ELEMENTS(card) - 1)
 			buf->buffer->element[i].eflags = SBAL_EFLAGS_LAST_ENTRY;
 		else
@@ -2663,8 +2990,21 @@
 	return 0;
 }
 
-int qeth_init_qdio_queues(struct qeth_card *card)
+static unsigned int qeth_tx_select_bulk_max(struct qeth_card *card,
+					    struct qeth_qdio_out_q *queue)
 {
+	if (!IS_IQD(card) ||
+	    qeth_iqd_is_mcast_queue(card, queue) ||
+	    card->options.cq == QETH_CQ_ENABLED ||
+	    qdio_get_ssqd_desc(CARD_DDEV(card), &card->ssqd))
+		return 1;
+
+	return card->ssqd.mmwc ? card->ssqd.mmwc : 1;
+}
+
+static int qeth_init_qdio_queues(struct qeth_card *card)
+{
+	unsigned int rx_bufs = card->qdio.in_buf_pool.buf_count;
 	unsigned int i;
 	int rc;
 
@@ -2676,16 +3016,14 @@
 
 	qeth_initialize_working_pool_list(card);
 	/* give only as many buffers to hardware as we have buffer pool entries */
-	for (i = 0; i < card->qdio.in_buf_pool.buf_count - 1; i++) {
+	for (i = 0; i < rx_bufs; i++) {
 		rc = qeth_init_input_buffer(card, &card->qdio.in_q->bufs[i]);
 		if (rc)
 			return rc;
 	}
 
-	card->qdio.in_q->next_buf_to_init =
-		card->qdio.in_buf_pool.buf_count - 1;
-	rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, 0, 0,
-		     card->qdio.in_buf_pool.buf_count - 1);
+	card->qdio.in_q->next_buf_to_init = QDIO_BUFNR(rx_bufs);
+	rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, 0, 0, rx_bufs);
 	if (rc) {
 		QETH_CARD_TEXT_(card, 2, "1err%d", rc);
 		return rc;
@@ -2706,15 +3044,16 @@
 		queue->next_buf_to_fill = 0;
 		queue->do_pack = 0;
 		queue->prev_hdr = NULL;
+		queue->coalesced_frames = 0;
 		queue->bulk_start = 0;
+		queue->bulk_count = 0;
+		queue->bulk_max = qeth_tx_select_bulk_max(card, queue);
 		atomic_set(&queue->used_buffers, 0);
 		atomic_set(&queue->set_pci_flags_count, 0);
-		atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED);
 		netdev_tx_reset_queue(netdev_get_tx_queue(card->dev, i));
 	}
 	return 0;
 }
-EXPORT_SYMBOL_GPL(qeth_init_qdio_queues);
 
 static void qeth_ipa_finalize_cmd(struct qeth_card *card,
 				  struct qeth_cmd_buffer *iob)
@@ -2726,7 +3065,9 @@
 }
 
 void qeth_prepare_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob,
-			  u16 cmd_length)
+			  u16 cmd_length,
+			  bool (*match)(struct qeth_cmd_buffer *iob,
+					struct qeth_cmd_buffer *reply))
 {
 	u8 prot_type = qeth_mpc_select_prot_type(card);
 	u16 total_length = iob->length;
@@ -2734,6 +3075,7 @@
 	qeth_setup_ccw(__ccw_from_cmd(iob), CCW_CMD_WRITE, 0, total_length,
 		       iob->data);
 	iob->finalize = qeth_ipa_finalize_cmd;
+	iob->match = match;
 
 	memcpy(iob->data, IPA_PDU_HEADER, IPA_PDU_HEADER_SIZE);
 	memcpy(QETH_IPA_PDU_LEN_TOTAL(iob->data), &total_length, 2);
@@ -2746,12 +3088,19 @@
 }
 EXPORT_SYMBOL_GPL(qeth_prepare_ipa_cmd);
 
+static bool qeth_ipa_match_reply(struct qeth_cmd_buffer *iob,
+				 struct qeth_cmd_buffer *reply)
+{
+	struct qeth_ipa_cmd *ipa_reply = __ipa_reply(reply);
+
+	return ipa_reply && (__ipa_cmd(iob)->hdr.seqno == ipa_reply->hdr.seqno);
+}
+
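/*
 * Editor's sketch (not part of this patch): the new iob->match hook lets
 * the IRQ path pair a reply with its pending command. MPC replies match
 * by position (strictly sequential), IPA replies by sequence number.
 * A standalone model of the seqno predicate; the demo_* names are
 * hypothetical:
 */
#include <stdbool.h>
#include <stdio.h>

struct demo_cmd {
	unsigned int seqno;
};

/* same shape as qeth_ipa_match_reply(): a reply belongs to a cmd iff
 * the sequence numbers agree */
static bool demo_match_reply(const struct demo_cmd *cmd,
			     const struct demo_cmd *reply)
{
	return reply && cmd->seqno == reply->seqno;
}

int main(void)
{
	struct demo_cmd cmd = { .seqno = 42 };
	struct demo_cmd good = { .seqno = 42 };
	struct demo_cmd stale = { .seqno = 41 };

	printf("%d %d\n", demo_match_reply(&cmd, &good),
	       demo_match_reply(&cmd, &stale));	/* -> 1 0 */
	return 0;
}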
 struct qeth_cmd_buffer *qeth_ipa_alloc_cmd(struct qeth_card *card,
 					   enum qeth_ipa_cmds cmd_code,
 					   enum qeth_prot_versions prot,
 					   unsigned int data_length)
 {
-	enum qeth_link_types link_type = card->info.link_type;
 	struct qeth_cmd_buffer *iob;
 	struct qeth_ipacmd_hdr *hdr;
 
@@ -2761,13 +3110,13 @@
 	if (!iob)
 		return NULL;
 
-	qeth_prepare_ipa_cmd(card, iob, data_length);
+	qeth_prepare_ipa_cmd(card, iob, data_length, qeth_ipa_match_reply);
 
 	hdr = &__ipa_cmd(iob)->hdr;
 	hdr->command = cmd_code;
 	hdr->initiator = IPA_CMD_INITIATOR_HOST;
 	/* hdr->seqno is set by qeth_send_control_data() */
-	hdr->adapter_type = (link_type == QETH_LINK_TYPE_HSTR) ? 2 : 1;
+	hdr->adapter_type = QETH_LINK_TYPE_FAST_ETH;
 	hdr->rel_adapter_no = (u8) card->dev->dev_port;
 	hdr->prim_version_no = IS_LAYER2(card) ? 2 : 1;
 	hdr->param_count = 1;
@@ -2850,18 +3199,22 @@
 		struct qeth_reply *reply, unsigned long data)
 {
 	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
+	struct qeth_query_cmds_supp *query_cmd;
 
 	QETH_CARD_TEXT(card, 3, "quyadpcb");
 	if (qeth_setadpparms_inspect_rc(cmd))
 		return -EIO;
 
-	if (cmd->data.setadapterparms.data.query_cmds_supp.lan_type & 0x7f) {
-		card->info.link_type =
-		      cmd->data.setadapterparms.data.query_cmds_supp.lan_type;
+	query_cmd = &cmd->data.setadapterparms.data.query_cmds_supp;
+	if (query_cmd->lan_type & 0x7f) {
+		if (!qeth_is_supported_link_type(card, query_cmd->lan_type))
+			return -EPROTONOSUPPORT;
+
+		card->info.link_type = query_cmd->lan_type;
 		QETH_CARD_TEXT_(card, 2, "lnk %d", card->info.link_type);
 	}
-	card->options.adp.supported_funcs =
-		cmd->data.setadapterparms.data.query_cmds_supp.supported_cmds;
+
+	card->options.adp.supported = query_cmd->supported_cmds;
 	return 0;
 }
 
@@ -2916,8 +3269,8 @@
 	case IPA_RC_NOTSUPP:
 	case IPA_RC_L2_UNSUPPORTED_CMD:
 		QETH_CARD_TEXT(card, 2, "ipaunsup");
-		card->options.ipa4.supported_funcs |= IPA_SETADAPTERPARMS;
-		card->options.ipa6.supported_funcs |= IPA_SETADAPTERPARMS;
+		card->options.ipa4.supported |= IPA_SETADAPTERPARMS;
+		card->options.ipa6.supported |= IPA_SETADAPTERPARMS;
 		return -EOPNOTSUPP;
 	default:
 		QETH_DBF_MESSAGE(1, "IPA_CMD_QIPASSIST on device %x: Unhandled rc=%#x\n",
@@ -2925,13 +3278,11 @@
 		return -EIO;
 	}
 
-	if (cmd->hdr.prot_version == QETH_PROT_IPV4) {
-		card->options.ipa4.supported_funcs = cmd->hdr.ipa_supported;
-		card->options.ipa4.enabled_funcs = cmd->hdr.ipa_enabled;
-	} else if (cmd->hdr.prot_version == QETH_PROT_IPV6) {
-		card->options.ipa6.supported_funcs = cmd->hdr.ipa_supported;
-		card->options.ipa6.enabled_funcs = cmd->hdr.ipa_enabled;
-	} else
+	if (cmd->hdr.prot_version == QETH_PROT_IPV4)
+		card->options.ipa4 = cmd->hdr.assists;
+	else if (cmd->hdr.prot_version == QETH_PROT_IPV6)
+		card->options.ipa6 = cmd->hdr.assists;
+	else
 		QETH_DBF_MESSAGE(1, "IPA_CMD_QIPASSIST on device %x: Flawed LIC detected\n",
 				 CARD_DEVID(card));
 	return 0;
@@ -3055,7 +3406,6 @@
 		memcpy(tid->vmname, info322->vm[0].name, sizeof(tid->vmname));
 	}
 	free_page(info);
-	return;
 }
 
 static int qeth_hw_trap_cb(struct qeth_card *card,
@@ -3099,7 +3449,6 @@
 	}
 	return qeth_send_ipa_cmd(card, iob, qeth_hw_trap_cb, NULL);
 }
-EXPORT_SYMBOL_GPL(qeth_hw_trap);
 
 static int qeth_check_qdio_errors(struct qeth_card *card,
 				  struct qdio_buffer *buf,
@@ -3114,7 +3463,7 @@
 			       buf->element[14].sflags);
 		QETH_CARD_TEXT_(card, 2, " qerr=%X", qdio_error);
 		if ((buf->element[15].sflags) == 0x12) {
-			QETH_CARD_STAT_INC(card, rx_dropped);
+			QETH_CARD_STAT_INC(card, rx_fifo_errors);
 			return 0;
 		} else
 			return 1;
@@ -3122,26 +3471,21 @@
 	return 0;
 }
 
-static void qeth_queue_input_buffer(struct qeth_card *card, int index)
+static unsigned int qeth_rx_refill_queue(struct qeth_card *card,
+					 unsigned int count)
 {
 	struct qeth_qdio_q *queue = card->qdio.in_q;
 	struct list_head *lh;
-	int count;
 	int i;
 	int rc;
 	int newcount = 0;
 
-	count = (index < queue->next_buf_to_init)?
-		card->qdio.in_buf_pool.buf_count -
-		(queue->next_buf_to_init - index) :
-		card->qdio.in_buf_pool.buf_count -
-		(queue->next_buf_to_init + QDIO_MAX_BUFFERS_PER_Q - index);
 	/* only requeue at a certain threshold to avoid SIGAs */
 	if (count >= QETH_IN_BUF_REQUEUE_THRESHOLD(card)) {
 		for (i = queue->next_buf_to_init;
 		     i < queue->next_buf_to_init + count; ++i) {
 			if (qeth_init_input_buffer(card,
-				&queue->bufs[i % QDIO_MAX_BUFFERS_PER_Q])) {
+				&queue->bufs[QDIO_BUFNR(i)])) {
 				break;
 			} else {
 				newcount++;
@@ -3163,38 +3507,36 @@
 				i++;
 			if (i == card->qdio.in_buf_pool.buf_count) {
 				QETH_CARD_TEXT(card, 2, "qsarbw");
-				card->reclaim_index = index;
 				schedule_delayed_work(
 					&card->buffer_reclaim_work,
 					QETH_RECLAIM_WORK_TIME);
 			}
-			return;
+			return 0;
 		}
 
-		/*
-		 * according to old code it should be avoided to requeue all
-		 * 128 buffers in order to benefit from PCI avoidance.
-		 * this function keeps at least one buffer (the buffer at
-		 * 'index') un-requeued -> this buffer is the first buffer that
-		 * will be requeued the next time
-		 */
 		rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, 0,
 			     queue->next_buf_to_init, count);
 		if (rc) {
 			QETH_CARD_TEXT(card, 2, "qinberr");
 		}
-		queue->next_buf_to_init = (queue->next_buf_to_init + count) %
-					  QDIO_MAX_BUFFERS_PER_Q;
+		queue->next_buf_to_init = QDIO_BUFNR(queue->next_buf_to_init +
+						     count);
+		return count;
 	}
+
+	return 0;
 }
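
/*
 * Editor's sketch (not part of this patch): qeth_rx_refill_queue() only
 * requeues once 'count' reaches a threshold, so several RX completions
 * are paid for with a single SIGA instruction. A minimal standalone model
 * of that batching decision; DEMO_REFILL_THRESHOLD and the demo_* name
 * are made up for illustration:
 */
#include <stdio.h>

#define DEMO_REFILL_THRESHOLD 32u

/* returns how many buffers get requeued: all of them, or none yet */
static unsigned int demo_rx_refill(unsigned int count)
{
	if (count < DEMO_REFILL_THRESHOLD)
		return 0;	/* keep collecting, avoid a SIGA per buffer */
	return count;
}

int main(void)
{
	printf("%u %u\n", demo_rx_refill(8), demo_rx_refill(40)); /* -> 0 40 */
	return 0;
}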
 
 static void qeth_buffer_reclaim_work(struct work_struct *work)
 {
-	struct qeth_card *card = container_of(work, struct qeth_card,
-		buffer_reclaim_work.work);
+	struct qeth_card *card = container_of(to_delayed_work(work),
+					      struct qeth_card,
+					      buffer_reclaim_work);
 
-	QETH_CARD_TEXT_(card, 2, "brw:%x", card->reclaim_index);
-	qeth_queue_input_buffer(card, card->reclaim_index);
+	local_bh_disable();
+	napi_schedule(&card->napi);
+	/* kick-start the NAPI softirq: */
+	local_bh_enable();
 }
 
 static void qeth_handle_send_error(struct qeth_card *card,
@@ -3232,7 +3574,7 @@
 		/* it's a packing buffer */
 		atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED);
 		queue->next_buf_to_fill =
-			(queue->next_buf_to_fill + 1) % QDIO_MAX_BUFFERS_PER_Q;
+			QDIO_BUFNR(queue->next_buf_to_fill + 1);
 		return 1;
 	}
 	return 0;
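
/*
 * Editor's sketch (not part of this patch): the conversions in these hunks
 * swap "% QDIO_MAX_BUFFERS_PER_Q" for QDIO_BUFNR(). Because the ring size
 * is a power of two, the wrap-around becomes a cheap bitmask. Standalone
 * demo with DEMO_* stand-ins for the qdio macros:
 */
#include <stdio.h>

#define DEMO_MAX_BUFFERS_PER_Q 128u	/* power of two, as in qdio */
#define DEMO_BUFNR(num) ((num) & (DEMO_MAX_BUFFERS_PER_Q - 1))

int main(void)
{
	/* mask and modulo agree for non-negative ring indices */
	printf("%u %u\n", DEMO_BUFNR(127 + 1),
	       (127u + 1) % DEMO_MAX_BUFFERS_PER_Q);	/* -> 0 0 */
	return 0;
}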
@@ -3279,24 +3621,31 @@
 static void qeth_flush_buffers(struct qeth_qdio_out_q *queue, int index,
 			       int count)
 {
+	struct qeth_qdio_out_buffer *buf = queue->bufs[index];
+	unsigned int qdio_flags = QDIO_FLAG_SYNC_OUTPUT;
 	struct qeth_card *card = queue->card;
-	struct qeth_qdio_out_buffer *buf;
 	int rc;
 	int i;
-	unsigned int qdio_flags;
 
 	for (i = index; i < index + count; ++i) {
-		int bidx = i % QDIO_MAX_BUFFERS_PER_Q;
+		unsigned int bidx = QDIO_BUFNR(i);
+		struct sk_buff *skb;
+
 		buf = queue->bufs[bidx];
 		buf->buffer->element[buf->next_element_to_fill - 1].eflags |=
 				SBAL_EFLAGS_LAST_ENTRY;
+		queue->coalesced_frames += buf->frames;
 
 		if (queue->bufstates)
 			queue->bufstates[bidx].user = buf;
 
-		if (IS_IQD(queue->card))
-			continue;
+		if (IS_IQD(card)) {
+			skb_queue_walk(&buf->skb_list, skb)
+				skb_tx_timestamp(skb);
+		}
+	}
 
+	if (!IS_IQD(card)) {
 		if (!queue->do_pack) {
 			if ((atomic_read(&queue->used_buffers) >=
 				(QETH_HIGH_WATERMARK_PACK -
@@ -3321,17 +3670,28 @@
 				buf->buffer->element[0].sflags |= SBAL_SFLAGS0_PCI_REQ;
 			}
 		}
+
+		if (atomic_read(&queue->set_pci_flags_count))
+			qdio_flags |= QDIO_FLAG_PCI_OUT;
 	}
 
-	qdio_flags = QDIO_FLAG_SYNC_OUTPUT;
-	if (atomic_read(&queue->set_pci_flags_count))
-		qdio_flags |= QDIO_FLAG_PCI_OUT;
+	QETH_TXQ_STAT_INC(queue, doorbell);
 	rc = do_QDIO(CARD_DDEV(queue->card), qdio_flags,
 		     queue->queue_no, index, count);
 
 	/* Fake the TX completion interrupt: */
-	if (IS_IQD(card))
-		napi_schedule(&queue->napi);
+	if (IS_IQD(card)) {
+		unsigned int frames = READ_ONCE(queue->max_coalesced_frames);
+		unsigned int usecs = READ_ONCE(queue->coalesce_usecs);
+
+		if (frames && queue->coalesced_frames >= frames) {
+			napi_schedule(&queue->napi);
+			queue->coalesced_frames = 0;
+			QETH_TXQ_STAT_INC(queue, coal_frames);
+		} else if (usecs) {
+			qeth_tx_arm_timer(queue, usecs);
+		}
+	}
 
 	if (rc) {
 		/* ignore temporary SIGA errors without busy condition */
@@ -3352,55 +3712,48 @@
 
 static void qeth_flush_queue(struct qeth_qdio_out_q *queue)
 {
-	qeth_flush_buffers(queue, queue->bulk_start, 1);
+	qeth_flush_buffers(queue, queue->bulk_start, queue->bulk_count);
 
-	queue->bulk_start = QDIO_BUFNR(queue->bulk_start + 1);
+	queue->bulk_start = QDIO_BUFNR(queue->bulk_start + queue->bulk_count);
 	queue->prev_hdr = NULL;
+	queue->bulk_count = 0;
 }
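
/*
 * Editor's sketch (not part of this patch): after flushing, the IQD path
 * in qeth_flush_buffers() above either schedules NAPI right away (enough
 * frames coalesced) or arms a timer so completions are never delayed
 * indefinitely. A standalone model of that decision; the demo_* names and
 * sample values are hypothetical:
 */
#include <stdbool.h>
#include <stdio.h>

/* returns true if TX completion should be polled immediately */
static bool demo_coalesce(unsigned int coalesced_frames,
			  unsigned int max_frames, unsigned int usecs,
			  bool *arm_timer)
{
	*arm_timer = false;
	if (max_frames && coalesced_frames >= max_frames)
		return true;		/* threshold hit: poll now */
	if (usecs)
		*arm_timer = true;	/* otherwise fall back to the timer */
	return false;
}

int main(void)
{
	bool timer;

	printf("%d", demo_coalesce(64, 32, 25, &timer));	/* -> 1 */
	printf(" %d %d\n", demo_coalesce(8, 32, 25, &timer), timer); /* -> 0 1 */
	return 0;
}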
 
 static void qeth_check_outbound_queue(struct qeth_qdio_out_q *queue)
 {
-	int index;
-	int flush_cnt = 0;
-	int q_was_packing = 0;
-
 	/*
 	 * check if we have to switch to non-packing mode or if
 	 * we have to get a pci flag out on the queue
 	 */
 	if ((atomic_read(&queue->used_buffers) <= QETH_LOW_WATERMARK_PACK) ||
 	    !atomic_read(&queue->set_pci_flags_count)) {
-		if (atomic_xchg(&queue->state, QETH_OUT_Q_LOCKED_FLUSH) ==
-				QETH_OUT_Q_UNLOCKED) {
-			/*
-			 * If we get in here, there was no action in
-			 * do_send_packet. So, we check if there is a
-			 * packing buffer to be flushed here.
-			 */
-			index = queue->next_buf_to_fill;
-			q_was_packing = queue->do_pack;
-			/* queue->do_pack may change */
-			barrier();
-			flush_cnt += qeth_switch_to_nonpacking_if_needed(queue);
-			if (!flush_cnt &&
-			    !atomic_read(&queue->set_pci_flags_count))
-				flush_cnt += qeth_prep_flush_pack_buffer(queue);
+		unsigned int index, flush_cnt;
+		bool q_was_packing;
+
+		spin_lock(&queue->lock);
+
+		index = queue->next_buf_to_fill;
+		q_was_packing = queue->do_pack;
+
+		flush_cnt = qeth_switch_to_nonpacking_if_needed(queue);
+		if (!flush_cnt && !atomic_read(&queue->set_pci_flags_count))
+			flush_cnt = qeth_prep_flush_pack_buffer(queue);
+
+		if (flush_cnt) {
+			qeth_flush_buffers(queue, index, flush_cnt);
 			if (q_was_packing)
 				QETH_TXQ_STAT_ADD(queue, bufs_pack, flush_cnt);
-			if (flush_cnt)
-				qeth_flush_buffers(queue, index, flush_cnt);
-			atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED);
 		}
+
+		spin_unlock(&queue->lock);
 	}
 }
 
-static void qeth_qdio_start_poll(struct ccw_device *ccwdev, int queue,
-				 unsigned long card_ptr)
+static void qeth_qdio_poll(struct ccw_device *cdev, unsigned long card_ptr)
 {
 	struct qeth_card *card = (struct qeth_card *)card_ptr;
 
-	if (card->dev->flags & IFF_UP)
-		napi_schedule(&card->napi);
+	napi_schedule_irqoff(&card->napi);
 }
 
 int qeth_configure_cq(struct qeth_card *card, enum qeth_cq cq)
@@ -3434,9 +3787,6 @@
 	int i;
 	int rc;
 
-	if (!qeth_is_cq(card, queue))
-		return;
-
 	QETH_CARD_TEXT_(card, 5, "qcqhe%d", first_element);
 	QETH_CARD_TEXT_(card, 5, "qcqhc%d", count);
 	QETH_CARD_TEXT_(card, 5, "qcqherr%d", qdio_err);
@@ -3448,15 +3798,13 @@
 	}
 
 	for (i = first_element; i < first_element + count; ++i) {
-		int bidx = i % QDIO_MAX_BUFFERS_PER_Q;
-		struct qdio_buffer *buffer = cq->qdio_bufs[bidx];
+		struct qdio_buffer *buffer = cq->qdio_bufs[QDIO_BUFNR(i)];
 		int e = 0;
 
 		while ((e < QDIO_MAX_ELEMENTS_PER_BUFFER) &&
 		       buffer->element[e].addr) {
-			unsigned long phys_aob_addr;
+			unsigned long phys_aob_addr = buffer->element[e].addr;
 
-			phys_aob_addr = (unsigned long) buffer->element[e].addr;
 			qeth_qdio_handle_aob(card, phys_aob_addr);
 			++e;
 		}
@@ -3470,8 +3818,8 @@
 			"QDIO reported an error, rc=%i\n", rc);
 		QETH_CARD_TEXT(card, 2, "qcqherr");
 	}
-	card->qdio.c_q->next_buf_to_init = (card->qdio.c_q->next_buf_to_init
-				   + count) % QDIO_MAX_BUFFERS_PER_Q;
+
+	cq->next_buf_to_init = QDIO_BUFNR(cq->next_buf_to_init + count);
 }
 
 static void qeth_qdio_input_handler(struct ccw_device *ccwdev,
@@ -3484,9 +3832,7 @@
 	QETH_CARD_TEXT_(card, 2, "qihq%d", queue);
 	QETH_CARD_TEXT_(card, 2, "qiec%d", qdio_err);
 
-	if (qeth_is_cq(card, queue))
-		qeth_qdio_cq_handler(card, qdio_err, queue, first_elem, count);
-	else if (qdio_err)
+	if (qdio_err)
 		qeth_schedule_recovery(card);
 }
 
@@ -3497,7 +3843,6 @@
 {
 	struct qeth_card *card        = (struct qeth_card *) card_ptr;
 	struct qeth_qdio_out_q *queue = card->qdio.out_qs[__queue];
-	struct qeth_qdio_out_buffer *buffer;
 	struct net_device *dev = card->dev;
 	struct netdev_queue *txq;
 	int i;
@@ -3511,10 +3856,10 @@
 	}
 
 	for (i = first_element; i < (first_element + count); ++i) {
-		int bidx = i % QDIO_MAX_BUFFERS_PER_Q;
-		buffer = queue->bufs[bidx];
-		qeth_handle_send_error(card, buffer, qdio_error);
-		qeth_clear_output_buffer(queue, buffer, qdio_error, 0);
+		struct qeth_qdio_out_buffer *buf = queue->bufs[QDIO_BUFNR(i)];
+
+		qeth_handle_send_error(card, buf, qdio_error);
+		qeth_clear_output_buffer(queue, buf, qdio_error, 0);
 	}
 
 	atomic_sub(count, &queue->used_buffers);
@@ -3570,6 +3915,8 @@
 			return ~ntohs(veth->h_vlan_TCI) >>
 			       (VLAN_PRIO_SHIFT + 1) & 3;
 		break;
+	case QETH_PRIO_Q_ING_FIXED:
+		return card->qdio.default_out_queue;
 	default:
 		break;
 	}
@@ -3642,6 +3989,7 @@
 			      unsigned int hdr_len, unsigned int proto_len,
 			      unsigned int *elements)
 {
+	gfp_t gfp = GFP_ATOMIC | (skb_pfmemalloc(skb) ? __GFP_MEMALLOC : 0);
 	const unsigned int contiguous = proto_len ? proto_len : 1;
 	const unsigned int max_elements = queue->max_elements;
 	unsigned int __elements;
@@ -3697,10 +4045,11 @@
 		*hdr = skb_push(skb, hdr_len);
 		return hdr_len;
 	}
-	/* fall back */
+
+	/* Fall back to cache element with known-good alignment: */
 	if (hdr_len + proto_len > QETH_HDR_CACHE_OBJ_SIZE)
 		return -E2BIG;
-	*hdr = kmem_cache_alloc(qeth_core_header_cache, GFP_ATOMIC);
+	*hdr = kmem_cache_alloc(qeth_core_header_cache, gfp);
 	if (!*hdr)
 		return -ENOMEM;
 	/* Copy protocol headers behind HW header: */
@@ -3709,10 +4058,10 @@
 }
 
 static bool qeth_iqd_may_bulk(struct qeth_qdio_out_q *queue,
-			      struct qeth_qdio_out_buffer *buffer,
 			      struct sk_buff *curr_skb,
 			      struct qeth_hdr *curr_hdr)
 {
+	struct qeth_qdio_out_buffer *buffer = queue->bufs[queue->bulk_start];
 	struct qeth_hdr *prev_hdr = queue->prev_hdr;
 
 	if (!prev_hdr)
@@ -3731,22 +4080,54 @@
 	       qeth_l3_iqd_same_vlan(&prev_hdr->hdr.l3, &curr_hdr->hdr.l3);
 }
 
-static unsigned int __qeth_fill_buffer(struct sk_buff *skb,
-				       struct qeth_qdio_out_buffer *buf,
-				       bool is_first_elem, unsigned int offset)
+/**
+ * qeth_fill_buffer() - map skb into an output buffer
+ * @buf:	buffer to transport the skb
+ * @skb:	skb to map into the buffer
+ * @hdr:	qeth_hdr for this skb. Either at skb->data, or allocated
+ *		from qeth_core_header_cache.
+ * @offset:	when mapping the skb, start at skb->data + offset
+ * @hd_len:	if > 0, build a dedicated header element of this size
+ */
+static unsigned int qeth_fill_buffer(struct qeth_qdio_out_buffer *buf,
+				     struct sk_buff *skb, struct qeth_hdr *hdr,
+				     unsigned int offset, unsigned int hd_len)
 {
 	struct qdio_buffer *buffer = buf->buffer;
 	int element = buf->next_element_to_fill;
 	int length = skb_headlen(skb) - offset;
 	char *data = skb->data + offset;
 	unsigned int elem_length, cnt;
+	bool is_first_elem = true;
+
+	__skb_queue_tail(&buf->skb_list, skb);
+
+	/* build dedicated element for HW Header */
+	if (hd_len) {
+		is_first_elem = false;
+
+		buffer->element[element].addr = virt_to_phys(hdr);
+		buffer->element[element].length = hd_len;
+		buffer->element[element].eflags = SBAL_EFLAGS_FIRST_FRAG;
+
+		/* HW header is allocated from cache: */
+		if ((void *)hdr != skb->data)
+			buf->is_header[element] = 1;
+		/* HW header was pushed and is contiguous with linear part: */
+		else if (length > 0 && !PAGE_ALIGNED(data) &&
+			 (data == (char *)hdr + hd_len))
+			buffer->element[element].eflags |=
+				SBAL_EFLAGS_CONTIGUOUS;
+
+		element++;
+	}
 
 	/* map linear part into buffer element(s) */
 	while (length > 0) {
 		elem_length = min_t(unsigned int, length,
 				    PAGE_SIZE - offset_in_page(data));
 
-		buffer->element[element].addr = data;
+		buffer->element[element].addr = virt_to_phys(data);
 		buffer->element[element].length = elem_length;
 		length -= elem_length;
 		if (is_first_elem) {
@@ -3776,7 +4157,7 @@
 			elem_length = min_t(unsigned int, length,
 					    PAGE_SIZE - offset_in_page(data));
 
-			buffer->element[element].addr = data;
+			buffer->element[element].addr = virt_to_phys(data);
 			buffer->element[element].length = elem_length;
 			buffer->element[element].eflags =
 				SBAL_EFLAGS_MIDDLE_FRAG;
@@ -3793,52 +4174,19 @@
 	return element;
 }
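
/*
 * Editor's sketch (not part of this patch): the element loops above clamp
 * each chunk with min_t(length, PAGE_SIZE - offset_in_page(data)), so no
 * SBAL element ever crosses a page boundary. Standalone count of how many
 * elements a linear range needs; DEMO_PAGE_SIZE and the demo_* name are
 * assumptions for illustration:
 */
#include <stdint.h>
#include <stdio.h>

#define DEMO_PAGE_SIZE 4096u

static unsigned int demo_count_elements(uintptr_t data, unsigned int length)
{
	unsigned int elements = 0;

	while (length > 0) {
		unsigned int chunk = DEMO_PAGE_SIZE - (data % DEMO_PAGE_SIZE);

		if (chunk > length)
			chunk = length;
		data += chunk;
		length -= chunk;
		elements++;
	}
	return elements;
}

int main(void)
{
	/* 100 bytes starting 6 bytes before a page boundary -> 2 elements */
	printf("%u\n", demo_count_elements(4090, 100));
	return 0;
}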
 
-/**
- * qeth_fill_buffer() - map skb into an output buffer
- * @buf:	buffer to transport the skb
- * @skb:	skb to map into the buffer
- * @hdr:	qeth_hdr for this skb. Either at skb->data, or allocated
- *		from qeth_core_header_cache.
- * @offset:	when mapping the skb, start at skb->data + offset
- * @hd_len:	if > 0, build a dedicated header element of this size
- */
-static unsigned int qeth_fill_buffer(struct qeth_qdio_out_buffer *buf,
-				     struct sk_buff *skb, struct qeth_hdr *hdr,
-				     unsigned int offset, unsigned int hd_len)
-{
-	struct qdio_buffer *buffer = buf->buffer;
-	bool is_first_elem = true;
-
-	__skb_queue_tail(&buf->skb_list, skb);
-
-	/* build dedicated header element */
-	if (hd_len) {
-		int element = buf->next_element_to_fill;
-		is_first_elem = false;
-
-		buffer->element[element].addr = hdr;
-		buffer->element[element].length = hd_len;
-		buffer->element[element].eflags = SBAL_EFLAGS_FIRST_FRAG;
-		/* remember to free cache-allocated qeth_hdr: */
-		buf->is_header[element] = ((void *)hdr != skb->data);
-		buf->next_element_to_fill++;
-	}
-
-	return __qeth_fill_buffer(skb, buf, is_first_elem, offset);
-}
-
 static int __qeth_xmit(struct qeth_card *card, struct qeth_qdio_out_q *queue,
 		       struct sk_buff *skb, unsigned int elements,
 		       struct qeth_hdr *hdr, unsigned int offset,
 		       unsigned int hd_len)
 {
-	struct qeth_qdio_out_buffer *buffer = queue->bufs[queue->bulk_start];
 	unsigned int bytes = qdisc_pkt_len(skb);
+	struct qeth_qdio_out_buffer *buffer;
 	unsigned int next_element;
 	struct netdev_queue *txq;
 	bool stopped = false;
 	bool flush;
 
+	buffer = queue->bufs[QDIO_BUFNR(queue->bulk_start + queue->bulk_count)];
 	txq = netdev_get_tx_queue(card->dev, skb_get_queue_mapping(skb));
 
 	/* Just a sanity check, the wake/stop logic should ensure that we always
@@ -3847,11 +4195,23 @@
 	if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY)
 		return -EBUSY;
 
-	if ((buffer->next_element_to_fill + elements > queue->max_elements) ||
-	    !qeth_iqd_may_bulk(queue, buffer, skb, hdr)) {
-		atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED);
-		qeth_flush_queue(queue);
-		buffer = queue->bufs[queue->bulk_start];
+	flush = !qeth_iqd_may_bulk(queue, skb, hdr);
+
+	if (flush ||
+	    (buffer->next_element_to_fill + elements > queue->max_elements)) {
+		if (buffer->next_element_to_fill > 0) {
+			atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED);
+			queue->bulk_count++;
+		}
+
+		if (queue->bulk_count >= queue->bulk_max)
+			flush = true;
+
+		if (flush)
+			qeth_flush_queue(queue);
+
+		buffer = queue->bufs[QDIO_BUFNR(queue->bulk_start +
+						queue->bulk_count)];
 
 		/* Sanity-check again: */
 		if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY)
@@ -3870,6 +4230,7 @@
 
 	next_element = qeth_fill_buffer(buffer, skb, hdr, offset, hd_len);
 	buffer->bytes += bytes;
+	buffer->frames += skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1;
 	queue->prev_hdr = hdr;
 
 	flush = __netdev_tx_sent_queue(txq, bytes,
@@ -3877,7 +4238,13 @@
 
 	if (flush || next_element >= queue->max_elements) {
 		atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED);
-		qeth_flush_queue(queue);
+		queue->bulk_count++;
+
+		if (queue->bulk_count >= queue->bulk_max)
+			flush = true;
+
+		if (flush)
+			qeth_flush_queue(queue);
 	}
 
 	if (stopped && !qeth_out_queue_is_full(queue))
@@ -3890,29 +4257,22 @@
 			unsigned int offset, unsigned int hd_len,
 			int elements_needed)
 {
+	unsigned int start_index = queue->next_buf_to_fill;
 	struct qeth_qdio_out_buffer *buffer;
 	unsigned int next_element;
 	struct netdev_queue *txq;
 	bool stopped = false;
-	int start_index;
 	int flush_count = 0;
 	int do_pack = 0;
-	int tmp;
 	int rc = 0;
 
-	/* spin until we get the queue ... */
-	while (atomic_cmpxchg(&queue->state, QETH_OUT_Q_UNLOCKED,
-			      QETH_OUT_Q_LOCKED) != QETH_OUT_Q_UNLOCKED);
-	start_index = queue->next_buf_to_fill;
 	buffer = queue->bufs[queue->next_buf_to_fill];
 
 	/* Just a sanity check, the wake/stop logic should ensure that we always
 	 * get a free buffer.
 	 */
-	if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY) {
-		atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED);
+	if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY)
 		return -EBUSY;
-	}
 
 	txq = netdev_get_tx_queue(card->dev, skb_get_queue_mapping(skb));
 
@@ -3927,8 +4287,7 @@
 			atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED);
 			flush_count++;
 			queue->next_buf_to_fill =
-				(queue->next_buf_to_fill + 1) %
-				QDIO_MAX_BUFFERS_PER_Q;
+				QDIO_BUFNR(queue->next_buf_to_fill + 1);
 			buffer = queue->bufs[queue->next_buf_to_fill];
 
 			/* We stepped forward, so sanity-check again: */
@@ -3936,8 +4295,6 @@
 			    QETH_QDIO_BUF_EMPTY) {
 				qeth_flush_buffers(queue, start_index,
 							   flush_count);
-				atomic_set(&queue->state,
-						QETH_OUT_Q_UNLOCKED);
 				rc = -EBUSY;
 				goto out;
 			}
@@ -3955,43 +4312,22 @@
 	}
 
 	next_element = qeth_fill_buffer(buffer, skb, hdr, offset, hd_len);
+	buffer->bytes += qdisc_pkt_len(skb);
+	buffer->frames += skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1;
 
 	if (queue->do_pack)
 		QETH_TXQ_STAT_INC(queue, skbs_pack);
 	if (!queue->do_pack || stopped || next_element >= queue->max_elements) {
 		flush_count++;
 		atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED);
-		queue->next_buf_to_fill = (queue->next_buf_to_fill + 1) %
-					  QDIO_MAX_BUFFERS_PER_Q;
+		queue->next_buf_to_fill =
+				QDIO_BUFNR(queue->next_buf_to_fill + 1);
 	}
 
 	if (flush_count)
 		qeth_flush_buffers(queue, start_index, flush_count);
-	else if (!atomic_read(&queue->set_pci_flags_count))
-		atomic_xchg(&queue->state, QETH_OUT_Q_LOCKED_FLUSH);
-	/*
-	 * queue->state will go from LOCKED -> UNLOCKED or from
-	 * LOCKED_FLUSH -> LOCKED if output_handler wanted to 'notify' us
-	 * (switch packing state or flush buffer to get another pci flag out).
-	 * In that case we will enter this loop
-	 */
-	while (atomic_dec_return(&queue->state)) {
-		start_index = queue->next_buf_to_fill;
-		/* check if we can go back to non-packing state */
-		tmp = qeth_switch_to_nonpacking_if_needed(queue);
-		/*
-		 * check if we need to flush a packing buffer to get a pci
-		 * flag out on the queue
-		 */
-		if (!tmp && !atomic_read(&queue->set_pci_flags_count))
-			tmp = qeth_prep_flush_pack_buffer(queue);
-		if (tmp) {
-			qeth_flush_buffers(queue, start_index, tmp);
-			flush_count += tmp;
-		}
-	}
+
 out:
-	/* at this point the queue is UNLOCKED again */
 	if (do_pack)
 		QETH_TXQ_STAT_ADD(queue, bufs_pack, flush_count);
 
@@ -4065,8 +4401,10 @@
 	} else {
 		/* TODO: drop skb_orphan() once TX completion is fast enough */
 		skb_orphan(skb);
+		spin_lock(&queue->lock);
 		rc = qeth_do_send_packet(card, queue, skb, hdr, data_offset,
 					 hd_len, elements);
+		spin_unlock(&queue->lock);
 	}
 
 	if (rc && !push_len)
@@ -4163,7 +4501,6 @@
 {
 	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
 	struct qeth_set_access_ctrl *access_ctrl_req;
-	int fallback = *(int *)reply->param;
 
 	QETH_CARD_TEXT(card, 4, "setaccb");
 
@@ -4177,70 +4514,54 @@
 				 cmd->data.setadapterparms.hdr.return_code);
 	switch (qeth_setadpparms_inspect_rc(cmd)) {
 	case SET_ACCESS_CTRL_RC_SUCCESS:
-		if (card->options.isolation == ISOLATION_MODE_NONE) {
+		if (access_ctrl_req->subcmd_code == ISOLATION_MODE_NONE)
 			dev_info(&card->gdev->dev,
 			    "QDIO data connection isolation is deactivated\n");
-		} else {
+		else
 			dev_info(&card->gdev->dev,
 			    "QDIO data connection isolation is activated\n");
-		}
-		break;
+		return 0;
 	case SET_ACCESS_CTRL_RC_ALREADY_NOT_ISOLATED:
 		QETH_DBF_MESSAGE(2, "QDIO data connection isolation on device %x already deactivated\n",
 				 CARD_DEVID(card));
-		if (fallback)
-			card->options.isolation = card->options.prev_isolation;
-		break;
+		return 0;
 	case SET_ACCESS_CTRL_RC_ALREADY_ISOLATED:
 		QETH_DBF_MESSAGE(2, "QDIO data connection isolation on device %x already activated\n",
 				 CARD_DEVID(card));
-		if (fallback)
-			card->options.isolation = card->options.prev_isolation;
-		break;
+		return 0;
 	case SET_ACCESS_CTRL_RC_NOT_SUPPORTED:
 		dev_err(&card->gdev->dev, "Adapter does not "
 			"support QDIO data connection isolation\n");
-		break;
+		return -EOPNOTSUPP;
 	case SET_ACCESS_CTRL_RC_NONE_SHARED_ADAPTER:
 		dev_err(&card->gdev->dev,
 			"Adapter is dedicated. "
 			"QDIO data connection isolation not supported\n");
-		if (fallback)
-			card->options.isolation = card->options.prev_isolation;
-		break;
+		return -EOPNOTSUPP;
 	case SET_ACCESS_CTRL_RC_ACTIVE_CHECKSUM_OFF:
 		dev_err(&card->gdev->dev,
 			"TSO does not permit QDIO data connection isolation\n");
-		if (fallback)
-			card->options.isolation = card->options.prev_isolation;
-		break;
+		return -EPERM;
 	case SET_ACCESS_CTRL_RC_REFLREL_UNSUPPORTED:
 		dev_err(&card->gdev->dev, "The adjacent switch port does not "
 			"support reflective relay mode\n");
-		if (fallback)
-			card->options.isolation = card->options.prev_isolation;
-		break;
+		return -EOPNOTSUPP;
 	case SET_ACCESS_CTRL_RC_REFLREL_FAILED:
 		dev_err(&card->gdev->dev, "The reflective relay mode cannot be "
 					"enabled at the adjacent switch port");
-		if (fallback)
-			card->options.isolation = card->options.prev_isolation;
-		break;
+		return -EREMOTEIO;
 	case SET_ACCESS_CTRL_RC_REFLREL_DEACT_FAILED:
 		dev_warn(&card->gdev->dev, "Turning off reflective relay mode "
 					"at the adjacent switch failed\n");
-		break;
+		/* benign error while disabling ISOLATION_MODE_FWD */
+		return 0;
 	default:
-		/* this should never happen */
-		if (fallback)
-			card->options.isolation = card->options.prev_isolation;
-		break;
+		return -EIO;
 	}
-	return (cmd->hdr.return_code) ? -EIO : 0;
 }
 
-static int qeth_setadpparms_set_access_ctrl(struct qeth_card *card,
-		enum qeth_ipa_isolation_modes isolation, int fallback)
+int qeth_setadpparms_set_access_ctrl(struct qeth_card *card,
+				     enum qeth_ipa_isolation_modes mode)
 {
 	int rc;
 	struct qeth_cmd_buffer *iob;
@@ -4249,47 +4570,32 @@
 
 	QETH_CARD_TEXT(card, 4, "setacctl");
 
+	if (!qeth_adp_supported(card, IPA_SETADP_SET_ACCESS_CONTROL)) {
+		dev_err(&card->gdev->dev,
+			"Adapter does not support QDIO data connection isolation\n");
+		return -EOPNOTSUPP;
+	}
+
 	iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_ACCESS_CONTROL,
 				   SETADP_DATA_SIZEOF(set_access_ctrl));
 	if (!iob)
 		return -ENOMEM;
 	cmd = __ipa_cmd(iob);
 	access_ctrl_req = &cmd->data.setadapterparms.data.set_access_ctrl;
-	access_ctrl_req->subcmd_code = isolation;
+	access_ctrl_req->subcmd_code = mode;
 
 	rc = qeth_send_ipa_cmd(card, iob, qeth_setadpparms_set_access_ctrl_cb,
-			       &fallback);
-	QETH_CARD_TEXT_(card, 2, "rc=%d", rc);
-	return rc;
-}
-
-int qeth_set_access_ctrl_online(struct qeth_card *card, int fallback)
-{
-	int rc = 0;
-
-	QETH_CARD_TEXT(card, 4, "setactlo");
-
-	if ((IS_OSD(card) || IS_OSX(card)) &&
-	    qeth_adp_supported(card, IPA_SETADP_SET_ACCESS_CONTROL)) {
-		rc = qeth_setadpparms_set_access_ctrl(card,
-			card->options.isolation, fallback);
-		if (rc) {
-			QETH_DBF_MESSAGE(3, "IPA(SET_ACCESS_CTRL(%d) on device %x: sent failed\n",
-					 rc, CARD_DEVID(card));
-			rc = -EOPNOTSUPP;
-		}
-	} else if (card->options.isolation != ISOLATION_MODE_NONE) {
-		card->options.isolation = ISOLATION_MODE_NONE;
-
-		dev_err(&card->gdev->dev, "Adapter does not "
-			"support QDIO data connection isolation\n");
-		rc = -EOPNOTSUPP;
+			       NULL);
+	if (rc) {
+		QETH_CARD_TEXT_(card, 2, "rc=%d", rc);
+		QETH_DBF_MESSAGE(3, "IPA(SET_ACCESS_CTRL(%d)) on device %x: sending failed\n",
+				 rc, CARD_DEVID(card));
 	}
+
 	return rc;
 }
-EXPORT_SYMBOL_GPL(qeth_set_access_ctrl_online);
 
-void qeth_tx_timeout(struct net_device *dev)
+void qeth_tx_timeout(struct net_device *dev, unsigned int txqueue)
 {
 	struct qeth_card *card;
 
@@ -4342,7 +4648,9 @@
 	case MII_NWAYTEST: /* N-way auto-neg test register */
 		break;
 	case MII_RERRCOUNTER: /* rx error counter */
-		rc = card->stats.rx_errors;
+		rc = card->stats.rx_length_errors +
+		     card->stats.rx_frame_errors +
+		     card->stats.rx_fifo_errors;
 		break;
 	case MII_SREVISION: /* silicon revision */
 		break;
@@ -4471,26 +4779,24 @@
 }
 
 static int qeth_setadpparms_query_oat_cb(struct qeth_card *card,
-		struct qeth_reply *reply, unsigned long data)
+					 struct qeth_reply *reply,
+					 unsigned long data)
 {
 	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *)data;
-	struct qeth_qoat_priv *priv;
-	char *resdata;
+	struct qeth_qoat_priv *priv = reply->param;
 	int resdatalen;
 
 	QETH_CARD_TEXT(card, 3, "qoatcb");
 	if (qeth_setadpparms_inspect_rc(cmd))
 		return -EIO;
 
-	priv = (struct qeth_qoat_priv *)reply->param;
 	resdatalen = cmd->data.setadapterparms.hdr.cmdlength;
-	resdata = (char *)data + 28;
 
 	if (resdatalen > (priv->buffer_len - priv->response_len))
 		return -ENOSPC;
 
-	memcpy((priv->buffer + priv->response_len), resdata,
-		resdatalen);
+	memcpy(priv->buffer + priv->response_len,
+	       &cmd->data.setadapterparms.hdr, resdatalen);
 	priv->response_len += resdatalen;
 
 	if (cmd->data.setadapterparms.hdr.seq_no <
@@ -4511,24 +4817,17 @@
 
 	QETH_CARD_TEXT(card, 3, "qoatcmd");
 
-	if (!qeth_adp_supported(card, IPA_SETADP_QUERY_OAT)) {
-		rc = -EOPNOTSUPP;
-		goto out;
-	}
+	if (!qeth_adp_supported(card, IPA_SETADP_QUERY_OAT))
+		return -EOPNOTSUPP;
 
-	if (copy_from_user(&oat_data, udata,
-	    sizeof(struct qeth_query_oat_data))) {
-			rc = -EFAULT;
-			goto out;
-	}
+	if (copy_from_user(&oat_data, udata, sizeof(oat_data)))
+		return -EFAULT;
 
 	priv.buffer_len = oat_data.buffer_len;
 	priv.response_len = 0;
 	priv.buffer = vzalloc(oat_data.buffer_len);
-	if (!priv.buffer) {
-		rc = -ENOMEM;
-		goto out;
-	}
+	if (!priv.buffer)
+		return -ENOMEM;
 
 	iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_OAT,
 				   SETADP_DATA_SIZEOF(query_oat));
@@ -4540,30 +4839,19 @@
 	oat_req = &cmd->data.setadapterparms.data.query_oat;
 	oat_req->subcmd_code = oat_data.command;
 
-	rc = qeth_send_ipa_cmd(card, iob, qeth_setadpparms_query_oat_cb,
-			       &priv);
+	rc = qeth_send_ipa_cmd(card, iob, qeth_setadpparms_query_oat_cb, &priv);
 	if (!rc) {
-		if (is_compat_task())
-			tmp = compat_ptr(oat_data.ptr);
-		else
-			tmp = (void __user *)(unsigned long)oat_data.ptr;
-
-		if (copy_to_user(tmp, priv.buffer,
-		    priv.response_len)) {
-			rc = -EFAULT;
-			goto out_free;
-		}
-
+		tmp = is_compat_task() ? compat_ptr(oat_data.ptr) :
+					 u64_to_user_ptr(oat_data.ptr);
 		oat_data.response_len = priv.response_len;
 
-		if (copy_to_user(udata, &oat_data,
-		    sizeof(struct qeth_query_oat_data)))
+		if (copy_to_user(tmp, priv.buffer, priv.response_len) ||
+		    copy_to_user(udata, &oat_data, sizeof(oat_data)))
 			rc = -EFAULT;
 	}
 
 out_free:
 	vfree(priv.buffer);
-out:
 	return rc;
 }
 
@@ -4612,7 +4900,6 @@
 {
 	struct diag26c_mac_resp *response;
 	struct diag26c_mac_req *request;
-	struct ccw_dev_id id;
 	int rc;
 
 	QETH_CARD_TEXT(card, 2, "vmreqmac");
@@ -4624,11 +4911,10 @@
 		goto out;
 	}
 
-	ccw_device_get_id(CARD_DDEV(card), &id);
 	request->resp_buf_len = sizeof(*response);
 	request->resp_version = DIAG26C_VERSION2;
 	request->op_code = DIAG26C_GET_MAC;
-	request->devno = id.devno;
+	request->devno = card->info.ddev_devno;
 
 	QETH_DBF_HEX(CTRL, 2, request, sizeof(*request));
 	rc = diag26c(request, response, DIAG26C_MAC_SERVICES);
@@ -4668,7 +4954,7 @@
 	QETH_CARD_TEXT(card, 2, "detcapab");
 	if (!ddev->online) {
 		ddev_offline = 1;
-		rc = ccw_device_set_online(ddev);
+		rc = qeth_start_channel(channel);
 		if (rc) {
 			QETH_CARD_TEXT_(card, 2, "3err%d", rc);
 			goto out;
@@ -4701,7 +4987,6 @@
 		card->options.cq = QETH_CQ_NOTAVAILABLE;
 	}
 
-
 out_offline:
 	if (ddev_offline == 1)
 		qeth_stop_channel(channel);
@@ -4709,94 +4994,69 @@
 	return;
 }
 
-static void qeth_qdio_establish_cq(struct qeth_card *card,
-				   struct qdio_buffer **in_sbal_ptrs,
-				   void (**queue_start_poll)
-					(struct ccw_device *, int,
-					 unsigned long))
+static void qeth_read_ccw_conf_data(struct qeth_card *card)
 {
-	int i;
+	struct qeth_card_info *info = &card->info;
+	struct ccw_device *cdev = CARD_DDEV(card);
+	struct ccw_dev_id dev_id;
 
-	if (card->options.cq == QETH_CQ_ENABLED) {
-		int offset = QDIO_MAX_BUFFERS_PER_Q *
-			     (card->qdio.no_in_queues - 1);
+	QETH_CARD_TEXT(card, 2, "ccwconfd");
+	ccw_device_get_id(cdev, &dev_id);
 
-		for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; i++)
-			in_sbal_ptrs[offset + i] =
-				card->qdio.c_q->bufs[i].buffer;
+	info->ddev_devno = dev_id.devno;
+	info->ids_valid = !ccw_device_get_cssid(cdev, &info->cssid) &&
+			  !ccw_device_get_iid(cdev, &info->iid) &&
+			  !ccw_device_get_chid(cdev, 0, &info->chid);
+	info->ssid = dev_id.ssid;
 
-		queue_start_poll[card->qdio.no_in_queues - 1] = NULL;
-	}
+	dev_info(&card->gdev->dev, "CHID: %x CHPID: %x\n",
+		 info->chid, info->chpid);
+
+	QETH_CARD_TEXT_(card, 3, "devn%x", info->ddev_devno);
+	QETH_CARD_TEXT_(card, 3, "cssid:%x", info->cssid);
+	QETH_CARD_TEXT_(card, 3, "iid:%x", info->iid);
+	QETH_CARD_TEXT_(card, 3, "ssid:%x", info->ssid);
+	QETH_CARD_TEXT_(card, 3, "chpid:%x", info->chpid);
+	QETH_CARD_TEXT_(card, 3, "chid:%x", info->chid);
+	QETH_CARD_TEXT_(card, 3, "idval%x", info->ids_valid);
 }
 
 static int qeth_qdio_establish(struct qeth_card *card)
 {
+	struct qdio_buffer **out_sbal_ptrs[QETH_MAX_OUT_QUEUES];
+	struct qdio_buffer **in_sbal_ptrs[QETH_MAX_IN_QUEUES];
+	struct qeth_qib_parms *qib_parms = NULL;
 	struct qdio_initialize init_data;
-	char *qib_param_field;
-	struct qdio_buffer **in_sbal_ptrs;
-	void (**queue_start_poll) (struct ccw_device *, int, unsigned long);
-	struct qdio_buffer **out_sbal_ptrs;
-	int i, j, k;
+	unsigned int i;
 	int rc = 0;
 
 	QETH_CARD_TEXT(card, 2, "qdioest");
 
-	qib_param_field = kzalloc(FIELD_SIZEOF(struct qib, parm), GFP_KERNEL);
-	if (!qib_param_field) {
-		rc =  -ENOMEM;
-		goto out_free_nothing;
+	if (!IS_IQD(card) && !IS_VM_NIC(card)) {
+		qib_parms = kzalloc(sizeof_field(struct qib, parm), GFP_KERNEL);
+		if (!qib_parms)
+			return -ENOMEM;
+
+		qeth_fill_qib_parms(card, qib_parms);
 	}
 
-	qeth_create_qib_param_field(card, qib_param_field);
-	qeth_create_qib_param_field_blkt(card, qib_param_field);
+	in_sbal_ptrs[0] = card->qdio.in_q->qdio_bufs;
+	if (card->options.cq == QETH_CQ_ENABLED)
+		in_sbal_ptrs[1] = card->qdio.c_q->qdio_bufs;
 
-	in_sbal_ptrs = kcalloc(card->qdio.no_in_queues * QDIO_MAX_BUFFERS_PER_Q,
-			       sizeof(void *),
-			       GFP_KERNEL);
-	if (!in_sbal_ptrs) {
-		rc = -ENOMEM;
-		goto out_free_qib_param;
-	}
-
-	for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; i++)
-		in_sbal_ptrs[i] = card->qdio.in_q->bufs[i].buffer;
-
-	queue_start_poll = kcalloc(card->qdio.no_in_queues, sizeof(void *),
-				   GFP_KERNEL);
-	if (!queue_start_poll) {
-		rc = -ENOMEM;
-		goto out_free_in_sbals;
-	}
-	for (i = 0; i < card->qdio.no_in_queues; ++i)
-		queue_start_poll[i] = qeth_qdio_start_poll;
-
-	qeth_qdio_establish_cq(card, in_sbal_ptrs, queue_start_poll);
-
-	out_sbal_ptrs =
-		kcalloc(card->qdio.no_out_queues * QDIO_MAX_BUFFERS_PER_Q,
-			sizeof(void *),
-			GFP_KERNEL);
-	if (!out_sbal_ptrs) {
-		rc = -ENOMEM;
-		goto out_free_queue_start_poll;
-	}
-
-	for (i = 0, k = 0; i < card->qdio.no_out_queues; ++i)
-		for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; j++, k++)
-			out_sbal_ptrs[k] =
-				card->qdio.out_qs[i]->bufs[j]->buffer;
+	for (i = 0; i < card->qdio.no_out_queues; i++)
+		out_sbal_ptrs[i] = card->qdio.out_qs[i]->qdio_bufs;
 
 	memset(&init_data, 0, sizeof(struct qdio_initialize));
-	init_data.cdev                   = CARD_DDEV(card);
 	init_data.q_format		 = IS_IQD(card) ? QDIO_IQDIO_QFMT :
 							  QDIO_QETH_QFMT;
 	init_data.qib_param_field_format = 0;
-	init_data.qib_param_field        = qib_param_field;
+	init_data.qib_param_field	 = (void *)qib_parms;
 	init_data.no_input_qs            = card->qdio.no_in_queues;
 	init_data.no_output_qs           = card->qdio.no_out_queues;
 	init_data.input_handler		 = qeth_qdio_input_handler;
 	init_data.output_handler	 = qeth_qdio_output_handler;
-	init_data.queue_start_poll_array = queue_start_poll;
+	init_data.irq_poll		 = qeth_qdio_poll;
 	init_data.int_parm               = (unsigned long) card;
 	init_data.input_sbal_addr_array  = in_sbal_ptrs;
 	init_data.output_sbal_addr_array = out_sbal_ptrs;
@@ -4805,12 +5065,13 @@
 
 	if (atomic_cmpxchg(&card->qdio.state, QETH_QDIO_ALLOCATED,
 		QETH_QDIO_ESTABLISHED) == QETH_QDIO_ALLOCATED) {
-		rc = qdio_allocate(&init_data);
+		rc = qdio_allocate(CARD_DDEV(card), init_data.no_input_qs,
+				   init_data.no_output_qs);
 		if (rc) {
 			atomic_set(&card->qdio.state, QETH_QDIO_ALLOCATED);
 			goto out;
 		}
-		rc = qdio_establish(&init_data);
+		rc = qdio_establish(CARD_DDEV(card), &init_data);
 		if (rc) {
 			atomic_set(&card->qdio.state, QETH_QDIO_ALLOCATED);
 			qdio_free(CARD_DDEV(card));
@@ -4827,33 +5088,25 @@
 	default:
 		break;
 	}
+
 out:
-	kfree(out_sbal_ptrs);
-out_free_queue_start_poll:
-	kfree(queue_start_poll);
-out_free_in_sbals:
-	kfree(in_sbal_ptrs);
-out_free_qib_param:
-	kfree(qib_param_field);
-out_free_nothing:
+	kfree(qib_parms);
 	return rc;
 }
 
 static void qeth_core_free_card(struct qeth_card *card)
 {
 	QETH_CARD_TEXT(card, 2, "freecrd");
-	qeth_clean_channel(&card->read);
-	qeth_clean_channel(&card->write);
-	qeth_clean_channel(&card->data);
+
+	unregister_service_level(&card->qeth_service_level);
+	debugfs_remove_recursive(card->debugfs);
 	qeth_put_cmd(card->read_cmd);
 	destroy_workqueue(card->event_wq);
-	qeth_free_qdio_queues(card);
-	unregister_service_level(&card->qeth_service_level);
 	dev_set_drvdata(&card->gdev->dev, NULL);
 	kfree(card);
 }
 
-void qeth_trace_features(struct qeth_card *card)
+static void qeth_trace_features(struct qeth_card *card)
 {
 	QETH_CARD_TEXT(card, 2, "features");
 	QETH_CARD_HEX(card, 2, &card->options.ipa4, sizeof(card->options.ipa4));
@@ -4862,19 +5115,22 @@
 	QETH_CARD_HEX(card, 2, &card->info.diagass_support,
 		      sizeof(card->info.diagass_support));
 }
-EXPORT_SYMBOL_GPL(qeth_trace_features);
 
 static struct ccw_device_id qeth_ids[] = {
 	{CCW_DEVICE_DEVTYPE(0x1731, 0x01, 0x1732, 0x01),
 					.driver_info = QETH_CARD_TYPE_OSD},
 	{CCW_DEVICE_DEVTYPE(0x1731, 0x05, 0x1732, 0x05),
 					.driver_info = QETH_CARD_TYPE_IQD},
+#ifdef CONFIG_QETH_OSN
 	{CCW_DEVICE_DEVTYPE(0x1731, 0x06, 0x1732, 0x06),
 					.driver_info = QETH_CARD_TYPE_OSN},
+#endif
 	{CCW_DEVICE_DEVTYPE(0x1731, 0x02, 0x1732, 0x03),
 					.driver_info = QETH_CARD_TYPE_OSM},
+#ifdef CONFIG_QETH_OSX
 	{CCW_DEVICE_DEVTYPE(0x1731, 0x02, 0x1732, 0x02),
 					.driver_info = QETH_CARD_TYPE_OSX},
+#endif
 	{},
 };
 MODULE_DEVICE_TABLE(ccw, qeth_ids);
@@ -4889,7 +5145,7 @@
 	.remove = ccwgroup_remove_ccwdev,
 };
 
-int qeth_core_hardsetup_card(struct qeth_card *card, bool *carrier_ok)
+static int qeth_hardsetup_card(struct qeth_card *card, bool *carrier_ok)
 {
 	int retries = 3;
 	int rc;
@@ -4908,13 +5164,14 @@
 	qeth_stop_channel(&card->write);
 	qeth_stop_channel(&card->read);
 	qdio_free(CARD_DDEV(card));
-	rc = ccw_device_set_online(CARD_RDEV(card));
+
+	rc = qeth_start_channel(&card->read);
 	if (rc)
 		goto retriable;
-	rc = ccw_device_set_online(CARD_WDEV(card));
+	rc = qeth_start_channel(&card->write);
 	if (rc)
 		goto retriable;
-	rc = ccw_device_set_online(CARD_DDEV(card));
+	rc = qeth_start_channel(&card->data);
 	if (rc)
 		goto retriable;
 retriable:
@@ -4928,9 +5185,10 @@
 		else
 			goto retry;
 	}
+
 	qeth_determine_capabilities(card);
-	qeth_init_tokens(card);
-	qeth_init_func_level(card);
+	qeth_read_ccw_conf_data(card);
+	qeth_idx_init(card);
 
 	rc = qeth_idx_activate_read_channel(card);
 	if (rc == -EINTR) {
@@ -4975,9 +5233,9 @@
 		*carrier_ok = true;
 	}
 
-	card->options.ipa4.supported_funcs = 0;
-	card->options.ipa6.supported_funcs = 0;
-	card->options.adp.supported_funcs = 0;
+	card->options.ipa4.supported = 0;
+	card->options.ipa6.supported = 0;
+	card->options.adp.supported = 0;
 	card->options.sbp.supported_funcs = 0;
 	card->info.diagass_support = 0;
 	rc = qeth_query_ipassists(card, QETH_PROT_IPV4);
@@ -5000,6 +5258,26 @@
 		if (rc)
 			QETH_CARD_TEXT_(card, 2, "8err%d", rc);
 	}
+
+	qeth_trace_features(card);
+
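+	/* Keep the HW trap armed only if it is supported and re-arming succeeds: */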
+	if (!qeth_is_diagass_supported(card, QETH_DIAGS_CMD_TRAP) ||
+	    (card->info.hwtrap && qeth_hw_trap(card, QETH_DIAGS_TRAP_ARM)))
+		card->info.hwtrap = 0;
+
+	if (card->options.isolation != ISOLATION_MODE_NONE) {
+		rc = qeth_setadpparms_set_access_ctrl(card,
+						      card->options.isolation);
+		if (rc)
+			goto out;
+	}
+
+	rc = qeth_init_qdio_queues(card);
+	if (rc) {
+		QETH_CARD_TEXT_(card, 2, "9err%d", rc);
+		goto out;
+	}
+
 	return 0;
 out:
 	dev_warn(&card->gdev->dev, "The qeth device driver failed to recover "
@@ -5008,29 +5286,259 @@
 			 CARD_DEVID(card), rc);
 	return rc;
 }
-EXPORT_SYMBOL_GPL(qeth_core_hardsetup_card);
 
-static void qeth_create_skb_frag(struct qdio_buffer_element *element,
-				 struct sk_buff *skb, int offset, int data_len)
+static int qeth_set_online(struct qeth_card *card,
+			   const struct qeth_discipline *disc)
 {
-	struct page *page = virt_to_page(element->addr);
-	unsigned int next_frag;
+	bool carrier_ok;
+	int rc;
 
-	/* first fill the linear space */
-	if (!skb->len) {
-		unsigned int linear = min(data_len, skb_tailroom(skb));
+	mutex_lock(&card->conf_mutex);
+	QETH_CARD_TEXT(card, 2, "setonlin");
 
-		skb_put_data(skb, element->addr + offset, linear);
-		data_len -= linear;
-		if (!data_len)
-			return;
-		offset += linear;
-		/* fall through to add page frag for remaining data */
+	rc = qeth_hardsetup_card(card, &carrier_ok);
+	if (rc) {
+		QETH_CARD_TEXT_(card, 2, "2err%04x", rc);
+		rc = -ENODEV;
+		goto err_hardsetup;
 	}
 
+	qeth_print_status_message(card);
+
+	if (card->dev->reg_state != NETREG_REGISTERED)
+		/* no need for locking / error handling at this early stage: */
+		qeth_set_real_num_tx_queues(card, qeth_tx_actual_queues(card));
+
+	rc = disc->set_online(card, carrier_ok);
+	if (rc)
+		goto err_online;
+
+	/* let user_space know that device is online */
+	kobject_uevent(&card->gdev->dev.kobj, KOBJ_CHANGE);
+
+	mutex_unlock(&card->conf_mutex);
+	return 0;
+
+err_online:
+err_hardsetup:
+	qeth_qdio_clear_card(card, 0);
+	qeth_clear_working_pool_list(card);
+	qeth_flush_local_addrs(card);
+
+	qeth_stop_channel(&card->data);
+	qeth_stop_channel(&card->write);
+	qeth_stop_channel(&card->read);
+	qdio_free(CARD_DDEV(card));
+
+	mutex_unlock(&card->conf_mutex);
+	return rc;
+}
+
+int qeth_set_offline(struct qeth_card *card, const struct qeth_discipline *disc,
+		     bool resetting)
+{
+	int rc, rc2, rc3;
+
+	mutex_lock(&card->conf_mutex);
+	QETH_CARD_TEXT(card, 3, "setoffl");
+
+	if ((!resetting && card->info.hwtrap) || card->info.hwtrap == 2) {
+		qeth_hw_trap(card, QETH_DIAGS_TRAP_DISARM);
+		card->info.hwtrap = 1;
+	}
+
+	/* cancel any stalled cmd that might block the rtnl: */
+	qeth_clear_ipacmd_list(card);
+
+	rtnl_lock();
+	card->info.open_when_online = card->dev->flags & IFF_UP;
+	dev_close(card->dev);
+	netif_device_detach(card->dev);
+	netif_carrier_off(card->dev);
+	rtnl_unlock();
+
+	cancel_work_sync(&card->rx_mode_work);
+
+	disc->set_offline(card);
+
+	qeth_qdio_clear_card(card, 0);
+	qeth_drain_output_queues(card);
+	qeth_clear_working_pool_list(card);
+	qeth_flush_local_addrs(card);
+	card->info.promisc_mode = 0;
+
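+	/* Stop all three channels, keeping the first non-zero rc for logging: */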
+	rc  = qeth_stop_channel(&card->data);
+	rc2 = qeth_stop_channel(&card->write);
+	rc3 = qeth_stop_channel(&card->read);
+	if (!rc)
+		rc = (rc2) ? rc2 : rc3;
+	if (rc)
+		QETH_CARD_TEXT_(card, 2, "1err%d", rc);
+	qdio_free(CARD_DDEV(card));
+
+	/* let user_space know that device is offline */
+	kobject_uevent(&card->gdev->dev.kobj, KOBJ_CHANGE);
+
+	mutex_unlock(&card->conf_mutex);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(qeth_set_offline);
+
+static int qeth_do_reset(void *data)
+{
+	const struct qeth_discipline *disc;
+	struct qeth_card *card = data;
+	int rc;
+
+	/* Lock-free, other users will block until we are done. */
+	disc = card->discipline;
+
+	QETH_CARD_TEXT(card, 2, "recover1");
+	if (!qeth_do_run_thread(card, QETH_RECOVER_THREAD))
+		return 0;
+	QETH_CARD_TEXT(card, 2, "recover2");
+	dev_warn(&card->gdev->dev,
+		 "A recovery process has been started for the device\n");
+
+	qeth_set_offline(card, disc, true);
+	rc = qeth_set_online(card, disc);
+	if (!rc) {
+		dev_info(&card->gdev->dev,
+			 "Device successfully recovered!\n");
+	} else {
+		ccwgroup_set_offline(card->gdev);
+		dev_warn(&card->gdev->dev,
+			 "The qeth device driver failed to recover an error on the device\n");
+	}
+	qeth_clear_thread_start_bit(card, QETH_RECOVER_THREAD);
+	qeth_clear_thread_running_bit(card, QETH_RECOVER_THREAD);
+	return 0;
+}
+
+#if IS_ENABLED(CONFIG_QETH_L3)
+static void qeth_l3_rebuild_skb(struct qeth_card *card, struct sk_buff *skb,
+				struct qeth_hdr *hdr)
+{
+	struct af_iucv_trans_hdr *iucv = (struct af_iucv_trans_hdr *) skb->data;
+	struct qeth_hdr_layer3 *l3_hdr = &hdr->hdr.l3;
+	struct net_device *dev = skb->dev;
+
+	if (IS_IQD(card) && iucv->magic == ETH_P_AF_IUCV) {
+		dev_hard_header(skb, dev, ETH_P_AF_IUCV, dev->dev_addr,
+				"FAKELL", skb->len);
+		return;
+	}
+
+	if (!(l3_hdr->flags & QETH_HDR_PASSTHRU)) {
+		u16 prot = (l3_hdr->flags & QETH_HDR_IPV6) ? ETH_P_IPV6 :
+							     ETH_P_IP;
+		unsigned char tg_addr[ETH_ALEN];
+
+		skb_reset_network_header(skb);
+		switch (l3_hdr->flags & QETH_HDR_CAST_MASK) {
+		case QETH_CAST_MULTICAST:
+			if (prot == ETH_P_IP)
+				ip_eth_mc_map(ip_hdr(skb)->daddr, tg_addr);
+			else
+				ipv6_eth_mc_map(&ipv6_hdr(skb)->daddr, tg_addr);
+			QETH_CARD_STAT_INC(card, rx_multicast);
+			break;
+		case QETH_CAST_BROADCAST:
+			ether_addr_copy(tg_addr, dev->broadcast);
+			QETH_CARD_STAT_INC(card, rx_multicast);
+			break;
+		default:
+			if (card->options.sniffer)
+				skb->pkt_type = PACKET_OTHERHOST;
+			ether_addr_copy(tg_addr, dev->dev_addr);
+		}
+
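+		/* Rebuild the LL header; use "FAKELL" as source when the hdr carries no MAC: */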
+		if (l3_hdr->ext_flags & QETH_HDR_EXT_SRC_MAC_ADDR)
+			dev_hard_header(skb, dev, prot, tg_addr,
+					&l3_hdr->next_hop.rx.src_mac, skb->len);
+		else
+			dev_hard_header(skb, dev, prot, tg_addr, "FAKELL",
+					skb->len);
+	}
+
+	/* copy VLAN tag from hdr into skb */
+	if (!card->options.sniffer &&
+	    (l3_hdr->ext_flags & (QETH_HDR_EXT_VLAN_FRAME |
+				  QETH_HDR_EXT_INCLUDE_VLAN_TAG))) {
+		u16 tag = (l3_hdr->ext_flags & QETH_HDR_EXT_VLAN_FRAME) ?
+				l3_hdr->vlan_id :
+				l3_hdr->next_hop.rx.vlan_id;
+
+		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), tag);
+	}
+}
+#endif
+
+static void qeth_receive_skb(struct qeth_card *card, struct sk_buff *skb,
+			     struct qeth_hdr *hdr, bool uses_frags)
+{
+	struct napi_struct *napi = &card->napi;
+	bool is_cso;
+
+	switch (hdr->hdr.l2.id) {
+	case QETH_HEADER_TYPE_OSN:
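+		/* OSN: prepend the qeth_hdr again and hand the full frame to the callback. */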
+		skb_push(skb, sizeof(*hdr));
+		skb_copy_to_linear_data(skb, hdr, sizeof(*hdr));
+		QETH_CARD_STAT_ADD(card, rx_bytes, skb->len);
+		QETH_CARD_STAT_INC(card, rx_packets);
+
+		card->osn_info.data_cb(skb);
+		return;
+#if IS_ENABLED(CONFIG_QETH_L3)
+	case QETH_HEADER_TYPE_LAYER3:
+		qeth_l3_rebuild_skb(card, skb, hdr);
+		is_cso = hdr->hdr.l3.ext_flags & QETH_HDR_EXT_CSUM_TRANSP_REQ;
+		break;
+#endif
+	case QETH_HEADER_TYPE_LAYER2:
+		is_cso = hdr->hdr.l2.flags[1] & QETH_HDR_EXT_CSUM_TRANSP_REQ;
+		break;
+	default:
+		/* never happens */
+		if (uses_frags)
+			napi_free_frags(napi);
+		else
+			dev_kfree_skb_any(skb);
+		return;
+	}
+
+	if (is_cso && (card->dev->features & NETIF_F_RXCSUM)) {
+		skb->ip_summed = CHECKSUM_UNNECESSARY;
+		QETH_CARD_STAT_INC(card, rx_skb_csum);
+	} else {
+		skb->ip_summed = CHECKSUM_NONE;
+	}
+
+	QETH_CARD_STAT_ADD(card, rx_bytes, skb->len);
+	QETH_CARD_STAT_INC(card, rx_packets);
+	if (skb_is_nonlinear(skb)) {
+		QETH_CARD_STAT_INC(card, rx_sg_skbs);
+		QETH_CARD_STAT_ADD(card, rx_sg_frags,
+				   skb_shinfo(skb)->nr_frags);
+	}
+
+	if (uses_frags) {
+		napi_gro_frags(napi);
+	} else {
+		skb->protocol = eth_type_trans(skb, skb->dev);
+		napi_gro_receive(napi, skb);
+	}
+}
+
+static void qeth_create_skb_frag(struct sk_buff *skb, char *data, int data_len)
+{
+	struct page *page = virt_to_page(data);
+	unsigned int next_frag;
+
 	next_frag = skb_shinfo(skb)->nr_frags;
 	get_page(page);
-	skb_add_rx_frag(skb, next_frag, page, offset, data_len, data_len);
+	skb_add_rx_frag(skb, next_frag, page, offset_in_page(data), data_len,
+			data_len);
 }
 
 static inline int qeth_is_last_sbale(struct qdio_buffer_element *sbale)
@@ -5038,122 +5546,217 @@
 	return (sbale->eflags & SBAL_EFLAGS_LAST_ENTRY);
 }
 
-struct sk_buff *qeth_core_get_next_skb(struct qeth_card *card,
-		struct qeth_qdio_buffer *qethbuffer,
-		struct qdio_buffer_element **__element, int *__offset,
-		struct qeth_hdr **hdr)
+static int qeth_extract_skb(struct qeth_card *card,
+			    struct qeth_qdio_buffer *qethbuffer, u8 *element_no,
+			    int *__offset)
 {
-	struct qdio_buffer_element *element = *__element;
+	struct qeth_priv *priv = netdev_priv(card->dev);
 	struct qdio_buffer *buffer = qethbuffer->buffer;
+	struct napi_struct *napi = &card->napi;
+	struct qdio_buffer_element *element;
+	unsigned int linear_len = 0;
+	bool uses_frags = false;
 	int offset = *__offset;
+	bool use_rx_sg = false;
+	unsigned int headroom;
+	struct qeth_hdr *hdr;
 	struct sk_buff *skb;
 	int skb_len = 0;
-	void *data_ptr;
-	int data_len;
-	int headroom = 0;
-	int use_rx_sg = 0;
 
+	element = &buffer->element[*element_no];
+
+next_packet:
 	/* qeth_hdr must not cross element boundaries */
 	while (element->length < offset + sizeof(struct qeth_hdr)) {
 		if (qeth_is_last_sbale(element))
-			return NULL;
+			return -ENODATA;
 		element++;
 		offset = 0;
 	}
-	*hdr = element->addr + offset;
 
-	offset += sizeof(struct qeth_hdr);
-	switch ((*hdr)->hdr.l2.id) {
+	hdr = phys_to_virt(element->addr) + offset;
+	offset += sizeof(*hdr);
+	skb = NULL;
+
+	switch (hdr->hdr.l2.id) {
 	case QETH_HEADER_TYPE_LAYER2:
-		skb_len = (*hdr)->hdr.l2.pkt_length;
+		skb_len = hdr->hdr.l2.pkt_length;
+		linear_len = ETH_HLEN;
+		headroom = 0;
 		break;
 	case QETH_HEADER_TYPE_LAYER3:
-		skb_len = (*hdr)->hdr.l3.length;
+		skb_len = hdr->hdr.l3.length;
+		if (!IS_LAYER3(card)) {
+			QETH_CARD_STAT_INC(card, rx_dropped_notsupp);
+			goto walk_packet;
+		}
+
+		if (hdr->hdr.l3.flags & QETH_HDR_PASSTHRU) {
+			linear_len = ETH_HLEN;
+			headroom = 0;
+			break;
+		}
+
+		if (hdr->hdr.l3.flags & QETH_HDR_IPV6)
+			linear_len = sizeof(struct ipv6hdr);
+		else
+			linear_len = sizeof(struct iphdr);
 		headroom = ETH_HLEN;
 		break;
 	case QETH_HEADER_TYPE_OSN:
-		skb_len = (*hdr)->hdr.osn.pdu_length;
+		skb_len = hdr->hdr.osn.pdu_length;
+		if (!IS_OSN(card)) {
+			QETH_CARD_STAT_INC(card, rx_dropped_notsupp);
+			goto walk_packet;
+		}
+
+		linear_len = skb_len;
 		headroom = sizeof(struct qeth_hdr);
 		break;
 	default:
-		break;
+		if (hdr->hdr.l2.id & QETH_HEADER_MASK_INVAL)
+			QETH_CARD_STAT_INC(card, rx_frame_errors);
+		else
+			QETH_CARD_STAT_INC(card, rx_dropped_notsupp);
+
+		/* Can't determine packet length, drop the whole buffer. */
+		return -EPROTONOSUPPORT;
 	}
 
-	if (!skb_len)
-		return NULL;
+	if (skb_len < linear_len) {
+		QETH_CARD_STAT_INC(card, rx_dropped_runt);
+		goto walk_packet;
+	}
 
-	if (((skb_len >= card->options.rx_sg_cb) &&
-	     !IS_OSN(card) &&
-	     (!atomic_read(&card->force_alloc_skb))) ||
-	    (card->options.cq == QETH_CQ_ENABLED))
-		use_rx_sg = 1;
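+	/* SG is mandatory with a Completion Queue, and preferred for large packets: */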
+	use_rx_sg = (card->options.cq == QETH_CQ_ENABLED) ||
+		    (skb_len > READ_ONCE(priv->rx_copybreak) &&
+		     !atomic_read(&card->force_alloc_skb) &&
+		     !IS_OSN(card));
 
-	if (use_rx_sg && qethbuffer->rx_skb) {
+	if (use_rx_sg) {
 		/* QETH_CQ_ENABLED only: */
-		skb = qethbuffer->rx_skb;
-		qethbuffer->rx_skb = NULL;
-	} else {
-		unsigned int linear = (use_rx_sg) ? QETH_RX_PULL_LEN : skb_len;
+		if (qethbuffer->rx_skb &&
+		    skb_tailroom(qethbuffer->rx_skb) >= linear_len + headroom) {
+			skb = qethbuffer->rx_skb;
+			qethbuffer->rx_skb = NULL;
+			goto use_skb;
+		}
 
-		skb = napi_alloc_skb(&card->napi, linear + headroom);
+		skb = napi_get_frags(napi);
+		if (!skb) {
+			/* -ENOMEM, no point in falling back further. */
+			QETH_CARD_STAT_INC(card, rx_dropped_nomem);
+			goto walk_packet;
+		}
+
+		if (skb_tailroom(skb) >= linear_len + headroom) {
+			uses_frags = true;
+			goto use_skb;
+		}
+
+		netdev_info_once(card->dev,
+				 "Insufficient linear space in NAPI frags skb, need %u but have %u\n",
+				 linear_len + headroom, skb_tailroom(skb));
+		/* Shouldn't happen. Don't optimize, fall back to linear skb. */
 	}
-	if (!skb)
-		goto no_mem;
+
+	linear_len = skb_len;
+	skb = napi_alloc_skb(napi, linear_len + headroom);
+	if (!skb) {
+		QETH_CARD_STAT_INC(card, rx_dropped_nomem);
+		goto walk_packet;
+	}
+
+use_skb:
 	if (headroom)
 		skb_reserve(skb, headroom);
-
-	data_ptr = element->addr + offset;
+walk_packet:
 	while (skb_len) {
-		data_len = min(skb_len, (int)(element->length - offset));
-		if (data_len) {
-			if (use_rx_sg)
-				qeth_create_skb_frag(element, skb, offset,
-						     data_len);
-			else
-				skb_put_data(skb, data_ptr, data_len);
-		}
+		int data_len = min(skb_len, (int)(element->length - offset));
+		char *data = phys_to_virt(element->addr) + offset;
+
 		skb_len -= data_len;
+		offset += data_len;
+
+		/* Extract data from current element: */
+		if (skb && data_len) {
+			if (linear_len) {
+				unsigned int copy_len;
+
+				copy_len = min_t(unsigned int, linear_len,
+						 data_len);
+
+				skb_put_data(skb, data, copy_len);
+				linear_len -= copy_len;
+				data_len -= copy_len;
+				data += copy_len;
+			}
+
+			if (data_len)
+				qeth_create_skb_frag(skb, data, data_len);
+		}
+
+		/* Step forward to next element: */
 		if (skb_len) {
 			if (qeth_is_last_sbale(element)) {
 				QETH_CARD_TEXT(card, 4, "unexeob");
 				QETH_CARD_HEX(card, 2, buffer, sizeof(void *));
-				dev_kfree_skb_any(skb);
-				QETH_CARD_STAT_INC(card, rx_errors);
-				return NULL;
+				if (skb) {
+					if (uses_frags)
+						napi_free_frags(napi);
+					else
+						dev_kfree_skb_any(skb);
+					QETH_CARD_STAT_INC(card,
+							   rx_length_errors);
+				}
+				return -EMSGSIZE;
 			}
 			element++;
 			offset = 0;
-			data_ptr = element->addr;
-		} else {
-			offset += data_len;
 		}
 	}
-	*__element = element;
+
+	/* This packet was skipped, go get another one: */
+	if (!skb)
+		goto next_packet;
+
+	*element_no = element - &buffer->element[0];
 	*__offset = offset;
-	if (use_rx_sg) {
-		QETH_CARD_STAT_INC(card, rx_sg_skbs);
-		QETH_CARD_STAT_ADD(card, rx_sg_frags,
-				   skb_shinfo(skb)->nr_frags);
-	}
-	return skb;
-no_mem:
-	if (net_ratelimit()) {
-		QETH_CARD_TEXT(card, 2, "noskbmem");
-	}
-	QETH_CARD_STAT_INC(card, rx_dropped);
-	return NULL;
+
+	qeth_receive_skb(card, skb, hdr, uses_frags);
+	return 0;
 }
-EXPORT_SYMBOL_GPL(qeth_core_get_next_skb);
 
-int qeth_poll(struct napi_struct *napi, int budget)
+static unsigned int qeth_extract_skbs(struct qeth_card *card, int budget,
+				      struct qeth_qdio_buffer *buf, bool *done)
 {
-	struct qeth_card *card = container_of(napi, struct qeth_card, napi);
-	int work_done = 0;
-	struct qeth_qdio_buffer *buffer;
-	int done;
-	int new_budget = budget;
+	unsigned int work_done = 0;
 
-	while (1) {
+	while (budget) {
+		if (qeth_extract_skb(card, buf, &card->rx.buf_element,
+				     &card->rx.e_offset)) {
+			*done = true;
+			break;
+		}
+
+		work_done++;
+		budget--;
+	}
+
+	return work_done;
+}
+
+static unsigned int qeth_rx_poll(struct qeth_card *card, int budget)
+{
+	struct qeth_rx *ctx = &card->rx;
+	unsigned int work_done = 0;
+
+	while (budget > 0) {
+		struct qeth_qdio_buffer *buffer;
+		unsigned int skbs_done = 0;
+		bool done = false;
+
+		/* Fetch completed RX buffers: */
 		if (!card->rx.b_count) {
 			card->rx.qdio_err = 0;
 			card->rx.b_count = qdio_get_next_buffers(
@@ -5163,52 +5766,83 @@
 				card->rx.b_count = 0;
 				break;
 			}
-			card->rx.b_element =
-				&card->qdio.in_q->bufs[card->rx.b_index]
-				.buffer->element[0];
-			card->rx.e_offset = 0;
 		}
 
-		while (card->rx.b_count) {
-			buffer = &card->qdio.in_q->bufs[card->rx.b_index];
-			if (!(card->rx.qdio_err &&
-			    qeth_check_qdio_errors(card, buffer->buffer,
-			    card->rx.qdio_err, "qinerr")))
-				work_done +=
-					card->discipline->process_rx_buffer(
-						card, new_budget, &done);
-			else
-				done = 1;
+		/* Process one completed RX buffer: */
+		buffer = &card->qdio.in_q->bufs[card->rx.b_index];
+		if (!(card->rx.qdio_err &&
+		      qeth_check_qdio_errors(card, buffer->buffer,
+					     card->rx.qdio_err, "qinerr")))
+			skbs_done = qeth_extract_skbs(card, budget, buffer,
+						      &done);
+		else
+			done = true;
 
-			if (done) {
-				QETH_CARD_STAT_INC(card, rx_bufs);
-				qeth_put_buffer_pool_entry(card,
-					buffer->pool_entry);
-				qeth_queue_input_buffer(card, card->rx.b_index);
-				card->rx.b_count--;
-				if (card->rx.b_count) {
-					card->rx.b_index =
-						(card->rx.b_index + 1) %
-						QDIO_MAX_BUFFERS_PER_Q;
-					card->rx.b_element =
-						&card->qdio.in_q
-						->bufs[card->rx.b_index]
-						.buffer->element[0];
-					card->rx.e_offset = 0;
-				}
-			}
+		work_done += skbs_done;
+		budget -= skbs_done;
 
-			if (work_done >= budget)
-				goto out;
-			else
-				new_budget = budget - work_done;
+		if (done) {
+			QETH_CARD_STAT_INC(card, rx_bufs);
+			qeth_put_buffer_pool_entry(card, buffer->pool_entry);
+			buffer->pool_entry = NULL;
+			card->rx.b_count--;
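+			/* Hand buffers back to HW once a sufficient backlog has accumulated: */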
+			ctx->bufs_refill++;
+			ctx->bufs_refill -= qeth_rx_refill_queue(card,
+								 ctx->bufs_refill);
+
+			/* Step forward to next buffer: */
+			card->rx.b_index = QDIO_BUFNR(card->rx.b_index + 1);
+			card->rx.buf_element = 0;
+			card->rx.e_offset = 0;
 		}
 	}
 
-	napi_complete_done(napi, work_done);
-	if (qdio_start_irq(card->data.ccwdev, 0))
-		napi_schedule(&card->napi);
-out:
+	return work_done;
+}
+
+static void qeth_cq_poll(struct qeth_card *card)
+{
+	unsigned int work_done = 0;
+
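+	/* Drain the CQ, but bound the work to one full queue's worth of buffers: */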
+	while (work_done < QDIO_MAX_BUFFERS_PER_Q) {
+		unsigned int start, error;
+		int completed;
+
+		completed = qdio_inspect_queue(CARD_DDEV(card), 1, true, &start,
+					       &error);
+		if (completed <= 0)
+			return;
+
+		qeth_qdio_cq_handler(card, error, 1, start, completed);
+		work_done += completed;
+	}
+}
+
+int qeth_poll(struct napi_struct *napi, int budget)
+{
+	struct qeth_card *card = container_of(napi, struct qeth_card, napi);
+	unsigned int work_done;
+
+	work_done = qeth_rx_poll(card, budget);
+
+	if (card->options.cq == QETH_CQ_ENABLED)
+		qeth_cq_poll(card);
+
+	if (budget) {
+		struct qeth_rx *ctx = &card->rx;
+
+		/* Process any substantial refill backlog: */
+		ctx->bufs_refill -= qeth_rx_refill_queue(card, ctx->bufs_refill);
+
+		/* Exhausted the RX budget. Keep IRQ disabled, we get called again. */
+		if (work_done >= budget)
+			return work_done;
+	}
+
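+	/* Out of work: re-enable the data IRQ, and re-poll if qdio_start_irq()
+	 * reports that more work arrived in the meantime.
+	 */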
+	if (napi_complete_done(napi, work_done) &&
+	    qdio_start_irq(CARD_DDEV(card)))
+		napi_schedule(napi);
+
 	return work_done;
 }
 EXPORT_SYMBOL_GPL(qeth_poll);
@@ -5224,9 +5858,13 @@
 				 QDIO_OUTBUF_STATE_FLAG_PENDING)) {
 		WARN_ON_ONCE(card->options.cq != QETH_CQ_ENABLED);
 
-		if (atomic_cmpxchg(&buffer->state, QETH_QDIO_BUF_PRIMED,
-						   QETH_QDIO_BUF_PENDING) ==
-		    QETH_QDIO_BUF_PRIMED) {
+		QETH_CARD_TEXT_(card, 5, "pel%u", bidx);
+
+		switch (atomic_cmpxchg(&buffer->state,
+				       QETH_QDIO_BUF_PRIMED,
+				       QETH_QDIO_BUF_PENDING)) {
+		case QETH_QDIO_BUF_PRIMED:
+			/* We have initial ownership, no QAOB (yet): */
 			qeth_notify_skbs(queue, buffer, TX_NOTIFY_PENDING);
 
 			/* Handle race with qeth_qdio_handle_aob(): */
@@ -5234,39 +5872,51 @@
 					    QETH_QDIO_BUF_NEED_QAOB)) {
 			case QETH_QDIO_BUF_PENDING:
 				/* No concurrent QAOB notification. */
-				break;
+
+				/* Prepare the queue slot for immediate re-use: */
+				qeth_scrub_qdio_buffer(buffer->buffer, queue->max_elements);
+				if (qeth_init_qdio_out_buf(queue, bidx)) {
+					QETH_CARD_TEXT(card, 2, "outofbuf");
+					qeth_schedule_recovery(card);
+				}
+
+				list_add(&buffer->list_entry,
+					 &queue->pending_bufs);
+				/* Skip clearing the buffer: */
+				return;
 			case QETH_QDIO_BUF_QAOB_OK:
 				qeth_notify_skbs(queue, buffer,
 						 TX_NOTIFY_DELAYED_OK);
-				atomic_set(&buffer->state,
-					   QETH_QDIO_BUF_HANDLED_DELAYED);
+				error = false;
 				break;
 			case QETH_QDIO_BUF_QAOB_ERROR:
 				qeth_notify_skbs(queue, buffer,
 						 TX_NOTIFY_DELAYED_GENERALERROR);
-				atomic_set(&buffer->state,
-					   QETH_QDIO_BUF_HANDLED_DELAYED);
+				error = true;
 				break;
 			default:
 				WARN_ON_ONCE(1);
 			}
+
+			break;
+		case QETH_QDIO_BUF_QAOB_OK:
+			/* qeth_qdio_handle_aob() already received a QAOB: */
+			qeth_notify_skbs(queue, buffer, TX_NOTIFY_OK);
+			error = false;
+			break;
+		case QETH_QDIO_BUF_QAOB_ERROR:
+			/* qeth_qdio_handle_aob() already received a QAOB: */
+			qeth_notify_skbs(queue, buffer, TX_NOTIFY_GENERALERROR);
+			error = true;
+			break;
+		default:
+			WARN_ON_ONCE(1);
 		}
-
-		QETH_CARD_TEXT_(card, 5, "pel%u", bidx);
-
-		/* prepare the queue slot for re-use: */
-		qeth_scrub_qdio_buffer(buffer->buffer, queue->max_elements);
-		if (qeth_init_qdio_out_buf(queue, bidx)) {
-			QETH_CARD_TEXT(card, 2, "outofbuf");
-			qeth_schedule_recovery(card);
-		}
-
-		return;
-	}
-
-	if (card->options.cq == QETH_CQ_ENABLED)
+	} else if (card->options.cq == QETH_CQ_ENABLED) {
 		qeth_notify_skbs(queue, buffer,
 				 qeth_compute_cq_notification(sflags, 0));
+	}
+
 	qeth_clear_output_buffer(queue, buffer, error, budget);
 }
 
@@ -5287,6 +5937,8 @@
 		unsigned int bytes = 0;
 		int completed;
 
+		/* First reap TX buffers whose deferred (QAOB) completion has arrived: */
+		qeth_tx_complete_pending_bufs(card, queue, false);
+
 		if (qeth_out_queue_is_empty(queue)) {
 			napi_complete(napi);
 			return 0;
@@ -5305,7 +5957,7 @@
 		if (completed <= 0) {
 			/* Ensure we see TX completion for pending work: */
 			if (napi_complete_done(napi, 0))
-				qeth_tx_arm_timer(queue);
+				qeth_tx_arm_timer(queue, QETH_TX_TIMER_USECS);
 			return 0;
 		}
 
@@ -5314,12 +5966,11 @@
 			unsigned int bidx = QDIO_BUFNR(i);
 
 			buffer = queue->bufs[bidx];
-			packets += skb_queue_len(&buffer->skb_list);
+			packets += buffer->frames;
 			bytes += buffer->bytes;
 
 			qeth_handle_send_error(card, buffer, error);
 			qeth_iqd_tx_complete(queue, bidx, error, budget);
-			qeth_cleanup_handled_pending(queue, bidx, false);
 		}
 
 		netdev_tx_completed_queue(txq, packets, bytes);
@@ -5371,9 +6022,9 @@
 
 	cmd->hdr.return_code = cmd->data.setassparms.hdr.return_code;
 	if (cmd->hdr.prot_version == QETH_PROT_IPV4)
-		card->options.ipa4.enabled_funcs = cmd->hdr.ipa_enabled;
+		card->options.ipa4.enabled = cmd->hdr.assists.enabled;
 	if (cmd->hdr.prot_version == QETH_PROT_IPV6)
-		card->options.ipa6.enabled_funcs = cmd->hdr.ipa_enabled;
+		card->options.ipa6.enabled = cmd->hdr.assists.enabled;
 	return 0;
 }
 EXPORT_SYMBOL_GPL(qeth_setassparms_cb);
@@ -5428,6 +6079,7 @@
 static void qeth_unregister_dbf_views(void)
 {
 	int x;
+
 	for (x = 0; x < QETH_DBF_INFOS; x++) {
 		debug_unregister(qeth_dbf[x].id);
 		qeth_dbf[x].id = NULL;
@@ -5600,25 +6252,31 @@
 static struct net_device *qeth_alloc_netdev(struct qeth_card *card)
 {
 	struct net_device *dev;
+	struct qeth_priv *priv;
 
 	switch (card->info.type) {
 	case QETH_CARD_TYPE_IQD:
-		dev = alloc_netdev_mqs(0, "hsi%d", NET_NAME_UNKNOWN,
-				       ether_setup, QETH_MAX_QUEUES, 1);
+		dev = alloc_netdev_mqs(sizeof(*priv), "hsi%d", NET_NAME_UNKNOWN,
+				       ether_setup, QETH_MAX_OUT_QUEUES, 1);
 		break;
 	case QETH_CARD_TYPE_OSM:
-		dev = alloc_etherdev(0);
+		dev = alloc_etherdev(sizeof(*priv));
 		break;
 	case QETH_CARD_TYPE_OSN:
-		dev = alloc_netdev(0, "osn%d", NET_NAME_UNKNOWN, ether_setup);
+		dev = alloc_netdev(sizeof(*priv), "osn%d", NET_NAME_UNKNOWN,
+				   ether_setup);
 		break;
 	default:
-		dev = alloc_etherdev_mqs(0, QETH_MAX_QUEUES, 1);
+		dev = alloc_etherdev_mqs(sizeof(*priv), QETH_MAX_OUT_QUEUES, 1);
 	}
 
 	if (!dev)
 		return NULL;
 
+	priv = netdev_priv(dev);
+	priv->rx_copybreak = QETH_RX_COPYBREAK;
+	priv->tx_wanted_queues = IS_IQD(card) ? QETH_IQD_MIN_TXQ : 1;
+
 	dev->ml_priv = card;
 	dev->watchdog_timeo = QETH_TX_TIMEOUT;
 	dev->min_mtu = IS_OSN(card) ? 64 : 576;
@@ -5635,14 +6293,8 @@
 		dev->priv_flags &= ~IFF_TX_SKB_SHARING;
 		dev->hw_features |= NETIF_F_SG;
 		dev->vlan_features |= NETIF_F_SG;
-		if (IS_IQD(card)) {
+		if (IS_IQD(card))
 			dev->features |= NETIF_F_SG;
-			if (netif_set_real_num_tx_queues(dev,
-							 QETH_IQD_MIN_TXQ)) {
-				free_netdev(dev);
-				return NULL;
-			}
-		}
 	}
 
 	return dev;
@@ -5698,12 +6350,13 @@
 		goto err_card;
 	}
 
+	qeth_determine_capabilities(card);
+	qeth_set_blkt_defaults(card);
+
 	card->qdio.no_out_queues = card->dev->num_tx_queues;
 	rc = qeth_update_from_chp_desc(card);
 	if (rc)
 		goto err_chp_desc;
-	qeth_determine_capabilities(card);
-	qeth_set_blkt_defaults(card);
 
 	enforced_disc = qeth_enforce_discipline(card);
 	switch (enforced_disc) {
@@ -5712,6 +6365,7 @@
 		break;
 	default:
 		card->info.layer_enforced = true;
+		/* It's so early that we don't need the discipline_mutex yet. */
 		rc = qeth_core_load_discipline(card, enforced_disc);
 		if (rc)
 			goto err_load;
@@ -5744,10 +6398,14 @@
 
 	QETH_CARD_TEXT(card, 2, "removedv");
 
+	mutex_lock(&card->discipline_mutex);
 	if (card->discipline) {
 		card->discipline->remove(gdev);
 		qeth_core_free_discipline(card);
 	}
+	mutex_unlock(&card->discipline_mutex);
+
+	qeth_free_qdio_queues(card);
 
 	free_netdev(card->dev);
 	qeth_core_free_card(card);
@@ -5760,6 +6418,7 @@
 	int rc = 0;
 	enum qeth_discipline_id def_discipline;
 
+	mutex_lock(&card->discipline_mutex);
 	if (!card->discipline) {
 		def_discipline = IS_IQD(card) ? QETH_DISCIPLINE_LAYER3 :
 						QETH_DISCIPLINE_LAYER2;
@@ -5772,20 +6431,30 @@
 			goto err;
 		}
 	}
-	rc = card->discipline->set_online(gdev);
+
+	rc = qeth_set_online(card, card->discipline);
+
 err:
+	mutex_unlock(&card->discipline_mutex);
 	return rc;
 }
 
 static int qeth_core_set_offline(struct ccwgroup_device *gdev)
 {
 	struct qeth_card *card = dev_get_drvdata(&gdev->dev);
-	return card->discipline->set_offline(gdev);
+	int rc;
+
+	mutex_lock(&card->discipline_mutex);
+	rc = qeth_set_offline(card, card->discipline, false);
+	mutex_unlock(&card->discipline_mutex);
+
+	return rc;
 }
 
 static void qeth_core_shutdown(struct ccwgroup_device *gdev)
 {
 	struct qeth_card *card = dev_get_drvdata(&gdev->dev);
+
 	qeth_set_allowed_threads(card, 0, 1);
 	if ((gdev->state == CCWGROUP_ONLINE) && card->info.hwtrap)
 		qeth_hw_trap(card, QETH_DIAGS_TRAP_DISARM);
@@ -5794,32 +6463,6 @@
 	qdio_free(CARD_DDEV(card));
 }
 
-static int qeth_suspend(struct ccwgroup_device *gdev)
-{
-	struct qeth_card *card = dev_get_drvdata(&gdev->dev);
-
-	qeth_set_allowed_threads(card, 0, 1);
-	wait_event(card->wait_q, qeth_threads_running(card, 0xffffffff) == 0);
-	if (gdev->state == CCWGROUP_OFFLINE)
-		return 0;
-
-	card->discipline->set_offline(gdev);
-	return 0;
-}
-
-static int qeth_resume(struct ccwgroup_device *gdev)
-{
-	struct qeth_card *card = dev_get_drvdata(&gdev->dev);
-	int rc;
-
-	rc = card->discipline->set_online(gdev);
-
-	qeth_set_allowed_threads(card, 0xffffffff, 0);
-	if (rc)
-		dev_warn(&card->gdev->dev, "The qeth device driver failed to recover an error on the device\n");
-	return rc;
-}
-
 static ssize_t group_store(struct device_driver *ddrv, const char *buf,
 			   size_t count)
 {
@@ -5856,11 +6499,6 @@
 	.set_online = qeth_core_set_online,
 	.set_offline = qeth_core_set_offline,
 	.shutdown = qeth_core_shutdown,
-	.prepare = NULL,
-	.complete = NULL,
-	.freeze = qeth_suspend,
-	.thaw = qeth_resume,
-	.restore = qeth_resume,
 };
 
 struct qeth_card *qeth_get_card_by_busid(char *bus_id)
@@ -5884,9 +6522,6 @@
 	struct mii_ioctl_data *mii_data;
 	int rc = 0;
 
-	if (!card)
-		return -ENODEV;
-
 	switch (cmd) {
 	case SIOC_QETH_ADP_SET_SNMP_CONTROL:
 		rc = qeth_snmp_command(card, rq->ifr_ifru.ifru_data);
@@ -5944,7 +6579,7 @@
 }
 
 static int qeth_set_csum_on(struct qeth_card *card, enum qeth_ipa_funcs cstype,
-			    enum qeth_prot_versions prot)
+			    enum qeth_prot_versions prot, u8 *lp2lp)
 {
 	u32 required_features = QETH_IPA_CHECKSUM_UDP | QETH_IPA_CHECKSUM_TCP;
 	struct qeth_cmd_buffer *iob;
@@ -5996,18 +6631,17 @@
 
 	dev_info(&card->gdev->dev, "HW Checksumming (%sbound IPv%d) enabled\n",
 		 cstype == IPA_INBOUND_CHECKSUM ? "in" : "out", prot);
-	if (!qeth_ipa_caps_enabled(&caps, QETH_IPA_CHECKSUM_LP2LP) &&
-	    cstype == IPA_OUTBOUND_CHECKSUM)
-		dev_warn(&card->gdev->dev,
-			 "Hardware checksumming is performed only if %s and its peer use different OSA Express 3 ports\n",
-			 QETH_CARD_IFNAME(card));
+
+	if (lp2lp)
+		*lp2lp = qeth_ipa_caps_enabled(&caps, QETH_IPA_CHECKSUM_LP2LP);
+
 	return 0;
 }
 
 static int qeth_set_ipa_csum(struct qeth_card *card, bool on, int cstype,
-			     enum qeth_prot_versions prot)
+			     enum qeth_prot_versions prot, u8 *lp2lp)
 {
-	return on ? qeth_set_csum_on(card, cstype, prot) :
+	return on ? qeth_set_csum_on(card, cstype, prot, lp2lp) :
 		    qeth_set_csum_off(card, cstype, prot);
 }
 
@@ -6095,13 +6729,13 @@
 
 	if (qeth_is_supported(card, IPA_INBOUND_CHECKSUM))
 		rc_ipv4 = qeth_set_ipa_csum(card, on, IPA_INBOUND_CHECKSUM,
-					    QETH_PROT_IPV4);
+					    QETH_PROT_IPV4, NULL);
 	if (!qeth_is_supported6(card, IPA_INBOUND_CHECKSUM_V6))
 		/* no/one Offload Assist available, so the rc is trivial */
 		return rc_ipv4;
 
 	rc_ipv6 = qeth_set_ipa_csum(card, on, IPA_INBOUND_CHECKSUM,
-				    QETH_PROT_IPV6);
+				    QETH_PROT_IPV6, NULL);
 
 	if (on)
 		/* enable: success if any Assist is active */
@@ -6137,6 +6771,24 @@
 }
 EXPORT_SYMBOL_GPL(qeth_enable_hw_features);
 
+static void qeth_check_restricted_features(struct qeth_card *card,
+					   netdev_features_t changed,
+					   netdev_features_t actual)
+{
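+	/* If the relevant offloads were just disabled, the cached local
+	 * addresses are no longer needed and get flushed:
+	 */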
+	netdev_features_t ipv6_features = NETIF_F_TSO6;
+	netdev_features_t ipv4_features = NETIF_F_TSO;
+
+	if (!card->info.has_lp2lp_cso_v6)
+		ipv6_features |= NETIF_F_IPV6_CSUM;
+	if (!card->info.has_lp2lp_cso_v4)
+		ipv4_features |= NETIF_F_IP_CSUM;
+
+	if ((changed & ipv6_features) && !(actual & ipv6_features))
+		qeth_flush_local_addrs6(card);
+	if ((changed & ipv4_features) && !(actual & ipv4_features))
+		qeth_flush_local_addrs4(card);
+}
+
 int qeth_set_features(struct net_device *dev, netdev_features_t features)
 {
 	struct qeth_card *card = dev->ml_priv;
@@ -6148,13 +6800,15 @@
 
 	if ((changed & NETIF_F_IP_CSUM)) {
 		rc = qeth_set_ipa_csum(card, features & NETIF_F_IP_CSUM,
-				       IPA_OUTBOUND_CHECKSUM, QETH_PROT_IPV4);
+				       IPA_OUTBOUND_CHECKSUM, QETH_PROT_IPV4,
+				       &card->info.has_lp2lp_cso_v4);
 		if (rc)
 			changed ^= NETIF_F_IP_CSUM;
 	}
 	if (changed & NETIF_F_IPV6_CSUM) {
 		rc = qeth_set_ipa_csum(card, features & NETIF_F_IPV6_CSUM,
-				       IPA_OUTBOUND_CHECKSUM, QETH_PROT_IPV6);
+				       IPA_OUTBOUND_CHECKSUM, QETH_PROT_IPV6,
+				       &card->info.has_lp2lp_cso_v6);
 		if (rc)
 			changed ^= NETIF_F_IPV6_CSUM;
 	}
@@ -6176,6 +6830,9 @@
 			changed ^= NETIF_F_TSO6;
 	}
 
+	qeth_check_restricted_features(card, dev->features ^ features,
+				       dev->features ^ changed);
+
 	/* everything changed successfully? */
 	if ((dev->features ^ features) == changed)
 		return 0;
@@ -6212,6 +6869,36 @@
 				      struct net_device *dev,
 				      netdev_features_t features)
 {
+	struct qeth_card *card = dev->ml_priv;
+
+	/* Traffic with local next-hop is not eligible for some offloads: */
+	if (skb->ip_summed == CHECKSUM_PARTIAL &&
+	    READ_ONCE(card->options.isolation) != ISOLATION_MODE_FWD) {
+		netdev_features_t restricted = 0;
+
+		if (skb_is_gso(skb) && !netif_needs_gso(skb, features))
+			restricted |= NETIF_F_ALL_TSO;
+
+		switch (vlan_get_protocol(skb)) {
+		case htons(ETH_P_IP):
+			if (!card->info.has_lp2lp_cso_v4)
+				restricted |= NETIF_F_IP_CSUM;
+
+			if (restricted && qeth_next_hop_is_local_v4(card, skb))
+				features &= ~restricted;
+			break;
+		case htons(ETH_P_IPV6):
+			if (!card->info.has_lp2lp_cso_v6)
+				restricted |= NETIF_F_IPV6_CSUM;
+
+			if (restricted && qeth_next_hop_is_local_v6(card, skb))
+				features &= ~restricted;
+			break;
+		default:
+			break;
+		}
+	}
+
 	/* GSO segmentation builds skbs with
 	 *	a (small) linear part for the headers, and
 	 *	page frags for the data.
@@ -6244,9 +6931,16 @@
 
 	stats->rx_packets = card->stats.rx_packets;
 	stats->rx_bytes = card->stats.rx_bytes;
-	stats->rx_errors = card->stats.rx_errors;
-	stats->rx_dropped = card->stats.rx_dropped;
+	stats->rx_errors = card->stats.rx_length_errors +
+			   card->stats.rx_frame_errors +
+			   card->stats.rx_fifo_errors;
+	stats->rx_dropped = card->stats.rx_dropped_nomem +
+			    card->stats.rx_dropped_notsupp +
+			    card->stats.rx_dropped_runt;
 	stats->multicast = card->stats.rx_multicast;
+	stats->rx_length_errors = card->stats.rx_length_errors;
+	stats->rx_frame_errors = card->stats.rx_frame_errors;
+	stats->rx_fifo_errors = card->stats.rx_fifo_errors;
 
 	for (i = 0; i < card->qdio.no_out_queues; i++) {
 		queue = card->qdio.out_qs[i];
@@ -6259,12 +6953,60 @@
 }
 EXPORT_SYMBOL_GPL(qeth_get_stats64);
 
+#define TC_IQD_UCAST   0
+static void qeth_iqd_set_prio_tc_map(struct net_device *dev,
+				     unsigned int ucast_txqs)
+{
+	unsigned int prio;
+
+	/* IQD requires mcast traffic to be placed on a dedicated queue, and
+	 * qeth_iqd_select_queue() deals with this.
+	 * For unicast traffic, we defer the queue selection to the stack.
+	 * By installing a trivial prio map that spans over only the unicast
+	 * queues, we can encourage the stack to spread the ucast traffic evenly
+	 * without selecting the mcast queue.
+	 */
+
+	/* One traffic class, spanning over all active ucast queues: */
+	netdev_set_num_tc(dev, 1);
+	netdev_set_tc_queue(dev, TC_IQD_UCAST, ucast_txqs,
+			    QETH_IQD_MIN_UCAST_TXQ);
+
+	/* Map all priorities to this traffic class: */
+	for (prio = 0; prio <= TC_BITMASK; prio++)
+		netdev_set_prio_tc_map(dev, prio, TC_IQD_UCAST);
+}
+
+int qeth_set_real_num_tx_queues(struct qeth_card *card, unsigned int count)
+{
+	struct net_device *dev = card->dev;
+	int rc;
+
+	/* Per netif_setup_tc(), adjust the mapping first: */
+	if (IS_IQD(card))
+		qeth_iqd_set_prio_tc_map(dev, count - 1);
+
+	rc = netif_set_real_num_tx_queues(dev, count);
+
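+	/* Roll back the TC mapping if the queue count could not be changed: */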
+	if (rc && IS_IQD(card))
+		qeth_iqd_set_prio_tc_map(dev, dev->real_num_tx_queues - 1);
+
+	return rc;
+}
+EXPORT_SYMBOL_GPL(qeth_set_real_num_tx_queues);
+
 u16 qeth_iqd_select_queue(struct net_device *dev, struct sk_buff *skb,
 			  u8 cast_type, struct net_device *sb_dev)
 {
+	u16 txq;
+
 	if (cast_type != RTN_UNICAST)
 		return QETH_IQD_MCAST_TXQ;
-	return QETH_IQD_MIN_UCAST_TXQ;
+	if (dev->real_num_tx_queues == QETH_IQD_MIN_TXQ)
+		return QETH_IQD_MIN_UCAST_TXQ;
+
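+	/* Defer to the stack's queue selection, but never pick the mcast queue: */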
+	txq = netdev_pick_tx(dev, skb, sb_dev);
+	return (txq == QETH_IQD_MCAST_TXQ) ? QETH_IQD_MIN_UCAST_TXQ : txq;
 }
 EXPORT_SYMBOL_GPL(qeth_iqd_select_queue);
 
@@ -6274,15 +7016,10 @@
 
 	QETH_CARD_TEXT(card, 4, "qethopen");
 
-	if (qdio_stop_irq(CARD_DDEV(card), 0) < 0)
-		return -EIO;
-
 	card->data.state = CH_STATE_UP;
 	netif_tx_start_all_queues(dev);
 
-	napi_enable(&card->napi);
 	local_bh_disable();
-	napi_schedule(&card->napi);
 	if (IS_IQD(card)) {
 		struct qeth_qdio_out_q *queue;
 		unsigned int i;
@@ -6294,8 +7031,12 @@
 			napi_schedule(&queue->napi);
 		}
 	}
+
+	napi_enable(&card->napi);
+	napi_schedule(&card->napi);
 	/* kick-start the NAPI softirq: */
 	local_bh_enable();
+
 	return 0;
 }
 EXPORT_SYMBOL_GPL(qeth_open);
@@ -6305,27 +7046,31 @@
 	struct qeth_card *card = dev->ml_priv;
 
 	QETH_CARD_TEXT(card, 4, "qethstop");
+
+	napi_disable(&card->napi);
+	cancel_delayed_work_sync(&card->buffer_reclaim_work);
+	qdio_stop_irq(CARD_DDEV(card));
+
 	if (IS_IQD(card)) {
 		struct qeth_qdio_out_q *queue;
 		unsigned int i;
 
 		/* Quiesce the NAPI instances: */
-		qeth_for_each_output_queue(card, queue, i) {
+		qeth_for_each_output_queue(card, queue, i)
 			napi_disable(&queue->napi);
-			del_timer_sync(&queue->timer);
-		}
 
 		/* Stop .ndo_start_xmit, might still access queue->napi. */
 		netif_tx_disable(dev);
 
-		/* Queues may get re-allocated, so remove the NAPIs here. */
-		qeth_for_each_output_queue(card, queue, i)
+		qeth_for_each_output_queue(card, queue, i) {
+			del_timer_sync(&queue->timer);
+			/* Queues may get re-allocated, so remove the NAPIs. */
 			netif_napi_del(&queue->napi);
+		}
 	} else {
 		netif_tx_disable(dev);
 	}
 
-	napi_disable(&card->napi);
 	return 0;
 }
 EXPORT_SYMBOL_GPL(qeth_stop);
@@ -6336,6 +7081,8 @@
 
 	pr_info("loading core functions\n");
 
+	qeth_debugfs_root = debugfs_create_dir("qeth", NULL);
+
 	rc = qeth_register_dbf_views();
 	if (rc)
 		goto dbf_err;
@@ -6377,6 +7124,7 @@
 register_err:
 	qeth_unregister_dbf_views();
 dbf_err:
+	debugfs_remove_recursive(qeth_debugfs_root);
 	pr_err("Initializing the qeth device driver failed\n");
 	return rc;
 }
@@ -6390,6 +7138,7 @@
 	kmem_cache_destroy(qeth_core_header_cache);
 	root_device_unregister(qeth_core_root_dev);
 	qeth_unregister_dbf_views();
+	debugfs_remove_recursive(qeth_debugfs_root);
 	pr_info("core functions removed\n");
 }
 
diff --git a/drivers/s390/net/qeth_core_mpc.c b/drivers/s390/net/qeth_core_mpc.c
index e3f4866..68c2588 100644
--- a/drivers/s390/net/qeth_core_mpc.c
+++ b/drivers/s390/net/qeth_core_mpc.c
@@ -10,7 +10,7 @@
 #include <asm/cio.h>
 #include "qeth_core_mpc.h"
 
-unsigned char IDX_ACTIVATE_READ[] = {
+const unsigned char IDX_ACTIVATE_READ[] = {
 	0x00, 0x00, 0x80, 0x00,  0x00, 0x00, 0x00, 0x00,
 	0x19, 0x01, 0x01, 0x80,  0x00, 0x00, 0x00, 0x00,
 	0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0xc8, 0xc1,
@@ -18,7 +18,7 @@
 	0x00, 0x00
 };
 
-unsigned char IDX_ACTIVATE_WRITE[] = {
+const unsigned char IDX_ACTIVATE_WRITE[] = {
 	0x00, 0x00, 0x80, 0x00,  0x00, 0x00, 0x00, 0x00,
 	0x15, 0x01, 0x01, 0x80,  0x00, 0x00, 0x00, 0x00,
 	0xff, 0xff, 0x00, 0x00,  0x00, 0x00, 0xc8, 0xc1,
@@ -26,7 +26,7 @@
 	0x00, 0x00
 };
 
-unsigned char CM_ENABLE[] = {
+const unsigned char CM_ENABLE[] = {
 	0x00, 0xe0, 0x00, 0x00,  0x00, 0x00, 0x00, 0x01,
 	0x00, 0x00, 0x00, 0x14,  0x00, 0x00, 0x00, 0x63,
 	0x10, 0x00, 0x00, 0x01,
@@ -45,7 +45,7 @@
 	0xff, 0xff, 0xff
 };
 
-unsigned char CM_SETUP[] = {
+const unsigned char CM_SETUP[] = {
 	0x00, 0xe0, 0x00, 0x00,  0x00, 0x00, 0x00, 0x02,
 	0x00, 0x00, 0x00, 0x14,  0x00, 0x00, 0x00, 0x64,
 	0x10, 0x00, 0x00, 0x01,
@@ -65,7 +65,7 @@
 	0x04, 0x06, 0xc8, 0x00
 };
 
-unsigned char ULP_ENABLE[] = {
+const unsigned char ULP_ENABLE[] = {
 	0x00, 0xe0, 0x00, 0x00,  0x00, 0x00, 0x00, 0x03,
 	0x00, 0x00, 0x00, 0x14,  0x00, 0x00, 0x00, 0x6b,
 	0x10, 0x00, 0x00, 0x01,
@@ -85,7 +85,7 @@
 	0xf1, 0x00, 0x00
 };
 
-unsigned char ULP_SETUP[] = {
+const unsigned char ULP_SETUP[] = {
 	0x00, 0xe0, 0x00, 0x00,  0x00, 0x00, 0x00, 0x04,
 	0x00, 0x00, 0x00, 0x14,  0x00, 0x00, 0x00, 0x6c,
 	0x10, 0x00, 0x00, 0x01,
@@ -107,7 +107,7 @@
 	0x00, 0x00, 0x00, 0x00
 };
 
-unsigned char DM_ACT[] = {
+const unsigned char DM_ACT[] = {
 	0x00, 0xe0, 0x00, 0x00,  0x00, 0x00, 0x00, 0x05,
 	0x00, 0x00, 0x00, 0x14,  0x00, 0x00, 0x00, 0x55,
 	0x10, 0x00, 0x00, 0x01,
@@ -123,7 +123,7 @@
 	0x05, 0x40, 0x01, 0x01,  0x00
 };
 
-unsigned char IPA_PDU_HEADER[] = {
+const unsigned char IPA_PDU_HEADER[] = {
 	0x00, 0xe0, 0x00, 0x00,  0x77, 0x77, 0x77, 0x77,
 	0x00, 0x00, 0x00, 0x14,  0x00, 0x00, 0x00, 0x00,
 	0x10, 0x00, 0x00, 0x01,  0x00, 0x00, 0x00, 0x00,
diff --git a/drivers/s390/net/qeth_core_mpc.h b/drivers/s390/net/qeth_core_mpc.h
index 6503853..6541bab 100644
--- a/drivers/s390/net/qeth_core_mpc.h
+++ b/drivers/s390/net/qeth_core_mpc.h
@@ -11,14 +11,15 @@
 
 #include <asm/qeth.h>
 #include <uapi/linux/if_ether.h>
+#include <uapi/linux/in6.h>
 
+extern const unsigned char IPA_PDU_HEADER[];
 #define IPA_PDU_HEADER_SIZE	0x40
 #define QETH_IPA_PDU_LEN_TOTAL(buffer) (buffer + 0x0e)
 #define QETH_IPA_PDU_LEN_PDU1(buffer) (buffer + 0x26)
 #define QETH_IPA_PDU_LEN_PDU2(buffer) (buffer + 0x29)
 #define QETH_IPA_PDU_LEN_PDU3(buffer) (buffer + 0x3a)
 
-extern unsigned char IPA_PDU_HEADER[];
 #define QETH_IPA_CMD_DEST_ADDR(buffer) (buffer + 0x2c)
 
 #define QETH_SEQ_NO_LENGTH	4
@@ -52,6 +53,16 @@
 	return (caps->enabled & mask) == mask;
 }
 
+#define qeth_adp_supported(c, f) \
+	qeth_ipa_caps_supported(&c->options.adp, f)
+#define qeth_is_supported(c, f) \
+	qeth_ipa_caps_supported(&c->options.ipa4, f)
+#define qeth_is_supported6(c, f) \
+	qeth_ipa_caps_supported(&c->options.ipa6, f)
+#define qeth_is_ipafunc_supported(c, prot, f) \
+	 ((prot == QETH_PROT_IPV6) ? qeth_is_supported6(c, f) : \
+				     qeth_is_supported(c, f))
+
 enum qeth_card_types {
 	QETH_CARD_TYPE_OSD     = 1,
 	QETH_CARD_TYPE_IQD     = 5,
@@ -63,8 +74,19 @@
 #define IS_IQD(card)	((card)->info.type == QETH_CARD_TYPE_IQD)
 #define IS_OSD(card)	((card)->info.type == QETH_CARD_TYPE_OSD)
 #define IS_OSM(card)	((card)->info.type == QETH_CARD_TYPE_OSM)
+
+#ifdef CONFIG_QETH_OSN
 #define IS_OSN(card)	((card)->info.type == QETH_CARD_TYPE_OSN)
+#else
+#define IS_OSN(card)	false
+#endif
+
+#ifdef CONFIG_QETH_OSX
 #define IS_OSX(card)	((card)->info.type == QETH_CARD_TYPE_OSX)
+#else
+#define IS_OSX(card)	false
+#endif
+
 #define IS_VM_NIC(card)	((card)->info.is_vm_nic)
 
 #define QETH_MPC_DIFINFO_LEN_INDICATES_LINK_TYPE 0x18
@@ -82,10 +104,6 @@
 	QETH_LINK_TYPE_LANE         = 0x88,
 };
 
-/*
- * Routing stuff
- */
-#define RESET_ROUTING_FLAG 0x10 /* indicate that routing type shall be set */
 enum qeth_routing_types {
 	/* TODO: set to bit flag used in IPA Command */
 	NO_ROUTER		= 0,
@@ -337,22 +355,21 @@
 
 /* (SET)DELIP(M) IPA stuff ***************************************************/
 struct qeth_ipacmd_setdelip4 {
-	__u8   ip_addr[4];
-	__u8   mask[4];
+	__be32 addr;
+	__be32 mask;
 	__u32  flags;
 } __attribute__ ((packed));
 
 struct qeth_ipacmd_setdelip6 {
-	__u8   ip_addr[16];
-	__u8   mask[16];
+	struct in6_addr addr;
+	struct in6_addr prefix;
 	__u32  flags;
 } __attribute__ ((packed));
 
 struct qeth_ipacmd_setdelipm {
 	__u8 mac[6];
 	__u8 padding[2];
-	__u8 ip6[12];
-	__u8 ip4[4];
+	struct in6_addr ip;
 } __attribute__ ((packed));
 
 struct qeth_ipacmd_layer2setdelmac {
@@ -417,11 +434,10 @@
 		struct qeth_arp_cache_entry arp_entry;
 		struct qeth_arp_query_data query_arp;
 		struct qeth_tso_start_data tso;
-		__u8 ip[16];
 	} data;
 } __attribute__ ((packed));
 
-#define SETASS_DATA_SIZEOF(field) FIELD_SIZEOF(struct qeth_ipacmd_setassparms,\
+#define SETASS_DATA_SIZEOF(field) sizeof_field(struct qeth_ipacmd_setassparms,\
 					       data.field)
 
 /* SETRTG IPA Command:    ****************************************************/
@@ -535,13 +551,14 @@
 	} data;
 } __attribute__ ((packed));
 
-#define SETADP_DATA_SIZEOF(field) FIELD_SIZEOF(struct qeth_ipacmd_setadpparms,\
+#define SETADP_DATA_SIZEOF(field) sizeof_field(struct qeth_ipacmd_setadpparms,\
 					       data.field)
 
 /* CREATE_ADDR IPA Command:    ***********************************************/
 struct qeth_create_destroy_address {
-	__u8 unique_id[8];
-} __attribute__ ((packed));
+	u8 mac_addr[ETH_ALEN];
+	u16 uid;
+};
 
 /* SET DIAGNOSTIC ASSIST IPA Command:	 *************************************/
 
@@ -648,7 +665,7 @@
 	} data;
 };
 
-#define VNICC_DATA_SIZEOF(field)	FIELD_SIZEOF(struct qeth_ipacmd_vnicc,\
+#define VNICC_DATA_SIZEOF(field)	sizeof_field(struct qeth_ipacmd_vnicc,\
 						     data.field)
 
 /* SETBRIDGEPORT IPA Command:	 *********************************************/
@@ -702,15 +719,8 @@
 		struct net_if_token token;
 } __packed;
 
-struct qeth_sbp_query_ports {
-	__u8 primary_bp_supported;
-	__u8 secondary_bp_supported;
-	__u8 num_entries;
-	__u8 entry_length;
-	struct qeth_sbp_port_entry entry[];
-} __packed;
-
-struct qeth_sbp_state_change {
+/* For IPA_SBP_QUERY_BRIDGE_PORTS, IPA_SBP_BRIDGE_PORT_STATE_CHANGE */
+struct qeth_sbp_port_data {
 	__u8 primary_bp_supported;
 	__u8 secondary_bp_supported;
 	__u8 num_entries;
@@ -724,12 +734,11 @@
 	union {
 		struct qeth_sbp_query_cmds_supp query_cmds_supp;
 		struct qeth_sbp_set_primary set_primary;
-		struct qeth_sbp_query_ports query_ports;
-		struct qeth_sbp_state_change state_change;
+		struct qeth_sbp_port_data port_data;
 	} data;
 } __packed;
 
-#define SBP_DATA_SIZEOF(field)	FIELD_SIZEOF(struct qeth_ipacmd_setbridgeport,\
+#define SBP_DATA_SIZEOF(field)	sizeof_field(struct qeth_ipacmd_setbridgeport,\
 					     data.field)
 
 /* ADDRESS_CHANGE_NOTIFICATION adapter-initiated "command" *******************/
@@ -755,6 +764,29 @@
 	struct qeth_ipacmd_addr_change_entry entry[];
 } __packed;
 
+/* [UN]REGISTER_LOCAL_ADDRESS notifications */
+struct qeth_ipacmd_local_addr4 {
+	__be32 addr;
+	u32 flags;
+};
+
+struct qeth_ipacmd_local_addrs4 {
+	u32 count;
+	u32 addr_length;
+	struct qeth_ipacmd_local_addr4 addrs[];
+};
+
+struct qeth_ipacmd_local_addr6 {
+	struct in6_addr addr;
+	u32 flags;
+};
+
+struct qeth_ipacmd_local_addrs6 {
+	u32 count;
+	u32 addr_length;
+	struct qeth_ipacmd_local_addr6 addrs[];
+};
+
 /* Header for each IPA command */
 struct qeth_ipacmd_hdr {
 	__u8   command;
@@ -766,8 +798,7 @@
 	__u8   prim_version_no;
 	__u8   param_count;
 	__u16  prot_version;
-	__u32  ipa_supported;
-	__u32  ipa_enabled;
+	struct qeth_ipa_caps assists;
 } __attribute__ ((packed));
 
 /* The IPA command itself */
@@ -787,10 +818,12 @@
 		struct qeth_ipacmd_setbridgeport	sbp;
 		struct qeth_ipacmd_addr_change		addrchange;
 		struct qeth_ipacmd_vnicc		vnicc;
+		struct qeth_ipacmd_local_addrs4		local_addrs4;
+		struct qeth_ipacmd_local_addrs6		local_addrs6;
 	} data;
 } __attribute__ ((packed));
 
-#define IPA_DATA_SIZEOF(field)	FIELD_SIZEOF(struct qeth_ipa_cmd, data.field)
+#define IPA_DATA_SIZEOF(field)	sizeof_field(struct qeth_ipa_cmd, data.field)
 
 /*
  * special command for ARP processing.
@@ -817,7 +850,7 @@
 /* END OF   IP Assist related definitions                                    */
 /*****************************************************************************/
 
-extern unsigned char CM_ENABLE[];
+extern const unsigned char CM_ENABLE[];
 #define CM_ENABLE_SIZE 0x63
 #define QETH_CM_ENABLE_ISSUER_RM_TOKEN(buffer) (buffer + 0x2c)
 #define QETH_CM_ENABLE_FILTER_TOKEN(buffer) (buffer + 0x53)
@@ -827,7 +860,7 @@
 		(PDU_ENCAPSULATION(buffer) + 0x13)
 
 
-extern unsigned char CM_SETUP[];
+extern const unsigned char CM_SETUP[];
 #define CM_SETUP_SIZE 0x64
 #define QETH_CM_SETUP_DEST_ADDR(buffer) (buffer + 0x2c)
 #define QETH_CM_SETUP_CONNECTION_TOKEN(buffer) (buffer + 0x51)
@@ -836,7 +869,7 @@
 #define QETH_CM_SETUP_RESP_DEST_ADDR(buffer) \
 		(PDU_ENCAPSULATION(buffer) + 0x1a)
 
-extern unsigned char ULP_ENABLE[];
+extern const unsigned char ULP_ENABLE[];
 #define ULP_ENABLE_SIZE 0x6b
 #define QETH_ULP_ENABLE_LINKNUM(buffer) (buffer + 0x61)
 #define QETH_ULP_ENABLE_DEST_ADDR(buffer) (buffer + 0x2c)
@@ -857,7 +890,7 @@
 #define QETH_ULP_ENABLE_PROT_TYPE(buffer) (buffer + 0x50)
 #define QETH_IPA_CMD_PROT_TYPE(buffer) (buffer + 0x19)
 
-extern unsigned char ULP_SETUP[];
+extern const unsigned char ULP_SETUP[];
 #define ULP_SETUP_SIZE 0x6c
 #define QETH_ULP_SETUP_DEST_ADDR(buffer) (buffer + 0x2c)
 #define QETH_ULP_SETUP_CONNECTION_TOKEN(buffer) (buffer + 0x51)
@@ -869,7 +902,7 @@
 		(PDU_ENCAPSULATION(buffer) + 0x1a)
 
 
-extern unsigned char DM_ACT[];
+extern const unsigned char DM_ACT[];
 #define DM_ACT_SIZE 0x55
 #define QETH_DM_ACT_DEST_ADDR(buffer) (buffer + 0x2c)
 #define QETH_DM_ACT_CONNECTION_TOKEN(buffer) (buffer + 0x51)
@@ -880,12 +913,12 @@
 #define QETH_PDU_HEADER_SEQ_NO(buffer) (buffer + 0x1c)
 #define QETH_PDU_HEADER_ACK_SEQ_NO(buffer) (buffer + 0x20)
 
-extern unsigned char IDX_ACTIVATE_READ[];
-extern unsigned char IDX_ACTIVATE_WRITE[];
-
+extern const unsigned char IDX_ACTIVATE_READ[];
+extern const unsigned char IDX_ACTIVATE_WRITE[];
 #define IDX_ACTIVATE_SIZE	0x22
 #define QETH_IDX_ACT_PNO(buffer) (buffer+0x0b)
 #define QETH_IDX_ACT_ISSUER_RM_TOKEN(buffer) (buffer + 0x0c)
+#define QETH_IDX_ACT_INVAL_FRAME	0x40
 #define QETH_IDX_NO_PORTNAME_REQUIRED(buffer) ((buffer)[0x0b] & 0x80)
 #define QETH_IDX_ACT_FUNC_LEVEL(buffer) (buffer + 0x10)
 #define QETH_IDX_ACT_DATASET_NAME(buffer) (buffer + 0x16)
diff --git a/drivers/s390/net/qeth_core_sys.c b/drivers/s390/net/qeth_core_sys.c
index 4c3e222..4441b33 100644
--- a/drivers/s390/net/qeth_core_sys.c
+++ b/drivers/s390/net/qeth_core_sys.c
@@ -20,14 +20,10 @@
 				struct device_attribute *attr, char *buf)
 {
 	struct qeth_card *card = dev_get_drvdata(dev);
-	if (!card)
-		return -EINVAL;
 
 	switch (card->state) {
 	case CARD_STATE_DOWN:
 		return sprintf(buf, "DOWN\n");
-	case CARD_STATE_HARDSETUP:
-		return sprintf(buf, "HARDSETUP\n");
 	case CARD_STATE_SOFTSETUP:
 		if (card->dev->flags & IFF_UP)
 			return sprintf(buf, "UP (LAN %s)\n",
@@ -45,8 +41,6 @@
 				struct device_attribute *attr, char *buf)
 {
 	struct qeth_card *card = dev_get_drvdata(dev);
-	if (!card)
-		return -EINVAL;
 
 	return sprintf(buf, "%02X\n", card->info.chpid);
 }
@@ -57,9 +51,8 @@
 				struct device_attribute *attr, char *buf)
 {
 	struct qeth_card *card = dev_get_drvdata(dev);
-	if (!card)
-		return -EINVAL;
-	return sprintf(buf, "%s\n", QETH_CARD_IFNAME(card));
+
+	return sprintf(buf, "%s\n", netdev_name(card->dev));
 }
 
 static DEVICE_ATTR(if_name, 0444, qeth_dev_if_name_show, NULL);
@@ -68,8 +61,6 @@
 				struct device_attribute *attr, char *buf)
 {
 	struct qeth_card *card = dev_get_drvdata(dev);
-	if (!card)
-		return -EINVAL;
 
 	return sprintf(buf, "%s\n", qeth_get_cardname_short(card));
 }
@@ -94,8 +85,6 @@
 				struct device_attribute *attr, char *buf)
 {
 	struct qeth_card *card = dev_get_drvdata(dev);
-	if (!card)
-		return -EINVAL;
 
 	return sprintf(buf, "%s\n", qeth_get_bufsize_str(card));
 }
@@ -106,8 +95,6 @@
 			struct device_attribute *attr, char *buf)
 {
 	struct qeth_card *card = dev_get_drvdata(dev);
-	if (!card)
-		return -EINVAL;
 
 	return sprintf(buf, "%i\n", card->dev->dev_port);
 }
@@ -116,11 +103,13 @@
 		struct device_attribute *attr, const char *buf, size_t count)
 {
 	struct qeth_card *card = dev_get_drvdata(dev);
-	char *tmp;
 	unsigned int portno, limit;
 	int rc = 0;
 
-	if (!card)
+	rc = kstrtouint(buf, 16, &portno);
+	if (rc)
+		return rc;
+	if (portno > QETH_MAX_PORTNO)
 		return -EINVAL;
 
 	mutex_lock(&card->conf_mutex);
@@ -129,11 +118,6 @@
 		goto out;
 	}
 
-	portno = simple_strtoul(buf, &tmp, 16);
-	if (portno > QETH_MAX_PORTNO) {
-		rc = -EINVAL;
-		goto out;
-	}
 	limit = (card->ssqd.pcnt ? card->ssqd.pcnt - 1 : card->ssqd.pcnt);
 	if (portno > limit) {
 		rc = -EINVAL;
@@ -171,9 +155,6 @@
 {
 	struct qeth_card *card = dev_get_drvdata(dev);
 
-	if (!card)
-		return -EINVAL;
-
 	switch (card->qdio.do_prio_queueing) {
 	case QETH_PRIO_Q_ING_PREC:
 		return sprintf(buf, "%s\n", "by precedence");
@@ -183,9 +164,11 @@
 		return sprintf(buf, "%s\n", "by skb-priority");
 	case QETH_PRIO_Q_ING_VLAN:
 		return sprintf(buf, "%s\n", "by VLAN headers");
-	default:
+	case QETH_PRIO_Q_ING_FIXED:
 		return sprintf(buf, "always queue %i\n",
 			       card->qdio.default_out_queue);
+	default:
+		return sprintf(buf, "disabled\n");
 	}
 }
 
@@ -195,10 +178,7 @@
 	struct qeth_card *card = dev_get_drvdata(dev);
 	int rc = 0;
 
-	if (!card)
-		return -EINVAL;
-
-	if (IS_IQD(card))
+	if (IS_IQD(card) || IS_VM_NIC(card))
 		return -EOPNOTSUPP;
 
 	mutex_lock(&card->conf_mutex);
@@ -233,16 +213,16 @@
 		card->qdio.do_prio_queueing = QETH_PRIO_Q_ING_VLAN;
 		card->qdio.default_out_queue = QETH_DEFAULT_QUEUE;
 	} else if (sysfs_streq(buf, "no_prio_queueing:0")) {
-		card->qdio.do_prio_queueing = QETH_NO_PRIO_QUEUEING;
+		card->qdio.do_prio_queueing = QETH_PRIO_Q_ING_FIXED;
 		card->qdio.default_out_queue = 0;
 	} else if (sysfs_streq(buf, "no_prio_queueing:1")) {
-		card->qdio.do_prio_queueing = QETH_NO_PRIO_QUEUEING;
+		card->qdio.do_prio_queueing = QETH_PRIO_Q_ING_FIXED;
 		card->qdio.default_out_queue = 1;
 	} else if (sysfs_streq(buf, "no_prio_queueing:2")) {
-		card->qdio.do_prio_queueing = QETH_NO_PRIO_QUEUEING;
+		card->qdio.do_prio_queueing = QETH_PRIO_Q_ING_FIXED;
 		card->qdio.default_out_queue = 2;
 	} else if (sysfs_streq(buf, "no_prio_queueing:3")) {
-		card->qdio.do_prio_queueing = QETH_NO_PRIO_QUEUEING;
+		card->qdio.do_prio_queueing = QETH_PRIO_Q_ING_FIXED;
 		card->qdio.default_out_queue = 3;
 	} else if (sysfs_streq(buf, "no_prio_queueing")) {
 		card->qdio.do_prio_queueing = QETH_NO_PRIO_QUEUEING;
@@ -262,9 +242,6 @@
 {
 	struct qeth_card *card = dev_get_drvdata(dev);
 
-	if (!card)
-		return -EINVAL;
-
 	return sprintf(buf, "%i\n", card->qdio.in_buf_pool.buf_count);
 }
 
@@ -272,12 +249,12 @@
 		struct device_attribute *attr, const char *buf, size_t count)
 {
 	struct qeth_card *card = dev_get_drvdata(dev);
-	char *tmp;
-	int cnt, old_cnt;
+	unsigned int cnt;
 	int rc = 0;
 
-	if (!card)
-		return -EINVAL;
+	rc = kstrtouint(buf, 10, &cnt);
+	if (rc)
+		return rc;
 
 	mutex_lock(&card->conf_mutex);
 	if (card->state != CARD_STATE_DOWN) {
@@ -285,13 +262,9 @@
 		goto out;
 	}
 
-	old_cnt = card->qdio.in_buf_pool.buf_count;
-	cnt = simple_strtoul(buf, &tmp, 10);
-	cnt = (cnt < QETH_IN_BUF_COUNT_MIN) ? QETH_IN_BUF_COUNT_MIN :
-		((cnt > QETH_IN_BUF_COUNT_MAX) ? QETH_IN_BUF_COUNT_MAX : cnt);
-	if (old_cnt != cnt) {
-		rc = qeth_realloc_buffer_pool(card, cnt);
-	}
+	cnt = clamp(cnt, QETH_IN_BUF_COUNT_MIN, QETH_IN_BUF_COUNT_MAX);
+	rc = qeth_resize_buffer_pool(card, cnt);
+
 out:
 	mutex_unlock(&card->conf_mutex);
 	return rc ? rc : count;
@@ -304,20 +277,20 @@
 		struct device_attribute *attr, const char *buf, size_t count)
 {
 	struct qeth_card *card = dev_get_drvdata(dev);
-	char *tmp;
-	int i;
+	bool reset;
+	int rc;
 
-	if (!card)
-		return -EINVAL;
+	rc = kstrtobool(buf, &reset);
+	if (rc)
+		return rc;
 
 	if (!qeth_card_hw_is_reachable(card))
 		return -EPERM;
 
-	i = simple_strtoul(buf, &tmp, 16);
-	if (i == 1)
-		qeth_schedule_recovery(card);
+	if (reset)
+		rc = qeth_schedule_recovery(card);
 
-	return count;
+	return rc ? rc : count;
 }
 
 static DEVICE_ATTR(recover, 0200, NULL, qeth_dev_recover_store);
@@ -325,11 +298,6 @@
 static ssize_t qeth_dev_performance_stats_show(struct device *dev,
 				struct device_attribute *attr, char *buf)
 {
-	struct qeth_card *card = dev_get_drvdata(dev);
-
-	if (!card)
-		return -EINVAL;
-
 	return sprintf(buf, "1\n");
 }
 
@@ -342,9 +310,6 @@
 	bool reset;
 	int rc;
 
-	if (!card)
-		return -EINVAL;
-
 	rc = kstrtobool(buf, &reset);
 	if (rc)
 		return rc;
@@ -370,9 +335,6 @@
 {
 	struct qeth_card *card = dev_get_drvdata(dev);
 
-	if (!card)
-		return -EINVAL;
-
 	return sprintf(buf, "%i\n", card->options.layer);
 }
 
@@ -381,21 +343,15 @@
 {
 	struct qeth_card *card = dev_get_drvdata(dev);
 	struct net_device *ndev;
-	char *tmp;
-	int i, rc = 0;
 	enum qeth_discipline_id newdis;
+	unsigned int input;
+	int rc;
 
-	if (!card)
-		return -EINVAL;
+	rc = kstrtouint(buf, 16, &input);
+	if (rc)
+		return rc;
 
-	mutex_lock(&card->discipline_mutex);
-	if (card->state != CARD_STATE_DOWN) {
-		rc = -EPERM;
-		goto out;
-	}
-
-	i = simple_strtoul(buf, &tmp, 16);
-	switch (i) {
+	switch (input) {
 	case 0:
 		newdis = QETH_DISCIPLINE_LAYER3;
 		break;
@@ -403,7 +359,12 @@
 		newdis = QETH_DISCIPLINE_LAYER2;
 		break;
 	default:
-		rc = -EINVAL;
+		return -EINVAL;
+	}
+
+	mutex_lock(&card->discipline_mutex);
+	if (card->state != CARD_STATE_DOWN) {
+		rc = -EPERM;
 		goto out;
 	}
 
@@ -453,9 +414,6 @@
 {
 	struct qeth_card *card = dev_get_drvdata(dev);
 
-	if (!card)
-		return -EINVAL;
-
 	switch (card->options.isolation) {
 	case ISOLATION_MODE_NONE:
 		return snprintf(buf, 6, "%s\n", ATTR_QETH_ISOLATION_NONE);
@@ -475,9 +433,6 @@
 	enum qeth_ipa_isolation_modes isolation;
 	int rc = 0;
 
-	if (!card)
-		return -EINVAL;
-
 	mutex_lock(&card->conf_mutex);
 	if (!IS_OSD(card) && !IS_OSX(card)) {
 		rc = -EOPNOTSUPP;
@@ -497,19 +452,17 @@
 		rc = -EINVAL;
 		goto out;
 	}
-	rc = count;
 
-	/* defer IP assist if device is offline (until discipline->set_online)*/
-	card->options.prev_isolation = card->options.isolation;
-	card->options.isolation = isolation;
-	if (qeth_card_hw_is_reachable(card)) {
-		int ipa_rc = qeth_set_access_ctrl_online(card, 1);
-		if (ipa_rc != 0)
-			rc = ipa_rc;
-	}
+	if (qeth_card_hw_is_reachable(card))
+		rc = qeth_setadpparms_set_access_ctrl(card, isolation);
+
+	if (!rc)
+		WRITE_ONCE(card->options.isolation, isolation);
+
 out:
 	mutex_unlock(&card->conf_mutex);
-	return rc;
+
+	return rc ? rc : count;
 }
 
 static DEVICE_ATTR(isolation, 0644, qeth_dev_isolation_show,
@@ -522,9 +475,6 @@
 	struct qeth_switch_info sw_info;
 	int	rc = 0;
 
-	if (!card)
-		return -EINVAL;
-
 	if (!qeth_card_hw_is_reachable(card))
 		return sprintf(buf, "n/a\n");
 
@@ -555,8 +505,6 @@
 {
 	struct qeth_card *card = dev_get_drvdata(dev);
 
-	if (!card)
-		return -EINVAL;
 	if (card->info.hwtrap)
 		return snprintf(buf, 5, "arm\n");
 	else
@@ -570,9 +518,6 @@
 	int rc = 0;
 	int state = 0;
 
-	if (!card)
-		return -EINVAL;
-
 	mutex_lock(&card->conf_mutex);
 	if (qeth_card_hw_is_reachable(card))
 		state = 1;
@@ -607,35 +552,24 @@
 static DEVICE_ATTR(hw_trap, 0644, qeth_hw_trap_show,
 		   qeth_hw_trap_store);
 
-static ssize_t qeth_dev_blkt_show(char *buf, struct qeth_card *card, int value)
-{
-
-	if (!card)
-		return -EINVAL;
-
-	return sprintf(buf, "%i\n", value);
-}
-
 static ssize_t qeth_dev_blkt_store(struct qeth_card *card,
 		const char *buf, size_t count, int *value, int max_value)
 {
-	char *tmp;
-	int i, rc = 0;
+	unsigned int input;
+	int rc;
 
-	if (!card)
+	rc = kstrtouint(buf, 10, &input);
+	if (rc)
+		return rc;
+
+	if (input > max_value)
 		return -EINVAL;
 
 	mutex_lock(&card->conf_mutex);
-	if (card->state != CARD_STATE_DOWN) {
+	if (card->state != CARD_STATE_DOWN)
 		rc = -EPERM;
-		goto out;
-	}
-	i = simple_strtoul(buf, &tmp, 10);
-	if (i <= max_value)
-		*value = i;
 	else
-		rc = -EINVAL;
-out:
+		*value = input;
 	mutex_unlock(&card->conf_mutex);
 	return rc ? rc : count;
 }
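
Note the ordering in the reworked blkt handler: parsing and range checking happen before conf_mutex is taken, so malformed or out-of-range input returns early without ever touching device state, and the locked section collapses to a plain if/else with no goto. The error policy also differs deliberately from the buffer-pool attribute above: blkt values beyond max_value are rejected with -EINVAL rather than clamped.
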
@@ -645,7 +579,7 @@
 {
 	struct qeth_card *card = dev_get_drvdata(dev);
 
-	return qeth_dev_blkt_show(buf, card, card->info.blkt.time_total);
+	return sprintf(buf, "%i\n", card->info.blkt.time_total);
 }
 
 static ssize_t qeth_dev_blkt_total_store(struct device *dev,
@@ -657,8 +591,6 @@
 				   &card->info.blkt.time_total, 5000);
 }
 
-
-
 static DEVICE_ATTR(total, 0644, qeth_dev_blkt_total_show,
 		   qeth_dev_blkt_total_store);
 
@@ -667,7 +599,7 @@
 {
 	struct qeth_card *card = dev_get_drvdata(dev);
 
-	return qeth_dev_blkt_show(buf, card, card->info.blkt.inter_packet);
+	return sprintf(buf, "%i\n", card->info.blkt.inter_packet);
 }
 
 static ssize_t qeth_dev_blkt_inter_store(struct device *dev,
@@ -687,8 +619,7 @@
 {
 	struct qeth_card *card = dev_get_drvdata(dev);
 
-	return qeth_dev_blkt_show(buf, card,
-				  card->info.blkt.inter_packet_jumbo);
+	return sprintf(buf, "%i\n", card->info.blkt.inter_packet_jumbo);
 }
 
 static ssize_t qeth_dev_blkt_inter_jumbo_store(struct device *dev,
diff --git a/drivers/s390/net/qeth_ethtool.c b/drivers/s390/net/qeth_ethtool.c
index 096698d..b5caa72 100644
--- a/drivers/s390/net/qeth_ethtool.c
+++ b/drivers/s390/net/qeth_ethtool.c
@@ -39,6 +39,8 @@
 	QETH_TXQ_STAT("TSO bytes", tso_bytes),
 	QETH_TXQ_STAT("Packing mode switches", packing_mode_switch),
 	QETH_TXQ_STAT("Queue stopped", stopped),
+	QETH_TXQ_STAT("Doorbell", doorbell),
+	QETH_TXQ_STAT("IRQ for frames", coal_frames),
 	QETH_TXQ_STAT("Completion yield", completion_yield),
 	QETH_TXQ_STAT("Completion timer", completion_timer),
 };
@@ -49,6 +51,9 @@
 	QETH_CARD_STAT("rx0 SG skbs", rx_sg_skbs),
 	QETH_CARD_STAT("rx0 SG page frags", rx_sg_frags),
 	QETH_CARD_STAT("rx0 SG page allocs", rx_sg_alloc_page),
+	QETH_CARD_STAT("rx0 dropped, no memory", rx_dropped_nomem),
+	QETH_CARD_STAT("rx0 dropped, bad format", rx_dropped_notsupp),
+	QETH_CARD_STAT("rx0 dropped, runt", rx_dropped_runt),
 };
 
 #define TXQ_STATS_LEN	ARRAY_SIZE(txq_stats)
@@ -105,6 +110,38 @@
 				   txq_stats, TXQ_STATS_LEN);
 }
 
+static void __qeth_set_coalesce(struct net_device *dev,
+				struct qeth_qdio_out_q *queue,
+				struct ethtool_coalesce *coal)
+{
+	WRITE_ONCE(queue->coalesce_usecs, coal->tx_coalesce_usecs);
+	WRITE_ONCE(queue->max_coalesced_frames, coal->tx_max_coalesced_frames);
+
+	if (coal->tx_coalesce_usecs &&
+	    netif_running(dev) &&
+	    !qeth_out_queue_is_empty(queue))
+		qeth_tx_arm_timer(queue, coal->tx_coalesce_usecs);
+}
+
+static int qeth_set_coalesce(struct net_device *dev,
+			     struct ethtool_coalesce *coal)
+{
+	struct qeth_card *card = dev->ml_priv;
+	struct qeth_qdio_out_q *queue;
+	unsigned int i;
+
+	if (!IS_IQD(card))
+		return -EOPNOTSUPP;
+
+	if (!coal->tx_coalesce_usecs && !coal->tx_max_coalesced_frames)
+		return -EINVAL;
+
+	qeth_for_each_output_queue(card, queue, i)
+		__qeth_set_coalesce(dev, queue, coal);
+
+	return 0;
+}
+
 static void qeth_get_ringparam(struct net_device *dev,
 			       struct ethtool_ringparam *param)
 {
@@ -150,7 +187,6 @@
 
 	strlcpy(info->driver, IS_LAYER2(card) ? "qeth_l2" : "qeth_l3",
 		sizeof(info->driver));
-	strlcpy(info->version, "1.0", sizeof(info->version));
 	strlcpy(info->fw_version, card->info.mcl_level,
 		sizeof(info->fw_version));
 	snprintf(info->bus_info, sizeof(info->bus_info), "%s/%s/%s",
@@ -172,6 +208,118 @@
 	channels->combined_count = 0;
 }
 
+static int qeth_set_channels(struct net_device *dev,
+			     struct ethtool_channels *channels)
+{
+	struct qeth_priv *priv = netdev_priv(dev);
+	struct qeth_card *card = dev->ml_priv;
+	int rc;
+
+	if (channels->rx_count == 0 || channels->tx_count == 0)
+		return -EINVAL;
+	if (channels->tx_count > card->qdio.no_out_queues)
+		return -EINVAL;
+
+	/* Prio-queueing needs all TX queues: */
+	if (qeth_uses_tx_prio_queueing(card))
+		return -EPERM;
+
+	if (IS_IQD(card)) {
+		if (channels->tx_count < QETH_IQD_MIN_TXQ)
+			return -EINVAL;
+
+		/* Reject downgrade while running. It could push displaced
+		 * ucast flows onto txq0, which is reserved for mcast.
+		 */
+		if (netif_running(dev) &&
+		    channels->tx_count < dev->real_num_tx_queues)
+			return -EPERM;
+	}
+
+	rc = qeth_set_real_num_tx_queues(card, channels->tx_count);
+	if (!rc)
+		priv->tx_wanted_queues = channels->tx_count;
+
+	return rc;
+}
+
+static int qeth_get_ts_info(struct net_device *dev,
+			    struct ethtool_ts_info *info)
+{
+	struct qeth_card *card = dev->ml_priv;
+
+	if (!IS_IQD(card))
+		return -EOPNOTSUPP;
+
+	return ethtool_op_get_ts_info(dev, info);
+}
+
+static int qeth_get_tunable(struct net_device *dev,
+			    const struct ethtool_tunable *tuna, void *data)
+{
+	struct qeth_priv *priv = netdev_priv(dev);
+
+	switch (tuna->id) {
+	case ETHTOOL_RX_COPYBREAK:
+		*(u32 *)data = priv->rx_copybreak;
+		return 0;
+	default:
+		return -EOPNOTSUPP;
+	}
+}
+
+static int qeth_set_tunable(struct net_device *dev,
+			    const struct ethtool_tunable *tuna,
+			    const void *data)
+{
+	struct qeth_priv *priv = netdev_priv(dev);
+
+	switch (tuna->id) {
+	case ETHTOOL_RX_COPYBREAK:
+		WRITE_ONCE(priv->rx_copybreak, *(u32 *)data);
+		return 0;
+	default:
+		return -EOPNOTSUPP;
+	}
+}
+
+static int qeth_get_per_queue_coalesce(struct net_device *dev, u32 __queue,
+				       struct ethtool_coalesce *coal)
+{
+	struct qeth_card *card = dev->ml_priv;
+	struct qeth_qdio_out_q *queue;
+
+	if (!IS_IQD(card))
+		return -EOPNOTSUPP;
+
+	if (__queue >= card->qdio.no_out_queues)
+		return -EINVAL;
+
+	queue = card->qdio.out_qs[__queue];
+
+	coal->tx_coalesce_usecs = queue->coalesce_usecs;
+	coal->tx_max_coalesced_frames = queue->max_coalesced_frames;
+	return 0;
+}
+
+static int qeth_set_per_queue_coalesce(struct net_device *dev, u32 queue,
+				       struct ethtool_coalesce *coal)
+{
+	struct qeth_card *card = dev->ml_priv;
+
+	if (!IS_IQD(card))
+		return -EOPNOTSUPP;
+
+	if (queue >= card->qdio.no_out_queues)
+		return -EINVAL;
+
+	if (!coal->tx_coalesce_usecs && !coal->tx_max_coalesced_frames)
+		return -EINVAL;
+
+	__qeth_set_coalesce(dev, card->qdio.out_qs[queue], coal);
+	return 0;
+}
+
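
Together with the global qeth_set_coalesce() above, these per-queue hooks let userspace tune TX IRQ coalescing either for all queues or for a single one. As a usage sketch (assuming a reasonably recent ethtool(8)): "ethtool -C <dev> tx-usecs 100 tx-frames 64" applies to every TX queue, while "ethtool --per-queue <dev> queue_mask 0x2 --coalesce tx-usecs 50" targets queue 1 only. The WRITE_ONCE() stores in __qeth_set_coalesce() mean the hot TX path can pick up new values without locking, and the timer is re-armed immediately when the queue is not empty.
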
 /* Helper function to fill 'advertising' and 'supported' which are the same. */
 /* Autoneg and full-duplex are supported and advertised unconditionally.     */
 /* Always advertise and support all speeds up to specified, and only one     */
@@ -214,7 +362,7 @@
 						     10000baseT_Full);
 		ethtool_link_ksettings_add_link_mode(cmd, advertising,
 						     10000baseT_Full);
-		/* fall through */
+		fallthrough;
 	case SPEED_1000:
 		ethtool_link_ksettings_add_link_mode(cmd, supported,
 						     1000baseT_Full);
@@ -224,7 +372,7 @@
 						     1000baseT_Half);
 		ethtool_link_ksettings_add_link_mode(cmd, advertising,
 						     1000baseT_Half);
-		/* fall through */
+		fallthrough;
 	case SPEED_100:
 		ethtool_link_ksettings_add_link_mode(cmd, supported,
 						     100baseT_Full);
@@ -234,7 +382,7 @@
 						     100baseT_Half);
 		ethtool_link_ksettings_add_link_mode(cmd, advertising,
 						     100baseT_Half);
-		/* fall through */
+		fallthrough;
 	case SPEED_10:
 		ethtool_link_ksettings_add_link_mode(cmd, supported,
 						     10baseT_Full);
@@ -371,13 +519,22 @@
 }
 
 const struct ethtool_ops qeth_ethtool_ops = {
+	.supported_coalesce_params = ETHTOOL_COALESCE_TX_USECS |
+				     ETHTOOL_COALESCE_TX_MAX_FRAMES,
 	.get_link = ethtool_op_get_link,
+	.set_coalesce = qeth_set_coalesce,
 	.get_ringparam = qeth_get_ringparam,
 	.get_strings = qeth_get_strings,
 	.get_ethtool_stats = qeth_get_ethtool_stats,
 	.get_sset_count = qeth_get_sset_count,
 	.get_drvinfo = qeth_get_drvinfo,
 	.get_channels = qeth_get_channels,
+	.set_channels = qeth_set_channels,
+	.get_ts_info = qeth_get_ts_info,
+	.get_tunable = qeth_get_tunable,
+	.set_tunable = qeth_set_tunable,
+	.get_per_queue_coalesce = qeth_get_per_queue_coalesce,
+	.set_per_queue_coalesce = qeth_set_per_queue_coalesce,
 	.get_link_ksettings = qeth_get_link_ksettings,
 };
 
diff --git a/drivers/s390/net/qeth_l2.h b/drivers/s390/net/qeth_l2.h
index ddc615b..296d73d 100644
--- a/drivers/s390/net/qeth_l2.h
+++ b/drivers/s390/net/qeth_l2.h
@@ -13,7 +13,6 @@
 
 int qeth_l2_create_device_attributes(struct device *);
 void qeth_l2_remove_device_attributes(struct device *);
-void qeth_l2_setup_bridgeport_attrs(struct qeth_card *card);
 int qeth_bridgeport_query_ports(struct qeth_card *card,
 				enum qeth_sbp_roles *role,
 				enum qeth_sbp_states *state);
@@ -24,7 +23,7 @@
 int qeth_l2_vnicc_get_state(struct qeth_card *card, u32 vnicc, bool *state);
 int qeth_l2_vnicc_set_timeout(struct qeth_card *card, u32 timeout);
 int qeth_l2_vnicc_get_timeout(struct qeth_card *card, u32 *timeout);
-bool qeth_l2_vnicc_is_in_use(struct qeth_card *card);
+bool qeth_bridgeport_allowed(struct qeth_card *card);
 
 struct qeth_mac {
 	u8 mac_addr[ETH_ALEN];
@@ -32,4 +31,11 @@
 	struct hlist_node hnode;
 };
 
+static inline bool qeth_bridgeport_is_in_use(struct qeth_card *card)
+{
+	return card->options.sbp.role ||
+	       card->options.sbp.reflect_promisc ||
+	       card->options.sbp.hostnotification;
+}
+
 #endif /* __QETH_L2_H__ */
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
index 8c46136..cfc931f 100644
--- a/drivers/s390/net/qeth_l2_main.c
+++ b/drivers/s390/net/qeth_l2_main.c
@@ -17,24 +17,17 @@
 #include <linux/kernel.h>
 #include <linux/slab.h>
 #include <linux/etherdevice.h>
+#include <linux/if_bridge.h>
 #include <linux/list.h>
 #include <linux/hash.h>
 #include <linux/hashtable.h>
+#include <net/switchdev.h>
+#include <asm/chsc.h>
+#include <asm/css_chars.h>
 #include <asm/setup.h>
 #include "qeth_core.h"
 #include "qeth_l2.h"
 
-static int qeth_l2_set_offline(struct ccwgroup_device *);
-static void qeth_bridgeport_query_support(struct qeth_card *card);
-static void qeth_bridge_state_change(struct qeth_card *card,
-					struct qeth_ipa_cmd *cmd);
-static void qeth_bridge_host_event(struct qeth_card *card,
-					struct qeth_ipa_cmd *cmd);
-static void qeth_l2_vnicc_set_defaults(struct qeth_card *card);
-static void qeth_l2_vnicc_init(struct qeth_card *card);
-static bool qeth_l2_vnicc_recover_timeout(struct qeth_card *card, u32 vnicc,
-					  u32 *timeout);
-
 static int qeth_l2_setdelmac_makerc(struct qeth_card *card, u16 retcode)
 {
 	int rc;
@@ -53,11 +46,11 @@
 		break;
 	case IPA_RC_L2_DUP_MAC:
 	case IPA_RC_L2_DUP_LAYER3_MAC:
-		rc = -EEXIST;
+		rc = -EADDRINUSE;
 		break;
 	case IPA_RC_L2_MAC_NOT_AUTH_BY_HYP:
 	case IPA_RC_L2_MAC_NOT_AUTH_BY_ADP:
-		rc = -EPERM;
+		rc = -EADDRNOTAVAIL;
 		break;
 	case IPA_RC_L2_MAC_NOT_FOUND:
 		rc = -ENOENT;
@@ -106,11 +99,11 @@
 			 "MAC address %pM successfully registered\n", mac);
 	} else {
 		switch (rc) {
-		case -EEXIST:
+		case -EADDRINUSE:
 			dev_warn(&card->gdev->dev,
 				"MAC address %pM already exists\n", mac);
 			break;
-		case -EPERM:
+		case -EADDRNOTAVAIL:
 			dev_warn(&card->gdev->dev,
 				"MAC address %pM is not authorized\n", mac);
 			break;
@@ -127,7 +120,7 @@
 
 	QETH_CARD_TEXT(card, 2, "L2Wmac");
 	rc = qeth_l2_send_setdelmac(card, mac, cmd);
-	if (rc == -EEXIST)
+	if (rc == -EADDRINUSE)
 		QETH_DBF_MESSAGE(2, "MAC already registered on device %x\n",
 				 CARD_DEVID(card));
 	else if (rc)
@@ -156,7 +149,7 @@
 	struct hlist_node *tmp;
 	int i;
 
-	hash_for_each_safe(card->mac_htable, i, tmp, mac, hnode) {
+	hash_for_each_safe(card->rx_mode_addrs, i, tmp, mac, hnode) {
 		hash_del(&mac->hnode);
 		kfree(mac);
 	}
@@ -190,7 +183,7 @@
 	/* VSWITCH relies on the VLAN
 	 * information to be present in
 	 * the QDIO header */
-	if (veth->h_vlan_proto == __constant_htons(ETH_P_8021Q)) {
+	if (veth->h_vlan_proto == htons(ETH_P_8021Q)) {
 		hdr->hdr.l2.flags[2] |= QETH_LAYER2_FLAG_VLAN;
 		hdr->hdr.l2.vlan_id = ntohs(veth->h_vlan_TCI);
 	}
@@ -273,78 +266,31 @@
 	return qeth_l2_send_setdelvlan(card, vid, IPA_CMD_DELVLAN);
 }
 
-static void qeth_l2_stop_card(struct qeth_card *card)
+static void qeth_l2_set_pnso_mode(struct qeth_card *card,
+				  enum qeth_pnso_mode mode)
 {
-	QETH_CARD_TEXT(card, 2, "stopcard");
+	spin_lock_irq(get_ccwdev_lock(CARD_RDEV(card)));
+	WRITE_ONCE(card->info.pnso_mode, mode);
+	spin_unlock_irq(get_ccwdev_lock(CARD_RDEV(card)));
 
-	qeth_set_allowed_threads(card, 0, 1);
-
-	cancel_work_sync(&card->rx_mode_work);
-	qeth_l2_drain_rx_mode_cache(card);
-
-	if (card->state == CARD_STATE_SOFTSETUP) {
-		qeth_clear_ipacmd_list(card);
-		card->state = CARD_STATE_HARDSETUP;
-	}
-	if (card->state == CARD_STATE_HARDSETUP) {
-		qeth_drain_output_queues(card);
-		qeth_clear_working_pool_list(card);
-		card->state = CARD_STATE_DOWN;
-	}
-
-	qeth_qdio_clear_card(card, 0);
-	flush_workqueue(card->event_wq);
-	card->info.mac_bits &= ~QETH_LAYER2_MAC_REGISTERED;
-	card->info.promisc_mode = 0;
+	if (mode == QETH_PNSO_NONE)
+		drain_workqueue(card->event_wq);
 }
 
-static int qeth_l2_process_inbound_buffer(struct qeth_card *card,
-				int budget, int *done)
+static void qeth_l2_dev2br_fdb_flush(struct qeth_card *card)
 {
-	int work_done = 0;
-	struct sk_buff *skb;
-	struct qeth_hdr *hdr;
-	unsigned int len;
+	struct switchdev_notifier_fdb_info info;
 
-	*done = 0;
-	WARN_ON_ONCE(!budget);
-	while (budget) {
-		skb = qeth_core_get_next_skb(card,
-			&card->qdio.in_q->bufs[card->rx.b_index],
-			&card->rx.b_element, &card->rx.e_offset, &hdr);
-		if (!skb) {
-			*done = 1;
-			break;
-		}
-		switch (hdr->hdr.l2.id) {
-		case QETH_HEADER_TYPE_LAYER2:
-			skb->protocol = eth_type_trans(skb, skb->dev);
-			qeth_rx_csum(card, skb, hdr->hdr.l2.flags[1]);
-			len = skb->len;
-			napi_gro_receive(&card->napi, skb);
-			break;
-		case QETH_HEADER_TYPE_OSN:
-			if (IS_OSN(card)) {
-				skb_push(skb, sizeof(struct qeth_hdr));
-				skb_copy_to_linear_data(skb, hdr,
-						sizeof(struct qeth_hdr));
-				len = skb->len;
-				card->osn_info.data_cb(skb);
-				break;
-			}
-			/* Else, fall through */
-		default:
-			dev_kfree_skb_any(skb);
-			QETH_CARD_TEXT(card, 3, "inbunkno");
-			QETH_DBF_HEX(CTRL, 3, hdr, sizeof(*hdr));
-			continue;
-		}
-		work_done++;
-		budget--;
-		QETH_CARD_STAT_INC(card, rx_packets);
-		QETH_CARD_STAT_ADD(card, rx_bytes, len);
-	}
-	return work_done;
+	QETH_CARD_TEXT(card, 2, "fdbflush");
+
+	info.addr = NULL;
+	/* flush all VLANs: */
+	info.vid = 0;
+	info.added_by_user = false;
+	info.offloaded = true;
+
+	call_switchdev_notifiers(SWITCHDEV_FDB_FLUSH_TO_BRIDGE,
+				 card->dev, &info.info, NULL);
 }
 
 static int qeth_l2_request_initial_mac(struct qeth_card *card)
@@ -389,14 +335,16 @@
 		qeth_l2_request_initial_mac(card);
 
 	if (!IS_OSN(card) && !qeth_l2_send_setmac(card, card->dev->dev_addr))
-		card->info.mac_bits |= QETH_LAYER2_MAC_REGISTERED;
+		card->info.dev_addr_is_registered = 1;
+	else
+		card->info.dev_addr_is_registered = 0;
 }
 
 static int qeth_l2_validate_addr(struct net_device *dev)
 {
 	struct qeth_card *card = dev->ml_priv;
 
-	if (card->info.mac_bits & QETH_LAYER2_MAC_REGISTERED)
+	if (card->info.dev_addr_is_registered)
 		return eth_validate_addr(dev);
 
 	QETH_CARD_TEXT(card, 4, "nomacadr");
@@ -422,7 +370,7 @@
 
 	/* don't register the same address twice */
 	if (ether_addr_equal_64bits(dev->dev_addr, addr->sa_data) &&
-	    (card->info.mac_bits & QETH_LAYER2_MAC_REGISTERED))
+	    card->info.dev_addr_is_registered)
 		return 0;
 
 	/* add the new address, switch over, drop the old */
@@ -432,9 +380,9 @@
 	ether_addr_copy(old_addr, dev->dev_addr);
 	ether_addr_copy(dev->dev_addr, addr->sa_data);
 
-	if (card->info.mac_bits & QETH_LAYER2_MAC_REGISTERED)
+	if (card->info.dev_addr_is_registered)
 		qeth_l2_remove_mac(card, old_addr);
-	card->info.mac_bits |= QETH_LAYER2_MAC_REGISTERED;
+	card->info.dev_addr_is_registered = 1;
 	return 0;
 }
 
@@ -487,7 +435,7 @@
 	u32 mac_hash = get_unaligned((u32 *)(&ha->addr[2]));
 	struct qeth_mac *mac;
 
-	hash_for_each_possible(card->mac_htable, mac, hnode, mac_hash) {
+	hash_for_each_possible(card->rx_mode_addrs, mac, hnode, mac_hash) {
 		if (ether_addr_equal_64bits(ha->addr, mac->mac_addr)) {
 			mac->disp_flag = QETH_DISP_ADDR_DO_NOTHING;
 			return;
@@ -501,7 +449,7 @@
 	ether_addr_copy(mac->mac_addr, ha->addr);
 	mac->disp_flag = QETH_DISP_ADDR_ADD;
 
-	hash_add(card->mac_htable, &mac->hnode, mac_hash);
+	hash_add(card->rx_mode_addrs, &mac->hnode, mac_hash);
 }
 
 static void qeth_l2_rx_mode_work(struct work_struct *work)
@@ -524,7 +472,7 @@
 		qeth_l2_add_mac(card, ha);
 	netif_addr_unlock_bh(dev);
 
-	hash_for_each_safe(card->mac_htable, i, tmp, mac, hnode) {
+	hash_for_each_safe(card->rx_mode_addrs, i, tmp, mac, hnode) {
 		switch (mac->disp_flag) {
 		case QETH_DISP_ADDR_DELETE:
 			qeth_l2_remove_mac(card, mac->mac_addr);
@@ -538,7 +486,7 @@
 				kfree(mac);
 				break;
 			}
-			/* fall through */
+			fallthrough;
 		default:
 			/* for next call to set_rx_mode(): */
 			mac->disp_flag = QETH_DISP_ADDR_DELETE;
@@ -551,6 +499,7 @@
 static int qeth_l2_xmit_osn(struct qeth_card *card, struct sk_buff *skb,
 			    struct qeth_qdio_out_q *queue)
 {
+	gfp_t gfp = GFP_ATOMIC | (skb_pfmemalloc(skb) ? __GFP_MEMALLOC : 0);
 	struct qeth_hdr *hdr = (struct qeth_hdr *)skb->data;
 	addr_t end = (addr_t)(skb->data + sizeof(*hdr));
 	addr_t start = (addr_t)skb->data;
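
The new gfp mask propagates __GFP_MEMALLOC when the skb itself came from pfmemalloc reserves, so the copied HW header below can still be allocated while the system is swapping over this interface; without it, the kmem_cache_alloc() call could fail exactly when memory-reclaim traffic needs to get out.
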
@@ -563,7 +512,7 @@
 
 	if (qeth_get_elements_for_range(start, end) > 1) {
 		/* Misaligned HW header, move it to its own buffer element. */
-		hdr = kmem_cache_alloc(qeth_core_header_cache, GFP_ATOMIC);
+		hdr = kmem_cache_alloc(qeth_core_header_cache, gfp);
 		if (!hdr)
 			return -ENOMEM;
 		hd_len = sizeof(*hdr);
@@ -622,48 +571,10 @@
 		return qeth_iqd_select_queue(dev, skb,
 					     qeth_get_ether_cast_type(skb),
 					     sb_dev);
-	return qeth_get_priority_queue(card, skb);
-}
+	if (qeth_uses_tx_prio_queueing(card))
+		return qeth_get_priority_queue(card, skb);
 
-static const struct device_type qeth_l2_devtype = {
-	.name = "qeth_layer2",
-	.groups = qeth_l2_attr_groups,
-};
-
-static int qeth_l2_probe_device(struct ccwgroup_device *gdev)
-{
-	struct qeth_card *card = dev_get_drvdata(&gdev->dev);
-	int rc;
-
-	qeth_l2_vnicc_set_defaults(card);
-	mutex_init(&card->sbp_lock);
-
-	if (gdev->dev.type == &qeth_generic_devtype) {
-		rc = qeth_l2_create_device_attributes(&gdev->dev);
-		if (rc)
-			return rc;
-	}
-
-	hash_init(card->mac_htable);
-	INIT_WORK(&card->rx_mode_work, qeth_l2_rx_mode_work);
-	return 0;
-}
-
-static void qeth_l2_remove_device(struct ccwgroup_device *cgdev)
-{
-	struct qeth_card *card = dev_get_drvdata(&cgdev->dev);
-
-	if (cgdev->dev.type == &qeth_generic_devtype)
-		qeth_l2_remove_device_attributes(&cgdev->dev);
-	qeth_set_allowed_threads(card, 0, 1);
-	wait_event(card->wait_q, qeth_threads_running(card, 0xffffffff) == 0);
-
-	if (cgdev->state == CCWGROUP_ONLINE)
-		qeth_l2_set_offline(cgdev);
-
-	cancel_work_sync(&card->close_dev_work);
-	if (qeth_netdev_is_registered(card->dev))
-		unregister_netdev(card->dev);
+	return netdev_pick_tx(dev, skb, sb_dev);
 }
 
 static void qeth_l2_set_rx_mode(struct net_device *dev)
@@ -673,6 +584,286 @@
 	schedule_work(&card->rx_mode_work);
 }
 
+/**
+ *	qeth_l2_pnso() - perform network subchannel operation
+ *	@card: qeth_card structure pointer
+ *	@oc: Operation Code
+ *	@cnc: Boolean Change-Notification Control
+ *	@cb: Callback function, executed for each element of the
+ *		address list
+ *	@priv: Pointer to pass to the callback function.
+ *
+ *	Collects network information in a network address list and calls the
+ *	callback function for every entry in the list. If "change-notification-
+ *	control" is set, further changes in the address list will be reported
+ *	via the IPA command.
+ */
+static int qeth_l2_pnso(struct qeth_card *card, u8 oc, int cnc,
+			void (*cb)(void *priv, struct chsc_pnso_naid_l2 *entry),
+			void *priv)
+{
+	struct ccw_device *ddev = CARD_DDEV(card);
+	struct chsc_pnso_area *rr;
+	u32 prev_instance = 0;
+	int isfirstblock = 1;
+	int i, size, elems;
+	int rc;
+
+	rr = (struct chsc_pnso_area *)get_zeroed_page(GFP_KERNEL);
+	if (rr == NULL)
+		return -ENOMEM;
+	do {
+		QETH_CARD_TEXT(card, 2, "PNSO");
+		/* on the first iteration, naihdr.resume_token will be zero */
+		rc = ccw_device_pnso(ddev, rr, oc, rr->naihdr.resume_token,
+				     cnc);
+		if (rc)
+			continue;
+		if (cb == NULL)
+			continue;
+
+		size = rr->naihdr.naids;
+		if (size != sizeof(struct chsc_pnso_naid_l2)) {
+			WARN_ON_ONCE(1);
+			continue;
+		}
+
+		elems = (rr->response.length - sizeof(struct chsc_header) -
+			 sizeof(struct chsc_pnso_naihdr)) / size;
+
+		if (!isfirstblock && (rr->naihdr.instance != prev_instance)) {
+			/* Inform the caller that they need to scrap
+			 * the data that was already reported via cb
+			 */
+			rc = -EAGAIN;
+			break;
+		}
+		isfirstblock = 0;
+		prev_instance = rr->naihdr.instance;
+		for (i = 0; i < elems; i++)
+			(*cb)(priv, &rr->entries[i]);
+	} while ((rc == -EBUSY) || (!rc && /* list stored */
+		   /* resume token is non-zero => list incomplete */
+		   (rr->naihdr.resume_token.t1 || rr->naihdr.resume_token.t2)));
+
+	if (rc)
+		QETH_CARD_TEXT_(card, 2, "PNrp%04x", rr->response.code);
+
+	free_page((unsigned long)rr);
+	return rc;
+}
+
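
The callback contract here is minimal: qeth_l2_pnso() hands every chsc_pnso_naid_l2 entry to cb together with the opaque priv pointer, and the caller owns any aggregation. A hypothetical caller that merely counts the reported entries (count_cb is illustrative, not a driver symbol):

	/* Count the address-list entries reported by PNSO. */
	static void count_cb(void *priv, struct chsc_pnso_naid_l2 *entry)
	{
		(*(unsigned int *)priv)++;	/* entry is only valid during the call */
	}

	/* e.g. from code that already holds a qeth_card *card: */
	unsigned int n = 0;
	int rc = qeth_l2_pnso(card, PNSO_OC_NET_ADDR_INFO, 0, count_cb, &n);

A return of -EAGAIN means the instance number changed between blocks, i.e. the list mutated mid-walk, and anything already delivered through cb must be discarded.
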
+static bool qeth_is_my_net_if_token(struct qeth_card *card,
+				    struct net_if_token *token)
+{
+	return ((card->info.ddev_devno == token->devnum) &&
+		(card->info.cssid == token->cssid) &&
+		(card->info.iid == token->iid) &&
+		(card->info.ssid == token->ssid) &&
+		(card->info.chpid == token->chpid) &&
+		(card->info.chid == token->chid));
+}
+
+/**
+ *	qeth_l2_dev2br_fdb_notify() - update fdb of master bridge
+ *	@card:	qeth_card structure pointer
+ *	@code:	event bitmask: high order bit 0x80 set to
+ *				1 - removal of an object
+ *				0 - addition of an object
+ *			       Object type(s):
+ *				0x01 - VLAN, 0x02 - MAC, 0x03 - VLAN and MAC
+ *	@token: "network token" structure identifying 'physical' location
+ *		of the target
+ *	@addr_lnid: structure with MAC address and VLAN ID of the target
+ */
+static void qeth_l2_dev2br_fdb_notify(struct qeth_card *card, u8 code,
+				      struct net_if_token *token,
+				      struct mac_addr_lnid *addr_lnid)
+{
+	struct switchdev_notifier_fdb_info info;
+	u8 ntfy_mac[ETH_ALEN];
+
+	ether_addr_copy(ntfy_mac, addr_lnid->mac);
+	/* Ignore VLAN only changes */
+	if (!(code & IPA_ADDR_CHANGE_CODE_MACADDR))
+		return;
+	/* Ignore mcast entries */
+	if (is_multicast_ether_addr(ntfy_mac))
+		return;
+	/* Ignore my own addresses */
+	if (qeth_is_my_net_if_token(card, token))
+		return;
+
+	info.addr = ntfy_mac;
+	/* don't report VLAN IDs */
+	info.vid = 0;
+	info.added_by_user = false;
+	info.offloaded = true;
+
+	if (code & IPA_ADDR_CHANGE_CODE_REMOVAL) {
+		call_switchdev_notifiers(SWITCHDEV_FDB_DEL_TO_BRIDGE,
+					 card->dev, &info.info, NULL);
+		QETH_CARD_TEXT(card, 4, "andelmac");
+		QETH_CARD_TEXT_(card, 4,
+				"mc%012lx", ether_addr_to_u64(ntfy_mac));
+	} else {
+		call_switchdev_notifiers(SWITCHDEV_FDB_ADD_TO_BRIDGE,
+					 card->dev, &info.info, NULL);
+		QETH_CARD_TEXT(card, 4, "anaddmac");
+		QETH_CARD_TEXT_(card, 4,
+				"mc%012lx", ether_addr_to_u64(ntfy_mac));
+	}
+}
+
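
On the receiving end, SWITCHDEV_FDB_ADD_TO_BRIDGE makes the software bridge install the address as an externally learned, offloaded fdb entry (visible via "bridge fdb show" with the extern_learn and offload flags), and SWITCHDEV_FDB_DEL_TO_BRIDGE removes it again; qeth_l2_dev2br_fdb_flush() earlier in this patch uses the FLUSH variant to drop all such entries at once.
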
+static void qeth_l2_dev2br_an_set_cb(void *priv,
+				     struct chsc_pnso_naid_l2 *entry)
+{
+	u8 code = IPA_ADDR_CHANGE_CODE_MACADDR;
+	struct qeth_card *card = priv;
+
+	if (entry->addr_lnid.lnid < VLAN_N_VID)
+		code |= IPA_ADDR_CHANGE_CODE_VLANID;
+	qeth_l2_dev2br_fdb_notify(card, code,
+				  (struct net_if_token *)&entry->nit,
+				  (struct mac_addr_lnid *)&entry->addr_lnid);
+}
+
+/**
+ *	qeth_l2_dev2br_an_set() - enable or disable 'dev to bridge network address notification'
+ *	@card: qeth_card structure pointer
+ *	@enable: Enable or disable 'dev to bridge network address notification'
+ *
+ *	Returns negative errno-compatible error indication or 0 on success.
+ *
+ *	On enable, emits a series of address notifications for all
+ *	currently registered hosts.
+ *
+ *	Must be called under rtnl_lock
+ */
+static int qeth_l2_dev2br_an_set(struct qeth_card *card, bool enable)
+{
+	int rc;
+
+	if (enable) {
+		QETH_CARD_TEXT(card, 2, "anseton");
+		rc = qeth_l2_pnso(card, PNSO_OC_NET_ADDR_INFO, 1,
+				  qeth_l2_dev2br_an_set_cb, card);
+		if (rc == -EAGAIN)
+			/* address notification enabled, but inconsistent
+			 * addresses reported -> disable address notification
+			 */
+			qeth_l2_pnso(card, PNSO_OC_NET_ADDR_INFO, 0,
+				     NULL, NULL);
+	} else {
+		QETH_CARD_TEXT(card, 2, "ansetoff");
+		rc = qeth_l2_pnso(card, PNSO_OC_NET_ADDR_INFO, 0, NULL, NULL);
+	}
+
+	return rc;
+}
+
+static int qeth_l2_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
+				  struct net_device *dev, u32 filter_mask,
+				  int nlflags)
+{
+	struct qeth_priv *priv = netdev_priv(dev);
+	struct qeth_card *card = dev->ml_priv;
+	u16 mode = BRIDGE_MODE_UNDEF;
+
+	/* Do not even show qeth devs that cannot do bridge_setlink */
+	if (!priv->brport_hw_features || !netif_device_present(dev) ||
+	    qeth_bridgeport_is_in_use(card))
+		return -EOPNOTSUPP;
+
+	return ndo_dflt_bridge_getlink(skb, pid, seq, dev,
+				       mode, priv->brport_features,
+				       priv->brport_hw_features,
+				       nlflags, filter_mask, NULL);
+}
+
+static const struct nla_policy qeth_brport_policy[IFLA_BRPORT_MAX + 1] = {
+	[IFLA_BRPORT_LEARNING_SYNC]	= { .type = NLA_U8 },
+};
+
+/**
+ *	qeth_l2_bridge_setlink() - set bridgeport attributes
+ *	@dev: netdevice
+ *	@nlh: netlink message header
+ *	@flags: bridge flags (here: BRIDGE_FLAGS_SELF)
+ *	@extack: extended ACK report struct
+ *
+ *	Called under rtnl_lock
+ */
+static int qeth_l2_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
+				  u16 flags, struct netlink_ext_ack *extack)
+{
+	struct qeth_priv *priv = netdev_priv(dev);
+	struct nlattr *bp_tb[IFLA_BRPORT_MAX + 1];
+	struct qeth_card *card = dev->ml_priv;
+	struct nlattr *attr, *nested_attr;
+	bool enable, has_protinfo = false;
+	int rem1, rem2;
+	int rc;
+
+	if (!netif_device_present(dev))
+		return -ENODEV;
+	if (!(priv->brport_hw_features))
+		return -EOPNOTSUPP;
+
+	nlmsg_for_each_attr(attr, nlh, sizeof(struct ifinfomsg), rem1) {
+		if (nla_type(attr) == IFLA_PROTINFO) {
+			rc = nla_parse_nested(bp_tb, IFLA_BRPORT_MAX, attr,
+					      qeth_brport_policy, extack);
+			if (rc)
+				return rc;
+			has_protinfo = true;
+		} else if (nla_type(attr) == IFLA_AF_SPEC) {
+			nla_for_each_nested(nested_attr, attr, rem2) {
+				if (nla_type(nested_attr) == IFLA_BRIDGE_FLAGS)
+					continue;
+				NL_SET_ERR_MSG_ATTR(extack, nested_attr,
+						    "Unsupported attribute");
+				return -EINVAL;
+			}
+		} else {
+			NL_SET_ERR_MSG_ATTR(extack, attr, "Unsupported attribute");
+			return -EINVAL;
+		}
+	}
+	if (!has_protinfo)
+		return 0;
+	if (!bp_tb[IFLA_BRPORT_LEARNING_SYNC])
+		return -EINVAL;
+	enable = !!nla_get_u8(bp_tb[IFLA_BRPORT_LEARNING_SYNC]);
+
+	if (enable == !!(priv->brport_features & BR_LEARNING_SYNC))
+		return 0;
+
+	mutex_lock(&card->sbp_lock);
+	/* do not change anything if BridgePort is enabled */
+	if (qeth_bridgeport_is_in_use(card)) {
+		NL_SET_ERR_MSG(extack, "n/a (BridgePort)");
+		rc = -EBUSY;
+	} else if (enable) {
+		qeth_l2_set_pnso_mode(card, QETH_PNSO_ADDR_INFO);
+		rc = qeth_l2_dev2br_an_set(card, true);
+		if (rc)
+			qeth_l2_set_pnso_mode(card, QETH_PNSO_NONE);
+		else
+			priv->brport_features |= BR_LEARNING_SYNC;
+	} else {
+		rc = qeth_l2_dev2br_an_set(card, false);
+		if (!rc) {
+			qeth_l2_set_pnso_mode(card, QETH_PNSO_NONE);
+			priv->brport_features ^= BR_LEARNING_SYNC;
+			qeth_l2_dev2br_fdb_flush(card);
+		}
+	}
+	mutex_unlock(&card->sbp_lock);
+
+	return rc;
+}
+
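
As a usage sketch, the learning_sync port attribute wired up here is driven from userspace with iproute2, e.g. "bridge link set dev <dev> learning_sync on self"; the self keyword routes the request to the port device (this ndo) instead of the bridge master, and "bridge -d link show" should afterwards report learning_sync on for the port.
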
 static const struct net_device_ops qeth_l2_netdev_ops = {
 	.ndo_open		= qeth_open,
 	.ndo_stop		= qeth_stop,
@@ -686,9 +877,11 @@
 	.ndo_set_mac_address    = qeth_l2_set_mac_address,
 	.ndo_vlan_rx_add_vid	= qeth_l2_vlan_rx_add_vid,
 	.ndo_vlan_rx_kill_vid   = qeth_l2_vlan_rx_kill_vid,
-	.ndo_tx_timeout	   	= qeth_tx_timeout,
+	.ndo_tx_timeout		= qeth_tx_timeout,
 	.ndo_fix_features	= qeth_fix_features,
-	.ndo_set_features	= qeth_set_features
+	.ndo_set_features	= qeth_set_features,
+	.ndo_bridge_getlink	= qeth_l2_bridge_getlink,
+	.ndo_bridge_setlink	= qeth_l2_bridge_setlink,
 };
 
 static const struct net_device_ops qeth_osn_netdev_ops = {
@@ -700,10 +893,8 @@
 	.ndo_tx_timeout		= qeth_tx_timeout,
 };
 
-static int qeth_l2_setup_netdev(struct qeth_card *card, bool carrier_ok)
+static int qeth_l2_setup_netdev(struct qeth_card *card)
 {
-	int rc;
-
 	if (IS_OSN(card)) {
 		card->dev->netdev_ops = &qeth_osn_netdev_ops;
 		card->dev->flags |= IFF_NOARP;
@@ -750,27 +941,14 @@
 
 	if (card->dev->hw_features & (NETIF_F_TSO | NETIF_F_TSO6)) {
 		card->dev->needed_headroom = sizeof(struct qeth_hdr_tso);
+		netif_keep_dst(card->dev);
 		netif_set_gso_max_size(card->dev,
 				       PAGE_SIZE * (QDIO_MAX_ELEMENTS_PER_BUFFER - 1));
 	}
 
 add_napi:
 	netif_napi_add(card->dev, &card->napi, qeth_poll, QETH_NAPI_WEIGHT);
-	rc = register_netdev(card->dev);
-	if (!rc && carrier_ok)
-		netif_carrier_on(card->dev);
-
-	if (rc)
-		card->dev->netdev_ops = NULL;
-	return rc;
-}
-
-static int qeth_l2_start_ipassists(struct qeth_card *card)
-{
-	/* configure isolation level */
-	if (qeth_set_access_ctrl_online(card, 0))
-		return -ENODEV;
-	return 0;
+	return register_netdev(card->dev);
 }
 
 static void qeth_l2_trace_features(struct qeth_card *card)
@@ -785,224 +963,82 @@
 		      sizeof(card->options.vnicc.sup_chars));
 }
 
-static int qeth_l2_set_online(struct ccwgroup_device *gdev)
+static void qeth_l2_setup_bridgeport_attrs(struct qeth_card *card)
 {
-	struct qeth_card *card = dev_get_drvdata(&gdev->dev);
-	struct net_device *dev = card->dev;
-	int rc = 0;
-	bool carrier_ok;
-
-	mutex_lock(&card->discipline_mutex);
-	mutex_lock(&card->conf_mutex);
-	QETH_CARD_TEXT(card, 2, "setonlin");
-
-	rc = qeth_core_hardsetup_card(card, &carrier_ok);
-	if (rc) {
-		QETH_CARD_TEXT_(card, 2, "2err%04x", rc);
-		rc = -ENODEV;
-		goto out_remove;
+	if (!card->options.sbp.reflect_promisc &&
+	    card->options.sbp.role != QETH_SBP_ROLE_NONE) {
+		/* Conditional to avoid spurious error messages */
+		qeth_bridgeport_setrole(card, card->options.sbp.role);
+		/* Let the callback function refresh the stored role value. */
+		qeth_bridgeport_query_ports(card, &card->options.sbp.role,
+					    NULL);
 	}
-
-	if (qeth_is_diagass_supported(card, QETH_DIAGS_CMD_TRAP)) {
-		if (card->info.hwtrap &&
-		    qeth_hw_trap(card, QETH_DIAGS_TRAP_ARM))
-			card->info.hwtrap = 0;
-	} else
-		card->info.hwtrap = 0;
-
-	mutex_lock(&card->sbp_lock);
-	qeth_bridgeport_query_support(card);
-	if (card->options.sbp.supported_funcs)
-		dev_info(&card->gdev->dev,
-		"The device represents a Bridge Capable Port\n");
-	mutex_unlock(&card->sbp_lock);
-
-	qeth_l2_register_dev_addr(card);
-
-	/* for the rx_bcast characteristic, init VNICC after setmac */
-	qeth_l2_vnicc_init(card);
-
-	qeth_trace_features(card);
-	qeth_l2_trace_features(card);
-
-	qeth_l2_setup_bridgeport_attrs(card);
-
-	card->state = CARD_STATE_HARDSETUP;
-	qeth_print_status_message(card);
-
-	/* softsetup */
-	QETH_CARD_TEXT(card, 2, "softsetp");
-
-	if (IS_OSD(card) || IS_OSX(card)) {
-		rc = qeth_l2_start_ipassists(card);
-		if (rc)
-			goto out_remove;
+	if (card->options.sbp.hostnotification) {
+		if (qeth_bridgeport_an_set(card, 1))
+			card->options.sbp.hostnotification = 0;
 	}
+}
 
-	rc = qeth_init_qdio_queues(card);
-	if (rc) {
-		QETH_CARD_TEXT_(card, 2, "6err%d", rc);
-		rc = -ENODEV;
-		goto out_remove;
-	}
-	card->state = CARD_STATE_SOFTSETUP;
+/**
+ *	qeth_l2_detect_dev2br_support() - detect whether this card supports
+ *	'dev to bridge fdb network address change notification' and thus can
+ *	support the learning_sync bridgeport attribute
+ *	@card: qeth_card structure pointer
+ */
+static void qeth_l2_detect_dev2br_support(struct qeth_card *card)
+{
+	struct qeth_priv *priv = netdev_priv(card->dev);
+	bool dev2br_supported;
 
-	qeth_set_allowed_threads(card, 0xffffffff, 0);
+	QETH_CARD_TEXT(card, 2, "d2brsup");
+	if (!IS_IQD(card))
+		return;
 
-	if (!qeth_netdev_is_registered(dev)) {
-		rc = qeth_l2_setup_netdev(card, carrier_ok);
-		if (rc)
-			goto out_remove;
-	} else {
-		rtnl_lock();
-		if (carrier_ok)
-			netif_carrier_on(dev);
-		else
-			netif_carrier_off(dev);
+	/* dev2br requires valid cssid,iid,chid */
+	dev2br_supported = card->info.ids_valid &&
+			   css_general_characteristics.enarf;
+	QETH_CARD_TEXT_(card, 2, "D2Bsup%02x", dev2br_supported);
 
-		netif_device_attach(dev);
-		qeth_enable_hw_features(dev);
+	if (dev2br_supported)
+		priv->brport_hw_features |= BR_LEARNING_SYNC;
+	else
+		priv->brport_hw_features &= ~BR_LEARNING_SYNC;
+}
 
-		if (card->info.open_when_online) {
-			card->info.open_when_online = 0;
-			dev_open(dev, NULL);
+static void qeth_l2_enable_brport_features(struct qeth_card *card)
+{
+	struct qeth_priv *priv = netdev_priv(card->dev);
+	int rc;
+
+	if (priv->brport_features & BR_LEARNING_SYNC) {
+		if (priv->brport_hw_features & BR_LEARNING_SYNC) {
+			qeth_l2_set_pnso_mode(card, QETH_PNSO_ADDR_INFO);
+			rc = qeth_l2_dev2br_an_set(card, true);
+			if (rc == -EAGAIN) {
+				/* Recoverable error, retry once */
+				qeth_l2_set_pnso_mode(card, QETH_PNSO_NONE);
+				qeth_l2_dev2br_fdb_flush(card);
+				qeth_l2_set_pnso_mode(card, QETH_PNSO_ADDR_INFO);
+				rc = qeth_l2_dev2br_an_set(card, true);
+			}
+			if (rc) {
+				netdev_err(card->dev,
+					   "failed to enable bridge learning_sync: %d\n",
+					   rc);
+				qeth_l2_set_pnso_mode(card, QETH_PNSO_NONE);
+				qeth_l2_dev2br_fdb_flush(card);
+				priv->brport_features ^= BR_LEARNING_SYNC;
+			}
+		} else {
+			dev_warn(&card->gdev->dev,
+				"bridge learning_sync not supported\n");
+			priv->brport_features ^= BR_LEARNING_SYNC;
 		}
-		rtnl_unlock();
-	}
-	/* let user_space know that device is online */
-	kobject_uevent(&gdev->dev.kobj, KOBJ_CHANGE);
-	mutex_unlock(&card->conf_mutex);
-	mutex_unlock(&card->discipline_mutex);
-	return 0;
-
-out_remove:
-	qeth_l2_stop_card(card);
-	qeth_stop_channel(&card->data);
-	qeth_stop_channel(&card->write);
-	qeth_stop_channel(&card->read);
-	qdio_free(CARD_DDEV(card));
-
-	mutex_unlock(&card->conf_mutex);
-	mutex_unlock(&card->discipline_mutex);
-	return rc;
-}
-
-static int __qeth_l2_set_offline(struct ccwgroup_device *cgdev,
-					int recovery_mode)
-{
-	struct qeth_card *card = dev_get_drvdata(&cgdev->dev);
-	int rc = 0, rc2 = 0, rc3 = 0;
-
-	mutex_lock(&card->discipline_mutex);
-	mutex_lock(&card->conf_mutex);
-	QETH_CARD_TEXT(card, 3, "setoffl");
-
-	if ((!recovery_mode && card->info.hwtrap) || card->info.hwtrap == 2) {
-		qeth_hw_trap(card, QETH_DIAGS_TRAP_DISARM);
-		card->info.hwtrap = 1;
-	}
-
-	rtnl_lock();
-	card->info.open_when_online = card->dev->flags & IFF_UP;
-	dev_close(card->dev);
-	netif_device_detach(card->dev);
-	netif_carrier_off(card->dev);
-	rtnl_unlock();
-
-	qeth_l2_stop_card(card);
-	rc  = qeth_stop_channel(&card->data);
-	rc2 = qeth_stop_channel(&card->write);
-	rc3 = qeth_stop_channel(&card->read);
-	if (!rc)
-		rc = (rc2) ? rc2 : rc3;
-	if (rc)
-		QETH_CARD_TEXT_(card, 2, "1err%d", rc);
-	qdio_free(CARD_DDEV(card));
-
-	/* let user_space know that device is offline */
-	kobject_uevent(&cgdev->dev.kobj, KOBJ_CHANGE);
-	mutex_unlock(&card->conf_mutex);
-	mutex_unlock(&card->discipline_mutex);
-	return 0;
-}
-
-static int qeth_l2_set_offline(struct ccwgroup_device *cgdev)
-{
-	return __qeth_l2_set_offline(cgdev, 0);
-}
-
-static int qeth_l2_recover(void *ptr)
-{
-	struct qeth_card *card;
-	int rc = 0;
-
-	card = (struct qeth_card *) ptr;
-	QETH_CARD_TEXT(card, 2, "recover1");
-	if (!qeth_do_run_thread(card, QETH_RECOVER_THREAD))
-		return 0;
-	QETH_CARD_TEXT(card, 2, "recover2");
-	dev_warn(&card->gdev->dev,
-		"A recovery process has been started for the device\n");
-	__qeth_l2_set_offline(card->gdev, 1);
-	rc = qeth_l2_set_online(card->gdev);
-	if (!rc)
-		dev_info(&card->gdev->dev,
-			"Device successfully recovered!\n");
-	else {
-		ccwgroup_set_offline(card->gdev);
-		dev_warn(&card->gdev->dev, "The qeth device driver "
-				"failed to recover an error on the device\n");
-	}
-	qeth_clear_thread_start_bit(card, QETH_RECOVER_THREAD);
-	qeth_clear_thread_running_bit(card, QETH_RECOVER_THREAD);
-	return 0;
-}
-
-static int __init qeth_l2_init(void)
-{
-	pr_info("register layer 2 discipline\n");
-	return 0;
-}
-
-static void __exit qeth_l2_exit(void)
-{
-	pr_info("unregister layer 2 discipline\n");
-}
-
-/* Returns zero if the command is successfully "consumed" */
-static int qeth_l2_control_event(struct qeth_card *card,
-					struct qeth_ipa_cmd *cmd)
-{
-	switch (cmd->hdr.command) {
-	case IPA_CMD_SETBRIDGEPORT_OSA:
-	case IPA_CMD_SETBRIDGEPORT_IQD:
-		if (cmd->data.sbp.hdr.command_code ==
-				IPA_SBP_BRIDGE_PORT_STATE_CHANGE) {
-			qeth_bridge_state_change(card, cmd);
-			return 0;
-		} else
-			return 1;
-	case IPA_CMD_ADDRESS_CHANGE_NOTIF:
-		qeth_bridge_host_event(card, cmd);
-		return 0;
-	default:
-		return 1;
 	}
 }
 
-struct qeth_discipline qeth_l2_discipline = {
-	.devtype = &qeth_l2_devtype,
-	.process_rx_buffer = qeth_l2_process_inbound_buffer,
-	.recover = qeth_l2_recover,
-	.setup = qeth_l2_probe_device,
-	.remove = qeth_l2_remove_device,
-	.set_online = qeth_l2_set_online,
-	.set_offline = qeth_l2_set_offline,
-	.do_ioctl = NULL,
-	.control_event_handler = qeth_l2_control_event,
-};
-EXPORT_SYMBOL_GPL(qeth_l2_discipline);
-
+#ifdef CONFIG_QETH_OSN
 static void qeth_osn_assist_cb(struct qeth_card *card,
 			       struct qeth_cmd_buffer *iob,
 			       unsigned int data_length)
@@ -1032,7 +1068,8 @@
 	if (!iob)
 		return -ENOMEM;
 
-	qeth_prepare_ipa_cmd(card, iob, (u16) data_len);
+	qeth_prepare_ipa_cmd(card, iob, (u16) data_len, NULL);
+
 	memcpy(__ipa_cmd(iob), data, data_len);
 	iob->callback = qeth_osn_assist_cb;
 	return qeth_send_ipa_cmd(card, iob, NULL, NULL);
@@ -1075,9 +1112,9 @@
 	QETH_CARD_TEXT(card, 2, "osndereg");
 	card->osn_info.assist_cb = NULL;
 	card->osn_info.data_cb = NULL;
-	return;
 }
 EXPORT_SYMBOL(qeth_osn_deregister);
+#endif
 
 /* SETBRIDGEPORT support, async notifications */
 
@@ -1099,8 +1136,10 @@
  * for all currently registered addresses.
  */
 static void qeth_bridge_emit_host_event(struct qeth_card *card,
-	enum qeth_an_event_type evtype,
-	u8 code, struct net_if_token *token, struct mac_addr_lnid *addr_lnid)
+					enum qeth_an_event_type evtype,
+					u8 code,
+					struct net_if_token *token,
+					struct mac_addr_lnid *addr_lnid)
 {
 	char str[7][32];
 	char *env[8];
@@ -1149,15 +1188,14 @@
 struct qeth_bridge_state_data {
 	struct work_struct worker;
 	struct qeth_card *card;
-	struct qeth_sbp_state_change qports;
+	u8 role;
+	u8 state;
 };
 
 static void qeth_bridge_state_change_worker(struct work_struct *work)
 {
 	struct qeth_bridge_state_data *data =
 		container_of(work, struct qeth_bridge_state_data, worker);
-	/* We are only interested in the first entry - local port */
-	struct qeth_sbp_port_entry *entry = &data->qports.entry[0];
 	char env_locrem[32];
 	char env_role[32];
 	char env_state[32];
@@ -1170,14 +1208,14 @@
 
 	snprintf(env_locrem, sizeof(env_locrem), "BRIDGEPORT=statechange");
 	snprintf(env_role, sizeof(env_role), "ROLE=%s",
-		(entry->role == QETH_SBP_ROLE_NONE) ? "none" :
-		(entry->role == QETH_SBP_ROLE_PRIMARY) ? "primary" :
-		(entry->role == QETH_SBP_ROLE_SECONDARY) ? "secondary" :
+		(data->role == QETH_SBP_ROLE_NONE) ? "none" :
+		(data->role == QETH_SBP_ROLE_PRIMARY) ? "primary" :
+		(data->role == QETH_SBP_ROLE_SECONDARY) ? "secondary" :
 		"<INVALID>");
 	snprintf(env_state, sizeof(env_state), "STATE=%s",
-		(entry->state == QETH_SBP_STATE_INACTIVE) ? "inactive" :
-		(entry->state == QETH_SBP_STATE_STANDBY) ? "standby" :
-		(entry->state == QETH_SBP_STATE_ACTIVE) ? "active" :
+		(data->state == QETH_SBP_STATE_INACTIVE) ? "inactive" :
+		(data->state == QETH_SBP_STATE_STANDBY) ? "standby" :
+		(data->state == QETH_SBP_STATE_ACTIVE) ? "active" :
 		"<INVALID>");
 	kobject_uevent_env(&data->card->gdev->dev.kobj,
 				KOBJ_CHANGE, env);
@@ -1187,10 +1225,8 @@
 static void qeth_bridge_state_change(struct qeth_card *card,
 					struct qeth_ipa_cmd *cmd)
 {
-	struct qeth_sbp_state_change *qports =
-		 &cmd->data.sbp.data.state_change;
+	struct qeth_sbp_port_data *qports = &cmd->data.sbp.data.port_data;
 	struct qeth_bridge_state_data *data;
-	int extrasize;
 
 	QETH_CARD_TEXT(card, 2, "brstchng");
 	if (qports->num_entries == 0) {
@@ -1201,90 +1237,192 @@
 		QETH_CARD_TEXT_(card, 2, "BPsz%04x", qports->entry_length);
 		return;
 	}
-	extrasize = sizeof(struct qeth_sbp_port_entry) * qports->num_entries;
-	data = kzalloc(sizeof(struct qeth_bridge_state_data) + extrasize,
-		GFP_ATOMIC);
+
+	data = kzalloc(sizeof(*data), GFP_ATOMIC);
 	if (!data) {
 		QETH_CARD_TEXT(card, 2, "BPSalloc");
 		return;
 	}
 	INIT_WORK(&data->worker, qeth_bridge_state_change_worker);
 	data->card = card;
-	memcpy(&data->qports, qports,
-			sizeof(struct qeth_sbp_state_change) + extrasize);
+	/* Information for the local port: */
+	data->role = qports->entry[0].role;
+	data->state = qports->entry[0].state;
+
 	queue_work(card->event_wq, &data->worker);
 }
 
-struct qeth_bridge_host_data {
-	struct work_struct worker;
+struct qeth_addr_change_data {
+	struct delayed_work dwork;
 	struct qeth_card *card;
-	struct qeth_ipacmd_addr_change hostevs;
+	struct qeth_ipacmd_addr_change ac_event;
 };
 
-static void qeth_bridge_host_event_worker(struct work_struct *work)
+static void qeth_l2_dev2br_worker(struct work_struct *work)
 {
-	struct qeth_bridge_host_data *data =
-		container_of(work, struct qeth_bridge_host_data, worker);
-	int i;
+	struct delayed_work *dwork = to_delayed_work(work);
+	struct qeth_addr_change_data *data;
+	struct qeth_card *card;
+	struct qeth_priv *priv;
+	unsigned int i;
+	int rc;
 
-	if (data->hostevs.lost_event_mask) {
-		dev_info(&data->card->gdev->dev,
-"Address notification from the Bridge Port stopped %s (%s)\n",
-			data->card->dev->name,
-			(data->hostevs.lost_event_mask == 0x01)
-			? "Overflow"
-			: (data->hostevs.lost_event_mask == 0x02)
-			? "Bridge port state change"
-			: "Unknown reason");
-		mutex_lock(&data->card->sbp_lock);
-		data->card->options.sbp.hostnotification = 0;
-		mutex_unlock(&data->card->sbp_lock);
-		qeth_bridge_emit_host_event(data->card, anev_abort,
-			0, NULL, NULL);
-	} else
-		for (i = 0; i < data->hostevs.num_entries; i++) {
-			struct qeth_ipacmd_addr_change_entry *entry =
-					&data->hostevs.entry[i];
-			qeth_bridge_emit_host_event(data->card,
-					anev_reg_unreg,
-					entry->change_code,
-					&entry->token, &entry->addr_lnid);
+	data = container_of(dwork, struct qeth_addr_change_data, dwork);
+	card = data->card;
+	priv = netdev_priv(card->dev);
+
+	QETH_CARD_TEXT(card, 4, "dev2brew");
+
+	if (READ_ONCE(card->info.pnso_mode) == QETH_PNSO_NONE)
+		goto free;
+
+	/* Potential re-config in progress, try again later: */
+	if (!rtnl_trylock()) {
+		queue_delayed_work(card->event_wq, dwork,
+				   msecs_to_jiffies(100));
+		return;
+	}
+	if (!netif_device_present(card->dev))
+		goto out_unlock;
+
+	if (data->ac_event.lost_event_mask) {
+		QETH_DBF_MESSAGE(3,
+				 "Address change notification overflow on device %x\n",
+				 CARD_DEVID(card));
+		/* Card fdb and bridge fdb are out of sync, card has stopped
+		 * notifications (no need to drain_workqueue). Purge all
+		 * 'extern_learn' entries from the parent bridge and restart
+		 * the notifications.
+		 */
+		qeth_l2_dev2br_fdb_flush(card);
+		rc = qeth_l2_dev2br_an_set(card, true);
+		if (rc) {
+			/* TODO: if we want to retry after -EAGAIN, be
+			 * aware that there could now be stale entries in
+			 * the workqueue which would need to be drained.
+			 * For now we give up:
+			 */
+			netdev_err(card->dev,
+				   "bridge learning_sync failed to recover: %d\n",
+				   rc);
+			WRITE_ONCE(card->info.pnso_mode,
+				   QETH_PNSO_NONE);
+			/* To remove fdb entries reported by an_set: */
+			qeth_l2_dev2br_fdb_flush(card);
+			priv->brport_features ^= BR_LEARNING_SYNC;
+		} else {
+			QETH_DBF_MESSAGE(3,
+					 "Address Notification resynced on device %x\n",
+					 CARD_DEVID(card));
 		}
+	} else {
+		for (i = 0; i < data->ac_event.num_entries; i++) {
+			struct qeth_ipacmd_addr_change_entry *entry =
+					&data->ac_event.entry[i];
+			qeth_l2_dev2br_fdb_notify(card,
+						  entry->change_code,
+						  &entry->token,
+						  &entry->addr_lnid);
+		}
+	}
+
+out_unlock:
+	rtnl_unlock();
+
+free:
 	kfree(data);
 }
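
The rtnl_trylock() at the top of the worker avoids stalling the event workqueue whenever the RTNL is held elsewhere: instead of blocking, the work re-arms itself and retries. A sketch of the idiom (wq and dwork are placeholders, not additional driver code):

	if (!rtnl_trylock()) {
		/* RTNL busy (e.g. re-config in flight); retry in 100ms */
		queue_delayed_work(wq, dwork, msecs_to_jiffies(100));
		return;
	}
	/* ... do the work that requires the RTNL ... */
	rtnl_unlock();

This is also why the handlers moved from plain work items to delayed work: queued with a delay of 0 they behave as before, but the retry path gains a natural backoff.
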
 
-static void qeth_bridge_host_event(struct qeth_card *card,
-					struct qeth_ipa_cmd *cmd)
+static void qeth_addr_change_event_worker(struct work_struct *work)
+{
+	struct delayed_work *dwork = to_delayed_work(work);
+	struct qeth_addr_change_data *data;
+	struct qeth_card *card;
+	int i;
+
+	data = container_of(dwork, struct qeth_addr_change_data, dwork);
+	card = data->card;
+
+	QETH_CARD_TEXT(data->card, 4, "adrchgew");
+
+	if (READ_ONCE(card->info.pnso_mode) == QETH_PNSO_NONE)
+		goto free;
+
+	if (data->ac_event.lost_event_mask) {
+		/* Potential re-config in progress, try again later: */
+		if (!mutex_trylock(&card->sbp_lock)) {
+			queue_delayed_work(card->event_wq, dwork,
+					   msecs_to_jiffies(100));
+			return;
+		}
+
+		dev_info(&data->card->gdev->dev,
+			 "Address change notification stopped on %s (%s)\n",
+			 netdev_name(card->dev),
+			(data->ac_event.lost_event_mask == 0x01)
+			? "Overflow"
+			: (data->ac_event.lost_event_mask == 0x02)
+			? "Bridge port state change"
+			: "Unknown reason");
+
+		data->card->options.sbp.hostnotification = 0;
+		card->info.pnso_mode = QETH_PNSO_NONE;
+		mutex_unlock(&data->card->sbp_lock);
+		qeth_bridge_emit_host_event(data->card, anev_abort,
+					    0, NULL, NULL);
+	} else
+		for (i = 0; i < data->ac_event.num_entries; i++) {
+			struct qeth_ipacmd_addr_change_entry *entry =
+					&data->ac_event.entry[i];
+			qeth_bridge_emit_host_event(data->card,
+						    anev_reg_unreg,
+						    entry->change_code,
+						    &entry->token,
+						    &entry->addr_lnid);
+		}
+
+free:
+	kfree(data);
+}
+
+static void qeth_addr_change_event(struct qeth_card *card,
+				   struct qeth_ipa_cmd *cmd)
 {
 	struct qeth_ipacmd_addr_change *hostevs =
 		 &cmd->data.addrchange;
-	struct qeth_bridge_host_data *data;
+	struct qeth_addr_change_data *data;
 	int extrasize;
 
-	QETH_CARD_TEXT(card, 2, "brhostev");
+	if (card->info.pnso_mode == QETH_PNSO_NONE)
+		return;
+
+	QETH_CARD_TEXT(card, 4, "adrchgev");
 	if (cmd->hdr.return_code != 0x0000) {
 		if (cmd->hdr.return_code == 0x0010) {
 			if (hostevs->lost_event_mask == 0x00)
 				hostevs->lost_event_mask = 0xff;
 		} else {
-			QETH_CARD_TEXT_(card, 2, "BPHe%04x",
+			QETH_CARD_TEXT_(card, 2, "ACHN%04x",
 				cmd->hdr.return_code);
 			return;
 		}
 	}
 	extrasize = sizeof(struct qeth_ipacmd_addr_change_entry) *
 						hostevs->num_entries;
-	data = kzalloc(sizeof(struct qeth_bridge_host_data) + extrasize,
-		GFP_ATOMIC);
+	data = kzalloc(sizeof(struct qeth_addr_change_data) + extrasize,
+		       GFP_ATOMIC);
 	if (!data) {
-		QETH_CARD_TEXT(card, 2, "BPHalloc");
+		QETH_CARD_TEXT(card, 2, "ACNalloc");
 		return;
 	}
-	INIT_WORK(&data->worker, qeth_bridge_host_event_worker);
+	if (card->info.pnso_mode == QETH_PNSO_BRIDGEPORT)
+		INIT_DELAYED_WORK(&data->dwork, qeth_addr_change_event_worker);
+	else
+		INIT_DELAYED_WORK(&data->dwork, qeth_l2_dev2br_worker);
 	data->card = card;
-	memcpy(&data->hostevs, hostevs,
+	memcpy(&data->ac_event, hostevs,
 			sizeof(struct qeth_ipacmd_addr_change) + extrasize);
-	queue_work(card->event_wq, &data->worker);
+	queue_delayed_work(card->event_wq, &data->dwork, 0);
 }
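
qeth_addr_change_event() is now the single entry point for IPA_CMD_ADDRESS_CHANGE_NOTIF and dispatches on pnso_mode: QETH_PNSO_BRIDGEPORT events keep the legacy uevent-emitting worker, QETH_PNSO_ADDR_INFO events feed the new dev2br/switchdev worker, and events arriving while the mode is QETH_PNSO_NONE are dropped up front.
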
 
 /* SETBRIDGEPORT support; sending commands */
@@ -1469,8 +1607,8 @@
 	struct qeth_reply *reply, unsigned long data)
 {
 	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
-	struct qeth_sbp_query_ports *qports = &cmd->data.sbp.data.query_ports;
 	struct _qeth_sbp_cbctl *cbctl = (struct _qeth_sbp_cbctl *)reply->param;
+	struct qeth_sbp_port_data *qports;
 	int rc;
 
 	QETH_CARD_TEXT(card, 2, "brqprtcb");
@@ -1478,6 +1616,7 @@
 	if (rc)
 		return rc;
 
+	qports = &cmd->data.sbp.data.port_data;
 	if (qports->entry_length != sizeof(struct qeth_sbp_port_entry)) {
 		QETH_CARD_TEXT_(card, 2, "SBPs%04x", qports->entry_length);
 		return -EINVAL;
@@ -1572,63 +1711,18 @@
 	return qeth_send_ipa_cmd(card, iob, qeth_bridgeport_set_cb, NULL);
 }
 
-/**
- * qeth_anset_makerc() - derive "traditional" error from hardware codes.
- * @card:		      qeth_card structure pointer, for debug messages.
- *
- * Returns negative errno-compatible error indication or 0 on success.
- */
-static int qeth_anset_makerc(struct qeth_card *card, int pnso_rc, u16 response)
-{
-	int rc;
-
-	if (pnso_rc == 0)
-		switch (response) {
-		case 0x0001:
-			rc = 0;
-			break;
-		case 0x0004:
-		case 0x0100:
-		case 0x0106:
-			rc = -EOPNOTSUPP;
-			dev_err(&card->gdev->dev,
-				"Setting address notification failed\n");
-			break;
-		case 0x0107:
-			rc = -EAGAIN;
-			break;
-		default:
-			rc = -EIO;
-		}
-	else
-		rc = -EIO;
-
-	if (rc) {
-		QETH_CARD_TEXT_(card, 2, "SBPp%04x", pnso_rc);
-		QETH_CARD_TEXT_(card, 2, "SBPr%04x", response);
-	}
-	return rc;
-}
-
 static void qeth_bridgeport_an_set_cb(void *priv,
-		enum qdio_brinfo_entry_type type, void *entry)
+				      struct chsc_pnso_naid_l2 *entry)
 {
 	struct qeth_card *card = (struct qeth_card *)priv;
-	struct qdio_brinfo_entry_l2 *l2entry;
 	u8 code;
 
-	if (type != l2_addr_lnid) {
-		WARN_ON_ONCE(1);
-		return;
-	}
-
-	l2entry = (struct qdio_brinfo_entry_l2 *)entry;
 	code = IPA_ADDR_CHANGE_CODE_MACADDR;
-	if (l2entry->addr_lnid.lnid < VLAN_N_VID)
+	if (entry->addr_lnid.lnid < VLAN_N_VID)
 		code |= IPA_ADDR_CHANGE_CODE_VLANID;
 	qeth_bridge_emit_host_event(card, anev_reg_unreg, code,
-		(struct net_if_token *)&l2entry->nit,
-		(struct mac_addr_lnid *)&l2entry->addr_lnid);
+				    (struct net_if_token *)&entry->nit,
+				    (struct mac_addr_lnid *)&entry->addr_lnid);
 }
 
 /**
@@ -1644,30 +1738,22 @@
 int qeth_bridgeport_an_set(struct qeth_card *card, int enable)
 {
 	int rc;
-	u16 response;
-	struct ccw_device *ddev;
-	struct subchannel_id schid;
 
-	if (!card)
-		return -EINVAL;
 	if (!card->options.sbp.supported_funcs)
 		return -EOPNOTSUPP;
-	ddev = CARD_DDEV(card);
-	ccw_device_get_schid(ddev, &schid);
 
 	if (enable) {
 		qeth_bridge_emit_host_event(card, anev_reset, 0, NULL, NULL);
-		rc = qdio_pnso_brinfo(schid, 1, &response,
-			qeth_bridgeport_an_set_cb, card);
-	} else
-		rc = qdio_pnso_brinfo(schid, 0, &response, NULL, NULL);
-	return qeth_anset_makerc(card, rc, response);
-}
-
-static bool qeth_bridgeport_is_in_use(struct qeth_card *card)
-{
-	return (card->options.sbp.role || card->options.sbp.reflect_promisc ||
-		card->options.sbp.hostnotification);
+		qeth_l2_set_pnso_mode(card, QETH_PNSO_BRIDGEPORT);
+		rc = qeth_l2_pnso(card, PNSO_OC_NET_BRIDGE_INFO, 1,
+				  qeth_bridgeport_an_set_cb, card);
+		if (rc)
+			qeth_l2_set_pnso_mode(card, QETH_PNSO_NONE);
+	} else {
+		rc = qeth_l2_pnso(card, PNSO_OC_NET_BRIDGE_INFO, 0, NULL, NULL);
+		qeth_l2_set_pnso_mode(card, QETH_PNSO_NONE);
+	}
+	return rc;
 }
 
 /* VNIC Characteristics support */
@@ -1704,23 +1790,11 @@
 	return rc;
 }
 
-/* generic VNICC request call back control */
-struct _qeth_l2_vnicc_request_cbctl {
-	struct {
-		union{
-			u32 *sup_cmds;
-			u32 *timeout;
-		};
-	} result;
-};
-
 /* generic VNICC request call back */
 static int qeth_l2_vnicc_request_cb(struct qeth_card *card,
 				    struct qeth_reply *reply,
 				    unsigned long data)
 {
-	struct _qeth_l2_vnicc_request_cbctl *cbctl =
-		(struct _qeth_l2_vnicc_request_cbctl *) reply->param;
 	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
 	struct qeth_ipacmd_vnicc *rep = &cmd->data.vnicc;
 	u32 sub_cmd = cmd->data.vnicc.hdr.sub_command;
@@ -1733,9 +1807,9 @@
 	card->options.vnicc.cur_chars = rep->vnicc_cmds.enabled;
 
 	if (sub_cmd == IPA_VNICC_QUERY_CMDS)
-		*cbctl->result.sup_cmds = rep->data.query_cmds.sup_cmds;
+		*(u32 *)reply->param = rep->data.query_cmds.sup_cmds;
 	else if (sub_cmd == IPA_VNICC_GET_TIMEOUT)
-		*cbctl->result.timeout = rep->data.getset_timeout.timeout;
+		*(u32 *)reply->param = rep->data.getset_timeout.timeout;
 
 	return 0;
 }
@@ -1776,7 +1850,6 @@
 static int qeth_l2_vnicc_query_cmds(struct qeth_card *card, u32 vnic_char,
 				    u32 *sup_cmds)
 {
-	struct _qeth_l2_vnicc_request_cbctl cbctl;
 	struct qeth_cmd_buffer *iob;
 
 	QETH_CARD_TEXT(card, 2, "vniccqcm");
@@ -1787,10 +1860,7 @@
 
 	__ipa_cmd(iob)->data.vnicc.data.query_cmds.vnic_char = vnic_char;
 
-	/* prepare callback control */
-	cbctl.result.sup_cmds = sup_cmds;
-
-	return qeth_send_ipa_cmd(card, iob, qeth_l2_vnicc_request_cb, &cbctl);
+	return qeth_send_ipa_cmd(card, iob, qeth_l2_vnicc_request_cb, sup_cmds);
 }
 
 /* VNICC enable/disable characteristic request */
@@ -1814,7 +1884,6 @@
 					u32 cmd, u32 *timeout)
 {
 	struct qeth_vnicc_getset_timeout *getset_timeout;
-	struct _qeth_l2_vnicc_request_cbctl cbctl;
 	struct qeth_cmd_buffer *iob;
 
 	QETH_CARD_TEXT(card, 2, "vniccgst");
@@ -1829,11 +1898,20 @@
 	if (cmd == IPA_VNICC_SET_TIMEOUT)
 		getset_timeout->timeout = *timeout;
 
-	/* prepare callback control */
-	if (cmd == IPA_VNICC_GET_TIMEOUT)
-		cbctl.result.timeout = timeout;
+	return qeth_send_ipa_cmd(card, iob, qeth_l2_vnicc_request_cb, timeout);
+}
 
-	return qeth_send_ipa_cmd(card, iob, qeth_l2_vnicc_request_cb, &cbctl);
+/* recover user timeout setting */
+static bool qeth_l2_vnicc_recover_timeout(struct qeth_card *card, u32 vnicc,
+					  u32 *timeout)
+{
+	if (card->options.vnicc.sup_chars & vnicc &&
+	    card->options.vnicc.getset_timeout_sup & vnicc &&
+	    !qeth_l2_vnicc_getset_timeout(card, vnicc, IPA_VNICC_SET_TIMEOUT,
+					  timeout))
+		return false;
+	*timeout = QETH_VNICC_DEFAULT_TIMEOUT;
+	return true;
 }
 
 /* set current VNICC flag state; called from sysfs store function */
@@ -1976,7 +2054,7 @@
 }
 
 /* check if VNICC is currently enabled */
-bool qeth_l2_vnicc_is_in_use(struct qeth_card *card)
+static bool _qeth_l2_vnicc_is_in_use(struct qeth_card *card)
 {
 	if (!card->options.vnicc.sup_chars)
 		return false;
@@ -1991,17 +2069,19 @@
 	return true;
 }
 
-/* recover user timeout setting */
-static bool qeth_l2_vnicc_recover_timeout(struct qeth_card *card, u32 vnicc,
-					  u32 *timeout)
+/**
+ *	qeth_bridgeport_allowed - are any qeth_bridgeport functions allowed?
+ *	@card: qeth_card structure pointer
+ *
+ *	qeth_bridgeport functionality is mutually exclusive with usage of the
+ *	VNIC Characteristics and dev2br address notifications
+ */
+bool qeth_bridgeport_allowed(struct qeth_card *card)
 {
-	if (card->options.vnicc.sup_chars & vnicc &&
-	    card->options.vnicc.getset_timeout_sup & vnicc &&
-	    !qeth_l2_vnicc_getset_timeout(card, vnicc, IPA_VNICC_SET_TIMEOUT,
-					  timeout))
-		return false;
-	*timeout = QETH_VNICC_DEFAULT_TIMEOUT;
-	return true;
+	struct qeth_priv *priv = netdev_priv(card->dev);
+
+	return (!_qeth_l2_vnicc_is_in_use(card) &&
+		!(priv->brport_features & BR_LEARNING_SYNC));
 }
 
 /* recover user characteristic setting */
@@ -2092,6 +2172,181 @@
 	card->options.vnicc.wanted_chars = QETH_VNICC_DEFAULT;
 }
 
+static const struct device_type qeth_l2_devtype = {
+	.name = "qeth_layer2",
+	.groups = qeth_l2_attr_groups,
+};
+
+static int qeth_l2_probe_device(struct ccwgroup_device *gdev)
+{
+	struct qeth_card *card = dev_get_drvdata(&gdev->dev);
+	int rc;
+
+	if (IS_OSN(card))
+		dev_notice(&gdev->dev, "OSN support will be dropped in 2021\n");
+
+	qeth_l2_vnicc_set_defaults(card);
+	mutex_init(&card->sbp_lock);
+
+	if (gdev->dev.type == &qeth_generic_devtype) {
+		rc = qeth_l2_create_device_attributes(&gdev->dev);
+		if (rc)
+			return rc;
+	}
+
+	INIT_WORK(&card->rx_mode_work, qeth_l2_rx_mode_work);
+	return 0;
+}
+
+static void qeth_l2_remove_device(struct ccwgroup_device *gdev)
+{
+	struct qeth_card *card = dev_get_drvdata(&gdev->dev);
+
+	if (gdev->dev.type == &qeth_generic_devtype)
+		qeth_l2_remove_device_attributes(&gdev->dev);
+	qeth_set_allowed_threads(card, 0, 1);
+	wait_event(card->wait_q, qeth_threads_running(card, 0xffffffff) == 0);
+
+	if (gdev->state == CCWGROUP_ONLINE)
+		qeth_set_offline(card, card->discipline, false);
+
+	cancel_work_sync(&card->close_dev_work);
+	if (card->dev->reg_state == NETREG_REGISTERED)
+		unregister_netdev(card->dev);
+}
+
+static int qeth_l2_set_online(struct qeth_card *card, bool carrier_ok)
+{
+	struct net_device *dev = card->dev;
+	int rc = 0;
+
+	qeth_l2_detect_dev2br_support(card);
+
+	mutex_lock(&card->sbp_lock);
+	qeth_bridgeport_query_support(card);
+	if (card->options.sbp.supported_funcs) {
+		qeth_l2_setup_bridgeport_attrs(card);
+		dev_info(&card->gdev->dev,
+			 "The device represents a Bridge Capable Port\n");
+	}
+	mutex_unlock(&card->sbp_lock);
+
+	qeth_l2_register_dev_addr(card);
+
+	/* for the rx_bcast characteristic, init VNICC after setmac */
+	qeth_l2_vnicc_init(card);
+
+	qeth_l2_trace_features(card);
+
+	/* softsetup */
+	QETH_CARD_TEXT(card, 2, "softsetp");
+
+	card->state = CARD_STATE_SOFTSETUP;
+
+	qeth_set_allowed_threads(card, 0xffffffff, 0);
+
+	if (dev->reg_state != NETREG_REGISTERED) {
+		rc = qeth_l2_setup_netdev(card);
+		if (rc)
+			goto err_setup;
+
+		if (carrier_ok)
+			netif_carrier_on(dev);
+	} else {
+		rtnl_lock();
+		rc = qeth_set_real_num_tx_queues(card,
+						 qeth_tx_actual_queues(card));
+		if (rc) {
+			rtnl_unlock();
+			goto err_set_queues;
+		}
+
+		if (carrier_ok)
+			netif_carrier_on(dev);
+		else
+			netif_carrier_off(dev);
+
+		netif_device_attach(dev);
+		qeth_enable_hw_features(dev);
+		qeth_l2_enable_brport_features(card);
+
+		if (card->info.open_when_online) {
+			card->info.open_when_online = 0;
+			dev_open(dev, NULL);
+		}
+		rtnl_unlock();
+	}
+	return 0;
+
+err_set_queues:
+err_setup:
+	qeth_set_allowed_threads(card, 0, 1);
+	card->state = CARD_STATE_DOWN;
+	return rc;
+}
+
+static void qeth_l2_set_offline(struct qeth_card *card)
+{
+	struct qeth_priv *priv = netdev_priv(card->dev);
+
+	qeth_set_allowed_threads(card, 0, 1);
+	qeth_l2_drain_rx_mode_cache(card);
+
+	if (card->state == CARD_STATE_SOFTSETUP)
+		card->state = CARD_STATE_DOWN;
+
+	qeth_l2_set_pnso_mode(card, QETH_PNSO_NONE);
+	if (priv->brport_features & BR_LEARNING_SYNC) {
+		rtnl_lock();
+		qeth_l2_dev2br_fdb_flush(card);
+		rtnl_unlock();
+	}
+}
+
+/* Returns zero if the command is successfully "consumed" */
+static int qeth_l2_control_event(struct qeth_card *card,
+				 struct qeth_ipa_cmd *cmd)
+{
+	switch (cmd->hdr.command) {
+	case IPA_CMD_SETBRIDGEPORT_OSA:
+	case IPA_CMD_SETBRIDGEPORT_IQD:
+		if (cmd->data.sbp.hdr.command_code ==
+		    IPA_SBP_BRIDGE_PORT_STATE_CHANGE) {
+			qeth_bridge_state_change(card, cmd);
+			return 0;
+		}
+
+		return 1;
+	case IPA_CMD_ADDRESS_CHANGE_NOTIF:
+		qeth_addr_change_event(card, cmd);
+		return 0;
+	default:
+		return 1;
+	}
+}
+
+const struct qeth_discipline qeth_l2_discipline = {
+	.devtype = &qeth_l2_devtype,
+	.setup = qeth_l2_probe_device,
+	.remove = qeth_l2_remove_device,
+	.set_online = qeth_l2_set_online,
+	.set_offline = qeth_l2_set_offline,
+	.do_ioctl = NULL,
+	.control_event_handler = qeth_l2_control_event,
+};
+EXPORT_SYMBOL_GPL(qeth_l2_discipline);
+
+static int __init qeth_l2_init(void)
+{
+	pr_info("register layer 2 discipline\n");
+	return 0;
+}
+
+static void __exit qeth_l2_exit(void)
+{
+	pr_info("unregister layer 2 discipline\n");
+}
+
 module_init(qeth_l2_init);
 module_exit(qeth_l2_exit);
 MODULE_AUTHOR("Frank Blaschka <frank.blaschka@de.ibm.com>");
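
The qeth_l2_control_event() handler added above follows a "return 0 when consumed" convention: the core only falls back to its default processing when the discipline returns non-zero. A small sketch of a dispatcher built on that convention (the event names and the dispatcher itself are made up for illustration):

	#include <stdio.h>

	enum event { EV_BRIDGEPORT, EV_ADDR_CHANGE, EV_OTHER };

	/* Mirror of the convention: 0 == consumed, non-zero == pass on. */
	static int control_event(enum event ev)
	{
		switch (ev) {
		case EV_ADDR_CHANGE:
			/* discipline-specific handling would run here */
			return 0;
		default:
			return 1;
		}
	}

	int main(void)
	{
		enum event evs[] = { EV_ADDR_CHANGE, EV_OTHER };

		for (unsigned int i = 0; i < 2; i++)
			printf("event %u: %s\n", i,
			       control_event(evs[i]) ? "default handling" :
						       "consumed");
		return 0;
	}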
diff --git a/drivers/s390/net/qeth_l2_sys.c b/drivers/s390/net/qeth_l2_sys.c
index fc7101a..4ba3bc5 100644
--- a/drivers/s390/net/qeth_l2_sys.c
+++ b/drivers/s390/net/qeth_l2_sys.c
@@ -18,10 +18,7 @@
 	int rc = 0;
 	char *word;
 
-	if (!card)
-		return -EINVAL;
-
-	if (qeth_l2_vnicc_is_in_use(card))
+	if (!qeth_bridgeport_allowed(card))
 		return sprintf(buf, "n/a (VNIC characteristics)\n");
 
 	mutex_lock(&card->sbp_lock);
@@ -68,7 +65,7 @@
 {
 	struct qeth_card *card = dev_get_drvdata(dev);
 
-	if (qeth_l2_vnicc_is_in_use(card))
+	if (!qeth_bridgeport_allowed(card))
 		return sprintf(buf, "n/a (VNIC characteristics)\n");
 
 	return qeth_bridge_port_role_state_show(dev, attr, buf, 0);
@@ -81,8 +78,6 @@
 	int rc = 0;
 	enum qeth_sbp_roles role;
 
-	if (!card)
-		return -EINVAL;
 	if (sysfs_streq(buf, "primary"))
 		role = QETH_SBP_ROLE_PRIMARY;
 	else if (sysfs_streq(buf, "secondary"))
@@ -95,7 +90,7 @@
 	mutex_lock(&card->conf_mutex);
 	mutex_lock(&card->sbp_lock);
 
-	if (qeth_l2_vnicc_is_in_use(card))
+	if (!qeth_bridgeport_allowed(card))
 		rc = -EBUSY;
 	else if (card->options.sbp.reflect_promisc)
 		/* Forbid direct manipulation */
@@ -121,7 +116,7 @@
 {
 	struct qeth_card *card = dev_get_drvdata(dev);
 
-	if (qeth_l2_vnicc_is_in_use(card))
+	if (!qeth_bridgeport_allowed(card))
 		return sprintf(buf, "n/a (VNIC characteristics)\n");
 
 	return qeth_bridge_port_role_state_show(dev, attr, buf, 1);
@@ -136,10 +131,7 @@
 	struct qeth_card *card = dev_get_drvdata(dev);
 	int enabled;
 
-	if (!card)
-		return -EINVAL;
-
-	if (qeth_l2_vnicc_is_in_use(card))
+	if (!qeth_bridgeport_allowed(card))
 		return sprintf(buf, "n/a (VNIC characteristics)\n");
 
 	enabled = card->options.sbp.hostnotification;
@@ -154,9 +146,6 @@
 	bool enable;
 	int rc;
 
-	if (!card)
-		return -EINVAL;
-
 	rc = kstrtobool(buf, &enable);
 	if (rc)
 		return rc;
@@ -164,10 +153,11 @@
 	mutex_lock(&card->conf_mutex);
 	mutex_lock(&card->sbp_lock);
 
-	if (qeth_l2_vnicc_is_in_use(card))
+	if (!qeth_bridgeport_allowed(card))
 		rc = -EBUSY;
 	else if (qeth_card_hw_is_reachable(card)) {
 		rc = qeth_bridgeport_an_set(card, enable);
+		/* sbp_lock ensures ordering vs notifications-stopped events */
 		if (!rc)
 			card->options.sbp.hostnotification = enable;
 	} else
@@ -189,10 +179,7 @@
 	struct qeth_card *card = dev_get_drvdata(dev);
 	char *state;
 
-	if (!card)
-		return -EINVAL;
-
-	if (qeth_l2_vnicc_is_in_use(card))
+	if (!qeth_bridgeport_allowed(card))
 		return sprintf(buf, "n/a (VNIC characteristics)\n");
 
 	if (card->options.sbp.reflect_promisc) {
@@ -213,9 +200,6 @@
 	int enable, primary;
 	int rc = 0;
 
-	if (!card)
-		return -EINVAL;
-
 	if (sysfs_streq(buf, "none")) {
 		enable = 0;
 		primary = 0;
@@ -231,7 +215,7 @@
 	mutex_lock(&card->conf_mutex);
 	mutex_lock(&card->sbp_lock);
 
-	if (qeth_l2_vnicc_is_in_use(card))
+	if (!qeth_bridgeport_allowed(card))
 		rc = -EBUSY;
 	else if (card->options.sbp.role != QETH_SBP_ROLE_NONE)
 		rc = -EPERM;
@@ -263,40 +247,6 @@
 	.attrs = qeth_l2_bridgeport_attrs,
 };
 
-/**
- * qeth_l2_setup_bridgeport_attrs() - set/restore attrs when turning online.
- * @card:			      qeth_card structure pointer
- *
- * Note: this function is called with conf_mutex held by the caller
- */
-void qeth_l2_setup_bridgeport_attrs(struct qeth_card *card)
-{
-	int rc;
-
-	if (!card)
-		return;
-	if (!card->options.sbp.supported_funcs)
-		return;
-
-	mutex_lock(&card->sbp_lock);
-	if (!card->options.sbp.reflect_promisc &&
-	    card->options.sbp.role != QETH_SBP_ROLE_NONE) {
-		/* Conditional to avoid spurious error messages */
-		qeth_bridgeport_setrole(card, card->options.sbp.role);
-		/* Let the callback function refresh the stored role value. */
-		qeth_bridgeport_query_ports(card,
-			&card->options.sbp.role, NULL);
-	}
-	if (card->options.sbp.hostnotification) {
-		rc = qeth_bridgeport_an_set(card, 1);
-		if (rc)
-			card->options.sbp.hostnotification = 0;
-	} else {
-		qeth_bridgeport_an_set(card, 0);
-	}
-	mutex_unlock(&card->sbp_lock);
-}
-
 /* VNIC CHARS support */
 
 /* convert sysfs attr name to VNIC characteristic */
@@ -328,9 +278,6 @@
 	u32 timeout;
 	int rc;
 
-	if (!card)
-		return -EINVAL;
-
 	rc = qeth_l2_vnicc_get_timeout(card, &timeout);
 	if (rc == -EBUSY)
 		return sprintf(buf, "n/a (BridgePort)\n");
@@ -348,9 +295,6 @@
 	u32 timeout;
 	int rc;
 
-	if (!card)
-		return -EINVAL;
-
 	rc = kstrtou32(buf, 10, &timeout);
 	if (rc)
 		return rc;
@@ -370,9 +314,6 @@
 	u32 vnicc;
 	int rc;
 
-	if (!card)
-		return -EINVAL;
-
 	vnicc = qeth_l2_vnicc_sysfs_attr_to_char(attr->attr.name);
 	rc = qeth_l2_vnicc_get_state(card, vnicc, &state);
 
@@ -393,9 +334,6 @@
 	u32 vnicc;
 	int rc;
 
-	if (!card)
-		return -EINVAL;
-
 	if (kstrtobool(buf, &state))
 		return -EINVAL;
 
diff --git a/drivers/s390/net/qeth_l3.h b/drivers/s390/net/qeth_l3.h
index 87659cf..acd130c 100644
--- a/drivers/s390/net/qeth_l3.h
+++ b/drivers/s390/net/qeth_l3.h
@@ -13,8 +13,6 @@
 #include "qeth_core.h"
 #include <linux/hashtable.h>
 
-#define QETH_SNIFF_AVAIL	0x0008
-
 enum qeth_ip_types {
 	QETH_IP_TYPE_NORMAL,
 	QETH_IP_TYPE_VIPA,
@@ -24,9 +22,7 @@
 struct qeth_ipaddr {
 	struct hlist_node hnode;
 	enum qeth_ip_types type;
-	unsigned char mac[ETH_ALEN];
 	u8 is_multicast:1;
-	u8 in_progress:1;
 	u8 disp_flag:2;
 	u8 ipato:1;			/* ucast only */
 
@@ -37,8 +33,8 @@
 	enum qeth_prot_versions proto;
 	union {
 		struct {
-			unsigned int addr;
-			unsigned int mask;
+			__be32 addr;
+			__be32 mask;
 		} a4;
 		struct {
 			struct in6_addr addr;
@@ -55,6 +51,7 @@
 	addr->type = type;
 	addr->proto = proto;
 	addr->disp_flag = QETH_DISP_ADDR_DO_NOTHING;
+	addr->ref_counter = 1;
 }
 
 static inline bool qeth_l3_addr_match_ip(struct qeth_ipaddr *a1,
@@ -74,12 +71,10 @@
 	 * so 'proto' and 'addr' match for sure.
 	 *
 	 * For ucast:
-	 * -	'mac' is always 0.
 	 * -	'mask'/'pfxlen' for RXIP/VIPA is always 0. For NORMAL, matching
 	 *	values are required to avoid mixups in takeover eligibility.
 	 *
 	 * For mcast,
-	 * -	'mac' is mapped from the IP, and thus always matches.
 	 * -	'mask'/'pfxlen' is always 0.
 	 */
 	if (a1->type != a2->type)
@@ -89,33 +84,25 @@
 	return a1->u.a4.mask == a2->u.a4.mask;
 }
 
-static inline  u64 qeth_l3_ipaddr_hash(struct qeth_ipaddr *addr)
+static inline u32 qeth_l3_ipaddr_hash(struct qeth_ipaddr *addr)
 {
-	u64  ret = 0;
-	u8 *point;
-
-	if (addr->proto == QETH_PROT_IPV6) {
-		point = (u8 *) &addr->u.a6.addr;
-		ret = get_unaligned((u64 *)point) ^
-			get_unaligned((u64 *) (point + 8));
-	}
-	if (addr->proto == QETH_PROT_IPV4) {
-		point = (u8 *) &addr->u.a4.addr;
-		ret = get_unaligned((u32 *) point);
-	}
-	return ret;
+	if (addr->proto == QETH_PROT_IPV6)
+		return ipv6_addr_hash(&addr->u.a6.addr);
+	else
+		return ipv4_addr_hash(addr->u.a4.addr);
 }
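
The rewritten hash helper above delegates to the kernel's ipv6_addr_hash()/ipv4_addr_hash(). Roughly, the IPv6 variant XOR-folds the four 32-bit address words into one key, while the IPv4 variant uses the address value itself; a simplified userspace approximation (the real helpers operate on __be32 and may differ in detail):

	#include <stdint.h>
	#include <stdio.h>

	/* Approximation of ipv6_addr_hash(): fold 128 bits to 32. */
	static uint32_t ipv6_hash(const uint32_t w[4])
	{
		return w[0] ^ w[1] ^ w[2] ^ w[3];
	}

	/* Approximation of ipv4_addr_hash(): the address is the key. */
	static uint32_t ipv4_hash(uint32_t addr)
	{
		return addr;
	}

	int main(void)
	{
		uint32_t v6[4] = { 0xfe800000u, 0, 0, 1 };

		printf("v6 key 0x%08x, v4 key 0x%08x\n",
		       ipv6_hash(v6), ipv4_hash(0xc0a80001u));
		return 0;
	}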
 
 struct qeth_ipato_entry {
 	struct list_head entry;
 	enum qeth_prot_versions proto;
 	char addr[16];
-	int mask_bits;
+	unsigned int mask_bits;
 };
 
 extern const struct attribute_group *qeth_l3_attr_groups[];
 
-void qeth_l3_ipaddr_to_string(enum qeth_prot_versions, const __u8 *, char *);
+int qeth_l3_ipaddr_to_string(enum qeth_prot_versions proto, const u8 *addr,
+			     char *buf);
 int qeth_l3_create_device_attributes(struct device *);
 void qeth_l3_remove_device_attributes(struct device *);
 int qeth_l3_setrouting_v4(struct qeth_card *);
@@ -123,7 +110,7 @@
 int qeth_l3_add_ipato_entry(struct qeth_card *, struct qeth_ipato_entry *);
 int qeth_l3_del_ipato_entry(struct qeth_card *card,
 			    enum qeth_prot_versions proto, u8 *addr,
-			    int mask_bits);
+			    unsigned int mask_bits);
 void qeth_l3_update_ipato(struct qeth_card *card);
 int qeth_l3_modify_hsuid(struct qeth_card *card, bool add);
 int qeth_l3_modify_rxip_vipa(struct qeth_card *card, bool add, const u8 *ip,
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index 8dee16a..291861c 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -37,50 +37,28 @@
 
 #include "qeth_l3.h"
 
-
-static int qeth_l3_set_offline(struct ccwgroup_device *);
-static void qeth_l3_set_rx_mode(struct net_device *dev);
 static int qeth_l3_register_addr_entry(struct qeth_card *,
 		struct qeth_ipaddr *);
 static int qeth_l3_deregister_addr_entry(struct qeth_card *,
 		struct qeth_ipaddr *);
 
-static void qeth_l3_ipaddr4_to_string(const __u8 *addr, char *buf)
-{
-	sprintf(buf, "%pI4", addr);
-}
-
-static void qeth_l3_ipaddr6_to_string(const __u8 *addr, char *buf)
-{
-	sprintf(buf, "%pI6", addr);
-}
-
-void qeth_l3_ipaddr_to_string(enum qeth_prot_versions proto, const __u8 *addr,
-				char *buf)
+int qeth_l3_ipaddr_to_string(enum qeth_prot_versions proto, const u8 *addr,
+			     char *buf)
 {
 	if (proto == QETH_PROT_IPV4)
-		qeth_l3_ipaddr4_to_string(addr, buf);
-	else if (proto == QETH_PROT_IPV6)
-		qeth_l3_ipaddr6_to_string(addr, buf);
-}
-
-static struct qeth_ipaddr *qeth_l3_get_addr_buffer(enum qeth_prot_versions prot)
-{
-	struct qeth_ipaddr *addr = kmalloc(sizeof(*addr), GFP_ATOMIC);
-
-	if (addr)
-		qeth_l3_init_ipaddr(addr, QETH_IP_TYPE_NORMAL, prot);
-	return addr;
+		return sprintf(buf, "%pI4", addr);
+	else
+		return sprintf(buf, "%pI6", addr);
 }
 
 static struct qeth_ipaddr *qeth_l3_find_addr_by_ip(struct qeth_card *card,
 						   struct qeth_ipaddr *query)
 {
-	u64 key = qeth_l3_ipaddr_hash(query);
+	u32 key = qeth_l3_ipaddr_hash(query);
 	struct qeth_ipaddr *addr;
 
 	if (query->is_multicast) {
-		hash_for_each_possible(card->ip_mc_htable, addr, hnode, key)
+		hash_for_each_possible(card->rx_mode_addrs, addr, hnode, key)
 			if (qeth_l3_addr_match_ip(addr, query))
 				return addr;
 	} else {
@@ -119,7 +97,7 @@
 		return false;
 
 	qeth_l3_convert_addr_to_bits((u8 *) &addr->u, addr_bits,
-				  (addr->proto == QETH_PROT_IPV4)? 4:16);
+				     (addr->proto == QETH_PROT_IPV4) ? 4 : 16);
 	list_for_each_entry(ipatoe, &card->ipato.entries, entry) {
 		if (addr->proto != ipatoe->proto)
 			continue;
@@ -127,11 +105,9 @@
 					  (ipatoe->proto == QETH_PROT_IPV4) ?
 					  4 : 16);
 		if (addr->proto == QETH_PROT_IPV4)
-			rc = !memcmp(addr_bits, ipatoe_bits,
-				     min(32, ipatoe->mask_bits));
+			rc = !memcmp(addr_bits, ipatoe_bits, ipatoe->mask_bits);
 		else
-			rc = !memcmp(addr_bits, ipatoe_bits,
-				     min(128, ipatoe->mask_bits));
+			rc = !memcmp(addr_bits, ipatoe_bits, ipatoe->mask_bits);
 		if (rc)
 			break;
 	}
@@ -171,8 +147,6 @@
 	addr->ref_counter--;
 	if (addr->type == QETH_IP_TYPE_NORMAL && addr->ref_counter > 0)
 		return rc;
-	if (addr->in_progress)
-		return -EINPROGRESS;
 
 	if (qeth_card_hw_is_reachable(card))
 		rc = qeth_l3_deregister_addr_entry(card, addr);
@@ -217,13 +191,10 @@
 			 "Registering IP address %s failed\n", buf);
 		return -EADDRINUSE;
 	} else {
-		addr = qeth_l3_get_addr_buffer(tmp_addr->proto);
+		addr = kmemdup(tmp_addr, sizeof(*tmp_addr), GFP_KERNEL);
 		if (!addr)
 			return -ENOMEM;
 
-		memcpy(addr, tmp_addr, sizeof(struct qeth_ipaddr));
-		addr->ref_counter = 1;
-
 		if (qeth_l3_is_addr_covered_by_ipato(card, addr)) {
 			QETH_CARD_TEXT(card, 2, "tkovaddr");
 			addr->ipato = 1;
@@ -236,29 +207,10 @@
 			return 0;
 		}
 
-		/* qeth_l3_register_addr_entry can go to sleep
-		 * if we add a IPV4 addr. It is caused by the reason
-		 * that SETIP ipa cmd starts ARP staff for IPV4 addr.
-		 * Thus we should unlock spinlock, and make a protection
-		 * using in_progress variable to indicate that there is
-		 * an hardware operation with this IPV4 address
-		 */
-		if (addr->proto == QETH_PROT_IPV4) {
-			addr->in_progress = 1;
-			mutex_unlock(&card->ip_lock);
-			rc = qeth_l3_register_addr_entry(card, addr);
-			mutex_lock(&card->ip_lock);
-			addr->in_progress = 0;
-		} else
-			rc = qeth_l3_register_addr_entry(card, addr);
+		rc = qeth_l3_register_addr_entry(card, addr);
 
 		if (!rc || rc == -EADDRINUSE || rc == -ENETDOWN) {
 			addr->disp_flag = QETH_DISP_ADDR_DO_NOTHING;
-			if (addr->ref_counter < 1) {
-				qeth_l3_deregister_addr_entry(card, addr);
-				hash_del(&addr->hnode);
-				kfree(addr);
-			}
 		} else {
 			hash_del(&addr->hnode);
 			kfree(addr);
@@ -285,7 +237,7 @@
 	struct hlist_node *tmp;
 	int i;
 
-	hash_for_each_safe(card->ip_mc_htable, i, tmp, addr, hnode) {
+	hash_for_each_safe(card->rx_mode_addrs, i, tmp, addr, hnode) {
 		hash_del(&addr->hnode);
 		kfree(addr);
 	}
@@ -326,19 +278,10 @@
 
 	hash_for_each_safe(card->ip_htable, i, tmp, addr, hnode) {
 		if (addr->disp_flag == QETH_DISP_ADDR_ADD) {
-			if (addr->proto == QETH_PROT_IPV4) {
-				addr->in_progress = 1;
-				mutex_unlock(&card->ip_lock);
-				rc = qeth_l3_register_addr_entry(card, addr);
-				mutex_lock(&card->ip_lock);
-				addr->in_progress = 0;
-			} else
-				rc = qeth_l3_register_addr_entry(card, addr);
+			rc = qeth_l3_register_addr_entry(card, addr);
 
 			if (!rc) {
 				addr->disp_flag = QETH_DISP_ADDR_DO_NOTHING;
-				if (addr->ref_counter < 1)
-					qeth_l3_delete_ip(card, addr);
 			} else {
 				hash_del(&addr->hnode);
 				kfree(addr);
@@ -369,7 +312,8 @@
 }
 
 static int qeth_l3_send_setdelmc(struct qeth_card *card,
-			struct qeth_ipaddr *addr, int ipacmd)
+				 struct qeth_ipaddr *addr,
+				 enum qeth_ipa_cmds ipacmd)
 {
 	struct qeth_cmd_buffer *iob;
 	struct qeth_ipa_cmd *cmd;
@@ -381,27 +325,27 @@
 	if (!iob)
 		return -ENOMEM;
 	cmd = __ipa_cmd(iob);
-	ether_addr_copy(cmd->data.setdelipm.mac, addr->mac);
-	if (addr->proto == QETH_PROT_IPV6)
-		memcpy(cmd->data.setdelipm.ip6, &addr->u.a6.addr,
-		       sizeof(struct in6_addr));
-	else
-		memcpy(&cmd->data.setdelipm.ip4, &addr->u.a4.addr, 4);
+	if (addr->proto == QETH_PROT_IPV6) {
+		cmd->data.setdelipm.ip = addr->u.a6.addr;
+		ipv6_eth_mc_map(&addr->u.a6.addr, cmd->data.setdelipm.mac);
+	} else {
+		cmd->data.setdelipm.ip.s6_addr32[3] = addr->u.a4.addr;
+		ip_eth_mc_map(addr->u.a4.addr, cmd->data.setdelipm.mac);
+	}
 
 	return qeth_send_ipa_cmd(card, iob, qeth_l3_setdelip_cb, NULL);
 }
 
-static void qeth_l3_fill_netmask(u8 *netmask, unsigned int len)
+static void qeth_l3_set_ipv6_prefix(struct in6_addr *prefix, unsigned int len)
 {
-	int i, j;
-	for (i = 0; i < 16; i++) {
-		j = (len) - (i * 8);
-		if (j >= 8)
-			netmask[i] = 0xff;
-		else if (j > 0)
-			netmask[i] = (u8)(0xFF00 >> j);
-		else
-			netmask[i] = 0;
+	unsigned int i = 0;
+
+	while (len && i < 4) {
+		int mask_len = min_t(int, len, 32);
+
+		prefix->s6_addr32[i] = inet_make_mask(mask_len);
+		len -= mask_len;
+		i++;
 	}
 }
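
qeth_l3_set_ipv6_prefix() above fills the prefix word by word: each 32-bit chunk gets inet_make_mask() of up to 32 bits until the prefix length is used up, so e.g. /40 yields ffffffff ff000000 00000000 00000000. A self-contained userspace sketch of the same fill, where make_mask() stands in for inet_make_mask():

	#include <stdint.h>
	#include <stdio.h>
	#include <arpa/inet.h>

	/* Stand-in for inet_make_mask(): top `len` bits set, big-endian. */
	static uint32_t make_mask(int len)
	{
		return len ? htonl(~0U << (32 - len)) : 0;
	}

	static void set_prefix(uint32_t words[4], unsigned int len)
	{
		for (unsigned int i = 0; len && i < 4; i++) {
			int chunk = len < 32 ? (int)len : 32;

			words[i] = make_mask(chunk);
			len -= chunk;
		}
	}

	int main(void)
	{
		uint32_t w[4] = { 0, 0, 0, 0 };

		set_prefix(w, 40);
		printf("%08x %08x %08x %08x\n", ntohl(w[0]), ntohl(w[1]),
		       ntohl(w[2]), ntohl(w[3]));
		return 0;	/* prints: ffffffff ff000000 00000000 00000000 */
	}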
 
@@ -424,7 +368,6 @@
 {
 	struct qeth_cmd_buffer *iob;
 	struct qeth_ipa_cmd *cmd;
-	__u8 netmask[16];
 	u32 flags;
 
 	QETH_CARD_TEXT(card, 4, "setdelip");
@@ -439,15 +382,13 @@
 	QETH_CARD_TEXT_(card, 4, "flags%02X", flags);
 
 	if (addr->proto == QETH_PROT_IPV6) {
-		memcpy(cmd->data.setdelip6.ip_addr, &addr->u.a6.addr,
-		       sizeof(struct in6_addr));
-		qeth_l3_fill_netmask(netmask, addr->u.a6.pfxlen);
-		memcpy(cmd->data.setdelip6.mask, netmask,
-		       sizeof(struct in6_addr));
+		cmd->data.setdelip6.addr = addr->u.a6.addr;
+		qeth_l3_set_ipv6_prefix(&cmd->data.setdelip6.prefix,
+					addr->u.a6.pfxlen);
 		cmd->data.setdelip6.flags = flags;
 	} else {
-		memcpy(cmd->data.setdelip4.ip_addr, &addr->u.a4.addr, 4);
-		memcpy(cmd->data.setdelip4.mask, &addr->u.a4.mask, 4);
+		cmd->data.setdelip4.addr = addr->u.a4.addr;
+		cmd->data.setdelip4.mask = addr->u.a4.mask;
 		cmd->data.setdelip4.flags = flags;
 	}
 
@@ -599,7 +540,7 @@
 		if (ipatoe->proto != new->proto)
 			continue;
 		if (!memcmp(ipatoe->addr, new->addr,
-			    (ipatoe->proto == QETH_PROT_IPV4)? 4:16) &&
+			    (ipatoe->proto == QETH_PROT_IPV4) ? 4 : 16) &&
 		    (ipatoe->mask_bits == new->mask_bits)) {
 			rc = -EEXIST;
 			break;
@@ -618,7 +559,7 @@
 
 int qeth_l3_del_ipato_entry(struct qeth_card *card,
 			    enum qeth_prot_versions proto, u8 *addr,
-			    int mask_bits)
+			    unsigned int mask_bits)
 {
 	struct qeth_ipato_entry *ipatoe, *tmp;
 	int rc = -ENOENT;
@@ -631,7 +572,7 @@
 		if (ipatoe->proto != proto)
 			continue;
 		if (!memcmp(ipatoe->addr, addr,
-			    (proto == QETH_PROT_IPV4)? 4:16) &&
+			    (proto == QETH_PROT_IPV4) ? 4 : 16) &&
 		    (ipatoe->mask_bits == mask_bits)) {
 			list_del(&ipatoe->entry);
 			qeth_l3_update_ipato(card);
@@ -641,6 +582,7 @@
 	}
 
 	mutex_unlock(&card->ip_lock);
+
 	return rc;
 }
 
@@ -764,16 +706,16 @@
 
 	if (!qeth_is_supported(card, IPA_ARP_PROCESSING)) {
 		dev_info(&card->gdev->dev,
-			"ARP processing not supported on %s!\n",
-			QETH_CARD_IFNAME(card));
+			 "ARP processing not supported on %s!\n",
+			 netdev_name(card->dev));
 		return 0;
 	}
 	rc = qeth_send_simple_setassparms(card, IPA_ARP_PROCESSING,
 					  IPA_CMD_ASS_START, NULL);
 	if (rc) {
 		dev_warn(&card->gdev->dev,
-			"Starting ARP processing support for %s failed\n",
-			QETH_CARD_IFNAME(card));
+			 "Starting ARP processing support for %s failed\n",
+			 netdev_name(card->dev));
 	}
 	return rc;
 }
@@ -786,8 +728,8 @@
 
 	if (!qeth_is_supported(card, IPA_SOURCE_MAC)) {
 		dev_info(&card->gdev->dev,
-			"Inbound source MAC-address not supported on %s\n",
-			QETH_CARD_IFNAME(card));
+			 "Inbound source MAC-address not supported on %s\n",
+			 netdev_name(card->dev));
 		return -EOPNOTSUPP;
 	}
 
@@ -795,8 +737,8 @@
 					  IPA_CMD_ASS_START, NULL);
 	if (rc)
 		dev_warn(&card->gdev->dev,
-			"Starting source MAC-address support for %s failed\n",
-			QETH_CARD_IFNAME(card));
+			 "Starting source MAC-address support for %s failed\n",
+			 netdev_name(card->dev));
 	return rc;
 }
 
@@ -808,7 +750,7 @@
 
 	if (!qeth_is_supported(card, IPA_FULL_VLAN)) {
 		dev_info(&card->gdev->dev,
-			"VLAN not supported on %s\n", QETH_CARD_IFNAME(card));
+			 "VLAN not supported on %s\n", netdev_name(card->dev));
 		return -EOPNOTSUPP;
 	}
 
@@ -816,8 +758,8 @@
 					  IPA_CMD_ASS_START, NULL);
 	if (rc) {
 		dev_warn(&card->gdev->dev,
-			"Starting VLAN support for %s failed\n",
-			QETH_CARD_IFNAME(card));
+			 "Starting VLAN support for %s failed\n",
+			 netdev_name(card->dev));
 	} else {
 		dev_info(&card->gdev->dev, "VLAN enabled\n");
 	}
@@ -832,8 +774,8 @@
 
 	if (!qeth_is_supported(card, IPA_MULTICASTING)) {
 		dev_info(&card->gdev->dev,
-			"Multicast not supported on %s\n",
-			QETH_CARD_IFNAME(card));
+			 "Multicast not supported on %s\n",
+			 netdev_name(card->dev));
 		return -EOPNOTSUPP;
 	}
 
@@ -841,8 +783,8 @@
 					  IPA_CMD_ASS_START, NULL);
 	if (rc) {
 		dev_warn(&card->gdev->dev,
-			"Starting multicast support for %s failed\n",
-			QETH_CARD_IFNAME(card));
+			 "Starting multicast support for %s failed\n",
+			 netdev_name(card->dev));
 	} else {
 		dev_info(&card->gdev->dev, "Multicast enabled\n");
 		card->dev->flags |= IFF_MULTICAST;
@@ -865,7 +807,7 @@
 	if (rc) {
 		dev_err(&card->gdev->dev,
 			"Activating IPv6 support for %s failed\n",
-			QETH_CARD_IFNAME(card));
+			netdev_name(card->dev));
 		return rc;
 	}
 	rc = qeth_send_simple_setassparms_v6(card, IPA_IPV6, IPA_CMD_ASS_START,
@@ -873,15 +815,15 @@
 	if (rc) {
 		dev_err(&card->gdev->dev,
 			"Activating IPv6 support for %s failed\n",
-			 QETH_CARD_IFNAME(card));
+			 netdev_name(card->dev));
 		return rc;
 	}
 	rc = qeth_send_simple_setassparms_v6(card, IPA_PASSTHRU,
 					     IPA_CMD_ASS_START, NULL);
 	if (rc) {
 		dev_warn(&card->gdev->dev,
-			"Enabling the passthrough mode for %s failed\n",
-			QETH_CARD_IFNAME(card));
+			 "Enabling the passthrough mode for %s failed\n",
+			 netdev_name(card->dev));
 		return rc;
 	}
 out:
@@ -895,7 +837,7 @@
 
 	if (!qeth_is_supported(card, IPA_IPV6)) {
 		dev_info(&card->gdev->dev,
-			"IPv6 not supported on %s\n", QETH_CARD_IFNAME(card));
+			 "IPv6 not supported on %s\n", netdev_name(card->dev));
 		return 0;
 	}
 	return qeth_l3_softsetup_ipv6(card);
@@ -910,16 +852,17 @@
 	card->info.broadcast_capable = 0;
 	if (!qeth_is_supported(card, IPA_FILTERING)) {
 		dev_info(&card->gdev->dev,
-			"Broadcast not supported on %s\n",
-			QETH_CARD_IFNAME(card));
+			 "Broadcast not supported on %s\n",
+			 netdev_name(card->dev));
 		rc = -EOPNOTSUPP;
 		goto out;
 	}
 	rc = qeth_send_simple_setassparms(card, IPA_FILTERING,
 					  IPA_CMD_ASS_START, NULL);
 	if (rc) {
-		dev_warn(&card->gdev->dev, "Enabling broadcast filtering for "
-			"%s failed\n", QETH_CARD_IFNAME(card));
+		dev_warn(&card->gdev->dev,
+			 "Enabling broadcast filtering for %s failed\n",
+			 netdev_name(card->dev));
 		goto out;
 	}
 
@@ -927,8 +870,8 @@
 					  IPA_CMD_ASS_CONFIGURE, &filter_data);
 	if (rc) {
 		dev_warn(&card->gdev->dev,
-			"Setting up broadcast filtering for %s failed\n",
-			QETH_CARD_IFNAME(card));
+			 "Setting up broadcast filtering for %s failed\n",
+			 netdev_name(card->dev));
 		goto out;
 	}
 	card->info.broadcast_capable = QETH_BROADCAST_WITH_ECHO;
@@ -936,8 +879,9 @@
 	rc = qeth_send_simple_setassparms(card, IPA_FILTERING,
 					  IPA_CMD_ASS_ENABLE, &filter_data);
 	if (rc) {
-		dev_warn(&card->gdev->dev, "Setting up broadcast echo "
-			"filtering for %s failed\n", QETH_CARD_IFNAME(card));
+		dev_warn(&card->gdev->dev,
+			 "Setting up broadcast echo filtering for %s failed\n",
+			 netdev_name(card->dev));
 		goto out;
 	}
 	card->info.broadcast_capable = QETH_BROADCAST_WITHOUT_ECHO;
@@ -949,19 +893,16 @@
 	return rc;
 }
 
-static int qeth_l3_start_ipassists(struct qeth_card *card)
+static void qeth_l3_start_ipassists(struct qeth_card *card)
 {
 	QETH_CARD_TEXT(card, 3, "strtipas");
 
-	if (qeth_set_access_ctrl_online(card, 0))
-		return -EIO;
 	qeth_l3_start_ipa_arp_processing(card);	/* go on*/
 	qeth_l3_start_ipa_source_mac(card);	/* go on*/
 	qeth_l3_start_ipa_vlan(card);		/* go on*/
 	qeth_l3_start_ipa_multicast(card);		/* go on*/
 	qeth_l3_start_ipa_ipv6(card);		/* go on*/
 	qeth_l3_start_ipa_broadcast(card);		/* go on*/
-	return 0;
 }
 
 static int qeth_l3_iqd_read_initial_mac_cb(struct qeth_card *card,
@@ -971,9 +912,11 @@
 
 	if (cmd->hdr.return_code)
 		return -EIO;
+	if (!is_valid_ether_addr(cmd->data.create_destroy_addr.mac_addr))
+		return -EADDRNOTAVAIL;
 
 	ether_addr_copy(card->dev->dev_addr,
-			cmd->data.create_destroy_addr.unique_id);
+			cmd->data.create_destroy_addr.mac_addr);
 	return 0;
 }
 
@@ -981,7 +924,6 @@
 {
 	int rc = 0;
 	struct qeth_cmd_buffer *iob;
-	struct qeth_ipa_cmd *cmd;
 
 	QETH_CARD_TEXT(card, 2, "hsrmac");
 
@@ -989,9 +931,6 @@
 				 IPA_DATA_SIZEOF(create_destroy_addr));
 	if (!iob)
 		return -ENOMEM;
-	cmd = __ipa_cmd(iob);
-	*((__u16 *) &cmd->data.create_destroy_addr.unique_id[6]) =
-			card->info.unique_id;
 
 	rc = qeth_send_ipa_cmd(card, iob, qeth_l3_iqd_read_initial_mac_cb,
 				NULL);
@@ -1002,43 +941,36 @@
 		struct qeth_reply *reply, unsigned long data)
 {
 	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
+	u16 *uid = reply->param;
 
 	if (cmd->hdr.return_code == 0) {
-		card->info.unique_id = *((__u16 *)
-				&cmd->data.create_destroy_addr.unique_id[6]);
+		*uid = cmd->data.create_destroy_addr.uid;
 		return 0;
 	}
 
-	card->info.unique_id = UNIQUE_ID_IF_CREATE_ADDR_FAILED |
-			       UNIQUE_ID_NOT_BY_CARD;
 	dev_warn(&card->gdev->dev, "The network adapter failed to generate a unique ID\n");
 	return -EIO;
 }
 
-static int qeth_l3_get_unique_id(struct qeth_card *card)
+static u16 qeth_l3_get_unique_id(struct qeth_card *card, u16 uid)
 {
-	int rc = 0;
 	struct qeth_cmd_buffer *iob;
-	struct qeth_ipa_cmd *cmd;
 
 	QETH_CARD_TEXT(card, 2, "guniqeid");
 
-	if (!qeth_is_supported(card, IPA_IPV6)) {
-		card->info.unique_id =  UNIQUE_ID_IF_CREATE_ADDR_FAILED |
-					UNIQUE_ID_NOT_BY_CARD;
-		return 0;
-	}
+	if (!qeth_is_supported(card, IPA_IPV6))
+		goto out;
 
 	iob = qeth_ipa_alloc_cmd(card, IPA_CMD_CREATE_ADDR, QETH_PROT_IPV6,
 				 IPA_DATA_SIZEOF(create_destroy_addr));
 	if (!iob)
-		return -ENOMEM;
-	cmd = __ipa_cmd(iob);
-	*((__u16 *) &cmd->data.create_destroy_addr.unique_id[6]) =
-			card->info.unique_id;
+		goto out;
 
-	rc = qeth_send_ipa_cmd(card, iob, qeth_l3_get_unique_id_cb, NULL);
-	return rc;
+	__ipa_cmd(iob)->data.create_destroy_addr.uid = uid;
+	qeth_send_ipa_cmd(card, iob, qeth_l3_get_unique_id_cb, &uid);
+
+out:
+	return uid;
 }
 
 static int
@@ -1115,176 +1047,83 @@
 	return qeth_send_ipa_cmd(card, iob, qeth_diags_trace_cb, NULL);
 }
 
-static void
-qeth_l3_add_mc_to_hash(struct qeth_card *card, struct in_device *in4_dev)
+static int qeth_l3_add_mcast_rtnl(struct net_device *dev, int vid, void *arg)
 {
+	struct qeth_card *card = arg;
+	struct inet6_dev *in6_dev;
+	struct in_device *in4_dev;
+	struct qeth_ipaddr *ipm;
+	struct qeth_ipaddr tmp;
 	struct ip_mc_list *im4;
-	struct qeth_ipaddr *tmp, *ipm;
+	struct ifmcaddr6 *im6;
 
 	QETH_CARD_TEXT(card, 4, "addmc");
 
-	tmp = qeth_l3_get_addr_buffer(QETH_PROT_IPV4);
-	if (!tmp)
-		return;
+	if (!dev || !(dev->flags & IFF_UP))
+		goto out;
 
-	for (im4 = rcu_dereference(in4_dev->mc_list); im4 != NULL;
-	     im4 = rcu_dereference(im4->next_rcu)) {
-		ip_eth_mc_map(im4->multiaddr, tmp->mac);
-		tmp->u.a4.addr = be32_to_cpu(im4->multiaddr);
-		tmp->is_multicast = 1;
+	in4_dev = __in_dev_get_rtnl(dev);
+	if (!in4_dev)
+		goto walk_ipv6;
 
-		ipm = qeth_l3_find_addr_by_ip(card, tmp);
-		if (ipm) {
-			/* for mcast, by-IP match means full match */
-			ipm->disp_flag = QETH_DISP_ADDR_DO_NOTHING;
-		} else {
-			ipm = qeth_l3_get_addr_buffer(QETH_PROT_IPV4);
-			if (!ipm)
-				continue;
-			ether_addr_copy(ipm->mac, tmp->mac);
-			ipm->u.a4.addr = be32_to_cpu(im4->multiaddr);
-			ipm->is_multicast = 1;
-			ipm->disp_flag = QETH_DISP_ADDR_ADD;
-			hash_add(card->ip_mc_htable,
-					&ipm->hnode, qeth_l3_ipaddr_hash(ipm));
-		}
-	}
+	qeth_l3_init_ipaddr(&tmp, QETH_IP_TYPE_NORMAL, QETH_PROT_IPV4);
+	tmp.disp_flag = QETH_DISP_ADDR_ADD;
+	tmp.is_multicast = 1;
 
-	kfree(tmp);
-}
+	for (im4 = rtnl_dereference(in4_dev->mc_list); im4 != NULL;
+	     im4 = rtnl_dereference(im4->next_rcu)) {
+		tmp.u.a4.addr = im4->multiaddr;
 
-/* called with rcu_read_lock */
-static void qeth_l3_add_vlan_mc(struct qeth_card *card)
-{
-	struct in_device *in_dev;
-	u16 vid;
-
-	QETH_CARD_TEXT(card, 4, "addmcvl");
-
-	if (!qeth_is_supported(card, IPA_FULL_VLAN))
-		return;
-
-	for_each_set_bit(vid, card->active_vlans, VLAN_N_VID) {
-		struct net_device *netdev;
-
-		netdev = __vlan_find_dev_deep_rcu(card->dev, htons(ETH_P_8021Q),
-					      vid);
-		if (netdev == NULL ||
-		    !(netdev->flags & IFF_UP))
-			continue;
-		in_dev = __in_dev_get_rcu(netdev);
-		if (!in_dev)
-			continue;
-		qeth_l3_add_mc_to_hash(card, in_dev);
-	}
-}
-
-static void qeth_l3_add_multicast_ipv4(struct qeth_card *card)
-{
-	struct in_device *in4_dev;
-
-	QETH_CARD_TEXT(card, 4, "chkmcv4");
-
-	rcu_read_lock();
-	in4_dev = __in_dev_get_rcu(card->dev);
-	if (in4_dev == NULL)
-		goto unlock;
-	qeth_l3_add_mc_to_hash(card, in4_dev);
-	qeth_l3_add_vlan_mc(card);
-unlock:
-	rcu_read_unlock();
-}
-
-static void qeth_l3_add_mc6_to_hash(struct qeth_card *card,
-				    struct inet6_dev *in6_dev)
-{
-	struct qeth_ipaddr *ipm;
-	struct ifmcaddr6 *im6;
-	struct qeth_ipaddr *tmp;
-
-	QETH_CARD_TEXT(card, 4, "addmc6");
-
-	tmp = qeth_l3_get_addr_buffer(QETH_PROT_IPV6);
-	if (!tmp)
-		return;
-
-	for (im6 = in6_dev->mc_list; im6 != NULL; im6 = im6->next) {
-		ipv6_eth_mc_map(&im6->mca_addr, tmp->mac);
-		memcpy(&tmp->u.a6.addr, &im6->mca_addr.s6_addr,
-		       sizeof(struct in6_addr));
-		tmp->is_multicast = 1;
-
-		ipm = qeth_l3_find_addr_by_ip(card, tmp);
+		ipm = qeth_l3_find_addr_by_ip(card, &tmp);
 		if (ipm) {
 			/* for mcast, by-IP match means full match */
 			ipm->disp_flag = QETH_DISP_ADDR_DO_NOTHING;
 			continue;
 		}
 
-		ipm = qeth_l3_get_addr_buffer(QETH_PROT_IPV6);
+		ipm = kmemdup(&tmp, sizeof(tmp), GFP_KERNEL);
 		if (!ipm)
 			continue;
 
-		ether_addr_copy(ipm->mac, tmp->mac);
-		memcpy(&ipm->u.a6.addr, &im6->mca_addr.s6_addr,
-		       sizeof(struct in6_addr));
-		ipm->is_multicast = 1;
-		ipm->disp_flag = QETH_DISP_ADDR_ADD;
-		hash_add(card->ip_mc_htable,
-				&ipm->hnode, qeth_l3_ipaddr_hash(ipm));
-
+		hash_add(card->rx_mode_addrs, &ipm->hnode,
+			 qeth_l3_ipaddr_hash(ipm));
 	}
-	kfree(tmp);
-}
 
-/* called with rcu_read_lock */
-static void qeth_l3_add_vlan_mc6(struct qeth_card *card)
-{
-	struct inet6_dev *in_dev;
-	u16 vid;
-
-	QETH_CARD_TEXT(card, 4, "admc6vl");
-
-	if (!qeth_is_supported(card, IPA_FULL_VLAN))
-		return;
-
-	for_each_set_bit(vid, card->active_vlans, VLAN_N_VID) {
-		struct net_device *netdev;
-
-		netdev = __vlan_find_dev_deep_rcu(card->dev, htons(ETH_P_8021Q),
-					      vid);
-		if (netdev == NULL ||
-		    !(netdev->flags & IFF_UP))
-			continue;
-		in_dev = in6_dev_get(netdev);
-		if (!in_dev)
-			continue;
-		read_lock_bh(&in_dev->lock);
-		qeth_l3_add_mc6_to_hash(card, in_dev);
-		read_unlock_bh(&in_dev->lock);
-		in6_dev_put(in_dev);
-	}
-}
-
-static void qeth_l3_add_multicast_ipv6(struct qeth_card *card)
-{
-	struct inet6_dev *in6_dev;
-
-	QETH_CARD_TEXT(card, 4, "chkmcv6");
-
+walk_ipv6:
 	if (!qeth_is_supported(card, IPA_IPV6))
-		return ;
-	in6_dev = in6_dev_get(card->dev);
-	if (!in6_dev)
-		return;
+		goto out;
 
-	rcu_read_lock();
+	in6_dev = __in6_dev_get(dev);
+	if (!in6_dev)
+		goto out;
+
+	qeth_l3_init_ipaddr(&tmp, QETH_IP_TYPE_NORMAL, QETH_PROT_IPV6);
+	tmp.disp_flag = QETH_DISP_ADDR_ADD;
+	tmp.is_multicast = 1;
+
 	read_lock_bh(&in6_dev->lock);
-	qeth_l3_add_mc6_to_hash(card, in6_dev);
-	qeth_l3_add_vlan_mc6(card);
+	for (im6 = in6_dev->mc_list; im6 != NULL; im6 = im6->next) {
+		tmp.u.a6.addr = im6->mca_addr;
+
+		ipm = qeth_l3_find_addr_by_ip(card, &tmp);
+		if (ipm) {
+			/* for mcast, by-IP match means full match */
+			ipm->disp_flag = QETH_DISP_ADDR_DO_NOTHING;
+			continue;
+		}
+
+		ipm = kmemdup(&tmp, sizeof(tmp), GFP_ATOMIC);
+		if (!ipm)
+			continue;
+
+		hash_add(card->rx_mode_addrs, &ipm->hnode,
+			 qeth_l3_ipaddr_hash(ipm));
+
+	}
 	read_unlock_bh(&in6_dev->lock);
-	rcu_read_unlock();
-	in6_dev_put(in6_dev);
+
+out:
+	return 0;
 }
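
Both address walks above use the same template-then-duplicate pattern: a struct qeth_ipaddr is prepared once on the stack, reused for each lookup, and only copied to the heap with kmemdup() when the address turns out to be new. A stripped-down userspace illustration of the duplication step, with the hash lookup elided:

	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>

	struct entry {
		unsigned int addr;
		int disp_flag;
	};

	/* Userspace stand-in for kmemdup(): heap-copy the stack template
	 * only once we know the address is not tracked yet. */
	static struct entry *dup_entry(const struct entry *tmp)
	{
		struct entry *e = malloc(sizeof(*e));

		if (e)
			memcpy(e, tmp, sizeof(*e));
		return e;
	}

	int main(void)
	{
		struct entry tmp = { .addr = 0xe0000001u, .disp_flag = 1 };
		struct entry *e = dup_entry(&tmp);

		if (e)
			printf("tracking %#x\n", e->addr);
		free(e);
		return 0;
	}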
 
 static int qeth_l3_vlan_rx_add_vid(struct net_device *dev,
@@ -1292,7 +1131,7 @@
 {
 	struct qeth_card *card = dev->ml_priv;
 
-	set_bit(vid, card->active_vlans);
+	QETH_CARD_TEXT_(card, 4, "aid:%d", vid);
 	return 0;
 }
 
@@ -1302,140 +1141,9 @@
 	struct qeth_card *card = dev->ml_priv;
 
 	QETH_CARD_TEXT_(card, 4, "kid:%d", vid);
-
-	clear_bit(vid, card->active_vlans);
-	qeth_l3_set_rx_mode(dev);
 	return 0;
 }
 
-static void qeth_l3_rebuild_skb(struct qeth_card *card, struct sk_buff *skb,
-				struct qeth_hdr *hdr)
-{
-	struct af_iucv_trans_hdr *iucv = (struct af_iucv_trans_hdr *) skb->data;
-	struct net_device *dev = skb->dev;
-
-	if (IS_IQD(card) && iucv->magic == ETH_P_AF_IUCV) {
-		dev_hard_header(skb, dev, ETH_P_AF_IUCV, dev->dev_addr,
-				"FAKELL", skb->len);
-		return;
-	}
-
-	if (!(hdr->hdr.l3.flags & QETH_HDR_PASSTHRU)) {
-		u16 prot = (hdr->hdr.l3.flags & QETH_HDR_IPV6) ? ETH_P_IPV6 :
-								 ETH_P_IP;
-		unsigned char tg_addr[ETH_ALEN];
-
-		skb_reset_network_header(skb);
-		switch (hdr->hdr.l3.flags & QETH_HDR_CAST_MASK) {
-		case QETH_CAST_MULTICAST:
-			if (prot == ETH_P_IP)
-				ip_eth_mc_map(ip_hdr(skb)->daddr, tg_addr);
-			else
-				ipv6_eth_mc_map(&ipv6_hdr(skb)->daddr, tg_addr);
-			QETH_CARD_STAT_INC(card, rx_multicast);
-			break;
-		case QETH_CAST_BROADCAST:
-			ether_addr_copy(tg_addr, card->dev->broadcast);
-			QETH_CARD_STAT_INC(card, rx_multicast);
-			break;
-		default:
-			if (card->options.sniffer)
-				skb->pkt_type = PACKET_OTHERHOST;
-			ether_addr_copy(tg_addr, card->dev->dev_addr);
-		}
-
-		if (hdr->hdr.l3.ext_flags & QETH_HDR_EXT_SRC_MAC_ADDR)
-			card->dev->header_ops->create(skb, card->dev, prot,
-				tg_addr, &hdr->hdr.l3.next_hop.rx.src_mac,
-				skb->len);
-		else
-			card->dev->header_ops->create(skb, card->dev, prot,
-				tg_addr, "FAKELL", skb->len);
-	}
-
-	/* copy VLAN tag from hdr into skb */
-	if (!card->options.sniffer &&
-	    (hdr->hdr.l3.ext_flags & (QETH_HDR_EXT_VLAN_FRAME |
-				      QETH_HDR_EXT_INCLUDE_VLAN_TAG))) {
-		u16 tag = (hdr->hdr.l3.ext_flags & QETH_HDR_EXT_VLAN_FRAME) ?
-				hdr->hdr.l3.vlan_id :
-				hdr->hdr.l3.next_hop.rx.vlan_id;
-		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), tag);
-	}
-
-	qeth_rx_csum(card, skb, hdr->hdr.l3.ext_flags);
-}
-
-static int qeth_l3_process_inbound_buffer(struct qeth_card *card,
-				int budget, int *done)
-{
-	int work_done = 0;
-	struct sk_buff *skb;
-	struct qeth_hdr *hdr;
-	unsigned int len;
-
-	*done = 0;
-	WARN_ON_ONCE(!budget);
-	while (budget) {
-		skb = qeth_core_get_next_skb(card,
-			&card->qdio.in_q->bufs[card->rx.b_index],
-			&card->rx.b_element, &card->rx.e_offset, &hdr);
-		if (!skb) {
-			*done = 1;
-			break;
-		}
-		switch (hdr->hdr.l3.id) {
-		case QETH_HEADER_TYPE_LAYER3:
-			qeth_l3_rebuild_skb(card, skb, hdr);
-			/* fall through */
-		case QETH_HEADER_TYPE_LAYER2: /* for HiperSockets sniffer */
-			skb->protocol = eth_type_trans(skb, skb->dev);
-			len = skb->len;
-			napi_gro_receive(&card->napi, skb);
-			break;
-		default:
-			dev_kfree_skb_any(skb);
-			QETH_CARD_TEXT(card, 3, "inbunkno");
-			QETH_DBF_HEX(CTRL, 3, hdr, sizeof(*hdr));
-			continue;
-		}
-		work_done++;
-		budget--;
-		QETH_CARD_STAT_INC(card, rx_packets);
-		QETH_CARD_STAT_ADD(card, rx_bytes, len);
-	}
-	return work_done;
-}
-
-static void qeth_l3_stop_card(struct qeth_card *card)
-{
-	QETH_CARD_TEXT(card, 2, "stopcard");
-
-	qeth_set_allowed_threads(card, 0, 1);
-
-	cancel_work_sync(&card->rx_mode_work);
-	qeth_l3_drain_rx_mode_cache(card);
-
-	if (card->options.sniffer &&
-	    (card->info.promisc_mode == SET_PROMISC_MODE_ON))
-		qeth_diags_trace(card, QETH_DIAGS_CMD_TRACE_DISABLE);
-
-	if (card->state == CARD_STATE_SOFTSETUP) {
-		qeth_l3_clear_ip_htable(card, 1);
-		qeth_clear_ipacmd_list(card);
-		card->state = CARD_STATE_HARDSETUP;
-	}
-	if (card->state == CARD_STATE_HARDSETUP) {
-		qeth_drain_output_queues(card);
-		qeth_clear_working_pool_list(card);
-		card->state = CARD_STATE_DOWN;
-	}
-
-	qeth_qdio_clear_card(card, 0);
-	flush_workqueue(card->event_wq);
-	card->info.promisc_mode = 0;
-}
-
 static void qeth_l3_set_promisc_mode(struct qeth_card *card)
 {
 	bool enable = card->dev->flags & IFF_PROMISC;
@@ -1469,10 +1177,13 @@
 	QETH_CARD_TEXT(card, 3, "setmulti");
 
 	if (!card->options.sniffer) {
-		qeth_l3_add_multicast_ipv4(card);
-		qeth_l3_add_multicast_ipv6(card);
+		rtnl_lock();
+		qeth_l3_add_mcast_rtnl(card->dev, 0, card);
+		if (qeth_is_supported(card, IPA_FULL_VLAN))
+			vlan_for_each(card->dev, qeth_l3_add_mcast_rtnl, card);
+		rtnl_unlock();
 
-		hash_for_each_safe(card->ip_mc_htable, i, tmp, addr, hnode) {
+		hash_for_each_safe(card->rx_mode_addrs, i, tmp, addr, hnode) {
 			switch (addr->disp_flag) {
 			case QETH_DISP_ADDR_DELETE:
 				rc = qeth_l3_deregister_addr_entry(card, addr);
@@ -1488,8 +1199,7 @@
 					kfree(addr);
 					break;
 				}
-				addr->ref_counter = 1;
-				/* fall through */
+				fallthrough;
 			default:
 				/* for next call to set_rx_mode(): */
 				addr->disp_flag = QETH_DISP_ADDR_DELETE;
@@ -1947,8 +1657,8 @@
 
 		if (skb->protocol == htons(ETH_P_AF_IUCV)) {
 			l3_hdr->flags = QETH_HDR_IPV6 | QETH_CAST_UNICAST;
-			l3_hdr->next_hop.ipv6_addr.s6_addr16[0] = htons(0xfe80);
-			memcpy(&l3_hdr->next_hop.ipv6_addr.s6_addr32[2],
+			l3_hdr->next_hop.addr.s6_addr16[0] = htons(0xfe80);
+			memcpy(&l3_hdr->next_hop.addr.s6_addr32[2],
 			       iucv_trans_hdr(skb)->destUserID, 8);
 			return;
 		}
@@ -1982,18 +1692,10 @@
 	l3_hdr->flags |= qeth_l3_cast_type_to_flag(cast_type);
 
 	if (ipv == 4) {
-		struct rtable *rt = (struct rtable *) dst;
-
-		*((__be32 *) &hdr->hdr.l3.next_hop.ipv4.addr) = (rt) ?
-				rt_nexthop(rt, ip_hdr(skb)->daddr) :
-				ip_hdr(skb)->daddr;
+		l3_hdr->next_hop.addr.s6_addr32[3] =
+					qeth_next_hop_v4_rcu(skb, dst);
 	} else if (ipv == 6) {
-		struct rt6_info *rt = (struct rt6_info *) dst;
-
-		if (rt && !ipv6_addr_any(&rt->rt6i_gateway))
-			l3_hdr->next_hop.ipv6_addr = rt->rt6i_gateway;
-		else
-			l3_hdr->next_hop.ipv6_addr = ipv6_hdr(skb)->daddr;
+		l3_hdr->next_hop.addr = *qeth_next_hop_v6_rcu(skb, dst);
 
 		hdr->hdr.l3.flags |= QETH_HDR_IPV6;
 		if (!IS_IQD(card))
@@ -2131,7 +1833,10 @@
 {
 	struct qeth_card *card = dev->ml_priv;
 
-	return qeth_get_priority_queue(card, skb);
+	if (qeth_uses_tx_prio_queueing(card))
+		return qeth_get_priority_queue(card, skb);
+
+	return netdev_pick_tx(dev, skb, sb_dev);
 }
 
 static const struct net_device_ops qeth_l3_netdev_ops = {
@@ -2168,24 +1873,17 @@
 	.ndo_neigh_setup	= qeth_l3_neigh_setup,
 };
 
-static int qeth_l3_setup_netdev(struct qeth_card *card, bool carrier_ok)
+static int qeth_l3_setup_netdev(struct qeth_card *card)
 {
+	struct net_device *dev = card->dev;
 	unsigned int headroom;
 	int rc;
 
 	if (IS_OSD(card) || IS_OSX(card)) {
-		if ((card->info.link_type == QETH_LINK_TYPE_LANE_TR) ||
-		    (card->info.link_type == QETH_LINK_TYPE_HSTR)) {
-			pr_info("qeth_l3: ignoring TR device\n");
-			return -ENODEV;
-		}
-
 		card->dev->netdev_ops = &qeth_l3_osa_netdev_ops;
 
 		/*IPv6 address autoconfiguration stuff*/
-		qeth_l3_get_unique_id(card);
-		if (!(card->info.unique_id & UNIQUE_ID_NOT_BY_CARD))
-			card->dev->dev_id = card->info.unique_id & 0xffff;
+		dev->dev_id = qeth_l3_get_unique_id(card, dev->dev_id);
 
 		if (!IS_VM_NIC(card)) {
 			card->dev->features |= NETIF_F_SG;
@@ -2218,7 +1916,7 @@
 
 		rc = qeth_l3_iqd_read_initial_mac(card);
 		if (rc)
-			goto out;
+			return rc;
 	} else
 		return -ENODEV;
 
@@ -2233,14 +1931,7 @@
 				       PAGE_SIZE * (QETH_MAX_BUFFER_ELEMENTS(card) - 1));
 
 	netif_napi_add(card->dev, &card->napi, qeth_poll, QETH_NAPI_WEIGHT);
-	rc = register_netdev(card->dev);
-	if (!rc && carrier_ok)
-		netif_carrier_on(card->dev);
-
-out:
-	if (rc)
-		card->dev->netdev_ops = NULL;
-	return rc;
+	return register_netdev(card->dev);
 }
 
 static const struct device_type qeth_l3_devtype = {
@@ -2268,7 +1959,6 @@
 		}
 	}
 
-	hash_init(card->ip_mc_htable);
 	INIT_WORK(&card->rx_mode_work, qeth_l3_rx_mode_work);
 	return 0;
 }
@@ -2284,10 +1974,10 @@
 	wait_event(card->wait_q, qeth_threads_running(card, 0xffffffff) == 0);
 
 	if (cgdev->state == CCWGROUP_ONLINE)
-		qeth_l3_set_offline(cgdev);
+		qeth_set_offline(card, card->discipline, false);
 
 	cancel_work_sync(&card->close_dev_work);
-	if (qeth_netdev_is_registered(card->dev))
+	if (card->dev->reg_state == NETREG_REGISTERED)
 		unregister_netdev(card->dev);
 
 	flush_workqueue(card->cmd_wq);
@@ -2296,33 +1986,10 @@
 	qeth_l3_clear_ipato_list(card);
 }
 
-static int qeth_l3_set_online(struct ccwgroup_device *gdev)
+static int qeth_l3_set_online(struct qeth_card *card, bool carrier_ok)
 {
-	struct qeth_card *card = dev_get_drvdata(&gdev->dev);
 	struct net_device *dev = card->dev;
 	int rc = 0;
-	bool carrier_ok;
-
-	mutex_lock(&card->discipline_mutex);
-	mutex_lock(&card->conf_mutex);
-	QETH_CARD_TEXT(card, 2, "setonlin");
-
-	rc = qeth_core_hardsetup_card(card, &carrier_ok);
-	if (rc) {
-		QETH_CARD_TEXT_(card, 2, "2err%04x", rc);
-		rc = -ENODEV;
-		goto out_remove;
-	}
-
-	if (qeth_is_diagass_supported(card, QETH_DIAGS_CMD_TRAP)) {
-		if (card->info.hwtrap &&
-		    qeth_hw_trap(card, QETH_DIAGS_TRAP_ARM))
-			card->info.hwtrap = 0;
-	} else
-		card->info.hwtrap = 0;
-
-	card->state = CARD_STATE_HARDSETUP;
-	qeth_print_status_message(card);
 
 	/* softsetup */
 	QETH_CARD_TEXT(card, 2, "softsetp");
@@ -2331,11 +1998,8 @@
 	if (rc)
 		QETH_CARD_TEXT_(card, 2, "2err%04x", rc);
 	if (!card->options.sniffer) {
-		rc = qeth_l3_start_ipassists(card);
-		if (rc) {
-			QETH_CARD_TEXT_(card, 2, "3err%d", rc);
-			goto out_remove;
-		}
+		qeth_l3_start_ipassists(card);
+
 		rc = qeth_l3_setrouting_v4(card);
 		if (rc)
 			QETH_CARD_TEXT_(card, 2, "4err%04x", rc);
@@ -2344,23 +2008,27 @@
 			QETH_CARD_TEXT_(card, 2, "5err%04x", rc);
 	}
 
-	rc = qeth_init_qdio_queues(card);
-	if (rc) {
-		QETH_CARD_TEXT_(card, 2, "6err%d", rc);
-		rc = -ENODEV;
-		goto out_remove;
-	}
 	card->state = CARD_STATE_SOFTSETUP;
 
 	qeth_set_allowed_threads(card, 0xffffffff, 0);
 	qeth_l3_recover_ip(card);
 
-	if (!qeth_netdev_is_registered(dev)) {
-		rc = qeth_l3_setup_netdev(card, carrier_ok);
+	if (dev->reg_state != NETREG_REGISTERED) {
+		rc = qeth_l3_setup_netdev(card);
 		if (rc)
-			goto out_remove;
+			goto err_setup;
+
+		if (carrier_ok)
+			netif_carrier_on(dev);
 	} else {
 		rtnl_lock();
+		rc = qeth_set_real_num_tx_queues(card,
+						 qeth_tx_actual_queues(card));
+		if (rc) {
+			rtnl_unlock();
+			goto err_set_queues;
+		}
+
 		if (carrier_ok)
 			netif_carrier_on(dev);
 		else
@@ -2375,100 +2043,29 @@
 		}
 		rtnl_unlock();
 	}
-	qeth_trace_features(card);
-	/* let user_space know that device is online */
-	kobject_uevent(&gdev->dev.kobj, KOBJ_CHANGE);
-	mutex_unlock(&card->conf_mutex);
-	mutex_unlock(&card->discipline_mutex);
 	return 0;
-out_remove:
-	qeth_l3_stop_card(card);
-	qeth_stop_channel(&card->data);
-	qeth_stop_channel(&card->write);
-	qeth_stop_channel(&card->read);
-	qdio_free(CARD_DDEV(card));
 
-	mutex_unlock(&card->conf_mutex);
-	mutex_unlock(&card->discipline_mutex);
+err_set_queues:
+err_setup:
+	qeth_set_allowed_threads(card, 0, 1);
+	card->state = CARD_STATE_DOWN;
+	qeth_l3_clear_ip_htable(card, 1);
 	return rc;
 }
 
-static int __qeth_l3_set_offline(struct ccwgroup_device *cgdev,
-			int recovery_mode)
+static void qeth_l3_set_offline(struct qeth_card *card)
 {
-	struct qeth_card *card = dev_get_drvdata(&cgdev->dev);
-	int rc = 0, rc2 = 0, rc3 = 0;
+	qeth_set_allowed_threads(card, 0, 1);
+	qeth_l3_drain_rx_mode_cache(card);
 
-	mutex_lock(&card->discipline_mutex);
-	mutex_lock(&card->conf_mutex);
-	QETH_CARD_TEXT(card, 3, "setoffl");
+	if (card->options.sniffer &&
+	    (card->info.promisc_mode == SET_PROMISC_MODE_ON))
+		qeth_diags_trace(card, QETH_DIAGS_CMD_TRACE_DISABLE);
 
-	if ((!recovery_mode && card->info.hwtrap) || card->info.hwtrap == 2) {
-		qeth_hw_trap(card, QETH_DIAGS_TRAP_DISARM);
-		card->info.hwtrap = 1;
+	if (card->state == CARD_STATE_SOFTSETUP) {
+		card->state = CARD_STATE_DOWN;
+		qeth_l3_clear_ip_htable(card, 1);
 	}
-
-	rtnl_lock();
-	card->info.open_when_online = card->dev->flags & IFF_UP;
-	dev_close(card->dev);
-	netif_device_detach(card->dev);
-	netif_carrier_off(card->dev);
-	rtnl_unlock();
-
-	qeth_l3_stop_card(card);
-	if (card->options.cq == QETH_CQ_ENABLED) {
-		rtnl_lock();
-		call_netdevice_notifiers(NETDEV_REBOOT, card->dev);
-		rtnl_unlock();
-	}
-
-	rc  = qeth_stop_channel(&card->data);
-	rc2 = qeth_stop_channel(&card->write);
-	rc3 = qeth_stop_channel(&card->read);
-	if (!rc)
-		rc = (rc2) ? rc2 : rc3;
-	if (rc)
-		QETH_CARD_TEXT_(card, 2, "1err%d", rc);
-	qdio_free(CARD_DDEV(card));
-
-	/* let user_space know that device is offline */
-	kobject_uevent(&cgdev->dev.kobj, KOBJ_CHANGE);
-	mutex_unlock(&card->conf_mutex);
-	mutex_unlock(&card->discipline_mutex);
-	return 0;
-}
-
-static int qeth_l3_set_offline(struct ccwgroup_device *cgdev)
-{
-	return __qeth_l3_set_offline(cgdev, 0);
-}
-
-static int qeth_l3_recover(void *ptr)
-{
-	struct qeth_card *card;
-	int rc = 0;
-
-	card = (struct qeth_card *) ptr;
-	QETH_CARD_TEXT(card, 2, "recover1");
-	QETH_CARD_HEX(card, 2, &card, sizeof(void *));
-	if (!qeth_do_run_thread(card, QETH_RECOVER_THREAD))
-		return 0;
-	QETH_CARD_TEXT(card, 2, "recover2");
-	dev_warn(&card->gdev->dev,
-		"A recovery process has been started for the device\n");
-	__qeth_l3_set_offline(card->gdev, 1);
-	rc = qeth_l3_set_online(card->gdev);
-	if (!rc)
-		dev_info(&card->gdev->dev,
-			"Device successfully recovered!\n");
-	else {
-		ccwgroup_set_offline(card->gdev);
-		dev_warn(&card->gdev->dev, "The qeth device driver "
-				"failed to recover an error on the device\n");
-	}
-	qeth_clear_thread_start_bit(card, QETH_RECOVER_THREAD);
-	qeth_clear_thread_running_bit(card, QETH_RECOVER_THREAD);
-	return 0;
 }
 
 /* Returns zero if the command is successfully "consumed" */
@@ -2478,10 +2075,8 @@
 	return 1;
 }
 
-struct qeth_discipline qeth_l3_discipline = {
+const struct qeth_discipline qeth_l3_discipline = {
 	.devtype = &qeth_l3_devtype,
-	.process_rx_buffer = qeth_l3_process_inbound_buffer,
-	.recover = qeth_l3_recover,
 	.setup = qeth_l3_probe_device,
 	.remove = qeth_l3_remove_device,
 	.set_online = qeth_l3_set_online,
@@ -2544,23 +2139,19 @@
 static int qeth_l3_ip_event(struct notifier_block *this,
 			    unsigned long event, void *ptr)
 {
-
 	struct in_ifaddr *ifa = (struct in_ifaddr *)ptr;
 	struct net_device *dev = ifa->ifa_dev->dev;
 	struct qeth_ipaddr addr;
 	struct qeth_card *card;
 
-	if (dev_net(dev) != &init_net)
-		return NOTIFY_DONE;
-
 	card = qeth_l3_get_card_from_dev(dev);
 	if (!card)
 		return NOTIFY_DONE;
 	QETH_CARD_TEXT(card, 3, "ipevent");
 
 	qeth_l3_init_ipaddr(&addr, QETH_IP_TYPE_NORMAL, QETH_PROT_IPV4);
-	addr.u.a4.addr = be32_to_cpu(ifa->ifa_address);
-	addr.u.a4.mask = be32_to_cpu(ifa->ifa_mask);
+	addr.u.a4.addr = ifa->ifa_address;
+	addr.u.a4.mask = ifa->ifa_mask;
 
 	return qeth_l3_handle_ip_event(card, &addr, event);
 }
diff --git a/drivers/s390/net/qeth_l3_sys.c b/drivers/s390/net/qeth_l3_sys.c
index 333fd46..997fbb7 100644
--- a/drivers/s390/net/qeth_l3_sys.c
+++ b/drivers/s390/net/qeth_l3_sys.c
@@ -60,9 +60,6 @@
 {
 	struct qeth_card *card = dev_get_drvdata(dev);
 
-	if (!card)
-		return -EINVAL;
-
 	return qeth_l3_dev_route_show(card, &card->options.route4, buf);
 }
 
@@ -109,9 +106,6 @@
 {
 	struct qeth_card *card = dev_get_drvdata(dev);
 
-	if (!card)
-		return -EINVAL;
-
 	return qeth_l3_dev_route_store(card, &card->options.route4,
 				QETH_PROT_IPV4, buf, count);
 }
@@ -124,9 +118,6 @@
 {
 	struct qeth_card *card = dev_get_drvdata(dev);
 
-	if (!card)
-		return -EINVAL;
-
 	return qeth_l3_dev_route_show(card, &card->options.route6, buf);
 }
 
@@ -135,9 +126,6 @@
 {
 	struct qeth_card *card = dev_get_drvdata(dev);
 
-	if (!card)
-		return -EINVAL;
-
 	return qeth_l3_dev_route_store(card, &card->options.route6,
 				QETH_PROT_IPV6, buf, count);
 }
@@ -145,54 +133,11 @@
 static DEVICE_ATTR(route6, 0644, qeth_l3_dev_route6_show,
 			qeth_l3_dev_route6_store);
 
-static ssize_t qeth_l3_dev_fake_broadcast_show(struct device *dev,
-			struct device_attribute *attr, char *buf)
-{
-	struct qeth_card *card = dev_get_drvdata(dev);
-
-	if (!card)
-		return -EINVAL;
-
-	return sprintf(buf, "%i\n", card->options.fake_broadcast? 1:0);
-}
-
-static ssize_t qeth_l3_dev_fake_broadcast_store(struct device *dev,
-		struct device_attribute *attr, const char *buf, size_t count)
-{
-	struct qeth_card *card = dev_get_drvdata(dev);
-	char *tmp;
-	int i, rc = 0;
-
-	if (!card)
-		return -EINVAL;
-
-	mutex_lock(&card->conf_mutex);
-	if (card->state != CARD_STATE_DOWN) {
-		rc = -EPERM;
-		goto out;
-	}
-
-	i = simple_strtoul(buf, &tmp, 16);
-	if ((i == 0) || (i == 1))
-		card->options.fake_broadcast = i;
-	else
-		rc = -EINVAL;
-out:
-	mutex_unlock(&card->conf_mutex);
-	return rc ? rc : count;
-}
-
-static DEVICE_ATTR(fake_broadcast, 0644, qeth_l3_dev_fake_broadcast_show,
-		   qeth_l3_dev_fake_broadcast_store);
-
 static ssize_t qeth_l3_dev_sniffer_show(struct device *dev,
 		struct device_attribute *attr, char *buf)
 {
 	struct qeth_card *card = dev_get_drvdata(dev);
 
-	if (!card)
-		return -EINVAL;
-
 	return sprintf(buf, "%i\n", card->options.sniffer ? 1 : 0);
 }
 
@@ -203,9 +148,6 @@
 	int rc = 0;
 	unsigned long i;
 
-	if (!card)
-		return -EINVAL;
-
 	if (!IS_IQD(card))
 		return -EPERM;
 	if (card->options.cq == QETH_CQ_ENABLED)
@@ -228,14 +170,13 @@
 		break;
 	case 1:
 		qdio_get_ssqd_desc(CARD_DDEV(card), &card->ssqd);
-		if (card->ssqd.qdioac2 & QETH_SNIFF_AVAIL) {
+		if (card->ssqd.qdioac2 & CHSC_AC2_SNIFFER_AVAILABLE) {
 			card->options.sniffer = i;
-			if (card->qdio.init_pool.buf_count !=
-					QETH_IN_BUF_COUNT_MAX)
-				qeth_realloc_buffer_pool(card,
-					QETH_IN_BUF_COUNT_MAX);
-		} else
+			qeth_resize_buffer_pool(card, QETH_IN_BUF_COUNT_MAX);
+		} else {
 			rc = -EPERM;
+		}
+
 		break;
 	default:
 		rc = -EINVAL;
@@ -248,16 +189,12 @@
 static DEVICE_ATTR(sniffer, 0644, qeth_l3_dev_sniffer_show,
 		qeth_l3_dev_sniffer_store);
 
-
 static ssize_t qeth_l3_dev_hsuid_show(struct device *dev,
 		struct device_attribute *attr, char *buf)
 {
 	struct qeth_card *card = dev_get_drvdata(dev);
 	char tmp_hsuid[9];
 
-	if (!card)
-		return -EINVAL;
-
 	if (!IS_IQD(card))
 		return -EPERM;
 
@@ -273,9 +210,6 @@
 	int rc = 0;
 	char *tmp;
 
-	if (!card)
-		return -EINVAL;
-
 	if (!IS_IQD(card))
 		return -EPERM;
 
@@ -337,7 +271,6 @@
 static struct attribute *qeth_l3_device_attrs[] = {
 	&dev_attr_route4.attr,
 	&dev_attr_route6.attr,
-	&dev_attr_fake_broadcast.attr,
 	&dev_attr_sniffer.attr,
 	&dev_attr_hsuid.attr,
 	NULL,
@@ -352,10 +285,7 @@
 {
 	struct qeth_card *card = dev_get_drvdata(dev);
 
-	if (!card)
-		return -EINVAL;
-
-	return sprintf(buf, "%i\n", card->ipato.enabled? 1:0);
+	return sprintf(buf, "%u\n", card->ipato.enabled ? 1 : 0);
 }
 
 static ssize_t qeth_l3_dev_ipato_enable_store(struct device *dev,
@@ -365,28 +295,27 @@
 	bool enable;
 	int rc = 0;
 
-	if (!card)
-		return -EINVAL;
-
 	mutex_lock(&card->conf_mutex);
 	if (card->state != CARD_STATE_DOWN) {
 		rc = -EPERM;
 		goto out;
 	}
 
+	mutex_lock(&card->ip_lock);
 	if (sysfs_streq(buf, "toggle")) {
 		enable = !card->ipato.enabled;
 	} else if (kstrtobool(buf, &enable)) {
 		rc = -EINVAL;
-		goto out;
+		goto unlock_ip;
 	}
 
 	if (card->ipato.enabled != enable) {
 		card->ipato.enabled = enable;
-		mutex_lock(&card->ip_lock);
 		qeth_l3_update_ipato(card);
-		mutex_unlock(&card->ip_lock);
 	}
+
+unlock_ip:
+	mutex_unlock(&card->ip_lock);
 out:
 	mutex_unlock(&card->conf_mutex);
 	return rc ? rc : count;
@@ -401,10 +330,7 @@
 {
 	struct qeth_card *card = dev_get_drvdata(dev);
 
-	if (!card)
-		return -EINVAL;
-
-	return sprintf(buf, "%i\n", card->ipato.invert4? 1:0);
+	return sprintf(buf, "%u\n", card->ipato.invert4 ? 1 : 0);
 }
 
 static ssize_t qeth_l3_dev_ipato_invert4_store(struct device *dev,
@@ -415,10 +341,7 @@
 	bool invert;
 	int rc = 0;
 
-	if (!card)
-		return -EINVAL;
-
-	mutex_lock(&card->conf_mutex);
+	mutex_lock(&card->ip_lock);
 	if (sysfs_streq(buf, "toggle")) {
 		invert = !card->ipato.invert4;
 	} else if (kstrtobool(buf, &invert)) {
@@ -428,12 +351,11 @@
 
 	if (card->ipato.invert4 != invert) {
 		card->ipato.invert4 = invert;
-		mutex_lock(&card->ip_lock);
 		qeth_l3_update_ipato(card);
-		mutex_unlock(&card->ip_lock);
 	}
+
 out:
-	mutex_unlock(&card->conf_mutex);
+	mutex_unlock(&card->ip_lock);
 	return rc ? rc : count;
 }
 
@@ -445,30 +367,35 @@
 			enum qeth_prot_versions proto)
 {
 	struct qeth_ipato_entry *ipatoe;
-	char addr_str[40];
-	int entry_len; /* length of 1 entry string, differs between v4 and v6 */
-	int i = 0;
+	int str_len = 0;
 
-	entry_len = (proto == QETH_PROT_IPV4)? 12 : 40;
-	/* add strlen for "/<mask>\n" */
-	entry_len += (proto == QETH_PROT_IPV4)? 5 : 6;
 	mutex_lock(&card->ip_lock);
 	list_for_each_entry(ipatoe, &card->ipato.entries, entry) {
+		char addr_str[40];
+		int entry_len;
+
 		if (ipatoe->proto != proto)
 			continue;
-		/* String must not be longer than PAGE_SIZE. So we check if
-		 * string length gets near PAGE_SIZE. Then we can savely display
-		 * the next IPv6 address (worst case, compared to IPv4) */
-		if ((PAGE_SIZE - i) <= entry_len)
+
+		entry_len = qeth_l3_ipaddr_to_string(proto, ipatoe->addr,
+						     addr_str);
+		if (entry_len < 0)
+			continue;
+
+		/* Append /%mask to the entry: */
+		entry_len += 1 + ((proto == QETH_PROT_IPV4) ? 2 : 3);
+		/* Enough room to format %entry\n into null terminated page? */
+		if (entry_len + 1 > PAGE_SIZE - str_len - 1)
 			break;
-		qeth_l3_ipaddr_to_string(proto, ipatoe->addr, addr_str);
-		i += snprintf(buf + i, PAGE_SIZE - i,
-			      "%s/%i\n", addr_str, ipatoe->mask_bits);
+
+		entry_len = scnprintf(buf, PAGE_SIZE - str_len,
+				      "%s/%i\n", addr_str, ipatoe->mask_bits);
+		str_len += entry_len;
+		buf += entry_len;
 	}
 	mutex_unlock(&card->ip_lock);
-	i += snprintf(buf + i, PAGE_SIZE - i, "\n");
 
-	return i;
+	return str_len ? str_len : scnprintf(buf, PAGE_SIZE, "\n");
 }
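
The rewritten show helper above accumulates entries into the one-page sysfs buffer with scnprintf(), advancing the write position and stopping before an entry could overflow the page. A userspace sketch of that bounded accumulation, using snprintf()'s return value to detect truncation and assuming the usual 4 KiB page:

	#include <stdio.h>

	#define PAGE_SIZE 4096	/* assumption: sysfs buffers span one page */

	static int fill_page(char *buf, const char **addrs,
			     const unsigned int *bits, int n)
	{
		int str_len = 0;

		for (int i = 0; i < n; i++) {
			int entry_len = snprintf(buf + str_len,
						 PAGE_SIZE - str_len,
						 "%s/%u\n", addrs[i], bits[i]);

			/* Would truncate: stop, like the driver does. */
			if (entry_len < 0 || entry_len >= PAGE_SIZE - str_len)
				break;
			str_len += entry_len;
		}
		return str_len;
	}

	int main(void)
	{
		static char page[PAGE_SIZE];
		const char *addrs[] = { "10.0.0.0", "192.168.0.0" };
		const unsigned int bits[] = { 8, 16 };

		fill_page(page, addrs, bits, 2);
		fputs(page, stdout);
		return 0;
	}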
 
 static ssize_t qeth_l3_dev_ipato_add4_show(struct device *dev,
@@ -476,36 +403,33 @@
 {
 	struct qeth_card *card = dev_get_drvdata(dev);
 
-	if (!card)
-		return -EINVAL;
-
 	return qeth_l3_dev_ipato_add_show(buf, card, QETH_PROT_IPV4);
 }
 
 static int qeth_l3_parse_ipatoe(const char *buf, enum qeth_prot_versions proto,
-		  u8 *addr, int *mask_bits)
+				u8 *addr, unsigned int *mask_bits)
 {
-	const char *start, *end;
-	char *tmp;
-	char buffer[40] = {0, };
+	char *sep;
+	int rc;
 
-	start = buf;
-	/* get address string */
-	end = strchr(start, '/');
-	if (!end || (end - start >= 40)) {
+	/* Expected input pattern: %addr/%mask */
+	sep = strnchr(buf, 40, '/');
+	if (!sep)
 		return -EINVAL;
-	}
-	strncpy(buffer, start, end - start);
-	if (qeth_l3_string_to_ipaddr(buffer, proto, addr)) {
+
+	/* Terminate the %addr sub-string, and parse it: */
+	*sep = '\0';
+	rc = qeth_l3_string_to_ipaddr(buf, proto, addr);
+	if (rc)
+		return rc;
+
+	rc = kstrtouint(sep + 1, 10, mask_bits);
+	if (rc)
+		return rc;
+
+	if (*mask_bits > ((proto == QETH_PROT_IPV4) ? 32 : 128))
 		return -EINVAL;
-	}
-	start = end + 1;
-	*mask_bits = simple_strtoul(start, &tmp, 10);
-	if (!strlen(start) ||
-	    (tmp == start) ||
-	    (*mask_bits > ((proto == QETH_PROT_IPV4) ? 32 : 128))) {
-		return -EINVAL;
-	}
+
 	return 0;
 }
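
The parser rework above trades the strchr()/strncpy()/simple_strtoul() sequence
for a bounded strnchr() plus kstrtouint(), which rejects empty input, trailing
junk and overflow in a single call. A condensed IPv4-only sketch of the same
shape (parse_v4_prefix is a made-up name; in4_pton() is the generic kernel
helper, standing in here for the qeth-specific address parser):

	#include <linux/inet.h>
	#include <linux/kernel.h>
	#include <linux/string.h>

	/* sketch: parse "a.b.c.d/mask" with mask in 0..32; buf must be writable */
	static int parse_v4_prefix(char *buf, u8 addr[4], unsigned int *mask_bits)
	{
		char *sep = strnchr(buf, 40, '/');	/* bounded separator search */
		int rc;

		if (!sep)
			return -EINVAL;
		*sep = '\0';			/* terminate the address sub-string */

		if (!in4_pton(buf, -1, addr, -1, NULL))
			return -EINVAL;

		rc = kstrtouint(sep + 1, 10, mask_bits); /* rejects junk/overflow */
		if (rc)
			return rc;

		return (*mask_bits > 32) ? -EINVAL : 0;
	}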
 
@@ -513,29 +437,26 @@
 			 struct qeth_card *card, enum qeth_prot_versions proto)
 {
 	struct qeth_ipato_entry *ipatoe;
+	unsigned int mask_bits;
 	u8 addr[16];
-	int mask_bits;
 	int rc = 0;
 
-	mutex_lock(&card->conf_mutex);
 	rc = qeth_l3_parse_ipatoe(buf, proto, addr, &mask_bits);
 	if (rc)
-		goto out;
+		return rc;
 
 	ipatoe = kzalloc(sizeof(struct qeth_ipato_entry), GFP_KERNEL);
-	if (!ipatoe) {
-		rc = -ENOMEM;
-		goto out;
-	}
+	if (!ipatoe)
+		return -ENOMEM;
+
 	ipatoe->proto = proto;
-	memcpy(ipatoe->addr, addr, (proto == QETH_PROT_IPV4)? 4:16);
+	memcpy(ipatoe->addr, addr, (proto == QETH_PROT_IPV4) ? 4 : 16);
 	ipatoe->mask_bits = mask_bits;
 
 	rc = qeth_l3_add_ipato_entry(card, ipatoe);
 	if (rc)
 		kfree(ipatoe);
-out:
-	mutex_unlock(&card->conf_mutex);
+
 	return rc ? rc : count;
 }
 
@@ -544,9 +465,6 @@
 {
 	struct qeth_card *card = dev_get_drvdata(dev);
 
-	if (!card)
-		return -EINVAL;
-
 	return qeth_l3_dev_ipato_add_store(buf, count, card, QETH_PROT_IPV4);
 }
 
@@ -557,15 +475,13 @@
 static ssize_t qeth_l3_dev_ipato_del_store(const char *buf, size_t count,
 			 struct qeth_card *card, enum qeth_prot_versions proto)
 {
+	unsigned int mask_bits;
 	u8 addr[16];
-	int mask_bits;
 	int rc = 0;
 
-	mutex_lock(&card->conf_mutex);
 	rc = qeth_l3_parse_ipatoe(buf, proto, addr, &mask_bits);
 	if (!rc)
 		rc = qeth_l3_del_ipato_entry(card, proto, addr, mask_bits);
-	mutex_unlock(&card->conf_mutex);
 	return rc ? rc : count;
 }
 
@@ -574,9 +490,6 @@
 {
 	struct qeth_card *card = dev_get_drvdata(dev);
 
-	if (!card)
-		return -EINVAL;
-
 	return qeth_l3_dev_ipato_del_store(buf, count, card, QETH_PROT_IPV4);
 }
 
@@ -588,10 +501,7 @@
 {
 	struct qeth_card *card = dev_get_drvdata(dev);
 
-	if (!card)
-		return -EINVAL;
-
-	return sprintf(buf, "%i\n", card->ipato.invert6? 1:0);
+	return sprintf(buf, "%u\n", card->ipato.invert6 ? 1 : 0);
 }
 
 static ssize_t qeth_l3_dev_ipato_invert6_store(struct device *dev,
@@ -601,10 +511,7 @@
 	bool invert;
 	int rc = 0;
 
-	if (!card)
-		return -EINVAL;
-
-	mutex_lock(&card->conf_mutex);
+	mutex_lock(&card->ip_lock);
 	if (sysfs_streq(buf, "toggle")) {
 		invert = !card->ipato.invert6;
 	} else if (kstrtobool(buf, &invert)) {
@@ -614,12 +521,11 @@
 
 	if (card->ipato.invert6 != invert) {
 		card->ipato.invert6 = invert;
-		mutex_lock(&card->ip_lock);
 		qeth_l3_update_ipato(card);
-		mutex_unlock(&card->ip_lock);
 	}
+
 out:
-	mutex_unlock(&card->conf_mutex);
+	mutex_unlock(&card->ip_lock);
 	return rc ? rc : count;
 }
 
@@ -633,9 +539,6 @@
 {
 	struct qeth_card *card = dev_get_drvdata(dev);
 
-	if (!card)
-		return -EINVAL;
-
 	return qeth_l3_dev_ipato_add_show(buf, card, QETH_PROT_IPV6);
 }
 
@@ -644,9 +547,6 @@
 {
 	struct qeth_card *card = dev_get_drvdata(dev);
 
-	if (!card)
-		return -EINVAL;
-
 	return qeth_l3_dev_ipato_add_store(buf, count, card, QETH_PROT_IPV6);
 }
 
@@ -659,9 +559,6 @@
 {
 	struct qeth_card *card = dev_get_drvdata(dev);
 
-	if (!card)
-		return -EINVAL;
-
 	return qeth_l3_dev_ipato_del_store(buf, count, card, QETH_PROT_IPV6);
 }
 
@@ -690,34 +587,34 @@
 {
 	struct qeth_card *card = dev_get_drvdata(dev);
 	struct qeth_ipaddr *ipaddr;
-	char addr_str[40];
 	int str_len = 0;
-	int entry_len; /* length of 1 entry string, differs between v4 and v6 */
 	int i;
 
-	if (!card)
-		return -EINVAL;
-
-	entry_len = (proto == QETH_PROT_IPV4)? 12 : 40;
-	entry_len += 2; /* \n + terminator */
 	mutex_lock(&card->ip_lock);
 	hash_for_each(card->ip_htable, i, ipaddr, hnode) {
+		char addr_str[40];
+		int entry_len;
+
 		if (ipaddr->proto != proto || ipaddr->type != type)
 			continue;
-		/* String must not be longer than PAGE_SIZE. So we check if
-		 * string length gets near PAGE_SIZE. Then we can savely display
-		 * the next IPv6 address (worst case, compared to IPv4) */
-		if ((PAGE_SIZE - str_len) <= entry_len)
+
+		entry_len = qeth_l3_ipaddr_to_string(proto, (u8 *)&ipaddr->u,
+						     addr_str);
+		if (entry_len < 0)
+			continue;
+
+		/* Enough room to format %addr\n into null terminated page? */
+		if (entry_len + 1 > PAGE_SIZE - str_len - 1)
 			break;
-		qeth_l3_ipaddr_to_string(proto, (const u8 *)&ipaddr->u,
-			addr_str);
-		str_len += snprintf(buf + str_len, PAGE_SIZE - str_len, "%s\n",
-				    addr_str);
+
+		entry_len = scnprintf(buf, PAGE_SIZE - str_len, "%s\n",
+				      addr_str);
+		str_len += entry_len;
+		buf += entry_len;
 	}
 	mutex_unlock(&card->ip_lock);
-	str_len += snprintf(buf + str_len, PAGE_SIZE - str_len, "\n");
 
-	return str_len;
+	return str_len ? str_len : scnprintf(buf, PAGE_SIZE, "\n");
 }
 
 static ssize_t qeth_l3_dev_vipa_add4_show(struct device *dev,
@@ -728,69 +625,34 @@
 				       QETH_IP_TYPE_VIPA);
 }
 
-static int qeth_l3_parse_vipae(const char *buf, enum qeth_prot_versions proto,
-		 u8 *addr)
+static ssize_t qeth_l3_vipa_store(struct device *dev, const char *buf, bool add,
+				  size_t count, enum qeth_prot_versions proto)
 {
-	if (qeth_l3_string_to_ipaddr(buf, proto, addr)) {
-		return -EINVAL;
-	}
-	return 0;
-}
-
-static ssize_t qeth_l3_dev_vipa_add_store(const char *buf, size_t count,
-			struct qeth_card *card, enum qeth_prot_versions proto)
-{
+	struct qeth_card *card = dev_get_drvdata(dev);
 	u8 addr[16] = {0, };
 	int rc;
 
-	mutex_lock(&card->conf_mutex);
-	rc = qeth_l3_parse_vipae(buf, proto, addr);
+	rc = qeth_l3_string_to_ipaddr(buf, proto, addr);
 	if (!rc)
-		rc = qeth_l3_modify_rxip_vipa(card, true, addr,
+		rc = qeth_l3_modify_rxip_vipa(card, add, addr,
 					      QETH_IP_TYPE_VIPA, proto);
-	mutex_unlock(&card->conf_mutex);
 	return rc ? rc : count;
 }
 
 static ssize_t qeth_l3_dev_vipa_add4_store(struct device *dev,
 		struct device_attribute *attr, const char *buf, size_t count)
 {
-	struct qeth_card *card = dev_get_drvdata(dev);
-
-	if (!card)
-		return -EINVAL;
-
-	return qeth_l3_dev_vipa_add_store(buf, count, card, QETH_PROT_IPV4);
+	return qeth_l3_vipa_store(dev, buf, true, count, QETH_PROT_IPV4);
 }
 
 static QETH_DEVICE_ATTR(vipa_add4, add4, 0644,
 			qeth_l3_dev_vipa_add4_show,
 			qeth_l3_dev_vipa_add4_store);
 
-static ssize_t qeth_l3_dev_vipa_del_store(const char *buf, size_t count,
-			 struct qeth_card *card, enum qeth_prot_versions proto)
-{
-	u8 addr[16];
-	int rc;
-
-	mutex_lock(&card->conf_mutex);
-	rc = qeth_l3_parse_vipae(buf, proto, addr);
-	if (!rc)
-		rc = qeth_l3_modify_rxip_vipa(card, false, addr,
-					      QETH_IP_TYPE_VIPA, proto);
-	mutex_unlock(&card->conf_mutex);
-	return rc ? rc : count;
-}
-
 static ssize_t qeth_l3_dev_vipa_del4_store(struct device *dev,
 		struct device_attribute *attr, const char *buf, size_t count)
 {
-	struct qeth_card *card = dev_get_drvdata(dev);
-
-	if (!card)
-		return -EINVAL;
-
-	return qeth_l3_dev_vipa_del_store(buf, count, card, QETH_PROT_IPV4);
+	return qeth_l3_vipa_store(dev, buf, false, count, QETH_PROT_IPV4);
 }
 
 static QETH_DEVICE_ATTR(vipa_del4, del4, 0200, NULL,
@@ -807,12 +669,7 @@
 static ssize_t qeth_l3_dev_vipa_add6_store(struct device *dev,
 		struct device_attribute *attr, const char *buf, size_t count)
 {
-	struct qeth_card *card = dev_get_drvdata(dev);
-
-	if (!card)
-		return -EINVAL;
-
-	return qeth_l3_dev_vipa_add_store(buf, count, card, QETH_PROT_IPV6);
+	return qeth_l3_vipa_store(dev, buf, true, count, QETH_PROT_IPV6);
 }
 
 static QETH_DEVICE_ATTR(vipa_add6, add6, 0644,
@@ -822,12 +679,7 @@
 static ssize_t qeth_l3_dev_vipa_del6_store(struct device *dev,
 		struct device_attribute *attr, const char *buf, size_t count)
 {
-	struct qeth_card *card = dev_get_drvdata(dev);
-
-	if (!card)
-		return -EINVAL;
-
-	return qeth_l3_dev_vipa_del_store(buf, count, card, QETH_PROT_IPV6);
+	return qeth_l3_vipa_store(dev, buf, false, count, QETH_PROT_IPV6);
 }
 
 static QETH_DEVICE_ATTR(vipa_del6, del6, 0200, NULL,
@@ -880,60 +732,34 @@
 	return 0;
 }
 
-static ssize_t qeth_l3_dev_rxip_add_store(const char *buf, size_t count,
-			struct qeth_card *card, enum qeth_prot_versions proto)
+static ssize_t qeth_l3_rxip_store(struct device *dev, const char *buf, bool add,
+				  size_t count, enum qeth_prot_versions proto)
 {
+	struct qeth_card *card = dev_get_drvdata(dev);
 	u8 addr[16] = {0, };
 	int rc;
 
-	mutex_lock(&card->conf_mutex);
 	rc = qeth_l3_parse_rxipe(buf, proto, addr);
 	if (!rc)
-		rc = qeth_l3_modify_rxip_vipa(card, true, addr,
+		rc = qeth_l3_modify_rxip_vipa(card, add, addr,
 					      QETH_IP_TYPE_RXIP, proto);
-	mutex_unlock(&card->conf_mutex);
 	return rc ? rc : count;
 }
 
 static ssize_t qeth_l3_dev_rxip_add4_store(struct device *dev,
 		struct device_attribute *attr, const char *buf, size_t count)
 {
-	struct qeth_card *card = dev_get_drvdata(dev);
-
-	if (!card)
-		return -EINVAL;
-
-	return qeth_l3_dev_rxip_add_store(buf, count, card, QETH_PROT_IPV4);
+	return qeth_l3_rxip_store(dev, buf, true, count, QETH_PROT_IPV4);
 }
 
 static QETH_DEVICE_ATTR(rxip_add4, add4, 0644,
 			qeth_l3_dev_rxip_add4_show,
 			qeth_l3_dev_rxip_add4_store);
 
-static ssize_t qeth_l3_dev_rxip_del_store(const char *buf, size_t count,
-			struct qeth_card *card, enum qeth_prot_versions proto)
-{
-	u8 addr[16];
-	int rc;
-
-	mutex_lock(&card->conf_mutex);
-	rc = qeth_l3_parse_rxipe(buf, proto, addr);
-	if (!rc)
-		rc = qeth_l3_modify_rxip_vipa(card, false, addr,
-					      QETH_IP_TYPE_RXIP, proto);
-	mutex_unlock(&card->conf_mutex);
-	return rc ? rc : count;
-}
-
 static ssize_t qeth_l3_dev_rxip_del4_store(struct device *dev,
 		struct device_attribute *attr, const char *buf, size_t count)
 {
-	struct qeth_card *card = dev_get_drvdata(dev);
-
-	if (!card)
-		return -EINVAL;
-
-	return qeth_l3_dev_rxip_del_store(buf, count, card, QETH_PROT_IPV4);
+	return qeth_l3_rxip_store(dev, buf, false, count, QETH_PROT_IPV4);
 }
 
 static QETH_DEVICE_ATTR(rxip_del4, del4, 0200, NULL,
@@ -950,12 +776,7 @@
 static ssize_t qeth_l3_dev_rxip_add6_store(struct device *dev,
 		struct device_attribute *attr, const char *buf, size_t count)
 {
-	struct qeth_card *card = dev_get_drvdata(dev);
-
-	if (!card)
-		return -EINVAL;
-
-	return qeth_l3_dev_rxip_add_store(buf, count, card, QETH_PROT_IPV6);
+	return qeth_l3_rxip_store(dev, buf, true, count, QETH_PROT_IPV6);
 }
 
 static QETH_DEVICE_ATTR(rxip_add6, add6, 0644,
@@ -965,12 +786,7 @@
 static ssize_t qeth_l3_dev_rxip_del6_store(struct device *dev,
 		struct device_attribute *attr, const char *buf, size_t count)
 {
-	struct qeth_card *card = dev_get_drvdata(dev);
-
-	if (!card)
-		return -EINVAL;
-
-	return qeth_l3_dev_rxip_del_store(buf, count, card, QETH_PROT_IPV6);
+	return qeth_l3_rxip_store(dev, buf, false, count, QETH_PROT_IPV6);
 }
 
 static QETH_DEVICE_ATTR(rxip_del6, del6, 0200, NULL,
diff --git a/drivers/s390/net/smsgiucv.c b/drivers/s390/net/smsgiucv.c
index 066b5c3..c84ec2f 100644
--- a/drivers/s390/net/smsgiucv.c
+++ b/drivers/s390/net/smsgiucv.c
@@ -29,12 +29,9 @@
 MODULE_DESCRIPTION ("Linux for S/390 IUCV special message driver");
 
 static struct iucv_path *smsg_path;
-/* dummy device used as trigger for PM functions */
-static struct device *smsg_dev;
 
 static DEFINE_SPINLOCK(smsg_list_lock);
 static LIST_HEAD(smsg_list);
-static int iucv_path_connected;
 
 static int smsg_path_pending(struct iucv_path *, u8 *, u8 *);
 static void smsg_message_pending(struct iucv_path *, struct iucv_message *);
@@ -124,60 +121,15 @@
 	kfree(cb);
 }
 
-static int smsg_pm_freeze(struct device *dev)
-{
-#ifdef CONFIG_PM_DEBUG
-	printk(KERN_WARNING "smsg_pm_freeze\n");
-#endif
-	if (smsg_path && iucv_path_connected) {
-		iucv_path_sever(smsg_path, NULL);
-		iucv_path_connected = 0;
-	}
-	return 0;
-}
-
-static int smsg_pm_restore_thaw(struct device *dev)
-{
-	int rc;
-
-#ifdef CONFIG_PM_DEBUG
-	printk(KERN_WARNING "smsg_pm_restore_thaw\n");
-#endif
-	if (smsg_path && !iucv_path_connected) {
-		memset(smsg_path, 0, sizeof(*smsg_path));
-		smsg_path->msglim = 255;
-		smsg_path->flags = 0;
-		rc = iucv_path_connect(smsg_path, &smsg_handler, "*MSG    ",
-				       NULL, NULL, NULL);
-#ifdef CONFIG_PM_DEBUG
-		if (rc)
-			printk(KERN_ERR
-			       "iucv_path_connect returned with rc %i\n", rc);
-#endif
-		if (!rc)
-			iucv_path_connected = 1;
-		cpcmd("SET SMSG IUCV", NULL, 0, NULL);
-	}
-	return 0;
-}
-
-static const struct dev_pm_ops smsg_pm_ops = {
-	.freeze = smsg_pm_freeze,
-	.thaw = smsg_pm_restore_thaw,
-	.restore = smsg_pm_restore_thaw,
-};
-
 static struct device_driver smsg_driver = {
 	.owner = THIS_MODULE,
 	.name = SMSGIUCV_DRV_NAME,
 	.bus  = &iucv_bus,
-	.pm = &smsg_pm_ops,
 };
 
 static void __exit smsg_exit(void)
 {
 	cpcmd("SET SMSG OFF", NULL, 0, NULL);
-	device_unregister(smsg_dev);
 	iucv_unregister(&smsg_handler, 1);
 	driver_unregister(&smsg_driver);
 }
@@ -205,27 +157,10 @@
 			       NULL, NULL, NULL);
 	if (rc)
 		goto out_free_path;
-	else
-		iucv_path_connected = 1;
-	smsg_dev = kzalloc(sizeof(struct device), GFP_KERNEL);
-	if (!smsg_dev) {
-		rc = -ENOMEM;
-		goto out_free_path;
-	}
-	dev_set_name(smsg_dev, "smsg_iucv");
-	smsg_dev->bus = &iucv_bus;
-	smsg_dev->parent = iucv_root;
-	smsg_dev->release = (void (*)(struct device *))kfree;
-	smsg_dev->driver = &smsg_driver;
-	rc = device_register(smsg_dev);
-	if (rc)
-		goto out_put;
 
 	cpcmd("SET SMSG IUCV", NULL, 0, NULL);
 	return 0;
 
-out_put:
-	put_device(smsg_dev);
 out_free_path:
 	iucv_path_free(smsg_path);
 	smsg_path = NULL;
diff --git a/drivers/s390/scsi/Makefile b/drivers/s390/scsi/Makefile
index 9dda431..352056e 100644
--- a/drivers/s390/scsi/Makefile
+++ b/drivers/s390/scsi/Makefile
@@ -5,6 +5,6 @@
 
 zfcp-objs := zfcp_aux.o zfcp_ccw.o zfcp_dbf.o zfcp_erp.o \
 	     zfcp_fc.o zfcp_fsf.o zfcp_qdio.o zfcp_scsi.o zfcp_sysfs.o \
-	     zfcp_unit.o
+	     zfcp_unit.o zfcp_diag.o
 
 obj-$(CONFIG_ZFCP) += zfcp.o
diff --git a/drivers/s390/scsi/zfcp_aux.c b/drivers/s390/scsi/zfcp_aux.c
index e390f8c..18b713a 100644
--- a/drivers/s390/scsi/zfcp_aux.c
+++ b/drivers/s390/scsi/zfcp_aux.c
@@ -4,7 +4,7 @@
  *
  * Module interface and handling of zfcp data structures.
  *
- * Copyright IBM Corp. 2002, 2017
+ * Copyright IBM Corp. 2002, 2020
  */
 
 /*
@@ -25,6 +25,7 @@
  *            Martin Petermann
  *            Sven Schuetz
  *            Steffen Maier
+ *	      Benjamin Block
  */
 
 #define KMSG_COMPONENT "zfcp"
@@ -36,6 +37,7 @@
 #include "zfcp_ext.h"
 #include "zfcp_fc.h"
 #include "zfcp_reqlist.h"
+#include "zfcp_diag.h"
 
 #define ZFCP_BUS_ID_SIZE	20
 
@@ -356,6 +358,9 @@
 
 	adapter->erp_action.adapter = adapter;
 
+	if (zfcp_diag_adapter_setup(adapter))
+		goto failed;
+
 	if (zfcp_qdio_setup(adapter))
 		goto failed;
 
@@ -402,13 +407,15 @@
 			       &zfcp_sysfs_adapter_attrs))
 		goto failed;
 
+	if (zfcp_diag_sysfs_setup(adapter))
+		goto failed;
+
 	/* report size limit per scatter-gather segment */
 	adapter->ccw_device->dev.dma_parms = &adapter->dma_parms;
 
 	adapter->stat_read_buf_num = FSF_STATUS_READS_RECOM;
 
-	if (!zfcp_scsi_adapter_register(adapter))
-		return adapter;
+	return adapter;
 
 failed:
 	zfcp_adapter_unregister(adapter);
@@ -426,6 +433,7 @@
 
 	zfcp_fc_wka_ports_force_offline(adapter->gs);
 	zfcp_scsi_adapter_unregister(adapter);
+	zfcp_diag_sysfs_destroy(adapter);
 	sysfs_remove_group(&cdev->dev.kobj, &zfcp_sysfs_adapter_attrs);
 
 	zfcp_erp_thread_kill(adapter);
@@ -449,6 +457,7 @@
 	dev_set_drvdata(&adapter->ccw_device->dev, NULL);
 	zfcp_fc_gs_destroy(adapter);
 	zfcp_free_low_mem_buffers(adapter);
+	zfcp_diag_adapter_free(adapter);
 	kfree(adapter->req_list);
 	kfree(adapter->fc_stats);
 	kfree(adapter->stats_reset_data);
diff --git a/drivers/s390/scsi/zfcp_ccw.c b/drivers/s390/scsi/zfcp_ccw.c
index 49eda14..d9fd0a4 100644
--- a/drivers/s390/scsi/zfcp_ccw.c
+++ b/drivers/s390/scsi/zfcp_ccw.c
@@ -124,13 +124,12 @@
 		return;
 
 	write_lock_irq(&adapter->port_list_lock);
-	list_for_each_entry_safe(port, p, &adapter->port_list, list) {
+	list_for_each_entry(port, &adapter->port_list, list) {
 		write_lock(&port->unit_list_lock);
-		list_for_each_entry_safe(unit, u, &port->unit_list, list)
-			list_move(&unit->list, &unit_remove_lh);
+		list_splice_init(&port->unit_list, &unit_remove_lh);
 		write_unlock(&port->unit_list_lock);
-		list_move(&port->list, &port_remove_lh);
 	}
+	list_splice_init(&adapter->port_list, &port_remove_lh);
 	write_unlock_irq(&adapter->port_list_lock);
 	zfcp_ccw_adapter_put(adapter); /* put from zfcp_ccw_adapter_by_cdev */
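
The removal path above leans on list_splice_init(), which detaches the whole
chain from one list head onto another in constant time and leaves the source
head reinitialized; because no per-entry unlinking happens during traversal,
the _safe iterator variants are no longer needed. A minimal sketch with a
hypothetical item type:

	#include <linux/list.h>

	struct item {
		int payload;
		struct list_head node;
	};

	static void drain_to(struct list_head *src, struct list_head *dst)
	{
		/* O(1) transfer of every entry; src ends up empty and reusable */
		list_splice_init(src, dst);
	}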
 
diff --git a/drivers/s390/scsi/zfcp_dbf.c b/drivers/s390/scsi/zfcp_dbf.c
index 1234294..673e42d 100644
--- a/drivers/s390/scsi/zfcp_dbf.c
+++ b/drivers/s390/scsi/zfcp_dbf.c
@@ -4,7 +4,7 @@
  *
  * Debug traces for zfcp.
  *
- * Copyright IBM Corp. 2002, 2018
+ * Copyright IBM Corp. 2002, 2020
  */
 
 #define KMSG_COMPONENT "zfcp"
@@ -104,6 +104,48 @@
 }
 
 /**
+ * zfcp_dbf_hba_fsf_fces - trace event for fsf responses related to
+ *			   FC Endpoint Security (FCES)
+ * @tag: tag indicating which kind of FC Endpoint Security event has occurred
+ * @req: request for which a response was received
+ * @wwpn: remote port or ZFCP_DBF_INVALID_WWPN
+ * @fc_security_old: old FC Endpoint Security of FCP device or connection
+ * @fc_security_new: new FC Endpoint Security of FCP device or connection
+ */
+void zfcp_dbf_hba_fsf_fces(char *tag, const struct zfcp_fsf_req *req, u64 wwpn,
+			   u32 fc_security_old, u32 fc_security_new)
+{
+	struct zfcp_dbf *dbf = req->adapter->dbf;
+	struct fsf_qtcb_prefix *q_pref = &req->qtcb->prefix;
+	struct fsf_qtcb_header *q_head = &req->qtcb->header;
+	struct zfcp_dbf_hba *rec = &dbf->hba_buf;
+	static int const level = 3;
+	unsigned long flags;
+
+	if (unlikely(!debug_level_enabled(dbf->hba, level)))
+		return;
+
+	spin_lock_irqsave(&dbf->hba_lock, flags);
+	memset(rec, 0, sizeof(*rec));
+
+	memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN);
+	rec->id = ZFCP_DBF_HBA_FCES;
+	rec->fsf_req_id = req->req_id;
+	rec->fsf_req_status = req->status;
+	rec->fsf_cmd = q_head->fsf_command;
+	rec->fsf_seq_no = q_pref->req_seq_no;
+	rec->u.fces.req_issued = req->issued;
+	rec->u.fces.fsf_status = q_head->fsf_status;
+	rec->u.fces.port_handle = q_head->port_handle;
+	rec->u.fces.wwpn = wwpn;
+	rec->u.fces.fc_security_old = fc_security_old;
+	rec->u.fces.fc_security_new = fc_security_new;
+
+	debug_event(dbf->hba, level, rec, sizeof(*rec));
+	spin_unlock_irqrestore(&dbf->hba_lock, flags);
+}
+
+/**
  * zfcp_dbf_hba_fsf_uss - trace event for an unsolicited status buffer
  * @tag: tag indicating which kind of unsolicited status has been received
  * @req: request providing the unsolicited status
diff --git a/drivers/s390/scsi/zfcp_dbf.h b/drivers/s390/scsi/zfcp_dbf.h
index 900c779..4d1435c 100644
--- a/drivers/s390/scsi/zfcp_dbf.h
+++ b/drivers/s390/scsi/zfcp_dbf.h
@@ -3,7 +3,7 @@
  * zfcp device driver
  * debug feature declarations
  *
- * Copyright IBM Corp. 2008, 2017
+ * Copyright IBM Corp. 2008, 2020
  */
 
 #ifndef ZFCP_DBF_H
@@ -16,6 +16,7 @@
 
 #define ZFCP_DBF_TAG_LEN       7
 
+#define ZFCP_DBF_INVALID_WWPN	0x0000000000000000ull
 #define ZFCP_DBF_INVALID_LUN	0xFFFFFFFFFFFFFFFFull
 
 enum zfcp_dbf_pseudo_erp_act_type {
@@ -158,17 +159,38 @@
 } __packed;
 
 /**
+ * struct zfcp_dbf_hba_fces - trace record for FC Endpoint Security
+ * @req_issued: timestamp when request was issued
+ * @fsf_status: fsf status
+ * @port_handle: handle for port
+ * @wwpn: remote FC port WWPN
+ * @fc_security_old: old FC Endpoint Security
+ * @fc_security_new: new FC Endpoint Security
+ */
+struct zfcp_dbf_hba_fces {
+	u64 req_issued;
+	u32 fsf_status;
+	u32 port_handle;
+	u64 wwpn;
+	u32 fc_security_old;
+	u32 fc_security_new;
+} __packed;
+
+/**
  * enum zfcp_dbf_hba_id - HBA trace record identifier
  * @ZFCP_DBF_HBA_RES: response trace record
  * @ZFCP_DBF_HBA_USS: unsolicited status trace record
  * @ZFCP_DBF_HBA_BIT: bit error trace record
  * @ZFCP_DBF_HBA_BASIC: basic adapter event, only trace tag, no other data
+ * @ZFCP_DBF_HBA_FCES: FC Endpoint Security trace record
  */
 enum zfcp_dbf_hba_id {
 	ZFCP_DBF_HBA_RES	= 1,
 	ZFCP_DBF_HBA_USS	= 2,
 	ZFCP_DBF_HBA_BIT	= 3,
 	ZFCP_DBF_HBA_BASIC	= 4,
+	ZFCP_DBF_HBA_FCES	= 5,
 };
 
 /**
@@ -181,9 +203,10 @@
  * @fsf_seq_no: fsf sequence number
  * @pl_len: length of payload stored as zfcp_dbf_pay
  * @u: record type specific data
- * @u.res: data for fsf responses
- * @u.uss: data for unsolicited status buffer
- * @u.be:  data for bit error unsolicited status buffer
+ * @u.res:  data for fsf responses
+ * @u.uss:  data for unsolicited status buffer
+ * @u.be:   data for bit error unsolicited status buffer
+ * @u.fces: data for FC Endpoint Security
  */
 struct zfcp_dbf_hba {
 	u8 id;
@@ -197,6 +220,7 @@
 		struct zfcp_dbf_hba_res res;
 		struct zfcp_dbf_hba_uss uss;
 		struct fsf_bit_error_payload be;
+		struct zfcp_dbf_hba_fces fces;
 	} u;
 } __packed;
 
diff --git a/drivers/s390/scsi/zfcp_def.h b/drivers/s390/scsi/zfcp_def.h
index 87d2f47..da8a5ce 100644
--- a/drivers/s390/scsi/zfcp_def.h
+++ b/drivers/s390/scsi/zfcp_def.h
@@ -4,7 +4,7 @@
  *
  * Global definitions for the zfcp device driver.
  *
- * Copyright IBM Corp. 2002, 2017
+ * Copyright IBM Corp. 2002, 2020
  */
 
 #ifndef ZFCP_DEF_H
@@ -86,6 +86,7 @@
 #define ZFCP_STATUS_FSFREQ_ABORTNOTNEEDED       0x00000080
 #define ZFCP_STATUS_FSFREQ_TMFUNCFAILED         0x00000200
 #define ZFCP_STATUS_FSFREQ_DISMISSED            0x00001000
+#define ZFCP_STATUS_FSFREQ_XDATAINCOMPLETE	0x00020000
 
 /************************* STRUCTURE DEFINITIONS *****************************/
 
@@ -157,6 +158,8 @@
 	u32			adapter_features;  /* FCP channel features */
 	u32			connection_features; /* host connection features */
         u32			hardware_version;  /* of FCP channel */
+	u32			fc_security_algorithms; /* of FCP channel */
+	u32			fc_security_algorithms_old; /* of FCP channel */
 	u16			timer_ticks;       /* time int for a tick */
 	struct Scsi_Host	*scsi_host;	   /* Pointer to mid-layer */
 	struct list_head	port_list;	   /* remote port list */
@@ -197,6 +200,7 @@
 	struct device_dma_parameters dma_parms;
 	struct zfcp_fc_events events;
 	unsigned long		next_port_scan;
+	struct zfcp_diag_adapter	*diagnostics;
 };
 
 struct zfcp_port {
@@ -216,6 +220,8 @@
         atomic_t               erp_counter;
 	u32                    maxframe_size;
 	u32                    supported_classes;
+	u32                    connection_info;
+	u32                    connection_info_old;
 	struct work_struct     gid_pn_work;
 	struct work_struct     test_link_work;
 	struct work_struct     rport_work;
diff --git a/drivers/s390/scsi/zfcp_diag.c b/drivers/s390/scsi/zfcp_diag.c
new file mode 100644
index 0000000..67a8f4e
--- /dev/null
+++ b/drivers/s390/scsi/zfcp_diag.c
@@ -0,0 +1,305 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * zfcp device driver
+ *
+ * Functions to handle diagnostics.
+ *
+ * Copyright IBM Corp. 2018
+ */
+
+#include <linux/spinlock.h>
+#include <linux/jiffies.h>
+#include <linux/string.h>
+#include <linux/kernfs.h>
+#include <linux/sysfs.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+
+#include "zfcp_diag.h"
+#include "zfcp_ext.h"
+#include "zfcp_def.h"
+
+static DECLARE_WAIT_QUEUE_HEAD(__zfcp_diag_publish_wait);
+
+/**
+ * zfcp_diag_adapter_setup() - Setup storage for adapter diagnostics.
+ * @adapter: the adapter to setup diagnostics for.
+ *
+ * Creates the data-structures to store the diagnostics for an adapter. This
+ * overwrites whatever was stored before at &zfcp_adapter->diagnostics!
+ *
+ * Return:
+ * * 0	     - Everything is OK
+ * * -ENOMEM - Could not allocate all/parts of the data-structures;
+ *	       &zfcp_adapter->diagnostics remains unchanged
+ */
+int zfcp_diag_adapter_setup(struct zfcp_adapter *const adapter)
+{
+	struct zfcp_diag_adapter *diag;
+	struct zfcp_diag_header *hdr;
+
+	diag = kzalloc(sizeof(*diag), GFP_KERNEL);
+	if (diag == NULL)
+		return -ENOMEM;
+
+	diag->max_age = (5 * 1000); /* default value: 5 s */
+
+	/* setup header for port_data */
+	hdr = &diag->port_data.header;
+
+	spin_lock_init(&hdr->access_lock);
+	hdr->buffer = &diag->port_data.data;
+	hdr->buffer_size = sizeof(diag->port_data.data);
+	/* set the timestamp so that the first test on age will always fail */
+	hdr->timestamp = jiffies - msecs_to_jiffies(diag->max_age);
+
+	/* setup header for config_data */
+	hdr = &diag->config_data.header;
+
+	spin_lock_init(&hdr->access_lock);
+	hdr->buffer = &diag->config_data.data;
+	hdr->buffer_size = sizeof(diag->config_data.data);
+	/* set the timestamp so that the first test on age will always fail */
+	hdr->timestamp = jiffies - msecs_to_jiffies(diag->max_age);
+
+	adapter->diagnostics = diag;
+	return 0;
+}
+
+/**
+ * zfcp_diag_adapter_free() - Frees all adapter diagnostics allocations.
+ * @adapter: the adapter whose diagnostic structures should be freed.
+ *
+ * Frees all data-structures in the given adapter that store diagnostics
+ * information. Can safely be called with partially setup diagnostics.
+ */
+void zfcp_diag_adapter_free(struct zfcp_adapter *const adapter)
+{
+	kfree(adapter->diagnostics);
+	adapter->diagnostics = NULL;
+}
+
+/**
+ * zfcp_diag_sysfs_setup() - Setup the sysfs-group for adapter-diagnostics.
+ * @adapter: target adapter to which the group should be added.
+ *
+ * Return: 0 on success; Something else otherwise (see sysfs_create_group()).
+ */
+int zfcp_diag_sysfs_setup(struct zfcp_adapter *const adapter)
+{
+	int rc = sysfs_create_group(&adapter->ccw_device->dev.kobj,
+				    &zfcp_sysfs_diag_attr_group);
+	if (rc == 0)
+		adapter->diagnostics->sysfs_established = 1;
+
+	return rc;
+}
+
+/**
+ * zfcp_diag_sysfs_destroy() - Remove the sysfs-group for adapter-diagnostics.
+ * @adapter: target adapter from which the group should be removed.
+ */
+void zfcp_diag_sysfs_destroy(struct zfcp_adapter *const adapter)
+{
+	if (adapter->diagnostics == NULL ||
+	    !adapter->diagnostics->sysfs_established)
+		return;
+
+	/*
+	 * We need this state-handling so we can prevent warnings being printed
+	 * on the kernel-console in case we have to abort a halfway done
+	 * zfcp_adapter_enqueue(), in which the sysfs-group was not yet
+	 * established. sysfs_remove_group() does this checking as well, but
+	 * still prints a warning in case we try to remove a group that has not
+	 * been established before
+	 */
+	adapter->diagnostics->sysfs_established = 0;
+	sysfs_remove_group(&adapter->ccw_device->dev.kobj,
+			   &zfcp_sysfs_diag_attr_group);
+}
+
+
+/**
+ * zfcp_diag_update_xdata() - Update a diagnostics buffer.
+ * @hdr: the meta data to update.
+ * @data: data to use for the update.
+ * @incomplete: flag stating whether the data in @data is incomplete.
+ */
+void zfcp_diag_update_xdata(struct zfcp_diag_header *const hdr,
+			    const void *const data, const bool incomplete)
+{
+	const unsigned long capture_timestamp = jiffies;
+	unsigned long flags;
+
+	spin_lock_irqsave(&hdr->access_lock, flags);
+
+	/* make sure we never go into the past with an update */
+	if (!time_after_eq(capture_timestamp, hdr->timestamp))
+		goto out;
+
+	hdr->timestamp = capture_timestamp;
+	hdr->incomplete = incomplete;
+	memcpy(hdr->buffer, data, hdr->buffer_size);
+out:
+	spin_unlock_irqrestore(&hdr->access_lock, flags);
+}
+
+/**
+ * zfcp_diag_update_port_data_buffer() - Implementation of
+ *					 &typedef zfcp_diag_update_buffer_func
+ *					 to collect and update Port Data.
+ * @adapter: Adapter to collect Port Data from.
+ *
+ * This call is SYNCHRONOUS! It blocks until the respective command has
+ * finished completely, or has failed in some way.
+ *
+ * Return:
+ * * 0		- Successfully retrieved new Diagnostics and updated the buffer;
+ *		  this also includes cases where data was retrieved, but
+ *		  incomplete; you'll have to check the flag ``incomplete``
+ *		  of &struct zfcp_diag_header.
+ * * see zfcp_fsf_exchange_port_data_sync() for possible error-codes
+ *   (excluding -EAGAIN)
+ */
+int zfcp_diag_update_port_data_buffer(struct zfcp_adapter *const adapter)
+{
+	int rc;
+
+	rc = zfcp_fsf_exchange_port_data_sync(adapter->qdio, NULL);
+	if (rc == -EAGAIN)
+		rc = 0; /* signaling incomplete via struct zfcp_diag_header */
+
+	/* buffer-data was updated in zfcp_fsf_exchange_port_data_handler() */
+
+	return rc;
+}
+
+/**
+ * zfcp_diag_update_config_data_buffer() - Implementation of
+ *					   &typedef zfcp_diag_update_buffer_func
+ *					   to collect and update Config Data.
+ * @adapter: Adapter to collect Config Data from.
+ *
+ * This call is SYNCHRONOUS! It blocks until the respective command has
+ * finished completely, or has failed in some way.
+ *
+ * Return:
+ * * 0		- Successfully retrieved new Diagnostics and updated the buffer;
+ *		  this also includes cases where data was retrieved, but
+ *		  incomplete; you'll have to check the flag ``incomplete``
+ *		  of &struct zfcp_diag_header.
+ * * see zfcp_fsf_exchange_config_data_sync() for possible error-codes
+ *   (excluding -EAGAIN)
+ */
+int zfcp_diag_update_config_data_buffer(struct zfcp_adapter *const adapter)
+{
+	int rc;
+
+	rc = zfcp_fsf_exchange_config_data_sync(adapter->qdio, NULL);
+	if (rc == -EAGAIN)
+		rc = 0; /* signaling incomplete via struct zfcp_diag_header */
+
+	/* buffer-data was updated in zfcp_fsf_exchange_config_data_handler() */
+
+	return rc;
+}
+
+static int __zfcp_diag_update_buffer(struct zfcp_adapter *const adapter,
+				     struct zfcp_diag_header *const hdr,
+				     zfcp_diag_update_buffer_func buffer_update,
+				     unsigned long *const flags)
+	__must_hold(hdr->access_lock)
+{
+	int rc;
+
+	if (hdr->updating == 1) {
+		rc = wait_event_interruptible_lock_irq(__zfcp_diag_publish_wait,
+						       hdr->updating == 0,
+						       hdr->access_lock);
+		rc = (rc == 0 ? -EAGAIN : -EINTR);
+	} else {
+		hdr->updating = 1;
+		spin_unlock_irqrestore(&hdr->access_lock, *flags);
+
+		/* unlocked, because update function sleeps */
+		rc = buffer_update(adapter);
+
+		spin_lock_irqsave(&hdr->access_lock, *flags);
+		hdr->updating = 0;
+
+		/*
+		 * every thread waiting here went via an interruptible wait,
+		 * so it's fine to only wake those
+		 */
+		wake_up_interruptible_all(&__zfcp_diag_publish_wait);
+	}
+
+	return rc;
+}
+
+static bool
+__zfcp_diag_test_buffer_age_isfresh(const struct zfcp_diag_adapter *const diag,
+				    const struct zfcp_diag_header *const hdr)
+	__must_hold(hdr->access_lock)
+{
+	const unsigned long now = jiffies;
+
+	/*
+	 * Should not happen (data is from the future). If it does, still
+	 * signal that it needs a refresh.
+	 */
+	if (!time_after_eq(now, hdr->timestamp))
+		return false;
+
+	if (jiffies_to_msecs(now - hdr->timestamp) >= diag->max_age)
+		return false;
+
+	return true;
+}
+
+/**
+ * zfcp_diag_update_buffer_limited() - Collect diagnostics and update a
+ *				       diagnostics buffer rate limited.
+ * @adapter: Adapter to collect the diagnostics from.
+ * @hdr: buffer-header for which to update with the collected diagnostics.
+ * @buffer_update: Specific implementation for collecting and updating.
+ *
+ * This function will cause an update of the given @hdr by calling the also
+ * given @buffer_update function. If called by multiple sources at the same
+ * time, it will synchronize the update by only allowing one source to call
+ * @buffer_update and the others to wait for that source to complete instead
+ * (the wait is interruptible).
+ *
+ * Additionally this version is rate-limited and will only exit if either the
+ * buffer is fresh enough (within the limit) - it will do nothing if the buffer
+ * is fresh enough to begin with - or if the source/thread that started this
+ * update is the one that made the update (to prevent endless loops).
+ *
+ * Return:
+ * * 0		- If the update was successfully published and/or the buffer is
+ *		  fresh enough
+ * * -EINTR	- If the thread went into the wait-state and was interrupted
+ * * whatever @buffer_update returns
+ */
+int zfcp_diag_update_buffer_limited(struct zfcp_adapter *const adapter,
+				    struct zfcp_diag_header *const hdr,
+				    zfcp_diag_update_buffer_func buffer_update)
+{
+	unsigned long flags;
+	int rc;
+
+	spin_lock_irqsave(&hdr->access_lock, flags);
+
+	for (rc = 0;
+	     !__zfcp_diag_test_buffer_age_isfresh(adapter->diagnostics, hdr);
+	     rc = 0) {
+		rc = __zfcp_diag_update_buffer(adapter, hdr, buffer_update,
+					       &flags);
+		if (rc != -EAGAIN)
+			break;
+	}
+
+	spin_unlock_irqrestore(&hdr->access_lock, flags);
+
+	return rc;
+}
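
Two details in zfcp_diag.c above deserve a note. First, zfcp_diag_adapter_setup()
back-dates each timestamp by max_age so the very first freshness test fails and
forces a fetch. Second, zfcp_diag_update_buffer_limited() is a single-flight
refresh: one caller drops the lock and fetches while concurrent callers sleep
interruptibly until `updating` clears, then re-test the age. A condensed sketch
of that shape, with made-up names:

	#include <linux/jiffies.h>
	#include <linux/spinlock.h>
	#include <linux/wait.h>

	static DECLARE_WAIT_QUEUE_HEAD(publish_wait);

	struct cached_buf {
		spinlock_t lock;
		unsigned long timestamp;	/* jiffies of last successful fetch */
		bool updating;
	};

	/* call with c->lock held via spin_lock_irqsave(); returns with it held */
	static int refresh_single_flight(struct cached_buf *c, int (*fetch)(void),
					 unsigned long *flags)
	{
		int rc;

		if (c->updating) {
			/* another thread fetches; wait for publication, retry */
			rc = wait_event_interruptible_lock_irq(publish_wait,
							       !c->updating,
							       c->lock);
			return rc ? -EINTR : -EAGAIN;
		}

		c->updating = true;
		spin_unlock_irqrestore(&c->lock, *flags);
		rc = fetch();			/* may sleep, hence unlocked */
		spin_lock_irqsave(&c->lock, *flags);
		c->updating = false;
		c->timestamp = jiffies;
		wake_up_interruptible_all(&publish_wait);
		return rc;
	}

A caller loops on the age test and treats -EAGAIN as "re-check freshness",
exactly like the for-loop in zfcp_diag_update_buffer_limited().
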
diff --git a/drivers/s390/scsi/zfcp_diag.h b/drivers/s390/scsi/zfcp_diag.h
new file mode 100644
index 0000000..3852367
--- /dev/null
+++ b/drivers/s390/scsi/zfcp_diag.h
@@ -0,0 +1,101 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * zfcp device driver
+ *
+ * Definitions for handling diagnostics in the zfcp device driver.
+ *
+ * Copyright IBM Corp. 2018, 2020
+ */
+
+#ifndef ZFCP_DIAG_H
+#define ZFCP_DIAG_H
+
+#include <linux/spinlock.h>
+
+#include "zfcp_fsf.h"
+#include "zfcp_def.h"
+
+/**
+ * struct zfcp_diag_header - general part of a diagnostic buffer.
+ * @access_lock: lock protecting all the data in this buffer.
+ * @updating: flag showing that an update for this buffer is currently running.
+ * @incomplete: flag showing that the data in @buffer is incomplete.
+ * @timestamp: time in jiffies when the data of this buffer was last captured.
+ * @buffer: implementation-dependent data of this buffer
+ * @buffer_size: size of @buffer
+ */
+struct zfcp_diag_header {
+	spinlock_t	access_lock;
+
+	/* Flags */
+	u64		updating	:1;
+	u64		incomplete	:1;
+
+	unsigned long	timestamp;
+
+	void		*buffer;
+	size_t		buffer_size;
+};
+
+/**
+ * struct zfcp_diag_adapter - central storage for all diagnostics concerning an
+ *			      adapter.
+ * @sysfs_established: flag showing that the associated sysfs-group was created
+ *		       during run of zfcp_adapter_enqueue().
+ * @max_age: maximum age of data in diagnostic buffers before they need to be
+ *	     refreshed (in ms).
+ * @port_data: data retrieved using exchange port data.
+ * @port_data.header: header with metadata for the cache in @port_data.data.
+ * @port_data.data: cached QTCB Bottom of command exchange port data.
+ * @config_data: data retrieved using exchange config data.
+ * @config_data.header: header with metadata for the cache in @config_data.data.
+ * @config_data.data: cached QTCB Bottom of command exchange config data.
+ */
+struct zfcp_diag_adapter {
+	u64	sysfs_established	:1;
+
+	unsigned long	max_age;
+
+	struct zfcp_diag_adapter_port_data {
+		struct zfcp_diag_header		header;
+		struct fsf_qtcb_bottom_port	data;
+	} port_data;
+	struct zfcp_diag_adapter_config_data {
+		struct zfcp_diag_header		header;
+		struct fsf_qtcb_bottom_config	data;
+	} config_data;
+};
+
+int zfcp_diag_adapter_setup(struct zfcp_adapter *const adapter);
+void zfcp_diag_adapter_free(struct zfcp_adapter *const adapter);
+
+int zfcp_diag_sysfs_setup(struct zfcp_adapter *const adapter);
+void zfcp_diag_sysfs_destroy(struct zfcp_adapter *const adapter);
+
+void zfcp_diag_update_xdata(struct zfcp_diag_header *const hdr,
+			    const void *const data, const bool incomplete);
+
+/*
+ * Function-Type used in zfcp_diag_update_buffer_limited() for the function
+ * that does the buffer-implementation dependent work.
+ */
+typedef int (*zfcp_diag_update_buffer_func)(struct zfcp_adapter *const adapter);
+
+int zfcp_diag_update_config_data_buffer(struct zfcp_adapter *const adapter);
+int zfcp_diag_update_port_data_buffer(struct zfcp_adapter *const adapter);
+int zfcp_diag_update_buffer_limited(struct zfcp_adapter *const adapter,
+				    struct zfcp_diag_header *const hdr,
+				    zfcp_diag_update_buffer_func buffer_update);
+
+/**
+ * zfcp_diag_support_sfp() - Return %true if the @adapter supports reporting
+ *			     SFP Data.
+ * @adapter: adapter to test the availability of SFP Data reporting for.
+ */
+static inline bool
+zfcp_diag_support_sfp(const struct zfcp_adapter *const adapter)
+{
+	return !!(adapter->adapter_features & FSF_FEATURE_REPORT_SFP_DATA);
+}
+
+#endif /* ZFCP_DIAG_H */
diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c
index 08dc2ef..78d52a4 100644
--- a/drivers/s390/scsi/zfcp_erp.c
+++ b/drivers/s390/scsi/zfcp_erp.c
@@ -4,7 +4,7 @@
  *
  * Error Recovery Procedures (ERP).
  *
- * Copyright IBM Corp. 2002, 2017
+ * Copyright IBM Corp. 2002, 2020
  */
 
 #define KMSG_COMPONENT "zfcp"
@@ -14,6 +14,7 @@
 #include <linux/bug.h>
 #include "zfcp_ext.h"
 #include "zfcp_reqlist.h"
+#include "zfcp_diag.h"
 
 #define ZFCP_MAX_ERPS                   3
 
@@ -67,7 +68,7 @@
 {
 	struct zfcp_adapter *adapter = act->adapter;
 
-	list_move(&act->list, &act->adapter->erp_ready_head);
+	list_move(&act->list, &adapter->erp_ready_head);
 	zfcp_dbf_rec_run("erardy1", act);
 	wake_up(&adapter->erp_ready_wq);
 	zfcp_dbf_rec_run("erardy2", act);
@@ -174,29 +175,29 @@
 			return 0;
 		p_status = atomic_read(&port->status);
 		if (!(p_status & ZFCP_STATUS_COMMON_RUNNING) ||
-		      p_status & ZFCP_STATUS_COMMON_ERP_FAILED)
+		    p_status & ZFCP_STATUS_COMMON_ERP_FAILED)
 			return 0;
 		if (!(p_status & ZFCP_STATUS_COMMON_UNBLOCKED))
 			need = ZFCP_ERP_ACTION_REOPEN_PORT;
-		/* fall through */
+		fallthrough;
 	case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED:
 		p_status = atomic_read(&port->status);
 		if (!(p_status & ZFCP_STATUS_COMMON_OPEN))
 			need = ZFCP_ERP_ACTION_REOPEN_PORT;
-		/* fall through */
+		fallthrough;
 	case ZFCP_ERP_ACTION_REOPEN_PORT:
 		p_status = atomic_read(&port->status);
 		if (p_status & ZFCP_STATUS_COMMON_ERP_INUSE)
 			return 0;
 		a_status = atomic_read(&adapter->status);
 		if (!(a_status & ZFCP_STATUS_COMMON_RUNNING) ||
-		      a_status & ZFCP_STATUS_COMMON_ERP_FAILED)
+		    a_status & ZFCP_STATUS_COMMON_ERP_FAILED)
 			return 0;
 		if (p_status & ZFCP_STATUS_COMMON_NOESC)
 			return need;
 		if (!(a_status & ZFCP_STATUS_COMMON_UNBLOCKED))
 			need = ZFCP_ERP_ACTION_REOPEN_ADAPTER;
-		/* fall through */
+		fallthrough;
 	case ZFCP_ERP_ACTION_REOPEN_ADAPTER:
 		a_status = atomic_read(&adapter->status);
 		if (a_status & ZFCP_STATUS_COMMON_ERP_INUSE)
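
The hunk above also converts `/* fall through */` comments to the `fallthrough;`
pseudo-keyword, which maps to __attribute__((__fallthrough__)) on compilers
that support it, so -Wimplicit-fallthrough can verify the intent instead of
pattern-matching comments. A minimal sketch with made-up cases:

	#include <linux/compiler_attributes.h>

	static int accumulate(int level)
	{
		int score = 0;

		switch (level) {
		case 2:
			score += 10;
			fallthrough;	/* deliberate: level 2 includes level 1 work */
		case 1:
			score += 1;
			break;
		default:
			break;
		}
		return score;
	}
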
@@ -777,10 +778,14 @@
 	if (!(atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_XCONFIG_OK))
 		return ZFCP_ERP_FAILED;
 
+	return ZFCP_ERP_SUCCEEDED;
+}
+
+static void
+zfcp_erp_adapter_strategy_open_ptp_port(struct zfcp_adapter *const adapter)
+{
 	if (fc_host_port_type(adapter->scsi_host) == FC_PORTTYPE_PTP)
 		zfcp_erp_enqueue_ptp_port(adapter);
-
-	return ZFCP_ERP_SUCCEEDED;
 }
 
 static enum zfcp_erp_act_result zfcp_erp_adapter_strategy_open_fsf_xport(
@@ -809,6 +814,59 @@
 	return ZFCP_ERP_SUCCEEDED;
 }
 
+static enum zfcp_erp_act_result
+zfcp_erp_adapter_strategy_alloc_shost(struct zfcp_adapter *const adapter)
+{
+	struct zfcp_diag_adapter_config_data *const config_data =
+		&adapter->diagnostics->config_data;
+	struct zfcp_diag_adapter_port_data *const port_data =
+		&adapter->diagnostics->port_data;
+	unsigned long flags;
+	int rc;
+
+	rc = zfcp_scsi_adapter_register(adapter);
+	if (rc == -EEXIST)
+		return ZFCP_ERP_SUCCEEDED;
+	else if (rc)
+		return ZFCP_ERP_FAILED;
+
+	/*
+	 * We allocated the shost for the first time. Before it was NULL,
+	 * and so we deferred all updates in the xconf- and xport-data
+	 * handlers. We need to make up for that now, and make all the updates
+	 * that would have been done before.
+	 *
+	 * We can be sure that xconf- and xport-data succeeded, because
+	 * otherwise this function is not called. But they might have been
+	 * incomplete.
+	 */
+
+	spin_lock_irqsave(&config_data->header.access_lock, flags);
+	zfcp_scsi_shost_update_config_data(adapter, &config_data->data,
+					   !!config_data->header.incomplete);
+	spin_unlock_irqrestore(&config_data->header.access_lock, flags);
+
+	if (adapter->adapter_features & FSF_FEATURE_HBAAPI_MANAGEMENT) {
+		spin_lock_irqsave(&port_data->header.access_lock, flags);
+		zfcp_scsi_shost_update_port_data(adapter, &port_data->data);
+		spin_unlock_irqrestore(&port_data->header.access_lock, flags);
+	}
+
+	/*
+	 * There is a remote possibility that the 'Exchange Port Data' request
+	 * reports a different connectivity status than 'Exchange Config Data'.
+	 * But any change to the connectivity status of the local optic that
+	 * happens after the initial xconf request is expected to be reported
+	 * to us, as soon as we post Status Read Buffers to the FCP channel
+	 * firmware after this function. So any resulting inconsistency will
+	 * only be momentary.
+	 */
+	if (config_data->header.incomplete)
+		zfcp_fsf_fc_host_link_down(adapter);
+
+	return ZFCP_ERP_SUCCEEDED;
+}
+
 static enum zfcp_erp_act_result zfcp_erp_adapter_strategy_open_fsf(
 	struct zfcp_erp_action *act)
 {
@@ -818,6 +876,12 @@
 	if (zfcp_erp_adapter_strategy_open_fsf_xport(act) == ZFCP_ERP_FAILED)
 		return ZFCP_ERP_FAILED;
 
+	if (zfcp_erp_adapter_strategy_alloc_shost(act->adapter) ==
+	    ZFCP_ERP_FAILED)
+		return ZFCP_ERP_FAILED;
+
+	zfcp_erp_adapter_strategy_open_ptp_port(act->adapter);
+
 	if (mempool_resize(act->adapter->pool.sr_data,
 			   act->adapter->stat_read_buf_num))
 		return ZFCP_ERP_FAILED;
@@ -1095,7 +1159,7 @@
 		if (atomic_read(&zfcp_sdev->status) & ZFCP_STATUS_COMMON_OPEN)
 			return zfcp_erp_lun_strategy_close(erp_action);
 		/* already closed */
-		/* fall through */
+		fallthrough;
 	case ZFCP_ERP_STEP_LUN_CLOSING:
 		if (atomic_read(&zfcp_sdev->status) & ZFCP_STATUS_COMMON_OPEN)
 			return ZFCP_ERP_FAILED;
@@ -1424,7 +1488,7 @@
 		if (act->step != ZFCP_ERP_STEP_UNINITIALIZED)
 			if (result == ZFCP_ERP_SUCCEEDED)
 				zfcp_erp_try_rport_unblock(port);
-		/* fall through */
+		fallthrough;
 	case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED:
 		put_device(&port->dev);
 		break;
@@ -1543,7 +1607,6 @@
 static int zfcp_erp_thread(void *data)
 {
 	struct zfcp_adapter *adapter = (struct zfcp_adapter *) data;
-	struct list_head *next;
 	struct zfcp_erp_action *act;
 	unsigned long flags;
 
@@ -1556,12 +1619,11 @@
 			break;
 
 		write_lock_irqsave(&adapter->erp_lock, flags);
-		next = adapter->erp_ready_head.next;
+		act = list_first_entry_or_null(&adapter->erp_ready_head,
+					       struct zfcp_erp_action, list);
 		write_unlock_irqrestore(&adapter->erp_lock, flags);
 
-		if (next != &adapter->erp_ready_head) {
-			act = list_entry(next, struct zfcp_erp_action, list);
-
+		if (act) {
 			/* there is more to come after dismission, no notify */
 			if (zfcp_erp_strategy(act) != ZFCP_ERP_DISMISSED)
 				zfcp_erp_wakeup(adapter);
@@ -1645,6 +1707,13 @@
 		atomic_or(common_mask, &port->status);
 	read_unlock_irqrestore(&adapter->port_list_lock, flags);
 
+	/*
+	 * if `scsi_host` is missing, xconfig/xport data has never completed
+	 * yet, so we can't access it, but there are also no SDEVs yet
+	 */
+	if (adapter->scsi_host == NULL)
+		return;
+
 	spin_lock_irqsave(adapter->scsi_host->host_lock, flags);
 	__shost_for_each_device(sdev, adapter->scsi_host)
 		atomic_or(common_mask, &sdev_to_zfcp(sdev)->status);
@@ -1682,6 +1751,13 @@
 	}
 	read_unlock_irqrestore(&adapter->port_list_lock, flags);
 
+	/*
+	 * if `scsi_host` is missing, xconfig/xport data has never completed
+	 * yet, so we can't access it, but there are also no SDEVs yet
+	 */
+	if (adapter->scsi_host == NULL)
+		return;
+
 	spin_lock_irqsave(adapter->scsi_host->host_lock, flags);
 	__shost_for_each_device(sdev, adapter->scsi_host) {
 		atomic_andnot(common_mask, &sdev_to_zfcp(sdev)->status);
diff --git a/drivers/s390/scsi/zfcp_ext.h b/drivers/s390/scsi/zfcp_ext.h
index 31e8a72..3ef5d74 100644
--- a/drivers/s390/scsi/zfcp_ext.h
+++ b/drivers/s390/scsi/zfcp_ext.h
@@ -4,7 +4,7 @@
  *
  * External function declarations.
  *
- * Copyright IBM Corp. 2002, 2018
+ * Copyright IBM Corp. 2002, 2020
  */
 
 #ifndef ZFCP_EXT_H
@@ -44,6 +44,9 @@
 extern void zfcp_dbf_rec_run_wka(char *, struct zfcp_fc_wka_port *, u64);
 extern void zfcp_dbf_hba_fsf_uss(char *, struct zfcp_fsf_req *);
 extern void zfcp_dbf_hba_fsf_res(char *, int, struct zfcp_fsf_req *);
+extern void zfcp_dbf_hba_fsf_fces(char *tag, const struct zfcp_fsf_req *req,
+				  u64 wwpn, u32 fc_security_old,
+				  u32 fc_security_new);
 extern void zfcp_dbf_hba_bit_err(char *, struct zfcp_fsf_req *);
 extern void zfcp_dbf_hba_def_err(struct zfcp_adapter *, u64, u16, void **);
 extern void zfcp_dbf_hba_basic(char *, struct zfcp_adapter *);
@@ -122,6 +125,7 @@
 extern int zfcp_fsf_exchange_port_data(struct zfcp_erp_action *);
 extern int zfcp_fsf_exchange_port_data_sync(struct zfcp_qdio *,
 					    struct fsf_qtcb_bottom_port *);
+extern u32 zfcp_fsf_convert_portspeed(u32 fsf_speed);
 extern void zfcp_fsf_req_dismiss_all(struct zfcp_adapter *);
 extern int zfcp_fsf_status_read(struct zfcp_qdio *);
 extern int zfcp_status_read_refill(struct zfcp_adapter *adapter);
@@ -131,10 +135,18 @@
 			     struct zfcp_fsf_ct_els *, unsigned int);
 extern int zfcp_fsf_fcp_cmnd(struct scsi_cmnd *);
 extern void zfcp_fsf_req_free(struct zfcp_fsf_req *);
+extern void zfcp_fsf_fc_host_link_down(struct zfcp_adapter *adapter);
 extern struct zfcp_fsf_req *zfcp_fsf_fcp_task_mgmt(struct scsi_device *sdev,
 						   u8 tm_flags);
 extern struct zfcp_fsf_req *zfcp_fsf_abort_fcp_cmnd(struct scsi_cmnd *);
 extern void zfcp_fsf_reqid_check(struct zfcp_qdio *, int);
+enum zfcp_fsf_print_fmt {
+	ZFCP_FSF_PRINT_FMT_LIST,
+	ZFCP_FSF_PRINT_FMT_SINGLEITEM,
+};
+extern ssize_t zfcp_fsf_scnprint_fc_security(char *buf, size_t size,
+					     u32 fc_security,
+					     enum zfcp_fsf_print_fmt fmt);
 
 /* zfcp_qdio.c */
 extern int zfcp_qdio_setup(struct zfcp_adapter *);
@@ -143,6 +155,8 @@
 extern int zfcp_qdio_send(struct zfcp_qdio *, struct zfcp_qdio_req *);
 extern int zfcp_qdio_sbals_from_sg(struct zfcp_qdio *, struct zfcp_qdio_req *,
 				   struct scatterlist *);
+extern void zfcp_qdio_shost_update(struct zfcp_adapter *const adapter,
+				   const struct zfcp_qdio *const qdio);
 extern int zfcp_qdio_open(struct zfcp_qdio *);
 extern void zfcp_qdio_close(struct zfcp_qdio *);
 extern void zfcp_qdio_siosl(struct zfcp_adapter *);
@@ -159,6 +173,13 @@
 extern void zfcp_scsi_schedule_rports_block(struct zfcp_adapter *);
 extern void zfcp_scsi_set_prot(struct zfcp_adapter *);
 extern void zfcp_scsi_dif_sense_error(struct scsi_cmnd *, int);
+extern void zfcp_scsi_shost_update_config_data(
+	struct zfcp_adapter *const adapter,
+	const struct fsf_qtcb_bottom_config *const bottom,
+	const bool bottom_incomplete);
+extern void zfcp_scsi_shost_update_port_data(
+	struct zfcp_adapter *const adapter,
+	const struct fsf_qtcb_bottom_port *const bottom);
 
 /* zfcp_sysfs.c */
 extern const struct attribute_group *zfcp_unit_attr_groups[];
@@ -167,6 +188,7 @@
 extern struct mutex zfcp_sysfs_port_units_mutex;
 extern struct device_attribute *zfcp_sysfs_sdev_attrs[];
 extern struct device_attribute *zfcp_sysfs_shost_attrs[];
+extern const struct attribute_group zfcp_sysfs_diag_attr_group;
 bool zfcp_sysfs_port_is_removing(const struct zfcp_port *const port);
 
 /* zfcp_unit.c */
diff --git a/drivers/s390/scsi/zfcp_fc.c b/drivers/s390/scsi/zfcp_fc.c
index b018b61..511bf8e 100644
--- a/drivers/s390/scsi/zfcp_fc.c
+++ b/drivers/s390/scsi/zfcp_fc.c
@@ -48,7 +48,7 @@
 {
 	if (!port_scan_backoff)
 		return 0;
-	return get_random_int() % port_scan_backoff;
+	return prandom_u32_max(port_scan_backoff);
 }
 
 static void zfcp_fc_port_scan_time(struct zfcp_adapter *adapter)
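
prandom_u32_max(n) returns a pseudo-random value in [0, n); in v5.10 it is
implemented as a 32x32->64 multiply and shift, so it avoids both the division
behind a `% n` reduction and the open-coded modulo idiom. A sketch mirroring
the backoff helper above (the wrapper name is made up):

	#include <linux/prandom.h>

	/* hypothetical: jittered delay in [0, backoff) for a retry loop */
	static u32 backoff_jitter(u32 backoff)
	{
		if (!backoff)
			return 0;
		return prandom_u32_max(backoff);
	}
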
@@ -521,6 +521,8 @@
 		goto out;
 	}
 
+	/* re-init to undo drop from zfcp_fc_adisc() */
+	port->d_id = ntoh24(adisc_resp->adisc_port_id);
 	/* port is good, unblock rport without going through erp */
 	zfcp_scsi_schedule_rport_register(port);
  out:
@@ -534,6 +536,7 @@
 	struct zfcp_fc_req *fc_req;
 	struct zfcp_adapter *adapter = port->adapter;
 	struct Scsi_Host *shost = adapter->scsi_host;
+	u32 d_id;
 	int ret;
 
 	fc_req = kmem_cache_zalloc(zfcp_fc_req_cache, GFP_ATOMIC);
@@ -558,7 +561,15 @@
 	fc_req->u.adisc.req.adisc_cmd = ELS_ADISC;
 	hton24(fc_req->u.adisc.req.adisc_port_id, fc_host_port_id(shost));
 
-	ret = zfcp_fsf_send_els(adapter, port->d_id, &fc_req->ct_els,
+	d_id = port->d_id; /* remember as destination for send els below */
+	/*
+	 * Force fresh GID_PN lookup on next port recovery.
+	 * Must happen after request setup and before sending request,
+	 * to prevent race with port->d_id re-init in zfcp_fc_adisc_handler().
+	 */
+	port->d_id = 0;
+
+	ret = zfcp_fsf_send_els(adapter, d_id, &fc_req->ct_els,
 				ZFCP_FC_CTELS_TMO);
 	if (ret)
 		kmem_cache_free(zfcp_fc_req_cache, fc_req);
diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c
index 5c652de..6cb963a 100644
--- a/drivers/s390/scsi/zfcp_fsf.c
+++ b/drivers/s390/scsi/zfcp_fsf.c
@@ -4,13 +4,14 @@
  *
  * Implementation of FSF commands.
  *
- * Copyright IBM Corp. 2002, 2018
+ * Copyright IBM Corp. 2002, 2020
  */
 
 #define KMSG_COMPONENT "zfcp"
 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
 
 #include <linux/blktrace_api.h>
+#include <linux/jiffies.h>
 #include <linux/types.h>
 #include <linux/slab.h>
 #include <scsi/fc/fc_els.h>
@@ -19,6 +20,7 @@
 #include "zfcp_dbf.h"
 #include "zfcp_qdio.h"
 #include "zfcp_reqlist.h"
+#include "zfcp_diag.h"
 
 /* timeout for FSF requests sent during scsi_eh: abort or FCP TMF */
 #define ZFCP_FSF_SCSI_ER_TIMEOUT (10*HZ)
@@ -118,6 +120,27 @@
 	read_unlock_irqrestore(&adapter->port_list_lock, flags);
 }
 
+void zfcp_fsf_fc_host_link_down(struct zfcp_adapter *adapter)
+{
+	struct Scsi_Host *shost = adapter->scsi_host;
+
+	adapter->hydra_version = 0;
+	adapter->peer_wwpn = 0;
+	adapter->peer_wwnn = 0;
+	adapter->peer_d_id = 0;
+
+	/* if there is no shost yet, we have nothing to zero-out */
+	if (shost == NULL)
+		return;
+
+	fc_host_port_id(shost) = 0;
+	fc_host_fabric_name(shost) = 0;
+	fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN;
+	fc_host_port_type(shost) = FC_PORTTYPE_UNKNOWN;
+	snprintf(fc_host_model(shost), FC_SYMBOLIC_NAME_SIZE, "0x%04x", 0);
+	memset(fc_host_active_fc4s(shost), 0, FC_FC4_LIST_SIZE);
+}
+
 static void zfcp_fsf_link_down_info_eval(struct zfcp_fsf_req *req,
 					 struct fsf_link_down_info *link_down)
 {
@@ -130,6 +153,8 @@
 
 	zfcp_scsi_schedule_rports_block(adapter);
 
+	zfcp_fsf_fc_host_link_down(adapter);
+
 	if (!link_down)
 		goto out;
 
@@ -401,9 +426,14 @@
  * or it has been dismissed due to a queue shutdown, this function
  * is called to process the completion status and trigger further
  * events related to the FSF request.
+ * Caller must ensure that the request has been removed from
+ * adapter->req_list, to protect against concurrent modification
+ * by zfcp_erp_strategy_check_fsfreq().
  */
 static void zfcp_fsf_req_complete(struct zfcp_fsf_req *req)
 {
+	struct zfcp_erp_action *erp_action;
+
 	if (unlikely(zfcp_fsf_req_is_status_read_buffer(req))) {
 		zfcp_fsf_status_read_handler(req);
 		return;
@@ -414,8 +444,9 @@
 	zfcp_fsf_fsfstatus_eval(req);
 	req->handler(req);
 
-	if (req->erp_action)
-		zfcp_erp_notify(req->erp_action, 0);
+	erp_action = req->erp_action;
+	if (erp_action)
+		zfcp_erp_notify(erp_action, 0);
 
 	if (likely(req->status & ZFCP_STATUS_FSFREQ_CLEANUP))
 		zfcp_fsf_req_free(req);
@@ -458,7 +489,7 @@
 #define ZFCP_FSF_PORTSPEED_128GBIT	(1 <<  8)
 #define ZFCP_FSF_PORTSPEED_NOT_NEGOTIATED (1 << 15)
 
-static u32 zfcp_fsf_convert_portspeed(u32 fsf_speed)
+u32 zfcp_fsf_convert_portspeed(u32 fsf_speed)
 {
 	u32 fdmi_speed = 0;
 	if (fsf_speed & ZFCP_FSF_PORTSPEED_1GBIT)
@@ -488,40 +519,24 @@
 {
 	struct fsf_qtcb_bottom_config *bottom = &req->qtcb->bottom.config;
 	struct zfcp_adapter *adapter = req->adapter;
-	struct Scsi_Host *shost = adapter->scsi_host;
-	struct fc_els_flogi *nsp, *plogi;
+	struct fc_els_flogi *plogi;
 
 	/* adjust pointers for missing command code */
-	nsp = (struct fc_els_flogi *) ((u8 *)&bottom->nport_serv_param
-					- sizeof(u32));
 	plogi = (struct fc_els_flogi *) ((u8 *)&bottom->plogi_payload
 					- sizeof(u32));
 
 	if (req->data)
 		memcpy(req->data, bottom, sizeof(*bottom));
 
-	fc_host_port_name(shost) = be64_to_cpu(nsp->fl_wwpn);
-	fc_host_node_name(shost) = be64_to_cpu(nsp->fl_wwnn);
-	fc_host_supported_classes(shost) = FC_COS_CLASS2 | FC_COS_CLASS3;
-
 	adapter->timer_ticks = bottom->timer_interval & ZFCP_FSF_TIMER_INT_MASK;
 	adapter->stat_read_buf_num = max(bottom->status_read_buf_num,
 					 (u16)FSF_STATUS_READS_RECOM);
 
-	if (fc_host_permanent_port_name(shost) == -1)
-		fc_host_permanent_port_name(shost) = fc_host_port_name(shost);
-
-	zfcp_scsi_set_prot(adapter);
-
 	/* no error return above here, otherwise must fix call chains */
 	/* do not evaluate invalid fields */
 	if (req->qtcb->header.fsf_status == FSF_EXCHANGE_CONFIG_DATA_INCOMPLETE)
 		return 0;
 
-	fc_host_port_id(shost) = ntoh24(bottom->s_id);
-	fc_host_speed(shost) =
-		zfcp_fsf_convert_portspeed(bottom->fc_link_speed);
-
 	adapter->hydra_version = bottom->adapter_type;
 
 	switch (bottom->fc_topology) {
@@ -529,17 +544,10 @@
 		adapter->peer_d_id = ntoh24(bottom->peer_d_id);
 		adapter->peer_wwpn = be64_to_cpu(plogi->fl_wwpn);
 		adapter->peer_wwnn = be64_to_cpu(plogi->fl_wwnn);
-		fc_host_port_type(shost) = FC_PORTTYPE_PTP;
 		break;
 	case FSF_TOPO_FABRIC:
-		if (bottom->connection_features & FSF_FEATURE_NPIV_MODE)
-			fc_host_port_type(shost) = FC_PORTTYPE_NPIV;
-		else
-			fc_host_port_type(shost) = FC_PORTTYPE_NPORT;
 		break;
 	case FSF_TOPO_AL:
-		fc_host_port_type(shost) = FC_PORTTYPE_NLPORT;
-		/* fall through */
 	default:
 		dev_err(&adapter->ccw_device->dev,
 			"Unknown or unsupported arbitrated loop "
@@ -554,9 +562,10 @@
 static void zfcp_fsf_exchange_config_data_handler(struct zfcp_fsf_req *req)
 {
 	struct zfcp_adapter *adapter = req->adapter;
+	struct zfcp_diag_header *const diag_hdr =
+		&adapter->diagnostics->config_data.header;
 	struct fsf_qtcb *qtcb = req->qtcb;
 	struct fsf_qtcb_bottom_config *bottom = &qtcb->bottom.config;
-	struct Scsi_Host *shost = adapter->scsi_host;
 
 	if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
 		return;
@@ -570,6 +579,13 @@
 
 	switch (qtcb->header.fsf_status) {
 	case FSF_GOOD:
+		/*
+		 * usually we wait with an update till the cache is too old,
+		 * but because we have the data available, update it anyway
+		 */
+		zfcp_diag_update_xdata(diag_hdr, bottom, false);
+
+		zfcp_scsi_shost_update_config_data(adapter, bottom, false);
 		if (zfcp_fsf_exchange_config_evaluate(req))
 			return;
 
@@ -585,12 +601,8 @@
 				&adapter->status);
 		break;
 	case FSF_EXCHANGE_CONFIG_DATA_INCOMPLETE:
-		fc_host_node_name(shost) = 0;
-		fc_host_port_name(shost) = 0;
-		fc_host_port_id(shost) = 0;
-		fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN;
-		fc_host_port_type(shost) = FC_PORTTYPE_UNKNOWN;
-		adapter->hydra_version = 0;
+		zfcp_diag_update_xdata(diag_hdr, bottom, true);
+		req->status |= ZFCP_STATUS_FSFREQ_XDATAINCOMPLETE;
 
 		/* avoids adapter shutdown to be able to recognize
 		 * events such as LINK UP */
@@ -598,6 +610,8 @@
 				&adapter->status);
 		zfcp_fsf_link_down_info_eval(req,
 			&qtcb->header.fsf_status_qual.link_down_info);
+
+		zfcp_scsi_shost_update_config_data(adapter, bottom, true);
 		if (zfcp_fsf_exchange_config_evaluate(req))
 			return;
 		break;
@@ -606,13 +620,8 @@
 		return;
 	}
 
-	if (adapter->adapter_features & FSF_FEATURE_HBAAPI_MANAGEMENT) {
+	if (adapter->adapter_features & FSF_FEATURE_HBAAPI_MANAGEMENT)
 		adapter->hardware_version = bottom->hardware_version;
-		memcpy(fc_host_serial_number(shost), bottom->serial_number,
-		       min(FC_SERIAL_NUMBER_SIZE, 17));
-		EBCASC(fc_host_serial_number(shost),
-		       min(FC_SERIAL_NUMBER_SIZE, 17));
-	}
 
 	if (FSF_QTCB_CURRENT_VERSION < bottom->low_qtcb_version) {
 		dev_err(&adapter->ccw_device->dev,
@@ -629,43 +638,145 @@
 	}
 }
 
+/*
+ * Mapping of FC Endpoint Security flag masks to mnemonics
+ *
+ * NOTE: Update macro ZFCP_FSF_MAX_FC_SECURITY_MNEMONIC_LENGTH when making any
+ *       changes.
+ */
+static const struct {
+	u32	mask;
+	char	*name;
+} zfcp_fsf_fc_security_mnemonics[] = {
+	{ FSF_FC_SECURITY_AUTH,		"Authentication" },
+	{ FSF_FC_SECURITY_ENC_FCSP2 |
+	  FSF_FC_SECURITY_ENC_ERAS,	"Encryption" },
+};
+
+/* maximum strlen(zfcp_fsf_fc_security_mnemonics[...].name) + 1 */
+#define ZFCP_FSF_MAX_FC_SECURITY_MNEMONIC_LENGTH 15
+
+/**
+ * zfcp_fsf_scnprint_fc_security() - translate FC Endpoint Security flags into
+ *                                   mnemonics and place in a buffer
+ * @buf        : the buffer to place the translated FC Endpoint Security flag(s)
+ *               into
+ * @size       : the size of the buffer, including the trailing null space
+ * @fc_security: one or more FC Endpoint Security flags, or zero
+ * @fmt        : specifies whether a list or a single item is to be put into the
+ *               buffer
+ *
+ * The Fibre Channel (FC) Endpoint Security flags are translated into mnemonics.
+ * If the FC Endpoint Security flags are zero "none" is placed into the buffer.
+ *
+ * With ZFCP_FSF_PRINT_FMT_LIST the mnemonics are placed as a list separated by
+ * a comma followed by a space into the buffer. If one or more FC Endpoint
+ * Security flags cannot be translated into a mnemonic, as they are undefined
+ * in zfcp_fsf_fc_security_mnemonics, their bitwise ORed value in hexadecimal
+ * representation is placed into the buffer.
+ *
+ * With ZFCP_FSF_PRINT_FMT_SINGLEITEM only one single mnemonic is placed into
+ * the buffer. If the FC Endpoint Security flag cannot be translated, as it is
+ * undefined in zfcp_fsf_fc_security_mnemonics, its value in hexadecimal
+ * representation is placed into the buffer. If more than one FC Endpoint
+ * Security flag was specified, their value in hexadecimal representation is
+ * placed into the buffer. The macro ZFCP_FSF_MAX_FC_SECURITY_MNEMONIC_LENGTH
+ * can be used to define a buffer that is large enough to hold one mnemonic.
+ *
+ * Return: The number of characters written into buf not including the trailing
+ *         '\0'. If @size is 0, the function returns 0.
+ */
+ssize_t zfcp_fsf_scnprint_fc_security(char *buf, size_t size, u32 fc_security,
+				      enum zfcp_fsf_print_fmt fmt)
+{
+	const char *prefix = "";
+	ssize_t len = 0;
+	int i;
+
+	if (fc_security == 0)
+		return scnprintf(buf, size, "none");
+	if (fmt == ZFCP_FSF_PRINT_FMT_SINGLEITEM && hweight32(fc_security) != 1)
+		return scnprintf(buf, size, "0x%08x", fc_security);
+
+	for (i = 0; i < ARRAY_SIZE(zfcp_fsf_fc_security_mnemonics); i++) {
+		if (!(fc_security & zfcp_fsf_fc_security_mnemonics[i].mask))
+			continue;
+
+		len += scnprintf(buf + len, size - len, "%s%s", prefix,
+				 zfcp_fsf_fc_security_mnemonics[i].name);
+		prefix = ", ";
+		fc_security &= ~zfcp_fsf_fc_security_mnemonics[i].mask;
+	}
+
+	if (fc_security != 0)
+		len += scnprintf(buf + len, size - len, "%s0x%08x",
+				 prefix, fc_security);
+
+	return len;
+}
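
For orientation, a short sketch (not part of the patch) of how the two formats behave, using the FSF_FC_SECURITY_* flags and print formats introduced elsewhere in this diff:

	/* Illustrative only: the two output formats of the helper above. */
	char list_buf[64];
	char item_buf[ZFCP_FSF_MAX_FC_SECURITY_MNEMONIC_LENGTH];

	/* list format: list_buf becomes "Authentication, Encryption" */
	zfcp_fsf_scnprint_fc_security(list_buf, sizeof(list_buf),
				      FSF_FC_SECURITY_AUTH |
				      FSF_FC_SECURITY_ENC_FCSP2,
				      ZFCP_FSF_PRINT_FMT_LIST);

	/* single-item format: item_buf becomes "Authentication"; with more
	 * than one flag set it would hold the hex value instead */
	zfcp_fsf_scnprint_fc_security(item_buf, sizeof(item_buf),
				      FSF_FC_SECURITY_AUTH,
				      ZFCP_FSF_PRINT_FMT_SINGLEITEM);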
+
+static void zfcp_fsf_dbf_adapter_fc_security(struct zfcp_adapter *adapter,
+					     struct zfcp_fsf_req *req)
+{
+	if (adapter->fc_security_algorithms ==
+	    adapter->fc_security_algorithms_old) {
+		/* no change, no trace */
+		return;
+	}
+
+	zfcp_dbf_hba_fsf_fces("fsfcesa", req, ZFCP_DBF_INVALID_WWPN,
+			      adapter->fc_security_algorithms_old,
+			      adapter->fc_security_algorithms);
+
+	adapter->fc_security_algorithms_old = adapter->fc_security_algorithms;
+}
+
 static void zfcp_fsf_exchange_port_evaluate(struct zfcp_fsf_req *req)
 {
 	struct zfcp_adapter *adapter = req->adapter;
 	struct fsf_qtcb_bottom_port *bottom = &req->qtcb->bottom.port;
-	struct Scsi_Host *shost = adapter->scsi_host;
 
 	if (req->data)
 		memcpy(req->data, bottom, sizeof(*bottom));
 
-	if (adapter->connection_features & FSF_FEATURE_NPIV_MODE) {
-		fc_host_permanent_port_name(shost) = bottom->wwpn;
-	} else
-		fc_host_permanent_port_name(shost) = fc_host_port_name(shost);
-	fc_host_maxframe_size(shost) = bottom->maximum_frame_size;
-	fc_host_supported_speeds(shost) =
-		zfcp_fsf_convert_portspeed(bottom->supported_speed);
-	memcpy(fc_host_supported_fc4s(shost), bottom->supported_fc4_types,
-	       FC_FC4_LIST_SIZE);
-	memcpy(fc_host_active_fc4s(shost), bottom->active_fc4_types,
-	       FC_FC4_LIST_SIZE);
+	if (adapter->adapter_features & FSF_FEATURE_FC_SECURITY)
+		adapter->fc_security_algorithms =
+			bottom->fc_security_algorithms;
+	else
+		adapter->fc_security_algorithms = 0;
+	zfcp_fsf_dbf_adapter_fc_security(adapter, req);
 }
 
 static void zfcp_fsf_exchange_port_data_handler(struct zfcp_fsf_req *req)
 {
+	struct zfcp_diag_header *const diag_hdr =
+		&req->adapter->diagnostics->port_data.header;
 	struct fsf_qtcb *qtcb = req->qtcb;
+	struct fsf_qtcb_bottom_port *bottom = &qtcb->bottom.port;
 
 	if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
 		return;
 
 	switch (qtcb->header.fsf_status) {
 	case FSF_GOOD:
+		/*
+		 * Usually we wait to update until the cached data is too old,
+		 * but since the data is available anyway, update the cache now.
+		 */
+		zfcp_diag_update_xdata(diag_hdr, bottom, false);
+
+		zfcp_scsi_shost_update_port_data(req->adapter, bottom);
 		zfcp_fsf_exchange_port_evaluate(req);
 		break;
 	case FSF_EXCHANGE_CONFIG_DATA_INCOMPLETE:
-		zfcp_fsf_exchange_port_evaluate(req);
+		zfcp_diag_update_xdata(diag_hdr, bottom, true);
+		req->status |= ZFCP_STATUS_FSFREQ_XDATAINCOMPLETE;
+
 		zfcp_fsf_link_down_info_eval(req,
 			&qtcb->header.fsf_status_qual.link_down_info);
+
+		zfcp_scsi_shost_update_port_data(req->adapter, bottom);
+		zfcp_fsf_exchange_port_evaluate(req);
 		break;
 	}
 }
@@ -889,7 +1000,7 @@
 		switch (fsq->word[0]) {
 		case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
 			zfcp_fc_test_link(zfcp_sdev->port);
-			/* fall through */
+			fallthrough;
 		case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
 			req->status |= ZFCP_STATUS_FSFREQ_ERROR;
 			break;
@@ -984,7 +1095,7 @@
 		break;
 	case FSF_PORT_HANDLE_NOT_VALID:
 		zfcp_erp_adapter_reopen(adapter, 0, "fsscth1");
-		/* fall through */
+		fallthrough;
 	case FSF_GENERIC_COMMAND_REJECTED:
 	case FSF_PAYLOAD_SIZE_MISMATCH:
 	case FSF_REQUEST_SIZE_TOO_LARGE:
@@ -1170,7 +1281,7 @@
 		break;
 	case FSF_SBAL_MISMATCH:
 		/* should never occur, avoided in zfcp_fsf_send_els */
-		/* fall through */
+		fallthrough;
 	default:
 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
 		break;
@@ -1261,7 +1372,9 @@
 
 	req->qtcb->bottom.config.feature_selection =
 			FSF_FEATURE_NOTIFICATION_LOST |
-			FSF_FEATURE_UPDATE_ALERT;
+			FSF_FEATURE_UPDATE_ALERT |
+			FSF_FEATURE_REQUEST_SFP_DATA |
+			FSF_FEATURE_FC_SECURITY;
 	req->erp_action = erp_action;
 	req->handler = zfcp_fsf_exchange_config_data_handler;
 	erp_action->fsf_req_id = req->req_id;
@@ -1278,6 +1391,19 @@
 	return retval;
 }
 
+
+/**
+ * zfcp_fsf_exchange_config_data_sync() - Request information about FCP channel.
+ * @qdio: pointer to the QDIO-Queue to use for sending the command.
+ * @data: pointer to the QTCB-Bottom for storing the result of the command,
+ *	  might be %NULL.
+ *
+ * Returns:
+ * * 0		- Exchange Config Data was successful, @data is complete
+ * * -EIO	- Exchange Config Data was not successful, @data is invalid
+ * * -EAGAIN	- @data contains incomplete data
+ * * -ENOMEM	- Some memory allocation failed along the way
+ */
 int zfcp_fsf_exchange_config_data_sync(struct zfcp_qdio *qdio,
 				       struct fsf_qtcb_bottom_config *data)
 {
@@ -1301,7 +1427,9 @@
 
 	req->qtcb->bottom.config.feature_selection =
 			FSF_FEATURE_NOTIFICATION_LOST |
-			FSF_FEATURE_UPDATE_ALERT;
+			FSF_FEATURE_UPDATE_ALERT |
+			FSF_FEATURE_REQUEST_SFP_DATA |
+			FSF_FEATURE_FC_SECURITY;
 
 	if (data)
 		req->data = data;
@@ -1309,9 +1437,16 @@
 	zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT);
 	retval = zfcp_fsf_req_send(req);
 	spin_unlock_irq(&qdio->req_q_lock);
+
 	if (!retval) {
 		/* NOTE: ONLY TOUCH SYNC req AGAIN ON req->completion. */
 		wait_for_completion(&req->completion);
+
+		if (req->status &
+		    (ZFCP_STATUS_FSFREQ_ERROR | ZFCP_STATUS_FSFREQ_DISMISSED))
+			retval = -EIO;
+		else if (req->status & ZFCP_STATUS_FSFREQ_XDATAINCOMPLETE)
+			retval = -EAGAIN;
 	}
 
 	zfcp_fsf_req_free(req);
@@ -1369,10 +1504,17 @@
 }
 
 /**
- * zfcp_fsf_exchange_port_data_sync - request information about local port
- * @qdio: pointer to struct zfcp_qdio
- * @data: pointer to struct fsf_qtcb_bottom_port
- * Returns: 0 on success, error otherwise
+ * zfcp_fsf_exchange_port_data_sync() - Request information about local port.
+ * @qdio: pointer to the QDIO-Queue to use for sending the command.
+ * @data: pointer to the QTCB-Bottom for storing the result of the command,
+ *	  might be %NULL.
+ *
+ * Returns:
+ * * 0		- Exchange Port Data was successful, @data is complete
+ * * -EIO	- Exchange Port Data was not successful, @data is invalid
+ * * -EAGAIN	- @data contains incomplete data
+ * * -ENOMEM	- Some memory allocation failed along the way
+ * * -EOPNOTSUPP	- This operation is not supported
  */
 int zfcp_fsf_exchange_port_data_sync(struct zfcp_qdio *qdio,
 				     struct fsf_qtcb_bottom_port *data)
@@ -1408,10 +1550,15 @@
 	if (!retval) {
 		/* NOTE: ONLY TOUCH SYNC req AGAIN ON req->completion. */
 		wait_for_completion(&req->completion);
+
+		if (req->status &
+		    (ZFCP_STATUS_FSFREQ_ERROR | ZFCP_STATUS_FSFREQ_DISMISSED))
+			retval = -EIO;
+		else if (req->status & ZFCP_STATUS_FSFREQ_XDATAINCOMPLETE)
+			retval = -EAGAIN;
 	}
 
 	zfcp_fsf_req_free(req);
-
 	return retval;
 
 out_unlock:
@@ -1419,10 +1566,117 @@
 	return retval;
 }
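
Both sync wrappers now share this return-code contract; callers that can use partial data are expected to accept -EAGAIN alongside 0, as the sysfs and statistics paths later in this patch do. A condensed sketch of the calling pattern (consume_port_data() is hypothetical, adapter assumed in scope):

	struct fsf_qtcb_bottom_port *data = kzalloc(sizeof(*data), GFP_KERNEL);
	int ret;

	if (!data)
		return -ENOMEM;
	ret = zfcp_fsf_exchange_port_data_sync(adapter->qdio, data);
	if (ret == 0 || ret == -EAGAIN) {
		/* usable; -EAGAIN merely flags the data as incomplete */
		consume_port_data(data);
	}
	/* any other ret (-EIO, -ENOMEM, -EOPNOTSUPP): data is invalid */
	kfree(data);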
 
+static void zfcp_fsf_log_port_fc_security(struct zfcp_port *port,
+					  struct zfcp_fsf_req *req)
+{
+	char mnemonic_old[ZFCP_FSF_MAX_FC_SECURITY_MNEMONIC_LENGTH];
+	char mnemonic_new[ZFCP_FSF_MAX_FC_SECURITY_MNEMONIC_LENGTH];
+
+	if (port->connection_info == port->connection_info_old) {
+		/* no change, no log nor trace */
+		return;
+	}
+
+	zfcp_dbf_hba_fsf_fces("fsfcesp", req, port->wwpn,
+			      port->connection_info_old,
+			      port->connection_info);
+
+	zfcp_fsf_scnprint_fc_security(mnemonic_old, sizeof(mnemonic_old),
+				      port->connection_info_old,
+				      ZFCP_FSF_PRINT_FMT_SINGLEITEM);
+	zfcp_fsf_scnprint_fc_security(mnemonic_new, sizeof(mnemonic_new),
+				      port->connection_info,
+				      ZFCP_FSF_PRINT_FMT_SINGLEITEM);
+
+	if (strncmp(mnemonic_old, mnemonic_new,
+		    ZFCP_FSF_MAX_FC_SECURITY_MNEMONIC_LENGTH) == 0) {
+		/* no change in string representation, no log */
+		goto out;
+	}
+
+	if (port->connection_info_old == 0) {
+		/* activation */
+		dev_info(&port->adapter->ccw_device->dev,
+			 "FC Endpoint Security of connection to remote port 0x%016llx enabled: %s\n",
+			 port->wwpn, mnemonic_new);
+	} else if (port->connection_info == 0) {
+		/* deactivation */
+		dev_warn(&port->adapter->ccw_device->dev,
+			 "FC Endpoint Security of connection to remote port 0x%016llx disabled: was %s\n",
+			 port->wwpn, mnemonic_old);
+	} else {
+		/* change */
+		dev_warn(&port->adapter->ccw_device->dev,
+			 "FC Endpoint Security of connection to remote port 0x%016llx changed: from %s to %s\n",
+			 port->wwpn, mnemonic_old, mnemonic_new);
+	}
+
+out:
+	port->connection_info_old = port->connection_info;
+}
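
Note the two-stage change detection above: every change of connection_info is traced, but a kernel message is only emitted when the rendered mnemonic changes as well. A transition between the two encryption flags, for instance, is traced yet logs nothing, because both render identically (sketch, not part of the patch):

	char old_m[ZFCP_FSF_MAX_FC_SECURITY_MNEMONIC_LENGTH];
	char new_m[ZFCP_FSF_MAX_FC_SECURITY_MNEMONIC_LENGTH];

	zfcp_fsf_scnprint_fc_security(old_m, sizeof(old_m),
				      FSF_FC_SECURITY_ENC_FCSP2,
				      ZFCP_FSF_PRINT_FMT_SINGLEITEM);
	zfcp_fsf_scnprint_fc_security(new_m, sizeof(new_m),
				      FSF_FC_SECURITY_ENC_ERAS,
				      ZFCP_FSF_PRINT_FMT_SINGLEITEM);
	/* both buffers now hold "Encryption": traced, but not logged */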
+
+static void zfcp_fsf_log_security_error(const struct device *dev, u32 fsf_sqw0,
+					u64 wwpn)
+{
+	switch (fsf_sqw0) {
+
+	/*
+	 * Open Port command error codes
+	 */
+
+	case FSF_SQ_SECURITY_REQUIRED:
+		dev_warn_ratelimited(dev,
+				     "FC Endpoint Security error: FC security is required but not supported or configured on remote port 0x%016llx\n",
+				     wwpn);
+		break;
+	case FSF_SQ_SECURITY_TIMEOUT:
+		dev_warn_ratelimited(dev,
+				     "FC Endpoint Security error: a timeout prevented opening remote port 0x%016llx\n",
+				     wwpn);
+		break;
+	case FSF_SQ_SECURITY_KM_UNAVAILABLE:
+		dev_warn_ratelimited(dev,
+				     "FC Endpoint Security error: opening remote port 0x%016llx failed because local and external key manager cannot communicate\n",
+				     wwpn);
+		break;
+	case FSF_SQ_SECURITY_RKM_UNAVAILABLE:
+		dev_warn_ratelimited(dev,
+				     "FC Endpoint Security error: opening remote port 0x%016llx failed because it cannot communicate with the external key manager\n",
+				     wwpn);
+		break;
+	case FSF_SQ_SECURITY_AUTH_FAILURE:
+		dev_warn_ratelimited(dev,
+				     "FC Endpoint Security error: the device could not verify the identity of remote port 0x%016llx\n",
+				     wwpn);
+		break;
+
+	/*
+	 * Send FCP command error codes
+	 */
+
+	case FSF_SQ_SECURITY_ENC_FAILURE:
+		dev_warn_ratelimited(dev,
+				     "FC Endpoint Security error: FC connection to remote port 0x%016llx closed because encryption broke down\n",
+				     wwpn);
+		break;
+
+	/*
+	 * Unknown error codes
+	 */
+
+	default:
+		dev_warn_ratelimited(dev,
+				     "FC Endpoint Security error: the device issued an unknown error code 0x%08x related to the FC connection to remote port 0x%016llx\n",
+				     fsf_sqw0, wwpn);
+	}
+}
+
 static void zfcp_fsf_open_port_handler(struct zfcp_fsf_req *req)
 {
+	struct zfcp_adapter *adapter = req->adapter;
 	struct zfcp_port *port = req->data;
 	struct fsf_qtcb_header *header = &req->qtcb->header;
+	struct fsf_qtcb_bottom_support *bottom = &req->qtcb->bottom.support;
 	struct fc_els_flogi *plogi;
 
 	if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
@@ -1432,7 +1686,7 @@
 	case FSF_PORT_ALREADY_OPEN:
 		break;
 	case FSF_MAXIMUM_NUMBER_OF_PORTS_EXCEEDED:
-		dev_warn(&req->adapter->ccw_device->dev,
+		dev_warn(&adapter->ccw_device->dev,
 			 "Not enough FCP adapter resources to open "
 			 "remote port 0x%016Lx\n",
 			 (unsigned long long)port->wwpn);
@@ -1440,11 +1694,17 @@
 					 ZFCP_STATUS_COMMON_ERP_FAILED);
 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
 		break;
+	case FSF_SECURITY_ERROR:
+		zfcp_fsf_log_security_error(&req->adapter->ccw_device->dev,
+					    header->fsf_status_qual.word[0],
+					    port->wwpn);
+		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
+		break;
 	case FSF_ADAPTER_STATUS_AVAILABLE:
 		switch (header->fsf_status_qual.word[0]) {
 		case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
 			/* no zfcp_fc_test_link() with failed open port */
-			/* fall through */
+			fallthrough;
 		case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
 		case FSF_SQ_NO_RETRY_POSSIBLE:
 			req->status |= ZFCP_STATUS_FSFREQ_ERROR;
@@ -1453,6 +1713,11 @@
 		break;
 	case FSF_GOOD:
 		port->handle = header->port_handle;
+		if (adapter->adapter_features & FSF_FEATURE_FC_SECURITY)
+			port->connection_info = bottom->connection_info;
+		else
+			port->connection_info = 0;
+		zfcp_fsf_log_port_fc_security(port, req);
 		atomic_or(ZFCP_STATUS_COMMON_OPEN |
 				ZFCP_STATUS_PORT_PHYS_OPEN, &port->status);
 		atomic_andnot(ZFCP_STATUS_COMMON_ACCESS_BOXED,
@@ -1472,10 +1737,9 @@
 		 * another GID_PN straight after a port has been opened.
 		 * Alternately, an ADISC/PDISC ELS should suffice, as well.
 		 */
-		plogi = (struct fc_els_flogi *) req->qtcb->bottom.support.els;
-		if (req->qtcb->bottom.support.els1_length >=
-		    FSF_PLOGI_MIN_LEN)
-				zfcp_fc_plogi_evaluate(port, plogi);
+		plogi = (struct fc_els_flogi *) bottom->els;
+		if (bottom->els1_length >= FSF_PLOGI_MIN_LEN)
+			zfcp_fc_plogi_evaluate(port, plogi);
 		break;
 	case FSF_UNKNOWN_OP_SUBTYPE:
 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
@@ -1613,14 +1877,14 @@
 	case FSF_MAXIMUM_NUMBER_OF_PORTS_EXCEEDED:
 		dev_warn(&req->adapter->ccw_device->dev,
 			 "Opening WKA port 0x%x failed\n", wka_port->d_id);
-		/* fall through */
+		fallthrough;
 	case FSF_ADAPTER_STATUS_AVAILABLE:
 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
 		wka_port->status = ZFCP_FC_WKA_PORT_OFFLINE;
 		break;
 	case FSF_GOOD:
 		wka_port->handle = header->port_handle;
-		/* fall through */
+		fallthrough;
 	case FSF_PORT_ALREADY_OPEN:
 		wka_port->status = ZFCP_FC_WKA_PORT_ONLINE;
 	}
@@ -1763,7 +2027,6 @@
 	case FSF_ADAPTER_STATUS_AVAILABLE:
 		switch (header->fsf_status_qual.word[0]) {
 		case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
-			/* fall through */
 		case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
 			req->status |= ZFCP_STATUS_FSFREQ_ERROR;
 			break;
@@ -1848,7 +2111,7 @@
 
 	case FSF_PORT_HANDLE_NOT_VALID:
 		zfcp_erp_adapter_reopen(adapter, 0, "fsouh_1");
-		/* fall through */
+		fallthrough;
 	case FSF_LUN_ALREADY_OPEN:
 		break;
 	case FSF_PORT_BOXED:
@@ -1879,7 +2142,7 @@
 			 (unsigned long long)zfcp_scsi_dev_lun(sdev),
 			 (unsigned long long)zfcp_sdev->port->wwpn);
 		zfcp_erp_set_lun_status(sdev, ZFCP_STATUS_COMMON_ERP_FAILED);
-		/* fall through */
+		fallthrough;
 	case FSF_INVALID_COMMAND_OPTION:
 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
 		break;
@@ -1887,7 +2150,7 @@
 		switch (header->fsf_status_qual.word[0]) {
 		case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
 			zfcp_fc_test_link(zfcp_sdev->port);
-			/* fall through */
+			fallthrough;
 		case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
 			req->status |= ZFCP_STATUS_FSFREQ_ERROR;
 			break;
@@ -1981,7 +2244,7 @@
 		switch (req->qtcb->header.fsf_status_qual.word[0]) {
 		case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
 			zfcp_fc_test_link(zfcp_sdev->port);
-			/* fall through */
+			fallthrough;
 		case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
 			req->status |= ZFCP_STATUS_FSFREQ_ERROR;
 			break;
@@ -2166,6 +2429,13 @@
 			zfcp_fc_test_link(zfcp_sdev->port);
 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
 		break;
+	case FSF_SECURITY_ERROR:
+		zfcp_fsf_log_security_error(&req->adapter->ccw_device->dev,
+					    header->fsf_status_qual.word[0],
+					    zfcp_sdev->port->wwpn);
+		zfcp_erp_port_forced_reopen(zfcp_sdev->port, 0, "fssfch7");
+		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
+		break;
 	}
 }
 
@@ -2451,7 +2721,7 @@
 	for (idx = 0; idx < QDIO_MAX_ELEMENTS_PER_BUFFER; idx++) {
 
 		sbale = &sbal->element[idx];
-		req_id = (unsigned long) sbale->addr;
+		req_id = sbale->addr;
 		fsf_req = zfcp_reqlist_find_rm(adapter->req_list, req_id);
 
 		if (!fsf_req) {
diff --git a/drivers/s390/scsi/zfcp_fsf.h b/drivers/s390/scsi/zfcp_fsf.h
index 2c658b6..09d73d0 100644
--- a/drivers/s390/scsi/zfcp_fsf.h
+++ b/drivers/s390/scsi/zfcp_fsf.h
@@ -4,7 +4,7 @@
  *
  * Interface to the FSF support functions.
  *
- * Copyright IBM Corp. 2002, 2018
+ * Copyright IBM Corp. 2002, 2020
  */
 
 #ifndef FSF_H
@@ -78,6 +78,7 @@
 #define FSF_BLOCK_GUARD_CHECK_FAILURE		0x00000081
 #define FSF_APP_TAG_CHECK_FAILURE		0x00000082
 #define FSF_REF_TAG_CHECK_FAILURE		0x00000083
+#define FSF_SECURITY_ERROR			0x00000090
 #define FSF_ADAPTER_STATUS_AVAILABLE		0x000000AD
 #define FSF_FCP_RSP_AVAILABLE			0x000000AF
 #define FSF_UNKNOWN_COMMAND			0x000000E2
@@ -110,6 +111,14 @@
 #define FSF_PSQ_LINK_MODE_TABLE_CURRUPTED	0x00004000
 #define FSF_PSQ_LINK_NO_WWPN_ASSIGNMENT		0x00008000
 
+/* FSF status qualifier, security error */
+#define FSF_SQ_SECURITY_REQUIRED		0x00000001
+#define FSF_SQ_SECURITY_TIMEOUT			0x00000002
+#define FSF_SQ_SECURITY_KM_UNAVAILABLE		0x00000003
+#define FSF_SQ_SECURITY_RKM_UNAVAILABLE		0x00000004
+#define FSF_SQ_SECURITY_AUTH_FAILURE		0x00000005
+#define FSF_SQ_SECURITY_ENC_FAILURE		0x00000010
+
 /* payload size in status read buffer */
 #define FSF_STATUS_READ_PAYLOAD_SIZE		4032
 
@@ -163,6 +172,9 @@
 #define FSF_FEATURE_ELS_CT_CHAINED_SBALS	0x00000020
 #define FSF_FEATURE_UPDATE_ALERT		0x00000100
 #define FSF_FEATURE_MEASUREMENT_DATA		0x00000200
+#define FSF_FEATURE_REQUEST_SFP_DATA		0x00000200
+#define FSF_FEATURE_REPORT_SFP_DATA		0x00000800
+#define FSF_FEATURE_FC_SECURITY			0x00001000
 #define FSF_FEATURE_DIF_PROT_TYPE1		0x00010000
 #define FSF_FEATURE_DIX_PROT_TCPIP		0x00020000
 
@@ -172,6 +184,11 @@
 /* option */
 #define FSF_OPEN_LUN_SUPPRESS_BOXING		0x00000001
 
+/* FC security algorithms */
+#define FSF_FC_SECURITY_AUTH			0x00000001
+#define FSF_FC_SECURITY_ENC_FCSP2		0x00000002
+#define FSF_FC_SECURITY_ENC_ERAS		0x00000004
+
 struct fsf_queue_designator {
 	u8  cssid;
 	u8  chpid;
@@ -336,7 +353,8 @@
 	u8  res3[3];
 	u8  timeout;
         u32 lun_access_info;
-        u8  res4[180];
+	u32 connection_info;
+	u8  res4[176];
 	u32 els1_length;
 	u32 els2_length;
 	u32 req_buf_length;
@@ -407,7 +425,25 @@
 	u8 cp_util;
 	u8 cb_util;
 	u8 a_util;
-	u8 res2[253];
+	u8 res2;
+	s16 temperature;
+	u16 vcc;
+	u16 tx_bias;
+	u16 tx_power;
+	u16 rx_power;
+	union {
+		u16 raw;
+		struct {
+			u16 fec_active		:1;
+			u16:7;
+			u16 connector_type	:2;
+			u16 sfp_invalid		:1;
+			u16 optical_port	:1;
+			u16 port_tx_type	:4;
+		};
+	} sfp_flags;
+	u32 fc_security_algorithms;
+	u8 res3[236];
 } __attribute__ ((packed));
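
The former res2 padding is carved up here into the SFP diagnostics fields; inside sfp_flags, the anonymous bitfield names the individual diagnostic bits while raw exposes the same 16 bits unparsed. A minimal sketch of a reader using both views (get_qtcb_bottom_port() is hypothetical):

	struct fsf_qtcb_bottom_port *bottom = get_qtcb_bottom_port();

	if (bottom->sfp_flags.sfp_invalid)	/* named-bit view */
		pr_warn("SFP data invalid, flags 0x%04x\n",
			bottom->sfp_flags.raw);	/* raw view of the same bits */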
 
 union fsf_qtcb_bottom {
diff --git a/drivers/s390/scsi/zfcp_qdio.c b/drivers/s390/scsi/zfcp_qdio.c
index 661436a..a8a5140 100644
--- a/drivers/s390/scsi/zfcp_qdio.c
+++ b/drivers/s390/scsi/zfcp_qdio.c
@@ -4,7 +4,7 @@
  *
  * Setup and helper functions to access QDIO.
  *
- * Copyright IBM Corp. 2002, 2017
+ * Copyright IBM Corp. 2002, 2020
  */
 
 #define KMSG_COMPONENT "zfcp"
@@ -98,7 +98,7 @@
 			memset(pl, 0,
 			       ZFCP_QDIO_MAX_SBALS_PER_REQ * sizeof(void *));
 			sbale = qdio->res_q[idx]->element;
-			req_id = (u64) sbale->addr;
+			req_id = sbale->addr;
 			scount = min(sbale->scount + 1,
 				     ZFCP_QDIO_MAX_SBALS_PER_REQ + 1);
 				     /* incl. signaling SBAL */
@@ -199,7 +199,7 @@
 					     q_req->sbal_number);
 			return -EINVAL;
 		}
-		sbale->addr = sg_virt(sg);
+		sbale->addr = sg_phys(sg);
 		sbale->length = sg->length;
 	}
 	return 0;
@@ -246,7 +246,7 @@
 }
 
 /**
- * zfcp_qdio_send - set PCI flag in first SBALE and send req to QDIO
+ * zfcp_qdio_send - send req to QDIO
  * @qdio: pointer to struct zfcp_qdio
  * @q_req: pointer to struct zfcp_qdio_req
  * Returns: 0 on success, error otherwise
@@ -260,46 +260,26 @@
 	zfcp_qdio_account(qdio);
 	spin_unlock(&qdio->stat_lock);
 
+	atomic_sub(sbal_number, &qdio->req_q_free);
+
 	retval = do_QDIO(qdio->adapter->ccw_device, QDIO_FLAG_SYNC_OUTPUT, 0,
 			 q_req->sbal_first, sbal_number);
 
 	if (unlikely(retval)) {
+		/* Failed to submit the IO, roll back our modifications. */
+		atomic_add(sbal_number, &qdio->req_q_free);
 		zfcp_qdio_zero_sbals(qdio->req_q, q_req->sbal_first,
 				     sbal_number);
 		return retval;
 	}
 
 	/* account for transferred buffers */
-	atomic_sub(sbal_number, &qdio->req_q_free);
 	qdio->req_q_idx += sbal_number;
 	qdio->req_q_idx %= QDIO_MAX_BUFFERS_PER_Q;
 
 	return 0;
 }
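
The reordering in zfcp_qdio_send() is deliberate: req_q_free is now decremented before do_QDIO(), so the slots are reserved before the request can possibly complete and be re-accounted, and the reservation is rolled back if the submit fails. Reduced to its essentials (names assumed), the pattern is:

	/* Illustrative only: reserve first, roll back on failure. */
	atomic_sub(nbufs, &free_count);		/* reserve up front */
	if (submit_to_hardware(req) != 0) {	/* hypothetical submit */
		atomic_add(nbufs, &free_count);	/* undo the reservation */
		return -EIO;
	}
	return 0;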
 
-
-static void zfcp_qdio_setup_init_data(struct qdio_initialize *id,
-				      struct zfcp_qdio *qdio)
-{
-	memset(id, 0, sizeof(*id));
-	id->cdev = qdio->adapter->ccw_device;
-	id->q_format = QDIO_ZFCP_QFMT;
-	memcpy(id->adapter_name, dev_name(&id->cdev->dev), 8);
-	ASCEBC(id->adapter_name, 8);
-	id->qib_rflags = QIB_RFLAGS_ENABLE_DATA_DIV;
-	if (enable_multibuffer)
-		id->qdr_ac |= QDR_AC_MULTI_BUFFER_ENABLE;
-	id->no_input_qs = 1;
-	id->no_output_qs = 1;
-	id->input_handler = zfcp_qdio_int_resp;
-	id->output_handler = zfcp_qdio_int_req;
-	id->int_parm = (unsigned long) qdio;
-	id->input_sbal_addr_array = qdio->res_q;
-	id->output_sbal_addr_array = qdio->req_q;
-	id->scan_threshold =
-		QDIO_MAX_BUFFERS_PER_Q - ZFCP_QDIO_MAX_SBALS_PER_REQ * 2;
-}
-
 /**
  * zfcp_qdio_allocate - allocate queue memory and initialize QDIO data
  * @qdio: pointer to struct zfcp_qdio
@@ -308,7 +288,6 @@
  */
 static int zfcp_qdio_allocate(struct zfcp_qdio *qdio)
 {
-	struct qdio_initialize init_data;
 	int ret;
 
 	ret = qdio_alloc_buffers(qdio->req_q, QDIO_MAX_BUFFERS_PER_Q);
@@ -319,10 +298,9 @@
 	if (ret)
 		goto free_req_q;
 
-	zfcp_qdio_setup_init_data(&init_data, qdio);
 	init_waitqueue_head(&qdio->req_q_wq);
 
-	ret = qdio_allocate(&init_data);
+	ret = qdio_allocate(qdio->adapter->ccw_device, 1, 1);
 	if (ret)
 		goto free_res_q;
 
@@ -367,6 +345,18 @@
 	atomic_set(&qdio->req_q_free, 0);
 }
 
+void zfcp_qdio_shost_update(struct zfcp_adapter *const adapter,
+			    const struct zfcp_qdio *const qdio)
+{
+	struct Scsi_Host *const shost = adapter->scsi_host;
+
+	if (shost == NULL)
+		return;
+
+	shost->sg_tablesize = qdio->max_sbale_per_req;
+	shost->max_sectors = qdio->max_sbale_per_req * 8;
+}
+
 /**
  * zfcp_qdio_open - prepare and initialize response queue
  * @qdio: pointer to struct zfcp_qdio
@@ -374,8 +364,10 @@
  */
 int zfcp_qdio_open(struct zfcp_qdio *qdio)
 {
+	struct qdio_buffer **input_sbals[1] = {qdio->res_q};
+	struct qdio_buffer **output_sbals[1] = {qdio->req_q};
 	struct qdio_buffer_element *sbale;
-	struct qdio_initialize init_data;
+	struct qdio_initialize init_data = {0};
 	struct zfcp_adapter *adapter = qdio->adapter;
 	struct ccw_device *cdev = adapter->ccw_device;
 	struct qdio_ssqd_desc ssqd;
@@ -387,12 +379,24 @@
 	atomic_andnot(ZFCP_STATUS_ADAPTER_SIOSL_ISSUED,
 			  &qdio->adapter->status);
 
-	zfcp_qdio_setup_init_data(&init_data, qdio);
+	init_data.q_format = QDIO_ZFCP_QFMT;
+	init_data.qib_rflags = QIB_RFLAGS_ENABLE_DATA_DIV;
+	if (enable_multibuffer)
+		init_data.qdr_ac |= QDR_AC_MULTI_BUFFER_ENABLE;
+	init_data.no_input_qs = 1;
+	init_data.no_output_qs = 1;
+	init_data.input_handler = zfcp_qdio_int_resp;
+	init_data.output_handler = zfcp_qdio_int_req;
+	init_data.int_parm = (unsigned long) qdio;
+	init_data.input_sbal_addr_array = input_sbals;
+	init_data.output_sbal_addr_array = output_sbals;
+	init_data.scan_threshold =
+		QDIO_MAX_BUFFERS_PER_Q - ZFCP_QDIO_MAX_SBALS_PER_REQ * 2;
 
-	if (qdio_establish(&init_data))
+	if (qdio_establish(cdev, &init_data))
 		goto failed_establish;
 
-	if (qdio_get_ssqd_desc(init_data.cdev, &ssqd))
+	if (qdio_get_ssqd_desc(cdev, &ssqd))
 		goto failed_qdio;
 
 	if (ssqd.qdioac2 & CHSC_AC2_DATA_DIV_ENABLED)
@@ -418,7 +422,7 @@
 		sbale->length = 0;
 		sbale->eflags = SBAL_EFLAGS_LAST_ENTRY;
 		sbale->sflags = 0;
-		sbale->addr = NULL;
+		sbale->addr = 0;
 	}
 
 	if (do_QDIO(cdev, QDIO_FLAG_SYNC_INPUT, 0, 0, QDIO_MAX_BUFFERS_PER_Q))
@@ -429,10 +433,7 @@
 	atomic_set(&qdio->req_q_free, QDIO_MAX_BUFFERS_PER_Q);
 	atomic_or(ZFCP_STATUS_ADAPTER_QDIOUP, &qdio->adapter->status);
 
-	if (adapter->scsi_host) {
-		adapter->scsi_host->sg_tablesize = qdio->max_sbale_per_req;
-		adapter->scsi_host->max_sectors = qdio->max_sbale_per_req * 8;
-	}
+	zfcp_qdio_shost_update(adapter, qdio);
 
 	return 0;
 
diff --git a/drivers/s390/scsi/zfcp_qdio.h b/drivers/s390/scsi/zfcp_qdio.h
index 2a816a3..6b43d6b 100644
--- a/drivers/s390/scsi/zfcp_qdio.h
+++ b/drivers/s390/scsi/zfcp_qdio.h
@@ -122,14 +122,14 @@
 					% QDIO_MAX_BUFFERS_PER_Q;
 
 	sbale = zfcp_qdio_sbale_req(qdio, q_req);
-	sbale->addr = (void *) req_id;
+	sbale->addr = req_id;
 	sbale->eflags = 0;
 	sbale->sflags = SBAL_SFLAGS0_COMMAND | sbtype;
 
 	if (unlikely(!data))
 		return;
 	sbale++;
-	sbale->addr = data;
+	sbale->addr = virt_to_phys(data);
 	sbale->length = len;
 }
 
@@ -152,7 +152,7 @@
 	BUG_ON(q_req->sbale_curr == qdio->max_sbale_per_sbal - 1);
 	q_req->sbale_curr++;
 	sbale = zfcp_qdio_sbale_curr(qdio, q_req);
-	sbale->addr = data;
+	sbale->addr = virt_to_phys(data);
 	sbale->length = len;
 }
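
A recurring change in this header and in zfcp_qdio.c above: sbale->addr now carries a physical address (u64) instead of a kernel virtual pointer, hence sg_phys() and virt_to_phys() replace sg_virt() and the plain pointer stores. In sketch form (sbale and length assumed in scope):

	/* Illustrative only: SBALEs are filled with physical addresses now. */
	void *data = kmalloc(length, GFP_KERNEL);

	sbale->addr = virt_to_phys(data);	/* was: sbale->addr = data; */
	sbale->length = length;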
 
diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c
index e9ded2b..d58bf79 100644
--- a/drivers/s390/scsi/zfcp_scsi.c
+++ b/drivers/s390/scsi/zfcp_scsi.c
@@ -4,7 +4,7 @@
  *
  * Interface to Linux SCSI midlayer.
  *
- * Copyright IBM Corp. 2002, 2018
+ * Copyright IBM Corp. 2002, 2020
  */
 
 #define KMSG_COMPONENT "zfcp"
@@ -451,26 +451,39 @@
 };
 
 /**
- * zfcp_scsi_adapter_register - Register SCSI and FC host with SCSI midlayer
+ * zfcp_scsi_adapter_register() - Allocate and register SCSI and FC host with
+ *				  SCSI midlayer
  * @adapter: The zfcp adapter to register with the SCSI midlayer
+ *
+ * Allocates the SCSI host object for the given adapter, sets basic properties
+ * (such as the transport template, QDIO limits, ...), and registers it with
+ * the midlayer.
+ *
+ * During registration with the midlayer the corresponding FC host object for
+ * the referenced transport class is also implicitly allocated.
+ *
+ * Upon success adapter->scsi_host is set, and upon failure it remains NULL. If
+ * adapter->scsi_host is already set, nothing is done.
+ *
+ * Return:
+ * * 0	     - Allocation and registration was successful
+ * * -EEXIST - SCSI and FC host did already exist, nothing was done, nothing
+ *	       was changed
+ * * -EIO    - Allocation or registration failed
  */
 int zfcp_scsi_adapter_register(struct zfcp_adapter *adapter)
 {
 	struct ccw_dev_id dev_id;
 
 	if (adapter->scsi_host)
-		return 0;
+		return -EEXIST;
 
 	ccw_device_get_id(adapter->ccw_device, &dev_id);
 	/* register adapter as SCSI host with mid layer of SCSI stack */
 	adapter->scsi_host = scsi_host_alloc(&zfcp_scsi_host_template,
 					     sizeof (struct zfcp_adapter *));
-	if (!adapter->scsi_host) {
-		dev_err(&adapter->ccw_device->dev,
-			"Registering the FCP device with the "
-			"SCSI stack failed\n");
-		return -EIO;
-	}
+	if (!adapter->scsi_host)
+		goto err_out;
 
 	/* tell the SCSI stack some characteristics of this adapter */
 	adapter->scsi_host->max_id = 511;
@@ -480,14 +493,23 @@
 	adapter->scsi_host->max_cmd_len = 16; /* in struct fcp_cmnd */
 	adapter->scsi_host->transportt = zfcp_scsi_transport_template;
 
+	/* make all basic properties known at registration time */
+	zfcp_qdio_shost_update(adapter, adapter->qdio);
+	zfcp_scsi_set_prot(adapter);
+
 	adapter->scsi_host->hostdata[0] = (unsigned long) adapter;
 
 	if (scsi_add_host(adapter->scsi_host, &adapter->ccw_device->dev)) {
 		scsi_host_put(adapter->scsi_host);
-		return -EIO;
+		goto err_out;
 	}
 
 	return 0;
+err_out:
+	adapter->scsi_host = NULL;
+	dev_err(&adapter->ccw_device->dev,
+		"Registering the FCP device with the SCSI stack failed\n");
+	return -EIO;
 }
 
 /**
@@ -605,7 +627,7 @@
 		return NULL;
 
 	ret = zfcp_fsf_exchange_port_data_sync(adapter->qdio, data);
-	if (ret) {
+	if (ret != 0 && ret != -EAGAIN) {
 		kfree(data);
 		return NULL;
 	}
@@ -634,7 +656,7 @@
 		return;
 
 	ret = zfcp_fsf_exchange_port_data_sync(adapter->qdio, data);
-	if (ret)
+	if (ret != 0 && ret != -EAGAIN)
 		kfree(data);
 	else {
 		adapter->stats_reset = jiffies/HZ;
@@ -841,6 +863,95 @@
 	set_host_byte(scmd, DID_SOFT_ERROR);
 }
 
+void zfcp_scsi_shost_update_config_data(
+	struct zfcp_adapter *const adapter,
+	const struct fsf_qtcb_bottom_config *const bottom,
+	const bool bottom_incomplete)
+{
+	struct Scsi_Host *const shost = adapter->scsi_host;
+	const struct fc_els_flogi *nsp, *plogi;
+
+	if (shost == NULL)
+		return;
+
+	snprintf(fc_host_firmware_version(shost), FC_VERSION_STRING_SIZE,
+		 "0x%08x", bottom->lic_version);
+
+	if (adapter->adapter_features & FSF_FEATURE_HBAAPI_MANAGEMENT) {
+		snprintf(fc_host_hardware_version(shost),
+			 FC_VERSION_STRING_SIZE,
+			 "0x%08x", bottom->hardware_version);
+		memcpy(fc_host_serial_number(shost), bottom->serial_number,
+		       min(FC_SERIAL_NUMBER_SIZE, 17));
+		EBCASC(fc_host_serial_number(shost),
+		       min(FC_SERIAL_NUMBER_SIZE, 17));
+	}
+
+	/* adjust pointers for missing command code */
+	nsp = (struct fc_els_flogi *) ((u8 *)&bottom->nport_serv_param
+					- sizeof(u32));
+	plogi = (struct fc_els_flogi *) ((u8 *)&bottom->plogi_payload
+					- sizeof(u32));
+
+	snprintf(fc_host_manufacturer(shost), FC_SERIAL_NUMBER_SIZE, "%s",
+		 "IBM");
+	fc_host_port_name(shost) = be64_to_cpu(nsp->fl_wwpn);
+	fc_host_node_name(shost) = be64_to_cpu(nsp->fl_wwnn);
+	fc_host_supported_classes(shost) = FC_COS_CLASS2 | FC_COS_CLASS3;
+
+	zfcp_scsi_set_prot(adapter);
+
+	/* do not evaluate invalid fields */
+	if (bottom_incomplete)
+		return;
+
+	fc_host_port_id(shost) = ntoh24(bottom->s_id);
+	fc_host_speed(shost) =
+		zfcp_fsf_convert_portspeed(bottom->fc_link_speed);
+
+	snprintf(fc_host_model(shost), FC_SYMBOLIC_NAME_SIZE, "0x%04x",
+		 bottom->adapter_type);
+
+	switch (bottom->fc_topology) {
+	case FSF_TOPO_P2P:
+		fc_host_port_type(shost) = FC_PORTTYPE_PTP;
+		fc_host_fabric_name(shost) = 0;
+		break;
+	case FSF_TOPO_FABRIC:
+		fc_host_fabric_name(shost) = be64_to_cpu(plogi->fl_wwnn);
+		if (bottom->connection_features & FSF_FEATURE_NPIV_MODE)
+			fc_host_port_type(shost) = FC_PORTTYPE_NPIV;
+		else
+			fc_host_port_type(shost) = FC_PORTTYPE_NPORT;
+		break;
+	case FSF_TOPO_AL:
+		fc_host_port_type(shost) = FC_PORTTYPE_NLPORT;
+		fallthrough;
+	default:
+		fc_host_fabric_name(shost) = 0;
+		break;
+	}
+}
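
One detail worth spelling out: struct fc_els_flogi starts with the 4-byte ELS command code (fl_cmd plus three reserved bytes), while the QTCB copies of the FLOGI/PLOGI payloads omit exactly those 4 bytes. Stepping the pointer back by sizeof(u32) therefore puts the remaining fields at their proper offsets; only those fields, never the first 4 bytes, may be read through such a pointer. Schematically (bottom assumed in scope):

	/*
	 * Illustrative only: realigning a payload that lacks the command code.
	 *
	 *   struct fc_els_flogi: | fl_cmd + resv (4) | fl_csp | fl_wwpn | ...
	 *   QTCB payload:                            | csp    | wwpn    | ...
	 */
	const struct fc_els_flogi *nsp =
		(const struct fc_els_flogi *)((const u8 *)&bottom->nport_serv_param
					      - sizeof(u32));
	u64 wwpn = be64_to_cpu(nsp->fl_wwpn);	/* fields line up again */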
+
+void zfcp_scsi_shost_update_port_data(
+	struct zfcp_adapter *const adapter,
+	const struct fsf_qtcb_bottom_port *const bottom)
+{
+	struct Scsi_Host *const shost = adapter->scsi_host;
+
+	if (shost == NULL)
+		return;
+
+	fc_host_permanent_port_name(shost) = bottom->wwpn;
+	fc_host_maxframe_size(shost) = bottom->maximum_frame_size;
+	fc_host_supported_speeds(shost) =
+		zfcp_fsf_convert_portspeed(bottom->supported_speed);
+	memcpy(fc_host_supported_fc4s(shost), bottom->supported_fc4_types,
+	       FC_FC4_LIST_SIZE);
+	memcpy(fc_host_active_fc4s(shost), bottom->active_fc4_types,
+	       FC_FC4_LIST_SIZE);
+}
+
 struct fc_function_template zfcp_transport_functions = {
 	.show_starget_port_id = 1,
 	.show_starget_port_name = 1,
@@ -856,6 +967,10 @@
 	.show_host_supported_speeds = 1,
 	.show_host_maxframe_size = 1,
 	.show_host_serial_number = 1,
+	.show_host_manufacturer = 1,
+	.show_host_model = 1,
+	.show_host_hardware_version = 1,
+	.show_host_firmware_version = 1,
 	.get_fc_host_stats = zfcp_scsi_get_fc_host_stats,
 	.reset_fc_host_stats = zfcp_scsi_reset_fc_host_stats,
 	.set_rport_dev_loss_tmo = zfcp_scsi_set_rport_dev_loss_tmo,
@@ -871,5 +986,6 @@
 	.show_host_symbolic_name = 1,
 	.show_host_speed = 1,
 	.show_host_port_id = 1,
+	.show_host_fabric_name = 1,
 	.dd_bsg_size = sizeof(struct zfcp_fsf_ct_els),
 };
diff --git a/drivers/s390/scsi/zfcp_sysfs.c b/drivers/s390/scsi/zfcp_sysfs.c
index af197e2..3c7f5ec 100644
--- a/drivers/s390/scsi/zfcp_sysfs.c
+++ b/drivers/s390/scsi/zfcp_sysfs.c
@@ -4,13 +4,14 @@
  *
  * sysfs attributes.
  *
- * Copyright IBM Corp. 2008, 2010
+ * Copyright IBM Corp. 2008, 2020
  */
 
 #define KMSG_COMPONENT "zfcp"
 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
 
 #include <linux/slab.h>
+#include "zfcp_diag.h"
 #include "zfcp_ext.h"
 
 #define ZFCP_DEV_ATTR(_feat, _name, _mode, _show, _store) \
@@ -215,20 +216,32 @@
 {
 	struct ccw_device *cdev = to_ccwdev(dev);
 	struct zfcp_adapter *adapter = zfcp_ccw_adapter_by_cdev(cdev);
+	int retval = 0;
 
 	if (!adapter)
 		return -ENODEV;
 
 	/*
+	 * If `scsi_host` is missing, we can't schedule `scan_work`, as it
+	 * makes use of the corresponding fc_host object. But this state is
+	 * only possible if xconfig/xport data has never completed,
+	 * and we couldn't successfully scan for ports anyway.
+	 */
+	if (adapter->scsi_host == NULL) {
+		retval = -ENODEV;
+		goto out;
+	}
+
+	/*
 	 * The user's wish is our command: immediately schedule and flush a
 	 * worker to conduct a synchronous port scan, that is, neither
 	 * a random delay nor a rate limit is applied here.
 	 */
 	queue_delayed_work(adapter->work_queue, &adapter->scan_work, 0);
 	flush_delayed_work(&adapter->scan_work);
+out:
 	zfcp_ccw_adapter_put(adapter);
-
-	return (ssize_t) count;
+	return retval ? retval : (ssize_t) count;
 }
 static ZFCP_DEV_ATTR(adapter, port_rescan, S_IWUSR, NULL,
 		     zfcp_sysfs_port_rescan_store);
@@ -325,6 +338,86 @@
 static ZFCP_DEV_ATTR(adapter, port_remove, S_IWUSR, NULL,
 		     zfcp_sysfs_port_remove_store);
 
+static ssize_t
+zfcp_sysfs_adapter_diag_max_age_show(struct device *dev,
+				     struct device_attribute *attr, char *buf)
+{
+	struct zfcp_adapter *adapter = zfcp_ccw_adapter_by_cdev(to_ccwdev(dev));
+	ssize_t rc;
+
+	if (!adapter)
+		return -ENODEV;
+
+	/* ceil(log(2^64 - 1) / log(10)) = 20 */
+	rc = scnprintf(buf, 20 + 2, "%lu\n", adapter->diagnostics->max_age);
+
+	zfcp_ccw_adapter_put(adapter);
+	return rc;
+}
+
+static ssize_t
+zfcp_sysfs_adapter_diag_max_age_store(struct device *dev,
+				      struct device_attribute *attr,
+				      const char *buf, size_t count)
+{
+	struct zfcp_adapter *adapter = zfcp_ccw_adapter_by_cdev(to_ccwdev(dev));
+	unsigned long max_age;
+	ssize_t rc;
+
+	if (!adapter)
+		return -ENODEV;
+
+	rc = kstrtoul(buf, 10, &max_age);
+	if (rc != 0)
+		goto out;
+
+	adapter->diagnostics->max_age = max_age;
+
+	rc = count;
+out:
+	zfcp_ccw_adapter_put(adapter);
+	return rc;
+}
+static ZFCP_DEV_ATTR(adapter, diag_max_age, 0644,
+		     zfcp_sysfs_adapter_diag_max_age_show,
+		     zfcp_sysfs_adapter_diag_max_age_store);
+
+static ssize_t zfcp_sysfs_adapter_fc_security_show(
+	struct device *dev, struct device_attribute *attr, char *buf)
+{
+	struct ccw_device *cdev = to_ccwdev(dev);
+	struct zfcp_adapter *adapter = zfcp_ccw_adapter_by_cdev(cdev);
+	unsigned int status;
+	int i;
+
+	if (!adapter)
+		return -ENODEV;
+
+	/*
+	 * Adapter status COMMON_OPEN implies that xconf and xport data
+	 * have completed. Adapter FC Endpoint Security capability remains
+	 * unchanged in case of COMMON_ERP_FAILED (e.g. due to local link
+	 * down).
+	 */
+	status = atomic_read(&adapter->status);
+	if (0 == (status & ZFCP_STATUS_COMMON_OPEN))
+		i = sprintf(buf, "unknown\n");
+	else if (!(adapter->adapter_features & FSF_FEATURE_FC_SECURITY))
+		i = sprintf(buf, "unsupported\n");
+	else {
+		i = zfcp_fsf_scnprint_fc_security(
+			buf, PAGE_SIZE - 1, adapter->fc_security_algorithms,
+			ZFCP_FSF_PRINT_FMT_LIST);
+		i += scnprintf(buf + i, PAGE_SIZE - i, "\n");
+	}
+
+	zfcp_ccw_adapter_put(adapter);
+	return i;
+}
+static ZFCP_DEV_ATTR(adapter, fc_security, S_IRUGO,
+		     zfcp_sysfs_adapter_fc_security_show,
+		     NULL);
+
 static struct attribute *zfcp_adapter_attrs[] = {
 	&dev_attr_adapter_failed.attr,
 	&dev_attr_adapter_in_recovery.attr,
@@ -337,6 +430,8 @@
 	&dev_attr_adapter_lic_version.attr,
 	&dev_attr_adapter_status.attr,
 	&dev_attr_adapter_hardware_version.attr,
+	&dev_attr_adapter_diag_max_age.attr,
+	&dev_attr_adapter_fc_security.attr,
 	NULL
 };
 
@@ -380,6 +475,37 @@
 }
 static DEVICE_ATTR(unit_remove, S_IWUSR, NULL, zfcp_sysfs_unit_remove_store);
 
+static ssize_t zfcp_sysfs_port_fc_security_show(struct device *dev,
+						struct device_attribute *attr,
+						char *buf)
+{
+	struct zfcp_port *port = container_of(dev, struct zfcp_port, dev);
+	struct zfcp_adapter *adapter = port->adapter;
+	unsigned int status = atomic_read(&port->status);
+	int i;
+
+	if (0 == (status & ZFCP_STATUS_COMMON_OPEN) ||
+	    0 == (status & ZFCP_STATUS_COMMON_UNBLOCKED) ||
+	    0 == (status & ZFCP_STATUS_PORT_PHYS_OPEN) ||
+	    0 != (status & ZFCP_STATUS_PORT_LINK_TEST) ||
+	    0 != (status & ZFCP_STATUS_COMMON_ERP_FAILED) ||
+	    0 != (status & ZFCP_STATUS_COMMON_ACCESS_BOXED))
+		i = sprintf(buf, "unknown\n");
+	else if (!(adapter->adapter_features & FSF_FEATURE_FC_SECURITY))
+		i = sprintf(buf, "unsupported\n");
+	else {
+		i = zfcp_fsf_scnprint_fc_security(
+			buf, PAGE_SIZE - 1, port->connection_info,
+			ZFCP_FSF_PRINT_FMT_SINGLEITEM);
+		i += scnprintf(buf + i, PAGE_SIZE - i, "\n");
+	}
+
+	return i;
+}
+static ZFCP_DEV_ATTR(port, fc_security, S_IRUGO,
+		     zfcp_sysfs_port_fc_security_show,
+		     NULL);
+
 static struct attribute *zfcp_port_attrs[] = {
 	&dev_attr_unit_add.attr,
 	&dev_attr_unit_remove.attr,
@@ -387,6 +513,7 @@
 	&dev_attr_port_in_recovery.attr,
 	&dev_attr_port_status.attr,
 	&dev_attr_port_access_denied.attr,
+	&dev_attr_port_fc_security.attr,
 	NULL
 };
 static struct attribute_group zfcp_port_attr_group = {
@@ -577,7 +704,7 @@
 		return -ENOMEM;
 
 	retval = zfcp_fsf_exchange_port_data_sync(adapter->qdio, qtcb_port);
-	if (!retval)
+	if (retval == 0 || retval == -EAGAIN)
 		retval = sprintf(buf, "%u %u %u\n", qtcb_port->cp_util,
 				 qtcb_port->cb_util, qtcb_port->a_util);
 	kfree(qtcb_port);
@@ -603,7 +730,7 @@
 		return -ENOMEM;
 
 	retval = zfcp_fsf_exchange_config_data_sync(adapter->qdio, qtcb_config);
-	if (!retval)
+	if (retval == 0 || retval == -EAGAIN)
 		*stat_inf = qtcb_config->stat_info;
 
 	kfree(qtcb_config);
@@ -664,3 +791,123 @@
 	&dev_attr_queue_full,
 	NULL
 };
+
+static ssize_t zfcp_sysfs_adapter_diag_b2b_credit_show(
+	struct device *dev, struct device_attribute *attr, char *buf)
+{
+	struct zfcp_adapter *adapter = zfcp_ccw_adapter_by_cdev(to_ccwdev(dev));
+	struct zfcp_diag_header *diag_hdr;
+	struct fc_els_flogi *nsp;
+	ssize_t rc = -ENOLINK;
+	unsigned long flags;
+	unsigned int status;
+
+	if (!adapter)
+		return -ENODEV;
+
+	status = atomic_read(&adapter->status);
+	if (0 == (status & ZFCP_STATUS_COMMON_OPEN) ||
+	    0 == (status & ZFCP_STATUS_COMMON_UNBLOCKED) ||
+	    0 != (status & ZFCP_STATUS_COMMON_ERP_FAILED))
+		goto out;
+
+	diag_hdr = &adapter->diagnostics->config_data.header;
+
+	rc = zfcp_diag_update_buffer_limited(
+		adapter, diag_hdr, zfcp_diag_update_config_data_buffer);
+	if (rc != 0)
+		goto out;
+
+	spin_lock_irqsave(&diag_hdr->access_lock, flags);
+	/* nport_serv_param doesn't contain the ELS_Command code */
+	nsp = (struct fc_els_flogi *)((unsigned long)
+					      adapter->diagnostics->config_data
+						      .data.nport_serv_param -
+				      sizeof(u32));
+
+	rc = scnprintf(buf, 5 + 2, "%hu\n",
+		       be16_to_cpu(nsp->fl_csp.sp_bb_cred));
+	spin_unlock_irqrestore(&diag_hdr->access_lock, flags);
+
+out:
+	zfcp_ccw_adapter_put(adapter);
+	return rc;
+}
+static ZFCP_DEV_ATTR(adapter_diag, b2b_credit, 0400,
+		     zfcp_sysfs_adapter_diag_b2b_credit_show, NULL);
+
+#define ZFCP_DEFINE_DIAG_SFP_ATTR(_name, _qtcb_member, _prtsize, _prtfmt)      \
+	static ssize_t zfcp_sysfs_adapter_diag_sfp_##_name##_show(	       \
+		struct device *dev, struct device_attribute *attr, char *buf)  \
+	{								       \
+		struct zfcp_adapter *const adapter =			       \
+			zfcp_ccw_adapter_by_cdev(to_ccwdev(dev));	       \
+		struct zfcp_diag_header *diag_hdr;			       \
+		ssize_t rc = -ENOLINK;					       \
+		unsigned long flags;					       \
+		unsigned int status;					       \
+									       \
+		if (!adapter)						       \
+			return -ENODEV;					       \
+									       \
+		status = atomic_read(&adapter->status);			       \
+		if (0 == (status & ZFCP_STATUS_COMMON_OPEN) ||		       \
+		    0 == (status & ZFCP_STATUS_COMMON_UNBLOCKED) ||	       \
+		    0 != (status & ZFCP_STATUS_COMMON_ERP_FAILED))	       \
+			goto out;					       \
+									       \
+		if (!zfcp_diag_support_sfp(adapter)) {			       \
+			rc = -EOPNOTSUPP;				       \
+			goto out;					       \
+		}							       \
+									       \
+		diag_hdr = &adapter->diagnostics->port_data.header;	       \
+									       \
+		rc = zfcp_diag_update_buffer_limited(			       \
+			adapter, diag_hdr, zfcp_diag_update_port_data_buffer); \
+		if (rc != 0)						       \
+			goto out;					       \
+									       \
+		spin_lock_irqsave(&diag_hdr->access_lock, flags);	       \
+		rc = scnprintf(						       \
+			buf, (_prtsize) + 2, _prtfmt "\n",		       \
+			adapter->diagnostics->port_data.data._qtcb_member);    \
+		spin_unlock_irqrestore(&diag_hdr->access_lock, flags);	       \
+									       \
+	out:								       \
+		zfcp_ccw_adapter_put(adapter);				       \
+		return rc;						       \
+	}								       \
+	static ZFCP_DEV_ATTR(adapter_diag_sfp, _name, 0400,		       \
+			     zfcp_sysfs_adapter_diag_sfp_##_name##_show, NULL)
+
+ZFCP_DEFINE_DIAG_SFP_ATTR(temperature, temperature, 6, "%hd");
+ZFCP_DEFINE_DIAG_SFP_ATTR(vcc, vcc, 5, "%hu");
+ZFCP_DEFINE_DIAG_SFP_ATTR(tx_bias, tx_bias, 5, "%hu");
+ZFCP_DEFINE_DIAG_SFP_ATTR(tx_power, tx_power, 5, "%hu");
+ZFCP_DEFINE_DIAG_SFP_ATTR(rx_power, rx_power, 5, "%hu");
+ZFCP_DEFINE_DIAG_SFP_ATTR(port_tx_type, sfp_flags.port_tx_type, 2, "%hu");
+ZFCP_DEFINE_DIAG_SFP_ATTR(optical_port, sfp_flags.optical_port, 1, "%hu");
+ZFCP_DEFINE_DIAG_SFP_ATTR(sfp_invalid, sfp_flags.sfp_invalid, 1, "%hu");
+ZFCP_DEFINE_DIAG_SFP_ATTR(connector_type, sfp_flags.connector_type, 1, "%hu");
+ZFCP_DEFINE_DIAG_SFP_ATTR(fec_active, sfp_flags.fec_active, 1, "%hu");
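
Since the macro body is dense, here is roughly what a single invocation expands to; the temperature attribute stands in for all ten (sketch, with the adapter lookup and status checks condensed into a comment):

	static ssize_t zfcp_sysfs_adapter_diag_sfp_temperature_show(
		struct device *dev, struct device_attribute *attr, char *buf)
	{
		/* ... look up adapter, bail out unless OPEN/UNBLOCKED and not
		 * ERP_FAILED, check zfcp_diag_support_sfp(), then refresh via
		 * zfcp_diag_update_buffer_limited() under the access lock ...
		 */
		return scnprintf(buf, 6 + 2, "%hd\n",
				 adapter->diagnostics->port_data.data.temperature);
	}
	static ZFCP_DEV_ATTR(adapter_diag_sfp, temperature, 0400,
			     zfcp_sysfs_adapter_diag_sfp_temperature_show, NULL);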
+
+static struct attribute *zfcp_sysfs_diag_attrs[] = {
+	&dev_attr_adapter_diag_sfp_temperature.attr,
+	&dev_attr_adapter_diag_sfp_vcc.attr,
+	&dev_attr_adapter_diag_sfp_tx_bias.attr,
+	&dev_attr_adapter_diag_sfp_tx_power.attr,
+	&dev_attr_adapter_diag_sfp_rx_power.attr,
+	&dev_attr_adapter_diag_sfp_port_tx_type.attr,
+	&dev_attr_adapter_diag_sfp_optical_port.attr,
+	&dev_attr_adapter_diag_sfp_sfp_invalid.attr,
+	&dev_attr_adapter_diag_sfp_connector_type.attr,
+	&dev_attr_adapter_diag_sfp_fec_active.attr,
+	&dev_attr_adapter_diag_b2b_credit.attr,
+	NULL,
+};
+
+const struct attribute_group zfcp_sysfs_diag_attr_group = {
+	.name = "diagnostics",
+	.attrs = zfcp_sysfs_diag_attrs,
+};
diff --git a/drivers/s390/virtio/virtio_ccw.c b/drivers/s390/virtio/virtio_ccw.c
index f6f03a3..54e686d 100644
--- a/drivers/s390/virtio/virtio_ccw.c
+++ b/drivers/s390/virtio/virtio_ccw.c
@@ -1372,27 +1372,6 @@
 	{},
 };
 
-#ifdef CONFIG_PM_SLEEP
-static int virtio_ccw_freeze(struct ccw_device *cdev)
-{
-	struct virtio_ccw_device *vcdev = dev_get_drvdata(&cdev->dev);
-
-	return virtio_device_freeze(&vcdev->vdev);
-}
-
-static int virtio_ccw_restore(struct ccw_device *cdev)
-{
-	struct virtio_ccw_device *vcdev = dev_get_drvdata(&cdev->dev);
-	int ret;
-
-	ret = virtio_ccw_set_transport_rev(vcdev);
-	if (ret)
-		return ret;
-
-	return virtio_device_restore(&vcdev->vdev);
-}
-#endif
-
 static struct ccw_driver virtio_ccw_driver = {
 	.driver = {
 		.owner = THIS_MODULE,
@@ -1405,11 +1384,6 @@
 	.set_online = virtio_ccw_online,
 	.notify = virtio_ccw_cio_notify,
 	.int_class = IRQIO_VIR,
-#ifdef CONFIG_PM_SLEEP
-	.freeze = virtio_ccw_freeze,
-	.thaw = virtio_ccw_restore,
-	.restore = virtio_ccw_restore,
-#endif
 };
 
 static int __init pure_hex(char **cp, unsigned int *val, int min_digit,