Update Linux to v5.10.109

Sourced from [1]

[1] https://cdn.kernel.org/pub/linux/kernel/v5.x/linux-5.10.109.tar.xz

Change-Id: I19bca9fc6762d4e63bcf3e4cba88bbe560d9c76c
Signed-off-by: Olivier Deprez <olivier.deprez@arm.com>
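
The s390 AP bus rework in the diff below replaces the global card/queue
lists with an ap_queues hashtable and exports ap_get_qdev() for looking
up a queue device by qid. A minimal, illustrative caller is sketched
here; the function name is hypothetical and not part of the patch, and
the reference handling follows the ap_get_qdev() comment added to
ap_bus.h in this diff:

	/* Illustrative sketch only - not part of the upstream patch. */
	#include "ap_bus.h"

	static int example_lookup_queue(ap_qid_t qid)
	{
		struct ap_queue *aq;

		/* ap_get_qdev() takes a reference on the embedded device. */
		aq = ap_get_qdev(qid);
		if (!aq)
			return -ENODEV;

		/* ... inspect or use the queue, e.g. aq->qid ... */

		/* Drop the reference taken by ap_get_qdev(). */
		put_device(&aq->ap_dev.device);
		return 0;
	}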
diff --git a/drivers/s390/crypto/Makefile b/drivers/s390/crypto/Makefile
index 52aa95c..22d2db6 100644
--- a/drivers/s390/crypto/Makefile
+++ b/drivers/s390/crypto/Makefile
@@ -7,7 +7,8 @@
 obj-$(subst m,y,$(CONFIG_ZCRYPT)) += ap.o
 # zcrypt_api.o and zcrypt_msgtype*.o depend on ap.o
 zcrypt-objs := zcrypt_api.o zcrypt_card.o zcrypt_queue.o
-zcrypt-objs += zcrypt_msgtype6.o zcrypt_msgtype50.o zcrypt_ccamisc.o
+zcrypt-objs += zcrypt_msgtype6.o zcrypt_msgtype50.o
+zcrypt-objs += zcrypt_ccamisc.o zcrypt_ep11misc.o
 obj-$(CONFIG_ZCRYPT) += zcrypt.o
 # adapter drivers depend on ap.o and zcrypt.o
 obj-$(CONFIG_ZCRYPT) += zcrypt_cex2c.o zcrypt_cex2a.o zcrypt_cex4.o
diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c
index 5256e3c..c00a288 100644
--- a/drivers/s390/crypto/ap_bus.c
+++ b/drivers/s390/crypto/ap_bus.c
@@ -18,13 +18,13 @@
 #include <linux/init.h>
 #include <linux/delay.h>
 #include <linux/err.h>
+#include <linux/freezer.h>
 #include <linux/interrupt.h>
 #include <linux/workqueue.h>
 #include <linux/slab.h>
 #include <linux/notifier.h>
 #include <linux/kthread.h>
 #include <linux/mutex.h>
-#include <linux/suspend.h>
 #include <asm/airq.h>
 #include <linux/atomic.h>
 #include <asm/isc.h>
@@ -62,8 +62,10 @@
 
 static struct device *ap_root_device;
 
-DEFINE_SPINLOCK(ap_list_lock);
-LIST_HEAD(ap_card_list);
+/* Hashtable of all queue devices on the AP bus */
+DEFINE_HASHTABLE(ap_queues, 8);
+/* lock used for the ap_queues hashtable */
+DEFINE_SPINLOCK(ap_queues_lock);
 
 /* Default permissions (ioctl, card and domain masking) */
 struct ap_perms ap_perms;
@@ -71,8 +73,7 @@
 DEFINE_MUTEX(ap_perms_mutex);
 EXPORT_SYMBOL(ap_perms_mutex);
 
-static struct ap_config_info *ap_configuration;
-static bool initialised;
+static struct ap_config_info *ap_qci_info;
 
 /*
  * AP bus related debug feature things.
@@ -91,7 +92,7 @@
  * Tasklet & timer for AP request polling and interrupts
  */
 static void ap_tasklet_fn(unsigned long);
-static DECLARE_TASKLET(ap_tasklet, ap_tasklet_fn, 0);
+static DECLARE_TASKLET_OLD(ap_tasklet, ap_tasklet_fn);
 static DECLARE_WAIT_QUEUE_HEAD(ap_poll_wait);
 static struct task_struct *ap_poll_kthread;
 static DEFINE_MUTEX(ap_poll_thread_mutex);
@@ -103,22 +104,17 @@
  */
 static unsigned long long poll_timeout = 250000;
 
-/* Suspend flag */
-static int ap_suspend_flag;
-/* Maximum domain id */
-static int ap_max_domain_id;
-/*
- * Flag to check if domain was set through module parameter domain=. This is
- * important when supsend and resume is done in a z/VM environment where the
- * domain might change.
- */
-static int user_set_domain;
+/* Maximum domain id, if not given via qci */
+static int ap_max_domain_id = 15;
+/* Maximum adapter id, if not given via qci */
+static int ap_max_adapter_id = 63;
+
 static struct bus_type ap_bus_type;
 
 /* Adapter interrupt definitions */
 static void ap_interrupt_handler(struct airq_struct *airq, bool floating);
 
-static int ap_airq_flag;
+static bool ap_irq_flag;
 
 static struct airq_struct ap_airq = {
 	.handler = ap_interrupt_handler,
@@ -126,15 +122,6 @@
 };
 
 /**
- * ap_using_interrupts() - Returns non-zero if interrupt support is
- * available.
- */
-static inline int ap_using_interrupts(void)
-{
-	return ap_airq_flag;
-}
-
-/**
  * ap_airq_ptr() - Get the address of the adapter interrupt indicator
  *
  * Returns the address of the local-summary-indicator of the adapter
@@ -143,7 +130,7 @@
  */
 void *ap_airq_ptr(void)
 {
-	if (ap_using_interrupts())
+	if (ap_irq_flag)
 		return ap_airq.lsi_ptr;
 	return NULL;
 }
@@ -159,12 +146,12 @@
 }
 
 /**
- * ap_configuration_available(): Test if AP configuration
- * information is available.
+ * ap_qci_available(): Test if AP configuration
+ * information can be queried via QCI subfunction.
  *
- * Returns 1 if AP configuration information is available.
+ * Returns 1 if subfunction PQAP(QCI) is available.
  */
-static int ap_configuration_available(void)
+static int ap_qci_available(void)
 {
 	return test_facility(12);
 }
@@ -187,22 +174,22 @@
  */
 static inline int ap_qact_available(void)
 {
-	if (ap_configuration)
-		return ap_configuration->qact;
+	if (ap_qci_info)
+		return ap_qci_info->qact;
 	return 0;
 }
 
 /*
- * ap_query_configuration(): Fetch cryptographic config info
+ * ap_fetch_qci_info(): Fetch cryptographic config info
  *
  * Returns the ap configuration info fetched via PQAP(QCI).
  * On success 0 is returned, on failure a negative errno
  * is returned, e.g. if the PQAP(QCI) instruction is not
  * available, the return value will be -EOPNOTSUPP.
  */
-static inline int ap_query_configuration(struct ap_config_info *info)
+static inline int ap_fetch_qci_info(struct ap_config_info *info)
 {
-	if (!ap_configuration_available())
+	if (!ap_qci_available())
 		return -EOPNOTSUPP;
 	if (!info)
 		return -EINVAL;
@@ -210,20 +197,39 @@
 }
 
 /**
- * ap_init_configuration(): Allocate and query configuration array.
- */
-static void ap_init_configuration(void)
-{
-	if (!ap_configuration_available())
-		return;
+ * ap_init_qci_info(): Allocate and query qci config info.
+ * Does also update the static variables ap_max_domain_id
+ * and ap_max_adapter_id if this info is available.
 
-	ap_configuration = kzalloc(sizeof(*ap_configuration), GFP_KERNEL);
-	if (!ap_configuration)
+ */
+static void __init ap_init_qci_info(void)
+{
+	if (!ap_qci_available()) {
+		AP_DBF_INFO("%s QCI not supported\n", __func__);
 		return;
-	if (ap_query_configuration(ap_configuration) != 0) {
-		kfree(ap_configuration);
-		ap_configuration = NULL;
+	}
+
+	ap_qci_info = kzalloc(sizeof(*ap_qci_info), GFP_KERNEL);
+	if (!ap_qci_info)
 		return;
+	if (ap_fetch_qci_info(ap_qci_info) != 0) {
+		kfree(ap_qci_info);
+		ap_qci_info = NULL;
+		return;
+	}
+	AP_DBF_INFO("%s successful fetched initial qci info\n", __func__);
+
+	if (ap_qci_info->apxa) {
+		if (ap_qci_info->Na) {
+			ap_max_adapter_id = ap_qci_info->Na;
+			AP_DBF_INFO("%s new ap_max_adapter_id is %d\n",
+				    __func__, ap_max_adapter_id);
+		}
+		if (ap_qci_info->Nd) {
+			ap_max_domain_id = ap_qci_info->Nd;
+			AP_DBF_INFO("%s new ap_max_domain_id is %d\n",
+				    __func__, ap_max_domain_id);
+		}
 	}
 }
 
@@ -238,7 +244,6 @@
 
 /*
  * ap_test_config_card_id(): Test, whether an AP card ID is configured.
- * @id AP card ID
  *
  * Returns 0 if the card is not configured
  *	   1 if the card is configured or
@@ -246,16 +251,16 @@
  */
 static inline int ap_test_config_card_id(unsigned int id)
 {
-	if (!ap_configuration)	/* QCI not supported */
-		/* only ids 0...3F may be probed */
-		return id < 0x40 ? 1 : 0;
-	return ap_test_config(ap_configuration->apm, id);
+	if (id > ap_max_adapter_id)
+		return 0;
+	if (ap_qci_info)
+		return ap_test_config(ap_qci_info->apm, id);
+	return 1;
 }
 
 /*
  * ap_test_config_usage_domain(): Test, whether an AP usage domain
  * is configured.
- * @domain AP usage domain ID
  *
  * Returns 0 if the usage domain is not configured
  *	   1 if the usage domain is configured or
@@ -263,9 +268,11 @@
  */
 int ap_test_config_usage_domain(unsigned int domain)
 {
-	if (!ap_configuration)	/* QCI not supported */
-		return domain < 16;
-	return ap_test_config(ap_configuration->aqm, domain);
+	if (domain > ap_max_domain_id)
+		return 0;
+	if (ap_qci_info)
+		return ap_test_config(ap_qci_info->aqm, domain);
+	return 1;
 }
 EXPORT_SYMBOL(ap_test_config_usage_domain);
 
@@ -279,43 +286,48 @@
  */
 int ap_test_config_ctrl_domain(unsigned int domain)
 {
-	if (!ap_configuration)	/* QCI not supported */
+	if (!ap_qci_info || domain > ap_max_domain_id)
 		return 0;
-	return ap_test_config(ap_configuration->adm, domain);
+	return ap_test_config(ap_qci_info->adm, domain);
 }
 EXPORT_SYMBOL(ap_test_config_ctrl_domain);
 
-/**
- * ap_query_queue(): Check if an AP queue is available.
- * @qid: The AP queue number
- * @queue_depth: Pointer to queue depth value
- * @device_type: Pointer to device type value
- * @facilities: Pointer to facility indicator
+/*
+ * ap_queue_info(): Check and get AP queue info.
+ * Returns true if TAPQ succeeded and the info is filled or
+ * false otherwise.
  */
-static int ap_query_queue(ap_qid_t qid, int *queue_depth, int *device_type,
-			  unsigned int *facilities)
+static bool ap_queue_info(ap_qid_t qid, int *q_type,
+			  unsigned int *q_fac, int *q_depth, bool *q_decfg)
 {
 	struct ap_queue_status status;
-	unsigned long info;
-	int nd;
+	unsigned long info = 0;
 
-	if (!ap_test_config_card_id(AP_QID_CARD(qid)))
-		return -ENODEV;
+	/* make sure we don't run into a specifiation exception */
+	if (AP_QID_CARD(qid) > ap_max_adapter_id ||
+	    AP_QID_QUEUE(qid) > ap_max_domain_id)
+		return false;
 
+	/* call TAPQ on this APQN */
 	status = ap_test_queue(qid, ap_apft_available(), &info);
 	switch (status.response_code) {
 	case AP_RESPONSE_NORMAL:
-		*queue_depth = (int)(info & 0xff);
-		*device_type = (int)((info >> 24) & 0xff);
-		*facilities = (unsigned int)(info >> 32);
-		/* Update maximum domain id */
-		nd = (info >> 16) & 0xff;
-		/* if N bit is available, z13 and newer */
-		if ((info & (1UL << 57)) && nd > 0)
-			ap_max_domain_id = nd;
-		else /* older machine types */
-			ap_max_domain_id = 15;
-		switch (*device_type) {
+	case AP_RESPONSE_RESET_IN_PROGRESS:
+	case AP_RESPONSE_DECONFIGURED:
+	case AP_RESPONSE_CHECKSTOPPED:
+	case AP_RESPONSE_BUSY:
+		/*
+		 * According to the architecture in all these cases the
+		 * info should be filled. All bits 0 is not possible as
+		 * there is at least one of the mode bits set.
+		 */
+		if (WARN_ON_ONCE(!info))
+			return false;
+		*q_type = (int)((info >> 24) & 0xff);
+		*q_fac = (unsigned int)(info >> 32);
+		*q_depth = (int)(info & 0xff);
+		*q_decfg = status.response_code == AP_RESPONSE_DECONFIGURED;
+		switch (*q_type) {
 			/* For CEX2 and CEX3 the available functions
 			 * are not reflected by the facilities bits.
 			 * Instead it is coded into the type. So here
@@ -323,45 +335,39 @@
 			 */
 		case AP_DEVICE_TYPE_CEX2A:
 		case AP_DEVICE_TYPE_CEX3A:
-			*facilities |= 0x08000000;
+			*q_fac |= 0x08000000;
 			break;
 		case AP_DEVICE_TYPE_CEX2C:
 		case AP_DEVICE_TYPE_CEX3C:
-			*facilities |= 0x10000000;
+			*q_fac |= 0x10000000;
 			break;
 		default:
 			break;
 		}
-		return 0;
-	case AP_RESPONSE_Q_NOT_AVAIL:
-	case AP_RESPONSE_DECONFIGURED:
-	case AP_RESPONSE_CHECKSTOPPED:
-	case AP_RESPONSE_INVALID_ADDRESS:
-		return -ENODEV;
-	case AP_RESPONSE_RESET_IN_PROGRESS:
-	case AP_RESPONSE_OTHERWISE_CHANGED:
-	case AP_RESPONSE_BUSY:
-		return -EBUSY;
+		return true;
 	default:
-		BUG();
+		/*
+		 * A response code which indicates, there is no info available.
+		 */
+		return false;
 	}
 }
 
-void ap_wait(enum ap_wait wait)
+void ap_wait(enum ap_sm_wait wait)
 {
 	ktime_t hr_time;
 
 	switch (wait) {
-	case AP_WAIT_AGAIN:
-	case AP_WAIT_INTERRUPT:
-		if (ap_using_interrupts())
+	case AP_SM_WAIT_AGAIN:
+	case AP_SM_WAIT_INTERRUPT:
+		if (ap_irq_flag)
 			break;
 		if (ap_poll_kthread) {
 			wake_up(&ap_poll_wait);
 			break;
 		}
-		/* Fall through */
-	case AP_WAIT_TIMEOUT:
+		fallthrough;
+	case AP_SM_WAIT_TIMEOUT:
 		spin_lock_bh(&ap_poll_timer_lock);
 		if (!hrtimer_is_queued(&ap_poll_timer)) {
 			hr_time = poll_timeout;
@@ -370,7 +376,7 @@
 		}
 		spin_unlock_bh(&ap_poll_timer_lock);
 		break;
-	case AP_WAIT_NONE:
+	case AP_SM_WAIT_NONE:
 	default:
 		break;
 	}
@@ -386,10 +392,8 @@
 {
 	struct ap_queue *aq = from_timer(aq, t, timeout);
 
-	if (ap_suspend_flag)
-		return;
 	spin_lock_bh(&aq->lock);
-	ap_wait(ap_sm_event(aq, AP_EVENT_TIMEOUT));
+	ap_wait(ap_sm_event(aq, AP_SM_EVENT_TIMEOUT));
 	spin_unlock_bh(&aq->lock);
 }
 
@@ -401,8 +405,7 @@
  */
 static enum hrtimer_restart ap_poll_timeout(struct hrtimer *unused)
 {
-	if (!ap_suspend_flag)
-		tasklet_schedule(&ap_tasklet);
+	tasklet_schedule(&ap_tasklet);
 	return HRTIMER_NORESTART;
 }
 
@@ -413,8 +416,7 @@
 static void ap_interrupt_handler(struct airq_struct *airq, bool floating)
 {
 	inc_irq_stat(IRQIO_APB);
-	if (!ap_suspend_flag)
-		tasklet_schedule(&ap_tasklet);
+	tasklet_schedule(&ap_tasklet);
 }
 
 /**
@@ -425,45 +427,41 @@
  */
 static void ap_tasklet_fn(unsigned long dummy)
 {
-	struct ap_card *ac;
+	int bkt;
 	struct ap_queue *aq;
-	enum ap_wait wait = AP_WAIT_NONE;
+	enum ap_sm_wait wait = AP_SM_WAIT_NONE;
 
 	/* Reset the indicator if interrupts are used. Thus new interrupts can
 	 * be received. Doing it in the beginning of the tasklet is therefor
 	 * important that no requests on any AP get lost.
 	 */
-	if (ap_using_interrupts())
+	if (ap_irq_flag)
 		xchg(ap_airq.lsi_ptr, 0);
 
-	spin_lock_bh(&ap_list_lock);
-	for_each_ap_card(ac) {
-		for_each_ap_queue(aq, ac) {
-			spin_lock_bh(&aq->lock);
-			wait = min(wait, ap_sm_event_loop(aq, AP_EVENT_POLL));
-			spin_unlock_bh(&aq->lock);
-		}
+	spin_lock_bh(&ap_queues_lock);
+	hash_for_each(ap_queues, bkt, aq, hnode) {
+		spin_lock_bh(&aq->lock);
+		wait = min(wait, ap_sm_event_loop(aq, AP_SM_EVENT_POLL));
+		spin_unlock_bh(&aq->lock);
 	}
-	spin_unlock_bh(&ap_list_lock);
+	spin_unlock_bh(&ap_queues_lock);
 
 	ap_wait(wait);
 }
 
 static int ap_pending_requests(void)
 {
-	struct ap_card *ac;
+	int bkt;
 	struct ap_queue *aq;
 
-	spin_lock_bh(&ap_list_lock);
-	for_each_ap_card(ac) {
-		for_each_ap_queue(aq, ac) {
-			if (aq->queue_count == 0)
-				continue;
-			spin_unlock_bh(&ap_list_lock);
-			return 1;
-		}
+	spin_lock_bh(&ap_queues_lock);
+	hash_for_each(ap_queues, bkt, aq, hnode) {
+		if (aq->queue_count == 0)
+			continue;
+		spin_unlock_bh(&ap_queues_lock);
+		return 1;
 	}
-	spin_unlock_bh(&ap_list_lock);
+	spin_unlock_bh(&ap_queues_lock);
 	return 0;
 }
 
@@ -486,7 +484,7 @@
 	while (!kthread_should_stop()) {
 		add_wait_queue(&ap_poll_wait, &wait);
 		set_current_state(TASK_INTERRUPTIBLE);
-		if (ap_suspend_flag || !ap_pending_requests()) {
+		if (!ap_pending_requests()) {
 			schedule();
 			try_to_freeze();
 		}
@@ -507,7 +505,7 @@
 {
 	int rc;
 
-	if (ap_using_interrupts() || ap_poll_kthread)
+	if (ap_irq_flag || ap_poll_kthread)
 		return 0;
 	mutex_lock(&ap_poll_thread_mutex);
 	ap_poll_kthread = kthread_run(ap_poll_thread, NULL, "appoll");
@@ -587,51 +585,6 @@
 	return retval;
 }
 
-static int ap_dev_suspend(struct device *dev)
-{
-	struct ap_device *ap_dev = to_ap_dev(dev);
-
-	if (ap_dev->drv && ap_dev->drv->suspend)
-		ap_dev->drv->suspend(ap_dev);
-	return 0;
-}
-
-static int ap_dev_resume(struct device *dev)
-{
-	struct ap_device *ap_dev = to_ap_dev(dev);
-
-	if (ap_dev->drv && ap_dev->drv->resume)
-		ap_dev->drv->resume(ap_dev);
-	return 0;
-}
-
-static void ap_bus_suspend(void)
-{
-	AP_DBF(DBF_DEBUG, "%s running\n", __func__);
-
-	ap_suspend_flag = 1;
-	/*
-	 * Disable scanning for devices, thus we do not want to scan
-	 * for them after removing.
-	 */
-	flush_work(&ap_scan_work);
-	tasklet_disable(&ap_tasklet);
-}
-
-static int __ap_card_devices_unregister(struct device *dev, void *dummy)
-{
-	if (is_card_dev(dev))
-		device_unregister(dev);
-	return 0;
-}
-
-static int __ap_queue_devices_unregister(struct device *dev, void *dummy)
-{
-	if (is_queue_dev(dev))
-		device_unregister(dev);
-	return 0;
-}
-
 static int __ap_queue_devices_with_id_unregister(struct device *dev, void *data)
 {
 	if (is_queue_dev(dev) &&
@@ -640,67 +593,10 @@
 	return 0;
 }
 
-static void ap_bus_resume(void)
-{
-	int rc;
-
-	AP_DBF(DBF_DEBUG, "%s running\n", __func__);
-
-	/* remove all queue devices */
-	bus_for_each_dev(&ap_bus_type, NULL, NULL,
-			 __ap_queue_devices_unregister);
-	/* remove all card devices */
-	bus_for_each_dev(&ap_bus_type, NULL, NULL,
-			 __ap_card_devices_unregister);
-
-	/* Reset thin interrupt setting */
-	if (ap_interrupts_available() && !ap_using_interrupts()) {
-		rc = register_adapter_interrupt(&ap_airq);
-		ap_airq_flag = (rc == 0);
-	}
-	if (!ap_interrupts_available() && ap_using_interrupts()) {
-		unregister_adapter_interrupt(&ap_airq);
-		ap_airq_flag = 0;
-	}
-	/* Reset domain */
-	if (!user_set_domain)
-		ap_domain_index = -1;
-	/* Get things going again */
-	ap_suspend_flag = 0;
-	if (ap_airq_flag)
-		xchg(ap_airq.lsi_ptr, 0);
-	tasklet_enable(&ap_tasklet);
-	queue_work(system_long_wq, &ap_scan_work);
-}
-
-static int ap_power_event(struct notifier_block *this, unsigned long event,
-			  void *ptr)
-{
-	switch (event) {
-	case PM_HIBERNATION_PREPARE:
-	case PM_SUSPEND_PREPARE:
-		ap_bus_suspend();
-		break;
-	case PM_POST_HIBERNATION:
-	case PM_POST_SUSPEND:
-		ap_bus_resume();
-		break;
-	default:
-		break;
-	}
-	return NOTIFY_DONE;
-}
-static struct notifier_block ap_power_notifier = {
-	.notifier_call = ap_power_event,
-};
-
-static SIMPLE_DEV_PM_OPS(ap_bus_pm_ops, ap_dev_suspend, ap_dev_resume);
-
 static struct bus_type ap_bus_type = {
 	.name = "ap",
 	.match = &ap_bus_match,
 	.uevent = &ap_uevent,
-	.pm = &ap_bus_pm_ops,
 };
 
 static int __ap_revise_reserved(struct device *dev, void *dummy)
@@ -717,8 +613,8 @@
 		drvres = to_ap_drv(dev->driver)->flags
 			& AP_DRIVER_FLAG_DEFAULT;
 		if (!!devres != !!drvres) {
-			AP_DBF(DBF_DEBUG, "reprobing queue=%02x.%04x\n",
-			       card, queue);
+			AP_DBF_DBG("reprobing queue=%02x.%04x\n",
+				   card, queue);
 			rc = device_reprobe(dev);
 		}
 	}
@@ -775,7 +671,10 @@
 {
 	struct ap_device *ap_dev = to_ap_dev(dev);
 	struct ap_driver *ap_drv = to_ap_drv(dev->driver);
-	int card, queue, devres, drvres, rc;
+	int card, queue, devres, drvres, rc = -ENODEV;
+
+	if (!get_device(dev))
+		return rc;
 
 	if (is_queue_dev(dev)) {
 		/*
@@ -792,31 +691,30 @@
 		mutex_unlock(&ap_perms_mutex);
 		drvres = ap_drv->flags & AP_DRIVER_FLAG_DEFAULT;
 		if (!!devres != !!drvres)
-			return -ENODEV;
+			goto out;
 	}
 
 	/* Add queue/card to list of active queues/cards */
-	spin_lock_bh(&ap_list_lock);
-	if (is_card_dev(dev))
-		list_add(&to_ap_card(dev)->list, &ap_card_list);
-	else
-		list_add(&to_ap_queue(dev)->list,
-			 &to_ap_queue(dev)->card->queues);
-	spin_unlock_bh(&ap_list_lock);
+	spin_lock_bh(&ap_queues_lock);
+	if (is_queue_dev(dev))
+		hash_add(ap_queues, &to_ap_queue(dev)->hnode,
+			 to_ap_queue(dev)->qid);
+	spin_unlock_bh(&ap_queues_lock);
 
 	ap_dev->drv = ap_drv;
 	rc = ap_drv->probe ? ap_drv->probe(ap_dev) : -ENODEV;
 
 	if (rc) {
-		spin_lock_bh(&ap_list_lock);
-		if (is_card_dev(dev))
-			list_del_init(&to_ap_card(dev)->list);
-		else
-			list_del_init(&to_ap_queue(dev)->list);
-		spin_unlock_bh(&ap_list_lock);
+		spin_lock_bh(&ap_queues_lock);
+		if (is_queue_dev(dev))
+			hash_del(&to_ap_queue(dev)->hnode);
+		spin_unlock_bh(&ap_queues_lock);
 		ap_dev->drv = NULL;
 	}
 
+out:
+	if (rc)
+		put_device(dev);
 	return rc;
 }
 
@@ -838,24 +736,40 @@
 		ap_queue_remove(to_ap_queue(dev));
 
 	/* Remove queue/card from list of active queues/cards */
-	spin_lock_bh(&ap_list_lock);
-	if (is_card_dev(dev))
-		list_del_init(&to_ap_card(dev)->list);
-	else
-		list_del_init(&to_ap_queue(dev)->list);
-	spin_unlock_bh(&ap_list_lock);
+	spin_lock_bh(&ap_queues_lock);
+	if (is_queue_dev(dev))
+		hash_del(&to_ap_queue(dev)->hnode);
+	spin_unlock_bh(&ap_queues_lock);
+
+	put_device(dev);
 
 	return 0;
 }
 
+struct ap_queue *ap_get_qdev(ap_qid_t qid)
+{
+	int bkt;
+	struct ap_queue *aq;
+
+	spin_lock_bh(&ap_queues_lock);
+	hash_for_each(ap_queues, bkt, aq, hnode) {
+		if (aq->qid == qid) {
+			get_device(&aq->ap_dev.device);
+			spin_unlock_bh(&ap_queues_lock);
+			return aq;
+		}
+	}
+	spin_unlock_bh(&ap_queues_lock);
+
+	return NULL;
+}
+EXPORT_SYMBOL(ap_get_qdev);
+
 int ap_driver_register(struct ap_driver *ap_drv, struct module *owner,
 		       char *name)
 {
 	struct device_driver *drv = &ap_drv->driver;
 
-	if (!initialised)
-		return -ENODEV;
-
 	drv->bus = &ap_bus_type;
 	drv->probe = ap_device_probe;
 	drv->remove = ap_device_remove;
@@ -873,8 +787,6 @@
 
 void ap_bus_force_rescan(void)
 {
-	if (ap_suspend_flag)
-		return;
 	/* processing a asynchronous bus rescan */
 	del_timer(&ap_config_timer);
 	queue_work(system_long_wq, &ap_scan_work);
@@ -887,7 +799,7 @@
 */
 void ap_bus_cfg_chg(void)
 {
-	AP_DBF(DBF_INFO, "%s config change, forcing bus rescan\n", __func__);
+	AP_DBF_DBG("%s config change, forcing bus rescan\n", __func__);
 
 	ap_bus_force_rescan();
 }
@@ -1021,7 +933,7 @@
 
 static ssize_t ap_domain_show(struct bus_type *bus, char *buf)
 {
-	return snprintf(buf, PAGE_SIZE, "%d\n", ap_domain_index);
+	return scnprintf(buf, PAGE_SIZE, "%d\n", ap_domain_index);
 }
 
 static ssize_t ap_domain_store(struct bus_type *bus,
@@ -1033,11 +945,12 @@
 	    domain < 0 || domain > ap_max_domain_id ||
 	    !test_bit_inv(domain, ap_perms.aqm))
 		return -EINVAL;
+
 	spin_lock_bh(&ap_domain_lock);
 	ap_domain_index = domain;
 	spin_unlock_bh(&ap_domain_lock);
 
-	AP_DBF(DBF_DEBUG, "stored new default domain=%d\n", domain);
+	AP_DBF_INFO("stored new default domain=%d\n", domain);
 
 	return count;
 }
@@ -1046,60 +959,60 @@
 
 static ssize_t ap_control_domain_mask_show(struct bus_type *bus, char *buf)
 {
-	if (!ap_configuration)	/* QCI not supported */
-		return snprintf(buf, PAGE_SIZE, "not supported\n");
+	if (!ap_qci_info)	/* QCI not supported */
+		return scnprintf(buf, PAGE_SIZE, "not supported\n");
 
-	return snprintf(buf, PAGE_SIZE,
-			"0x%08x%08x%08x%08x%08x%08x%08x%08x\n",
-			ap_configuration->adm[0], ap_configuration->adm[1],
-			ap_configuration->adm[2], ap_configuration->adm[3],
-			ap_configuration->adm[4], ap_configuration->adm[5],
-			ap_configuration->adm[6], ap_configuration->adm[7]);
+	return scnprintf(buf, PAGE_SIZE,
+			 "0x%08x%08x%08x%08x%08x%08x%08x%08x\n",
+			 ap_qci_info->adm[0], ap_qci_info->adm[1],
+			 ap_qci_info->adm[2], ap_qci_info->adm[3],
+			 ap_qci_info->adm[4], ap_qci_info->adm[5],
+			 ap_qci_info->adm[6], ap_qci_info->adm[7]);
 }
 
 static BUS_ATTR_RO(ap_control_domain_mask);
 
 static ssize_t ap_usage_domain_mask_show(struct bus_type *bus, char *buf)
 {
-	if (!ap_configuration)	/* QCI not supported */
-		return snprintf(buf, PAGE_SIZE, "not supported\n");
+	if (!ap_qci_info)	/* QCI not supported */
+		return scnprintf(buf, PAGE_SIZE, "not supported\n");
 
-	return snprintf(buf, PAGE_SIZE,
-			"0x%08x%08x%08x%08x%08x%08x%08x%08x\n",
-			ap_configuration->aqm[0], ap_configuration->aqm[1],
-			ap_configuration->aqm[2], ap_configuration->aqm[3],
-			ap_configuration->aqm[4], ap_configuration->aqm[5],
-			ap_configuration->aqm[6], ap_configuration->aqm[7]);
+	return scnprintf(buf, PAGE_SIZE,
+			 "0x%08x%08x%08x%08x%08x%08x%08x%08x\n",
+			 ap_qci_info->aqm[0], ap_qci_info->aqm[1],
+			 ap_qci_info->aqm[2], ap_qci_info->aqm[3],
+			 ap_qci_info->aqm[4], ap_qci_info->aqm[5],
+			 ap_qci_info->aqm[6], ap_qci_info->aqm[7]);
 }
 
 static BUS_ATTR_RO(ap_usage_domain_mask);
 
 static ssize_t ap_adapter_mask_show(struct bus_type *bus, char *buf)
 {
-	if (!ap_configuration)	/* QCI not supported */
-		return snprintf(buf, PAGE_SIZE, "not supported\n");
+	if (!ap_qci_info)	/* QCI not supported */
+		return scnprintf(buf, PAGE_SIZE, "not supported\n");
 
-	return snprintf(buf, PAGE_SIZE,
-			"0x%08x%08x%08x%08x%08x%08x%08x%08x\n",
-			ap_configuration->apm[0], ap_configuration->apm[1],
-			ap_configuration->apm[2], ap_configuration->apm[3],
-			ap_configuration->apm[4], ap_configuration->apm[5],
-			ap_configuration->apm[6], ap_configuration->apm[7]);
+	return scnprintf(buf, PAGE_SIZE,
+			 "0x%08x%08x%08x%08x%08x%08x%08x%08x\n",
+			 ap_qci_info->apm[0], ap_qci_info->apm[1],
+			 ap_qci_info->apm[2], ap_qci_info->apm[3],
+			 ap_qci_info->apm[4], ap_qci_info->apm[5],
+			 ap_qci_info->apm[6], ap_qci_info->apm[7]);
 }
 
 static BUS_ATTR_RO(ap_adapter_mask);
 
 static ssize_t ap_interrupts_show(struct bus_type *bus, char *buf)
 {
-	return snprintf(buf, PAGE_SIZE, "%d\n",
-			ap_using_interrupts() ? 1 : 0);
+	return scnprintf(buf, PAGE_SIZE, "%d\n",
+			 ap_irq_flag ? 1 : 0);
 }
 
 static BUS_ATTR_RO(ap_interrupts);
 
 static ssize_t config_time_show(struct bus_type *bus, char *buf)
 {
-	return snprintf(buf, PAGE_SIZE, "%d\n", ap_config_time);
+	return scnprintf(buf, PAGE_SIZE, "%d\n", ap_config_time);
 }
 
 static ssize_t config_time_store(struct bus_type *bus,
@@ -1118,7 +1031,7 @@
 
 static ssize_t poll_thread_show(struct bus_type *bus, char *buf)
 {
-	return snprintf(buf, PAGE_SIZE, "%d\n", ap_poll_kthread ? 1 : 0);
+	return scnprintf(buf, PAGE_SIZE, "%d\n", ap_poll_kthread ? 1 : 0);
 }
 
 static ssize_t poll_thread_store(struct bus_type *bus,
@@ -1141,7 +1054,7 @@
 
 static ssize_t poll_timeout_show(struct bus_type *bus, char *buf)
 {
-	return snprintf(buf, PAGE_SIZE, "%llu\n", poll_timeout);
+	return scnprintf(buf, PAGE_SIZE, "%llu\n", poll_timeout);
 }
 
 static ssize_t poll_timeout_store(struct bus_type *bus, const char *buf,
@@ -1170,27 +1083,28 @@
 
 static ssize_t ap_max_domain_id_show(struct bus_type *bus, char *buf)
 {
-	int max_domain_id;
-
-	if (ap_configuration)
-		max_domain_id = ap_max_domain_id ? : -1;
-	else
-		max_domain_id = 15;
-	return snprintf(buf, PAGE_SIZE, "%d\n", max_domain_id);
+	return scnprintf(buf, PAGE_SIZE, "%d\n", ap_max_domain_id);
 }
 
 static BUS_ATTR_RO(ap_max_domain_id);
 
+static ssize_t ap_max_adapter_id_show(struct bus_type *bus, char *buf)
+{
+	return scnprintf(buf, PAGE_SIZE, "%d\n", ap_max_adapter_id);
+}
+
+static BUS_ATTR_RO(ap_max_adapter_id);
+
 static ssize_t apmask_show(struct bus_type *bus, char *buf)
 {
 	int rc;
 
 	if (mutex_lock_interruptible(&ap_perms_mutex))
 		return -ERESTARTSYS;
-	rc = snprintf(buf, PAGE_SIZE,
-		      "0x%016lx%016lx%016lx%016lx\n",
-		      ap_perms.apm[0], ap_perms.apm[1],
-		      ap_perms.apm[2], ap_perms.apm[3]);
+	rc = scnprintf(buf, PAGE_SIZE,
+		       "0x%016lx%016lx%016lx%016lx\n",
+		       ap_perms.apm[0], ap_perms.apm[1],
+		       ap_perms.apm[2], ap_perms.apm[3]);
 	mutex_unlock(&ap_perms_mutex);
 
 	return rc;
@@ -1218,10 +1132,10 @@
 
 	if (mutex_lock_interruptible(&ap_perms_mutex))
 		return -ERESTARTSYS;
-	rc = snprintf(buf, PAGE_SIZE,
-		      "0x%016lx%016lx%016lx%016lx\n",
-		      ap_perms.aqm[0], ap_perms.aqm[1],
-		      ap_perms.aqm[2], ap_perms.aqm[3]);
+	rc = scnprintf(buf, PAGE_SIZE,
+		       "0x%016lx%016lx%016lx%016lx\n",
+		       ap_perms.aqm[0], ap_perms.aqm[1],
+		       ap_perms.aqm[2], ap_perms.aqm[3]);
 	mutex_unlock(&ap_perms_mutex);
 
 	return rc;
@@ -1253,6 +1167,7 @@
 	&bus_attr_ap_interrupts,
 	&bus_attr_poll_timeout,
 	&bus_attr_ap_max_domain_id,
+	&bus_attr_ap_max_adapter_id,
 	&bus_attr_apmask,
 	&bus_attr_aqmask,
 	NULL,
@@ -1264,47 +1179,42 @@
  */
 static void ap_select_domain(void)
 {
-	int count, max_count, best_domain;
 	struct ap_queue_status status;
-	int i, j;
+	int card, dom;
 
 	/*
-	 * We want to use a single domain. Either the one specified with
-	 * the "domain=" parameter or the domain with the maximum number
-	 * of devices.
+	 * Choose the default domain. Either the one specified with
+	 * the "domain=" parameter or the first domain with at least
+	 * one valid APQN.
 	 */
 	spin_lock_bh(&ap_domain_lock);
 	if (ap_domain_index >= 0) {
 		/* Domain has already been selected. */
-		spin_unlock_bh(&ap_domain_lock);
-		return;
+		goto out;
 	}
-	best_domain = -1;
-	max_count = 0;
-	for (i = 0; i < AP_DOMAINS; i++) {
-		if (!ap_test_config_usage_domain(i) ||
-		    !test_bit_inv(i, ap_perms.aqm))
+	for (dom = 0; dom <= ap_max_domain_id; dom++) {
+		if (!ap_test_config_usage_domain(dom) ||
+		    !test_bit_inv(dom, ap_perms.aqm))
 			continue;
-		count = 0;
-		for (j = 0; j < AP_DEVICES; j++) {
-			if (!ap_test_config_card_id(j))
+		for (card = 0; card <= ap_max_adapter_id; card++) {
+			if (!ap_test_config_card_id(card) ||
+			    !test_bit_inv(card, ap_perms.apm))
 				continue;
-			status = ap_test_queue(AP_MKQID(j, i),
+			status = ap_test_queue(AP_MKQID(card, dom),
 					       ap_apft_available(),
 					       NULL);
-			if (status.response_code != AP_RESPONSE_NORMAL)
-				continue;
-			count++;
+			if (status.response_code == AP_RESPONSE_NORMAL)
+				break;
 		}
-		if (count > max_count) {
-			max_count = count;
-			best_domain = i;
-		}
+		if (card <= ap_max_adapter_id)
+			break;
 	}
-	if (best_domain >= 0) {
-		ap_domain_index = best_domain;
-		AP_DBF(DBF_DEBUG, "new ap_domain_index=%d\n", ap_domain_index);
+	if (dom <= ap_max_domain_id) {
+		ap_domain_index = dom;
+		AP_DBF_INFO("%s new default domain is %d\n",
+			    __func__, ap_domain_index);
 	}
+out:
 	spin_unlock_bh(&ap_domain_lock);
 }
 
@@ -1318,8 +1228,11 @@
 	int comp_type = 0;
 
 	/* < CEX2A is not supported */
-	if (rawtype < AP_DEVICE_TYPE_CEX2A)
+	if (rawtype < AP_DEVICE_TYPE_CEX2A) {
+		AP_DBF_WARN("get_comp_type queue=%02x.%04x unsupported type %d\n",
+			    AP_QID_CARD(qid), AP_QID_QUEUE(qid), rawtype);
 		return 0;
+	}
 	/* up to CEX7 known and fully supported */
 	if (rawtype <= AP_DEVICE_TYPE_CEX7)
 		return rawtype;
@@ -1341,11 +1254,12 @@
 			comp_type = apinfo.cat;
 	}
 	if (!comp_type)
-		AP_DBF(DBF_WARN, "queue=%02x.%04x unable to map type %d\n",
-		       AP_QID_CARD(qid), AP_QID_QUEUE(qid), rawtype);
+		AP_DBF_WARN("get_comp_type queue=%02x.%04x unable to map type %d\n",
+			    AP_QID_CARD(qid), AP_QID_QUEUE(qid), rawtype);
 	else if (comp_type != rawtype)
-		AP_DBF(DBF_INFO, "queue=%02x.%04x map type %d to %d\n",
-		       AP_QID_CARD(qid), AP_QID_QUEUE(qid), rawtype, comp_type);
+		AP_DBF_INFO("get_comp_type queue=%02x.%04x map type %d to %d\n",
+			    AP_QID_CARD(qid), AP_QID_QUEUE(qid),
+			    rawtype, comp_type);
 	return comp_type;
 }
 
@@ -1379,156 +1293,280 @@
 
 /*
  * Helper function for ap_scan_bus().
- * Does the scan bus job for the given adapter id.
+ * Remove card device and associated queue devices.
  */
-static void _ap_scan_bus_adapter(int id)
+static inline void ap_scan_rm_card_dev_and_queue_devs(struct ap_card *ac)
 {
+	bus_for_each_dev(&ap_bus_type, NULL,
+			 (void *)(long) ac->id,
+			 __ap_queue_devices_with_id_unregister);
+	device_unregister(&ac->ap_dev.device);
+}
+
+/*
+ * Helper function for ap_scan_bus().
+ * Does the scan bus job for all the domains within
+ * a valid adapter given by an ap_card ptr.
+ */
+static inline void ap_scan_domains(struct ap_card *ac)
+{
+	bool decfg;
 	ap_qid_t qid;
 	unsigned int func;
-	struct ap_card *ac;
 	struct device *dev;
 	struct ap_queue *aq;
-	int rc, dom, depth, type, comp_type, borked;
-
-	/* check if there is a card device registered with this id */
-	dev = bus_find_device(&ap_bus_type, NULL,
-			      (void *)(long) id,
-			      __match_card_device_with_id);
-	ac = dev ? to_ap_card(dev) : NULL;
-	if (!ap_test_config_card_id(id)) {
-		if (dev) {
-			/* Card device has been removed from configuration */
-			bus_for_each_dev(&ap_bus_type, NULL,
-					 (void *)(long) id,
-					 __ap_queue_devices_with_id_unregister);
-			device_unregister(dev);
-			put_device(dev);
-		}
-		return;
-	}
+	int rc, dom, depth, type;
 
 	/*
-	 * This card id is enabled in the configuration. If we already have
-	 * a card device with this id, check if type and functions are still
-	 * the very same. Also verify that at least one queue is available.
+	 * Go through the configuration for the domains and compare them
+	 * to the existing queue devices. Also take care of the config
+	 * and error state for the queue devices.
 	 */
-	if (ac) {
-		/* find the first valid queue */
-		for (dom = 0; dom < AP_DOMAINS; dom++) {
-			qid = AP_MKQID(id, dom);
-			if (ap_query_queue(qid, &depth, &type, &func) == 0)
-				break;
-		}
-		borked = 0;
-		if (dom >= AP_DOMAINS) {
-			/* no accessible queue on this card */
-			borked = 1;
-		} else if (ac->raw_hwtype != type) {
-			/* card type has changed */
-			AP_DBF(DBF_INFO, "card=%02x type changed.\n", id);
-			borked = 1;
-		} else if (ac->functions != func) {
-			/* card functions have changed */
-			AP_DBF(DBF_INFO, "card=%02x functions changed.\n", id);
-			borked = 1;
-		}
-		if (borked) {
-			/* unregister card device and associated queues */
-			bus_for_each_dev(&ap_bus_type, NULL,
-					 (void *)(long) id,
-					 __ap_queue_devices_with_id_unregister);
-			device_unregister(dev);
-			put_device(dev);
-			/* go back if there is no valid queue on this card */
-			if (dom >= AP_DOMAINS)
-				return;
-			ac = NULL;
-		}
-	}
 
-	/*
-	 * Go through all possible queue ids. Check and maybe create or release
-	 * queue devices for this card. If there exists no card device yet,
-	 * create a card device also.
-	 */
-	for (dom = 0; dom < AP_DOMAINS; dom++) {
-		qid = AP_MKQID(id, dom);
+	for (dom = 0; dom <= ap_max_domain_id; dom++) {
+		qid = AP_MKQID(ac->id, dom);
 		dev = bus_find_device(&ap_bus_type, NULL,
 				      (void *)(long) qid,
 				      __match_queue_device_with_qid);
 		aq = dev ? to_ap_queue(dev) : NULL;
 		if (!ap_test_config_usage_domain(dom)) {
 			if (dev) {
-				/* Queue device exists but has been
-				 * removed from configuration.
-				 */
+				AP_DBF_INFO("%s(%d,%d) not in config any more, rm queue device\n",
+					    __func__, ac->id, dom);
 				device_unregister(dev);
 				put_device(dev);
 			}
 			continue;
 		}
-		/* try to fetch infos about this queue */
-		rc = ap_query_queue(qid, &depth, &type, &func);
-		if (dev) {
-			if (rc == -ENODEV)
-				borked = 1;
-			else {
-				spin_lock_bh(&aq->lock);
-				borked = aq->state == AP_STATE_BORKED;
-				spin_unlock_bh(&aq->lock);
-			}
-			if (borked) {
-				/* Remove broken device */
-				AP_DBF(DBF_DEBUG,
-				       "removing broken queue=%02x.%04x\n",
-				       id, dom);
+		/* domain is valid, get info from this APQN */
+		if (!ap_queue_info(qid, &type, &func, &depth, &decfg)) {
+			if (aq) {
+				AP_DBF_INFO(
+					"%s(%d,%d) ap_queue_info() not successful, rm queue device\n",
+					__func__, ac->id, dom);
 				device_unregister(dev);
+				put_device(dev);
 			}
-			put_device(dev);
 			continue;
 		}
-		if (rc)
-			continue;
-		/* a new queue device is needed, check out comp type */
-		comp_type = ap_get_compatible_type(qid, type, func);
-		if (!comp_type)
-			continue;
-		/* maybe a card device needs to be created first */
-		if (!ac) {
-			ac = ap_card_create(id, depth, type, comp_type, func);
-			if (!ac)
+		/* if no queue device exists, create a new one */
+		if (!aq) {
+			aq = ap_queue_create(qid, ac->ap_dev.device_type);
+			if (!aq) {
+				AP_DBF_WARN("%s(%d,%d) ap_queue_create() failed\n",
+					    __func__, ac->id, dom);
 				continue;
-			ac->ap_dev.device.bus = &ap_bus_type;
-			ac->ap_dev.device.parent = ap_root_device;
-			dev_set_name(&ac->ap_dev.device, "card%02x", id);
-			/* Register card device with AP bus */
-			rc = device_register(&ac->ap_dev.device);
+			}
+			aq->card = ac;
+			aq->config = !decfg;
+			dev = &aq->ap_dev.device;
+			dev->bus = &ap_bus_type;
+			dev->parent = &ac->ap_dev.device;
+			dev_set_name(dev, "%02x.%04x", ac->id, dom);
+			/* register queue device */
+			rc = device_register(dev);
 			if (rc) {
-				put_device(&ac->ap_dev.device);
-				ac = NULL;
-				break;
+				AP_DBF_WARN("%s(%d,%d) device_register() failed\n",
+					    __func__, ac->id, dom);
+				goto put_dev_and_continue;
 			}
 			/* get it and thus adjust reference counter */
-			get_device(&ac->ap_dev.device);
+			get_device(dev);
+			if (decfg)
+				AP_DBF_INFO("%s(%d,%d) new (decfg) queue device created\n",
+					    __func__, ac->id, dom);
+			else
+				AP_DBF_INFO("%s(%d,%d) new queue device created\n",
+					    __func__, ac->id, dom);
+			goto put_dev_and_continue;
 		}
-		/* now create the new queue device */
-		aq = ap_queue_create(qid, comp_type);
-		if (!aq)
-			continue;
-		aq->card = ac;
-		aq->ap_dev.device.bus = &ap_bus_type;
-		aq->ap_dev.device.parent = &ac->ap_dev.device;
-		dev_set_name(&aq->ap_dev.device, "%02x.%04x", id, dom);
-		/* Register queue device */
-		rc = device_register(&aq->ap_dev.device);
-		if (rc) {
-			put_device(&aq->ap_dev.device);
-			continue;
+		/* Check config state on the already existing queue device */
+		spin_lock_bh(&aq->lock);
+		if (decfg && aq->config) {
+			/* config off this queue device */
+			aq->config = false;
+			if (aq->dev_state > AP_DEV_STATE_UNINITIATED) {
+				aq->dev_state = AP_DEV_STATE_ERROR;
+				aq->last_err_rc = AP_RESPONSE_DECONFIGURED;
+			}
+			spin_unlock_bh(&aq->lock);
+			AP_DBF_INFO("%s(%d,%d) queue device config off\n",
+				    __func__, ac->id, dom);
+			/* 'receive' pending messages with -EAGAIN */
+			ap_flush_queue(aq);
+			goto put_dev_and_continue;
 		}
-	} /* end domain loop */
+		if (!decfg && !aq->config) {
+			/* config on this queue device */
+			aq->config = true;
+			if (aq->dev_state > AP_DEV_STATE_UNINITIATED) {
+				aq->dev_state = AP_DEV_STATE_OPERATING;
+				aq->sm_state = AP_SM_STATE_RESET_START;
+			}
+			spin_unlock_bh(&aq->lock);
+			AP_DBF_INFO("%s(%d,%d) queue device config on\n",
+				    __func__, ac->id, dom);
+			goto put_dev_and_continue;
+		}
+		/* handle other error states */
+		if (!decfg && aq->dev_state == AP_DEV_STATE_ERROR) {
+			spin_unlock_bh(&aq->lock);
+			/* 'receive' pending messages with -EAGAIN */
+			ap_flush_queue(aq);
+			/* re-init (with reset) the queue device */
+			ap_queue_init_state(aq);
+			AP_DBF_INFO("%s(%d,%d) queue device reinit enforced\n",
+				    __func__, ac->id, dom);
+			goto put_dev_and_continue;
+		}
+		spin_unlock_bh(&aq->lock);
+put_dev_and_continue:
+		put_device(dev);
+	}
+}
 
-	if (ac)
-		put_device(&ac->ap_dev.device);
+/*
+ * Helper function for ap_scan_bus().
+ * Does the scan bus job for the given adapter id.
+ */
+static inline void ap_scan_adapter(int ap)
+{
+	bool decfg;
+	ap_qid_t qid;
+	unsigned int func;
+	struct device *dev;
+	struct ap_card *ac;
+	int rc, dom, depth, type, comp_type;
+
+	/* Is there currently a card device for this adapter ? */
+	dev = bus_find_device(&ap_bus_type, NULL,
+			      (void *)(long) ap,
+			      __match_card_device_with_id);
+	ac = dev ? to_ap_card(dev) : NULL;
+
+	/* Adapter not in configuration ? */
+	if (!ap_test_config_card_id(ap)) {
+		if (ac) {
+			AP_DBF_INFO("%s(%d) ap not in config any more, rm card and queue devices\n",
+				    __func__, ap);
+			ap_scan_rm_card_dev_and_queue_devs(ac);
+			put_device(dev);
+		}
+		return;
+	}
+
+	/*
+	 * Adapter ap is valid in the current configuration. So do some checks:
+	 * If no card device exists, build one. If a card device exists, check
+	 * for type and functions changed. For all this we need to find a valid
+	 * APQN first.
+	 */
+
+	for (dom = 0; dom <= ap_max_domain_id; dom++)
+		if (ap_test_config_usage_domain(dom)) {
+			qid = AP_MKQID(ap, dom);
+			if (ap_queue_info(qid, &type, &func, &depth, &decfg))
+				break;
+		}
+	if (dom > ap_max_domain_id) {
+		/* Could not find a valid APQN for this adapter */
+		if (ac) {
+			AP_DBF_INFO(
+				"%s(%d) no type info (no APQN found), rm card and queue devices\n",
+				__func__, ap);
+			ap_scan_rm_card_dev_and_queue_devs(ac);
+			put_device(dev);
+		} else {
+			AP_DBF_DBG("%s(%d) no type info (no APQN found), ignored\n",
+				   __func__, ap);
+		}
+		return;
+	}
+	if (!type) {
+		/* No apdater type info available, an unusable adapter */
+		if (ac) {
+			AP_DBF_INFO("%s(%d) no valid type (0) info, rm card and queue devices\n",
+				    __func__, ap);
+			ap_scan_rm_card_dev_and_queue_devs(ac);
+			put_device(dev);
+		} else {
+			AP_DBF_DBG("%s(%d) no valid type (0) info, ignored\n",
+				   __func__, ap);
+		}
+		return;
+	}
+
+	if (ac) {
+		/* Check APQN against existing card device for changes */
+		if (ac->raw_hwtype != type) {
+			AP_DBF_INFO("%s(%d) hwtype %d changed, rm card and queue devices\n",
+				    __func__, ap, type);
+			ap_scan_rm_card_dev_and_queue_devs(ac);
+			put_device(dev);
+			ac = NULL;
+		} else if (ac->functions != func) {
+			AP_DBF_INFO("%s(%d) functions 0x%08x changed, rm card and queue devices\n",
+				    __func__, ap, type);
+			ap_scan_rm_card_dev_and_queue_devs(ac);
+			put_device(dev);
+			ac = NULL;
+		} else {
+			if (decfg && ac->config) {
+				ac->config = false;
+				AP_DBF_INFO("%s(%d) card device config off\n",
+					    __func__, ap);
+
+			}
+			if (!decfg && !ac->config) {
+				ac->config = true;
+				AP_DBF_INFO("%s(%d) card device config on\n",
+					    __func__, ap);
+			}
+		}
+	}
+
+	if (!ac) {
+		/* Build a new card device */
+		comp_type = ap_get_compatible_type(qid, type, func);
+		if (!comp_type) {
+			AP_DBF_WARN("%s(%d) type %d, can't get compatibility type\n",
+				    __func__, ap, type);
+			return;
+		}
+		ac = ap_card_create(ap, depth, type, comp_type, func);
+		if (!ac) {
+			AP_DBF_WARN("%s(%d) ap_card_create() failed\n",
+				    __func__, ap);
+			return;
+		}
+		ac->config = !decfg;
+		dev = &ac->ap_dev.device;
+		dev->bus = &ap_bus_type;
+		dev->parent = ap_root_device;
+		dev_set_name(dev, "card%02x", ap);
+		/* Register the new card device with AP bus */
+		rc = device_register(dev);
+		if (rc) {
+			AP_DBF_WARN("%s(%d) device_register() failed\n",
+				    __func__, ap);
+			put_device(dev);
+			return;
+		}
+		/* get it and thus adjust reference counter */
+		get_device(dev);
+		if (decfg)
+			AP_DBF_INFO("%s(%d) new (decfg) card device type=%d func=0x%08x created\n",
+				    __func__, ap, type, func);
+		else
+			AP_DBF_INFO("%s(%d) new card device type=%d func=0x%08x created\n",
+				    __func__, ap, type, func);
+	}
+
+	/* Verify the domains and the queue devices for this card */
+	ap_scan_domains(ac);
+
+	/* release the card device */
+	put_device(&ac->ap_dev.device);
 }
 
 /**
@@ -1537,16 +1575,16 @@
  */
 static void ap_scan_bus(struct work_struct *unused)
 {
-	int id;
+	int ap;
 
-	AP_DBF(DBF_DEBUG, "%s running\n", __func__);
-
-	ap_query_configuration(ap_configuration);
+	ap_fetch_qci_info(ap_qci_info);
 	ap_select_domain();
 
+	AP_DBF_DBG("%s running\n", __func__);
+
 	/* loop over all possible adapters */
-	for (id = 0; id < AP_DEVICES; id++)
-		_ap_scan_bus_adapter(id);
+	for (ap = 0; ap <= ap_max_adapter_id; ap++)
+		ap_scan_adapter(ap);
 
 	/* check if there is at least one queue available with default domain */
 	if (ap_domain_index >= 0) {
@@ -1557,9 +1595,8 @@
 		if (dev)
 			put_device(dev);
 		else
-			AP_DBF(DBF_INFO,
-			       "no queue device with default domain %d available\n",
-			       ap_domain_index);
+			AP_DBF_INFO("no queue device with default domain %d available\n",
+				    ap_domain_index);
 	}
 
 	mod_timer(&ap_config_timer, jiffies + ap_config_time * HZ);
@@ -1567,8 +1604,6 @@
 
 static void ap_config_timeout(struct timer_list *unused)
 {
-	if (ap_suspend_flag)
-		return;
 	queue_work(system_long_wq, &ap_scan_work);
 }
 
@@ -1611,7 +1646,6 @@
  */
 static int __init ap_module_init(void)
 {
-	int max_domain_id;
 	int rc, i;
 
 	rc = ap_debug_init();
@@ -1623,33 +1657,28 @@
 		return -ENODEV;
 	}
 
+	/* init ap_queue hashtable */
+	hash_init(ap_queues);
+
 	/* set up the AP permissions (ioctls, ap and aq masks) */
 	ap_perms_init();
 
 	/* Get AP configuration data if available */
-	ap_init_configuration();
+	ap_init_qci_info();
 
-	if (ap_configuration)
-		max_domain_id =
-			ap_max_domain_id ? ap_max_domain_id : AP_DOMAINS - 1;
-	else
-		max_domain_id = 15;
-	if (ap_domain_index < -1 || ap_domain_index > max_domain_id ||
+	/* check default domain setting */
+	if (ap_domain_index < -1 || ap_domain_index > ap_max_domain_id ||
 	    (ap_domain_index >= 0 &&
 	     !test_bit_inv(ap_domain_index, ap_perms.aqm))) {
 		pr_warn("%d is not a valid cryptographic domain\n",
 			ap_domain_index);
 		ap_domain_index = -1;
 	}
-	/* In resume callback we need to know if the user had set the domain.
-	 * If so, we can not just reset it.
-	 */
-	if (ap_domain_index >= 0)
-		user_set_domain = 1;
 
+	/* enable interrupts if available */
 	if (ap_interrupts_available()) {
 		rc = register_adapter_interrupt(&ap_airq);
-		ap_airq_flag = (rc == 0);
+		ap_irq_flag = (rc == 0);
 	}
 
 	/* Create /sys/bus/ap. */
@@ -1677,7 +1706,6 @@
 	 */
 	if (MACHINE_IS_VM)
 		poll_timeout = 1500000;
-	spin_lock_init(&ap_poll_timer_lock);
 	hrtimer_init(&ap_poll_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
 	ap_poll_timer.function = ap_poll_timeout;
 
@@ -1688,17 +1716,10 @@
 			goto out_work;
 	}
 
-	rc = register_pm_notifier(&ap_power_notifier);
-	if (rc)
-		goto out_pm;
-
 	queue_work(system_long_wq, &ap_scan_work);
-	initialised = true;
 
 	return 0;
 
-out_pm:
-	ap_poll_thread_stop();
 out_work:
 	hrtimer_cancel(&ap_poll_timer);
 	root_device_unregister(ap_root_device);
@@ -1707,9 +1728,9 @@
 		bus_remove_file(&ap_bus_type, ap_bus_attrs[i]);
 	bus_unregister(&ap_bus_type);
 out:
-	if (ap_using_interrupts())
+	if (ap_irq_flag)
 		unregister_adapter_interrupt(&ap_airq);
-	kfree(ap_configuration);
+	kfree(ap_qci_info);
 	return rc;
 }
 device_initcall(ap_module_init);
diff --git a/drivers/s390/crypto/ap_bus.h b/drivers/s390/crypto/ap_bus.h
index 4348fdf..ccdbd95 100644
--- a/drivers/s390/crypto/ap_bus.h
+++ b/drivers/s390/crypto/ap_bus.h
@@ -15,6 +15,7 @@
 
 #include <linux/device.h>
 #include <linux/types.h>
+#include <linux/hashtable.h>
 #include <asm/isc.h>
 #include <asm/ap.h>
 
@@ -27,8 +28,8 @@
 
 extern int ap_domain_index;
 
-extern spinlock_t ap_list_lock;
-extern struct list_head ap_card_list;
+extern DECLARE_HASHTABLE(ap_queues, 8);
+extern spinlock_t ap_queues_lock;
 
 static inline int ap_test_bit(unsigned int *ptr, unsigned int nr)
 {
@@ -49,6 +50,7 @@
 #define AP_RESPONSE_NO_FIRST_PART	0x13
 #define AP_RESPONSE_MESSAGE_TOO_BIG	0x15
 #define AP_RESPONSE_REQ_FAC_NOT_INST	0x16
+#define AP_RESPONSE_INVALID_DOMAIN	0x42
 
 /*
  * Known device types
@@ -76,46 +78,47 @@
 #define AP_FUNC_APXA  6
 
 /*
- * AP interrupt states
+ * AP queue state machine states
  */
-#define AP_INTR_DISABLED	0	/* AP interrupt disabled */
-#define AP_INTR_ENABLED		1	/* AP interrupt enabled */
-
-/*
- * AP device states
- */
-enum ap_state {
-	AP_STATE_RESET_START,
-	AP_STATE_RESET_WAIT,
-	AP_STATE_SETIRQ_WAIT,
-	AP_STATE_IDLE,
-	AP_STATE_WORKING,
-	AP_STATE_QUEUE_FULL,
-	AP_STATE_SUSPEND_WAIT,
-	AP_STATE_REMOVE,	/* about to be removed from driver */
-	AP_STATE_UNBOUND,	/* momentary not bound to a driver */
-	AP_STATE_BORKED,	/* broken */
-	NR_AP_STATES
+enum ap_sm_state {
+	AP_SM_STATE_RESET_START = 0,
+	AP_SM_STATE_RESET_WAIT,
+	AP_SM_STATE_SETIRQ_WAIT,
+	AP_SM_STATE_IDLE,
+	AP_SM_STATE_WORKING,
+	AP_SM_STATE_QUEUE_FULL,
+	NR_AP_SM_STATES
 };
 
 /*
- * AP device events
+ * AP queue state machine events
  */
-enum ap_event {
-	AP_EVENT_POLL,
-	AP_EVENT_TIMEOUT,
-	NR_AP_EVENTS
+enum ap_sm_event {
+	AP_SM_EVENT_POLL,
+	AP_SM_EVENT_TIMEOUT,
+	NR_AP_SM_EVENTS
 };
 
 /*
- * AP wait behaviour
+ * AP queue state wait behaviour
  */
-enum ap_wait {
-	AP_WAIT_AGAIN,		/* retry immediately */
-	AP_WAIT_TIMEOUT,	/* wait for timeout */
-	AP_WAIT_INTERRUPT,	/* wait for thin interrupt (if available) */
-	AP_WAIT_NONE,		/* no wait */
-	NR_AP_WAIT
+enum ap_sm_wait {
+	AP_SM_WAIT_AGAIN = 0,	/* retry immediately */
+	AP_SM_WAIT_TIMEOUT,	/* wait for timeout */
+	AP_SM_WAIT_INTERRUPT,	/* wait for thin interrupt (if available) */
+	AP_SM_WAIT_NONE,	/* no wait */
+	NR_AP_SM_WAIT
+};
+
+/*
+ * AP queue device states
+ */
+enum ap_dev_state {
+	AP_DEV_STATE_UNINITIATED = 0,	/* fresh and virgin, not touched */
+	AP_DEV_STATE_OPERATING,		/* queue dev is working normal */
+	AP_DEV_STATE_SHUTDOWN,		/* remove/unbind/shutdown in progress */
+	AP_DEV_STATE_ERROR,		/* device is in error state */
+	NR_AP_DEV_STATES
 };
 
 struct ap_device;
@@ -136,8 +139,6 @@
 
 	int (*probe)(struct ap_device *);
 	void (*remove)(struct ap_device *);
-	void (*suspend)(struct ap_device *);
-	void (*resume)(struct ap_device *);
 };
 
 #define to_ap_drv(x) container_of((x), struct ap_driver, driver)
@@ -155,13 +156,12 @@
 
 struct ap_card {
 	struct ap_device ap_dev;
-	struct list_head list;		/* Private list of AP cards. */
-	struct list_head queues;	/* List of assoc. AP queues */
 	void *private;			/* ap driver private pointer. */
 	int raw_hwtype;			/* AP raw hardware type. */
 	unsigned int functions;		/* AP device function bitfield. */
 	int queue_depth;		/* AP queue depth.*/
 	int id;				/* AP card number. */
+	bool config;			/* configured state */
 	atomic64_t total_request_count;	/* # requests ever for this AP device.*/
 };
 
@@ -169,14 +169,15 @@
 
 struct ap_queue {
 	struct ap_device ap_dev;
-	struct list_head list;		/* Private list of AP queues. */
+	struct hlist_node hnode;	/* Node for the ap_queues hashtable */
 	struct ap_card *card;		/* Ptr to assoc. AP card. */
 	spinlock_t lock;		/* Per device lock. */
 	void *private;			/* ap driver private pointer. */
+	enum ap_dev_state dev_state;	/* queue device state */
+	bool config;			/* configured state */
 	ap_qid_t qid;			/* AP queue id. */
-	int interrupt;			/* indicate if interrupts are enabled */
+	bool interrupt;			/* indicate if interrupts are enabled */
 	int queue_count;		/* # messages currently on AP queue. */
-	enum ap_state state;		/* State of the AP device. */
 	int pendingq_count;		/* # requests on pendingq list. */
 	int requestq_count;		/* # requests on requestq list. */
 	u64 total_request_count;	/* # requests ever for this AP device.*/
@@ -185,26 +186,54 @@
 	struct list_head pendingq;	/* List of message sent to AP queue. */
 	struct list_head requestq;	/* List of message yet to be sent. */
 	struct ap_message *reply;	/* Per device reply message. */
+	enum ap_sm_state sm_state;	/* ap queue state machine state */
+	int last_err_rc;		/* last error state response code */
 };
 
 #define to_ap_queue(x) container_of((x), struct ap_queue, ap_dev.device)
 
-typedef enum ap_wait (ap_func_t)(struct ap_queue *queue);
+typedef enum ap_sm_wait (ap_func_t)(struct ap_queue *queue);
+
+/* failure injection cmd struct */
+struct ap_fi {
+	union {
+		u16 cmd;		/* fi flags + action */
+		struct {
+			u8 flags;	/* fi flags only */
+			u8 action;	/* fi action only */
+		};
+	};
+};
+
+/* all currently known fi actions */
+enum ap_fi_actions {
+	AP_FI_ACTION_CCA_AGENT_FF   = 0x01,
+	AP_FI_ACTION_CCA_DOM_INVAL  = 0x02,
+	AP_FI_ACTION_NQAP_QID_INVAL = 0x03,
+};
+
+/* all currently known fi flags */
+enum ap_fi_flags {
+	AP_FI_FLAG_NO_RETRY	  = 0x01,
+	AP_FI_FLAG_TOGGLE_SPECIAL = 0x02,
+};
 
 struct ap_message {
 	struct list_head list;		/* Request queueing. */
 	unsigned long long psmid;	/* Message id. */
-	void *message;			/* Pointer to message buffer. */
-	size_t length;			/* Message length. */
+	void *msg;			/* Pointer to message buffer. */
+	unsigned int len;		/* Message length. */
+	u16 flags;			/* Flags, see AP_MSG_FLAG_xxx */
+	struct ap_fi fi;		/* Failure Injection cmd */
 	int rc;				/* Return code for this message */
-
 	void *private;			/* ap driver private pointer. */
-	unsigned int special:1;		/* Used for special commands. */
 	/* receive is called from tasklet context */
 	void (*receive)(struct ap_queue *, struct ap_message *,
 			struct ap_message *);
 };
 
+#define AP_MSG_FLAG_SPECIAL  1		/* flag msg as 'special' with NQAP */
+
 /**
  * ap_init_message() - Initialize ap_message.
  * Initialize a message before using. Otherwise this might result in
@@ -222,16 +251,10 @@
  */
 static inline void ap_release_message(struct ap_message *ap_msg)
 {
-	kzfree(ap_msg->message);
-	kzfree(ap_msg->private);
+	kfree_sensitive(ap_msg->msg);
+	kfree_sensitive(ap_msg->private);
 }
 
-#define for_each_ap_card(_ac) \
-	list_for_each_entry(_ac, &ap_card_list, list)
-
-#define for_each_ap_queue(_aq, _ac) \
-	list_for_each_entry(_aq, &(_ac)->queues, list)
-
 /*
  * Note: don't use ap_send/ap_recv after using ap_queue_message
  * for the first time. Otherwise the ap message queue will get
@@ -240,15 +263,15 @@
 int ap_send(ap_qid_t, unsigned long long, void *, size_t);
 int ap_recv(ap_qid_t, unsigned long long *, void *, size_t);
 
-enum ap_wait ap_sm_event(struct ap_queue *aq, enum ap_event event);
-enum ap_wait ap_sm_event_loop(struct ap_queue *aq, enum ap_event event);
+enum ap_sm_wait ap_sm_event(struct ap_queue *aq, enum ap_sm_event event);
+enum ap_sm_wait ap_sm_event_loop(struct ap_queue *aq, enum ap_sm_event event);
 
-void ap_queue_message(struct ap_queue *aq, struct ap_message *ap_msg);
+int ap_queue_message(struct ap_queue *aq, struct ap_message *ap_msg);
 void ap_cancel_message(struct ap_queue *aq, struct ap_message *ap_msg);
 void ap_flush_queue(struct ap_queue *aq);
 
 void *ap_airq_ptr(void);
-void ap_wait(enum ap_wait wait);
+void ap_wait(enum ap_sm_wait wait);
 void ap_request_timeout(struct timer_list *t);
 void ap_bus_force_rescan(void);
 
@@ -259,8 +282,6 @@
 struct ap_queue *ap_queue_create(ap_qid_t qid, int device_type);
 void ap_queue_prepare_remove(struct ap_queue *aq);
 void ap_queue_remove(struct ap_queue *aq);
-void ap_queue_suspend(struct ap_device *ap_dev);
-void ap_queue_resume(struct ap_device *ap_dev);
 void ap_queue_init_state(struct ap_queue *aq);
 
 struct ap_card *ap_card_create(int id, int queue_depth, int raw_device_type,
@@ -275,6 +296,16 @@
 extern struct mutex ap_perms_mutex;
 
 /*
+ * Get ap_queue device for this qid.
+ * Returns ptr to the struct ap_queue device or NULL if there
+ * was no ap_queue device with this qid found. When something is
+ * found, the reference count of the embedded device is increased.
+ * So the caller has to decrease the reference count after use
+ * with a call to put_device(&aq->ap_dev.device).
+ */
+struct ap_queue *ap_get_qdev(ap_qid_t qid);
+
+/*
  * check APQN for owned/reserved by ap bus and default driver(s).
  * Checks if this APQN is or will be in use by the ap bus
  * and the default set of drivers.
diff --git a/drivers/s390/crypto/ap_card.c b/drivers/s390/crypto/ap_card.c
index e85bfca..d98bdd2 100644
--- a/drivers/s390/crypto/ap_card.c
+++ b/drivers/s390/crypto/ap_card.c
@@ -12,6 +12,7 @@
 #include <linux/init.h>
 #include <linux/slab.h>
 #include <asm/facility.h>
+#include <asm/sclp.h>
 
 #include "ap_bus.h"
 
@@ -23,7 +24,7 @@
 {
 	struct ap_card *ac = to_ap_card(dev);
 
-	return snprintf(buf, PAGE_SIZE, "%d\n", ac->ap_dev.device_type);
+	return scnprintf(buf, PAGE_SIZE, "%d\n", ac->ap_dev.device_type);
 }
 
 static DEVICE_ATTR_RO(hwtype);
@@ -33,7 +34,7 @@
 {
 	struct ap_card *ac = to_ap_card(dev);
 
-	return snprintf(buf, PAGE_SIZE, "%d\n", ac->raw_hwtype);
+	return scnprintf(buf, PAGE_SIZE, "%d\n", ac->raw_hwtype);
 }
 
 static DEVICE_ATTR_RO(raw_hwtype);
@@ -43,7 +44,7 @@
 {
 	struct ap_card *ac = to_ap_card(dev);
 
-	return snprintf(buf, PAGE_SIZE, "%d\n", ac->queue_depth);
+	return scnprintf(buf, PAGE_SIZE, "%d\n", ac->queue_depth);
 }
 
 static DEVICE_ATTR_RO(depth);
@@ -53,7 +54,7 @@
 {
 	struct ap_card *ac = to_ap_card(dev);
 
-	return snprintf(buf, PAGE_SIZE, "0x%08X\n", ac->functions);
+	return scnprintf(buf, PAGE_SIZE, "0x%08X\n", ac->functions);
 }
 
 static DEVICE_ATTR_RO(ap_functions);
@@ -66,23 +67,25 @@
 	u64 req_cnt;
 
 	req_cnt = 0;
-	spin_lock_bh(&ap_list_lock);
+	spin_lock_bh(&ap_queues_lock);
 	req_cnt = atomic64_read(&ac->total_request_count);
-	spin_unlock_bh(&ap_list_lock);
-	return snprintf(buf, PAGE_SIZE, "%llu\n", req_cnt);
+	spin_unlock_bh(&ap_queues_lock);
+	return scnprintf(buf, PAGE_SIZE, "%llu\n", req_cnt);
 }
 
 static ssize_t request_count_store(struct device *dev,
 				   struct device_attribute *attr,
 				   const char *buf, size_t count)
 {
-	struct ap_card *ac = to_ap_card(dev);
+	int bkt;
 	struct ap_queue *aq;
+	struct ap_card *ac = to_ap_card(dev);
 
-	spin_lock_bh(&ap_list_lock);
-	for_each_ap_queue(aq, ac)
-		aq->total_request_count = 0;
-	spin_unlock_bh(&ap_list_lock);
+	spin_lock_bh(&ap_queues_lock);
+	hash_for_each(ap_queues, bkt, aq, hnode)
+		if (ac == aq->card)
+			aq->total_request_count = 0;
+	spin_unlock_bh(&ap_queues_lock);
 	atomic64_set(&ac->total_request_count, 0);
 
 	return count;
@@ -93,16 +96,18 @@
 static ssize_t requestq_count_show(struct device *dev,
 				   struct device_attribute *attr, char *buf)
 {
-	struct ap_card *ac = to_ap_card(dev);
+	int bkt;
 	struct ap_queue *aq;
 	unsigned int reqq_cnt;
+	struct ap_card *ac = to_ap_card(dev);
 
 	reqq_cnt = 0;
-	spin_lock_bh(&ap_list_lock);
-	for_each_ap_queue(aq, ac)
-		reqq_cnt += aq->requestq_count;
-	spin_unlock_bh(&ap_list_lock);
-	return snprintf(buf, PAGE_SIZE, "%d\n", reqq_cnt);
+	spin_lock_bh(&ap_queues_lock);
+	hash_for_each(ap_queues, bkt, aq, hnode)
+		if (ac == aq->card)
+			reqq_cnt += aq->requestq_count;
+	spin_unlock_bh(&ap_queues_lock);
+	return scnprintf(buf, PAGE_SIZE, "%d\n", reqq_cnt);
 }
 
 static DEVICE_ATTR_RO(requestq_count);
@@ -110,16 +115,18 @@
 static ssize_t pendingq_count_show(struct device *dev,
 				   struct device_attribute *attr, char *buf)
 {
-	struct ap_card *ac = to_ap_card(dev);
+	int bkt;
 	struct ap_queue *aq;
 	unsigned int penq_cnt;
+	struct ap_card *ac = to_ap_card(dev);
 
 	penq_cnt = 0;
-	spin_lock_bh(&ap_list_lock);
-	for_each_ap_queue(aq, ac)
-		penq_cnt += aq->pendingq_count;
-	spin_unlock_bh(&ap_list_lock);
-	return snprintf(buf, PAGE_SIZE, "%d\n", penq_cnt);
+	spin_lock_bh(&ap_queues_lock);
+	hash_for_each(ap_queues, bkt, aq, hnode)
+		if (ac == aq->card)
+			penq_cnt += aq->pendingq_count;
+	spin_unlock_bh(&ap_queues_lock);
+	return scnprintf(buf, PAGE_SIZE, "%d\n", penq_cnt);
 }
 
 static DEVICE_ATTR_RO(pendingq_count);
@@ -127,11 +134,44 @@
 static ssize_t modalias_show(struct device *dev,
 			     struct device_attribute *attr, char *buf)
 {
-	return sprintf(buf, "ap:t%02X\n", to_ap_dev(dev)->device_type);
+	return scnprintf(buf, PAGE_SIZE, "ap:t%02X\n",
+			 to_ap_dev(dev)->device_type);
 }
 
 static DEVICE_ATTR_RO(modalias);
 
+static ssize_t config_show(struct device *dev,
+			   struct device_attribute *attr, char *buf)
+{
+	struct ap_card *ac = to_ap_card(dev);
+
+	return scnprintf(buf, PAGE_SIZE, "%d\n", ac->config ? 1 : 0);
+}
+
+static ssize_t config_store(struct device *dev,
+			    struct device_attribute *attr,
+			    const char *buf, size_t count)
+{
+	int rc = 0, cfg;
+	struct ap_card *ac = to_ap_card(dev);
+
+	if (sscanf(buf, "%d\n", &cfg) != 1 || cfg < 0 || cfg > 1)
+		return -EINVAL;
+
+	if (cfg && !ac->config)
+		rc = sclp_ap_configure(ac->id);
+	else if (!cfg && ac->config)
+		rc = sclp_ap_deconfigure(ac->id);
+	if (rc)
+		return rc;
+
+	ac->config = cfg ? true : false;
+
+	return count;
+}
+
+static DEVICE_ATTR_RW(config);
+
 static struct attribute *ap_card_dev_attrs[] = {
 	&dev_attr_hwtype.attr,
 	&dev_attr_raw_hwtype.attr,
@@ -141,6 +181,7 @@
 	&dev_attr_requestq_count.attr,
 	&dev_attr_pendingq_count.attr,
 	&dev_attr_modalias.attr,
+	&dev_attr_config.attr,
 	NULL
 };
 
@@ -162,11 +203,6 @@
 {
 	struct ap_card *ac = to_ap_card(dev);
 
-	if (!list_empty(&ac->list)) {
-		spin_lock_bh(&ap_list_lock);
-		list_del_init(&ac->list);
-		spin_unlock_bh(&ap_list_lock);
-	}
 	kfree(ac);
 }
 
@@ -178,8 +214,6 @@
 	ac = kzalloc(sizeof(*ac), GFP_KERNEL);
 	if (!ac)
 		return NULL;
-	INIT_LIST_HEAD(&ac->list);
-	INIT_LIST_HEAD(&ac->queues);
 	ac->ap_dev.device.release = ap_card_device_release;
 	ac->ap_dev.device.type = &ap_card_type;
 	ac->ap_dev.device_type = comp_type;
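
For illustration only (not part of the patch): the new per-card "config" attribute behaves like a simple boolean switch, and config_store() above maps "1"/"0" writes to sclp_ap_configure()/sclp_ap_deconfigure(). A minimal, hedged user-space sketch follows; the sysfs path and card id are assumptions and will differ per machine.

/*
 * Hedged sketch: toggle the new AP card "config" attribute from user space.
 * Path assumes the card shows up as card01 under /sys/bus/ap/devices.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static int ap_card_set_config(const char *path, int on)
{
	const char *val = on ? "1\n" : "0\n";
	int fd = open(path, O_WRONLY);

	if (fd < 0) {
		perror("open");
		return -1;
	}
	if (write(fd, val, strlen(val)) < 0) {
		perror("write");	/* SCLP may reject the request */
		close(fd);
		return -1;
	}
	return close(fd);
}

int main(void)
{
	/* hypothetical card id; adjust for the local machine */
	return ap_card_set_config("/sys/bus/ap/devices/card01/config", 1) ? 1 : 0;
}
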
diff --git a/drivers/s390/crypto/ap_debug.h b/drivers/s390/crypto/ap_debug.h
index dc675eb..34b0350 100644
--- a/drivers/s390/crypto/ap_debug.h
+++ b/drivers/s390/crypto/ap_debug.h
@@ -20,6 +20,14 @@
 
 #define AP_DBF(...)					\
 	debug_sprintf_event(ap_dbf_info, ##__VA_ARGS__)
+#define AP_DBF_ERR(...)					\
+	debug_sprintf_event(ap_dbf_info, DBF_ERR, ##__VA_ARGS__)
+#define AP_DBF_WARN(...)					\
+	debug_sprintf_event(ap_dbf_info, DBF_WARN, ##__VA_ARGS__)
+#define AP_DBF_INFO(...)					\
+	debug_sprintf_event(ap_dbf_info, DBF_INFO, ##__VA_ARGS__)
+#define AP_DBF_DBG(...)					\
+	debug_sprintf_event(ap_dbf_info, DBF_DEBUG, ##__VA_ARGS__)
 
 extern debug_info_t *ap_dbf_info;
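
As a usage illustration (not from the patch itself): the level-specific wrappers added above forward to debug_sprintf_event() with a fixed DBF_* level, so call sites no longer pass the level by hand. A hedged in-kernel sketch, assuming a caller that already holds an ap_queue:

/* Hedged usage sketch for the new AP_DBF_* level macros. */
#include "ap_bus.h"
#include "ap_debug.h"

static void example_report(struct ap_queue *aq, int rc)
{
	if (rc)
		AP_DBF_WARN("%s rc=%d on 0x%02x.%04x\n", __func__, rc,
			    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
	else
		AP_DBF_DBG("%s done on 0x%02x.%04x\n", __func__,
			   AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
}
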
 
diff --git a/drivers/s390/crypto/ap_queue.c b/drivers/s390/crypto/ap_queue.c
index a317ab4..ff0018f 100644
--- a/drivers/s390/crypto/ap_queue.c
+++ b/drivers/s390/crypto/ap_queue.c
@@ -19,7 +19,7 @@
 static void __ap_flush_queue(struct ap_queue *aq);
 
 /**
- * ap_queue_enable_interruption(): Enable interruption on an AP queue.
+ * ap_queue_enable_irq(): Enable interrupt support on this AP queue.
  * @qid: The AP queue number
  * @ind: the notification indicator byte
  *
@@ -27,7 +27,7 @@
  * value it waits a while and tests the AP queue if interrupts
  * have been switched on using ap_test_queue().
  */
-static int ap_queue_enable_interruption(struct ap_queue *aq, void *ind)
+static int ap_queue_enable_irq(struct ap_queue *aq, void *ind)
 {
 	struct ap_queue_status status;
 	struct ap_qirq_ctrl qirqctrl = { 0 };
@@ -69,9 +69,9 @@
  */
 static inline struct ap_queue_status
 __ap_send(ap_qid_t qid, unsigned long long psmid, void *msg, size_t length,
-	  unsigned int special)
+	  int special)
 {
-	if (special == 1)
+	if (special)
 		qid |= 0x400000UL;
 	return ap_nqap(qid, psmid, msg, length);
 }
@@ -119,9 +119,9 @@
 
 /* State machine definitions and helpers */
 
-static enum ap_wait ap_sm_nop(struct ap_queue *aq)
+static enum ap_sm_wait ap_sm_nop(struct ap_queue *aq)
 {
-	return AP_WAIT_NONE;
+	return AP_SM_WAIT_NONE;
 }
 
 /**
@@ -129,18 +129,21 @@
  *	not change the state of the device.
  * @aq: pointer to the AP queue
  *
- * Returns AP_WAIT_NONE, AP_WAIT_AGAIN, or AP_WAIT_INTERRUPT
+ * Returns AP_SM_WAIT_NONE, AP_SM_WAIT_AGAIN, or AP_SM_WAIT_INTERRUPT
  */
 static struct ap_queue_status ap_sm_recv(struct ap_queue *aq)
 {
 	struct ap_queue_status status;
 	struct ap_message *ap_msg;
+	bool found = false;
 
 	status = ap_dqap(aq->qid, &aq->reply->psmid,
-			 aq->reply->message, aq->reply->length);
+			 aq->reply->msg, aq->reply->len);
 	switch (status.response_code) {
 	case AP_RESPONSE_NORMAL:
-		aq->queue_count--;
+		aq->queue_count = max_t(int, 0, aq->queue_count - 1);
+		if (!status.queue_empty && !aq->queue_count)
+			aq->queue_count++;
 		if (aq->queue_count > 0)
 			mod_timer(&aq->timeout,
 				  jiffies + aq->request_timeout);
@@ -150,9 +153,15 @@
 			list_del_init(&ap_msg->list);
 			aq->pendingq_count--;
 			ap_msg->receive(aq, ap_msg, aq->reply);
+			found = true;
 			break;
 		}
-		/* fall through */
+		if (!found) {
+			AP_DBF_WARN("%s unassociated reply psmid=0x%016llx on 0x%02x.%04x\n",
+				    __func__, aq->reply->psmid,
+				    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
+		}
+		fallthrough;
 	case AP_RESPONSE_NO_PENDING_REPLY:
 		if (!status.queue_empty || aq->queue_count <= 0)
 			break;
@@ -172,56 +181,36 @@
  * ap_sm_read(): Receive pending reply messages from an AP queue.
  * @aq: pointer to the AP queue
  *
- * Returns AP_WAIT_NONE, AP_WAIT_AGAIN, or AP_WAIT_INTERRUPT
+ * Returns AP_SM_WAIT_NONE, AP_SM_WAIT_AGAIN, or AP_SM_WAIT_INTERRUPT
  */
-static enum ap_wait ap_sm_read(struct ap_queue *aq)
+static enum ap_sm_wait ap_sm_read(struct ap_queue *aq)
 {
 	struct ap_queue_status status;
 
 	if (!aq->reply)
-		return AP_WAIT_NONE;
+		return AP_SM_WAIT_NONE;
 	status = ap_sm_recv(aq);
 	switch (status.response_code) {
 	case AP_RESPONSE_NORMAL:
 		if (aq->queue_count > 0) {
-			aq->state = AP_STATE_WORKING;
-			return AP_WAIT_AGAIN;
+			aq->sm_state = AP_SM_STATE_WORKING;
+			return AP_SM_WAIT_AGAIN;
 		}
-		aq->state = AP_STATE_IDLE;
-		return AP_WAIT_NONE;
+		aq->sm_state = AP_SM_STATE_IDLE;
+		return AP_SM_WAIT_NONE;
 	case AP_RESPONSE_NO_PENDING_REPLY:
 		if (aq->queue_count > 0)
-			return AP_WAIT_INTERRUPT;
-		aq->state = AP_STATE_IDLE;
-		return AP_WAIT_NONE;
+			return aq->interrupt ?
+				AP_SM_WAIT_INTERRUPT : AP_SM_WAIT_TIMEOUT;
+		aq->sm_state = AP_SM_STATE_IDLE;
+		return AP_SM_WAIT_NONE;
 	default:
-		aq->state = AP_STATE_BORKED;
-		return AP_WAIT_NONE;
-	}
-}
-
-/**
- * ap_sm_suspend_read(): Receive pending reply messages from an AP queue
- * without changing the device state in between. In suspend mode we don't
- * allow sending new requests, therefore just fetch pending replies.
- * @aq: pointer to the AP queue
- *
- * Returns AP_WAIT_NONE or AP_WAIT_AGAIN
- */
-static enum ap_wait ap_sm_suspend_read(struct ap_queue *aq)
-{
-	struct ap_queue_status status;
-
-	if (!aq->reply)
-		return AP_WAIT_NONE;
-	status = ap_sm_recv(aq);
-	switch (status.response_code) {
-	case AP_RESPONSE_NORMAL:
-		if (aq->queue_count > 0)
-			return AP_WAIT_AGAIN;
-		/* fall through */
-	default:
-		return AP_WAIT_NONE;
+		aq->dev_state = AP_DEV_STATE_ERROR;
+		aq->last_err_rc = status.response_code;
+		AP_DBF_WARN("%s RC 0x%02x on 0x%02x.%04x -> AP_DEV_STATE_ERROR\n",
+			    __func__, status.response_code,
+			    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
+		return AP_SM_WAIT_NONE;
 	}
 }
 
@@ -229,48 +218,65 @@
  * ap_sm_write(): Send messages from the request queue to an AP queue.
  * @aq: pointer to the AP queue
  *
- * Returns AP_WAIT_NONE, AP_WAIT_AGAIN, or AP_WAIT_INTERRUPT
+ * Returns AP_SM_WAIT_NONE, AP_SM_WAIT_AGAIN, or AP_SM_WAIT_INTERRUPT
  */
-static enum ap_wait ap_sm_write(struct ap_queue *aq)
+static enum ap_sm_wait ap_sm_write(struct ap_queue *aq)
 {
 	struct ap_queue_status status;
 	struct ap_message *ap_msg;
+	ap_qid_t qid = aq->qid;
 
 	if (aq->requestq_count <= 0)
-		return AP_WAIT_NONE;
+		return AP_SM_WAIT_NONE;
 	/* Start the next request on the queue. */
 	ap_msg = list_entry(aq->requestq.next, struct ap_message, list);
-	status = __ap_send(aq->qid, ap_msg->psmid,
-			   ap_msg->message, ap_msg->length, ap_msg->special);
+#ifdef CONFIG_ZCRYPT_DEBUG
+	if (ap_msg->fi.action == AP_FI_ACTION_NQAP_QID_INVAL) {
+		AP_DBF_WARN("%s fi cmd 0x%04x: forcing invalid qid 0xFF00\n",
+			    __func__, ap_msg->fi.cmd);
+		qid = 0xFF00;
+	}
+#endif
+	status = __ap_send(qid, ap_msg->psmid,
+			   ap_msg->msg, ap_msg->len,
+			   ap_msg->flags & AP_MSG_FLAG_SPECIAL);
 	switch (status.response_code) {
 	case AP_RESPONSE_NORMAL:
-		aq->queue_count++;
+		aq->queue_count = max_t(int, 1, aq->queue_count + 1);
 		if (aq->queue_count == 1)
 			mod_timer(&aq->timeout, jiffies + aq->request_timeout);
 		list_move_tail(&ap_msg->list, &aq->pendingq);
 		aq->requestq_count--;
 		aq->pendingq_count++;
 		if (aq->queue_count < aq->card->queue_depth) {
-			aq->state = AP_STATE_WORKING;
-			return AP_WAIT_AGAIN;
+			aq->sm_state = AP_SM_STATE_WORKING;
+			return AP_SM_WAIT_AGAIN;
 		}
-		/* fall through */
+		fallthrough;
 	case AP_RESPONSE_Q_FULL:
-		aq->state = AP_STATE_QUEUE_FULL;
-		return AP_WAIT_INTERRUPT;
+		aq->sm_state = AP_SM_STATE_QUEUE_FULL;
+		return aq->interrupt ?
+			AP_SM_WAIT_INTERRUPT : AP_SM_WAIT_TIMEOUT;
 	case AP_RESPONSE_RESET_IN_PROGRESS:
-		aq->state = AP_STATE_RESET_WAIT;
-		return AP_WAIT_TIMEOUT;
+		aq->sm_state = AP_SM_STATE_RESET_WAIT;
+		return AP_SM_WAIT_TIMEOUT;
+	case AP_RESPONSE_INVALID_DOMAIN:
+		AP_DBF(DBF_WARN, "AP_RESPONSE_INVALID_DOMAIN on NQAP\n");
+		fallthrough;
 	case AP_RESPONSE_MESSAGE_TOO_BIG:
 	case AP_RESPONSE_REQ_FAC_NOT_INST:
 		list_del_init(&ap_msg->list);
 		aq->requestq_count--;
 		ap_msg->rc = -EINVAL;
 		ap_msg->receive(aq, ap_msg, NULL);
-		return AP_WAIT_AGAIN;
+		return AP_SM_WAIT_AGAIN;
 	default:
-		aq->state = AP_STATE_BORKED;
-		return AP_WAIT_NONE;
+		aq->dev_state = AP_DEV_STATE_ERROR;
+		aq->last_err_rc = status.response_code;
+		AP_DBF_WARN("%s RC 0x%02x on 0x%02x.%04x -> AP_DEV_STATE_ERROR\n",
+			    __func__, status.response_code,
+			    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
+		return AP_SM_WAIT_NONE;
 	}
 }
 
@@ -278,9 +284,9 @@
  * ap_sm_read_write(): Send and receive messages to/from an AP queue.
  * @aq: pointer to the AP queue
  *
- * Returns AP_WAIT_NONE, AP_WAIT_AGAIN, or AP_WAIT_INTERRUPT
+ * Returns AP_SM_WAIT_NONE, AP_SM_WAIT_AGAIN, or AP_SM_WAIT_INTERRUPT
  */
-static enum ap_wait ap_sm_read_write(struct ap_queue *aq)
+static enum ap_sm_wait ap_sm_read_write(struct ap_queue *aq)
 {
 	return min(ap_sm_read(aq), ap_sm_write(aq));
 }
@@ -291,7 +297,7 @@
  *
  * Submit the Reset command to an AP queue.
  */
-static enum ap_wait ap_sm_reset(struct ap_queue *aq)
+static enum ap_sm_wait ap_sm_reset(struct ap_queue *aq)
 {
 	struct ap_queue_status status;
 
@@ -299,17 +305,16 @@
 	switch (status.response_code) {
 	case AP_RESPONSE_NORMAL:
 	case AP_RESPONSE_RESET_IN_PROGRESS:
-		aq->state = AP_STATE_RESET_WAIT;
-		aq->interrupt = AP_INTR_DISABLED;
-		return AP_WAIT_TIMEOUT;
-	case AP_RESPONSE_BUSY:
-		return AP_WAIT_TIMEOUT;
-	case AP_RESPONSE_Q_NOT_AVAIL:
-	case AP_RESPONSE_DECONFIGURED:
-	case AP_RESPONSE_CHECKSTOPPED:
+		aq->sm_state = AP_SM_STATE_RESET_WAIT;
+		aq->interrupt = false;
+		return AP_SM_WAIT_TIMEOUT;
 	default:
-		aq->state = AP_STATE_BORKED;
-		return AP_WAIT_NONE;
+		aq->dev_state = AP_DEV_STATE_ERROR;
+		aq->last_err_rc = status.response_code;
+		AP_DBF_WARN("%s RC 0x%02x on 0x%02x.%04x -> AP_DEV_STATE_ERROR\n",
+			    __func__, status.response_code,
+			    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
+		return AP_SM_WAIT_NONE;
 	}
 }
 
@@ -319,7 +324,7 @@
  *
  * Returns AP_POLL_IMMEDIATELY, AP_POLL_AFTER_TIMEROUT or 0.
  */
-static enum ap_wait ap_sm_reset_wait(struct ap_queue *aq)
+static enum ap_sm_wait ap_sm_reset_wait(struct ap_queue *aq)
 {
 	struct ap_queue_status status;
 	void *lsi_ptr;
@@ -334,21 +339,25 @@
 	switch (status.response_code) {
 	case AP_RESPONSE_NORMAL:
 		lsi_ptr = ap_airq_ptr();
-		if (lsi_ptr && ap_queue_enable_interruption(aq, lsi_ptr) == 0)
-			aq->state = AP_STATE_SETIRQ_WAIT;
+		if (lsi_ptr && ap_queue_enable_irq(aq, lsi_ptr) == 0)
+			aq->sm_state = AP_SM_STATE_SETIRQ_WAIT;
 		else
-			aq->state = (aq->queue_count > 0) ?
-				AP_STATE_WORKING : AP_STATE_IDLE;
-		return AP_WAIT_AGAIN;
+			aq->sm_state = (aq->queue_count > 0) ?
+				AP_SM_STATE_WORKING : AP_SM_STATE_IDLE;
+		return AP_SM_WAIT_AGAIN;
 	case AP_RESPONSE_BUSY:
 	case AP_RESPONSE_RESET_IN_PROGRESS:
-		return AP_WAIT_TIMEOUT;
+		return AP_SM_WAIT_TIMEOUT;
 	case AP_RESPONSE_Q_NOT_AVAIL:
 	case AP_RESPONSE_DECONFIGURED:
 	case AP_RESPONSE_CHECKSTOPPED:
 	default:
-		aq->state = AP_STATE_BORKED;
-		return AP_WAIT_NONE;
+		aq->dev_state = AP_DEV_STATE_ERROR;
+		aq->last_err_rc = status.response_code;
+		AP_DBF_WARN("%s RC 0x%02x on 0x%02x.%04x -> AP_DEV_STATE_ERROR\n",
+			    __func__, status.response_code,
+			    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
+		return AP_SM_WAIT_NONE;
 	}
 }
 
@@ -358,7 +367,7 @@
  *
  * Returns AP_POLL_IMMEDIATELY, AP_POLL_AFTER_TIMEROUT or 0.
  */
-static enum ap_wait ap_sm_setirq_wait(struct ap_queue *aq)
+static enum ap_sm_wait ap_sm_setirq_wait(struct ap_queue *aq)
 {
 	struct ap_queue_status status;
 
@@ -371,107 +380,76 @@
 
 	if (status.irq_enabled == 1) {
 		/* Irqs are now enabled */
-		aq->interrupt = AP_INTR_ENABLED;
-		aq->state = (aq->queue_count > 0) ?
-			AP_STATE_WORKING : AP_STATE_IDLE;
+		aq->interrupt = true;
+		aq->sm_state = (aq->queue_count > 0) ?
+			AP_SM_STATE_WORKING : AP_SM_STATE_IDLE;
 	}
 
 	switch (status.response_code) {
 	case AP_RESPONSE_NORMAL:
 		if (aq->queue_count > 0)
-			return AP_WAIT_AGAIN;
-		/* fallthrough */
+			return AP_SM_WAIT_AGAIN;
+		fallthrough;
 	case AP_RESPONSE_NO_PENDING_REPLY:
-		return AP_WAIT_TIMEOUT;
+		return AP_SM_WAIT_TIMEOUT;
 	default:
-		aq->state = AP_STATE_BORKED;
-		return AP_WAIT_NONE;
+		aq->dev_state = AP_DEV_STATE_ERROR;
+		aq->last_err_rc = status.response_code;
+		AP_DBF_WARN("%s RC 0x%02x on 0x%02x.%04x -> AP_DEV_STATE_ERROR\n",
+			    __func__, status.response_code,
+			    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
+		return AP_SM_WAIT_NONE;
 	}
 }
 
 /*
  * AP state machine jump table
  */
-static ap_func_t *ap_jumptable[NR_AP_STATES][NR_AP_EVENTS] = {
-	[AP_STATE_RESET_START] = {
-		[AP_EVENT_POLL] = ap_sm_reset,
-		[AP_EVENT_TIMEOUT] = ap_sm_nop,
+static ap_func_t *ap_jumptable[NR_AP_SM_STATES][NR_AP_SM_EVENTS] = {
+	[AP_SM_STATE_RESET_START] = {
+		[AP_SM_EVENT_POLL] = ap_sm_reset,
+		[AP_SM_EVENT_TIMEOUT] = ap_sm_nop,
 	},
-	[AP_STATE_RESET_WAIT] = {
-		[AP_EVENT_POLL] = ap_sm_reset_wait,
-		[AP_EVENT_TIMEOUT] = ap_sm_nop,
+	[AP_SM_STATE_RESET_WAIT] = {
+		[AP_SM_EVENT_POLL] = ap_sm_reset_wait,
+		[AP_SM_EVENT_TIMEOUT] = ap_sm_nop,
 	},
-	[AP_STATE_SETIRQ_WAIT] = {
-		[AP_EVENT_POLL] = ap_sm_setirq_wait,
-		[AP_EVENT_TIMEOUT] = ap_sm_nop,
+	[AP_SM_STATE_SETIRQ_WAIT] = {
+		[AP_SM_EVENT_POLL] = ap_sm_setirq_wait,
+		[AP_SM_EVENT_TIMEOUT] = ap_sm_nop,
 	},
-	[AP_STATE_IDLE] = {
-		[AP_EVENT_POLL] = ap_sm_write,
-		[AP_EVENT_TIMEOUT] = ap_sm_nop,
+	[AP_SM_STATE_IDLE] = {
+		[AP_SM_EVENT_POLL] = ap_sm_write,
+		[AP_SM_EVENT_TIMEOUT] = ap_sm_nop,
 	},
-	[AP_STATE_WORKING] = {
-		[AP_EVENT_POLL] = ap_sm_read_write,
-		[AP_EVENT_TIMEOUT] = ap_sm_reset,
+	[AP_SM_STATE_WORKING] = {
+		[AP_SM_EVENT_POLL] = ap_sm_read_write,
+		[AP_SM_EVENT_TIMEOUT] = ap_sm_reset,
 	},
-	[AP_STATE_QUEUE_FULL] = {
-		[AP_EVENT_POLL] = ap_sm_read,
-		[AP_EVENT_TIMEOUT] = ap_sm_reset,
-	},
-	[AP_STATE_SUSPEND_WAIT] = {
-		[AP_EVENT_POLL] = ap_sm_suspend_read,
-		[AP_EVENT_TIMEOUT] = ap_sm_nop,
-	},
-	[AP_STATE_REMOVE] = {
-		[AP_EVENT_POLL] = ap_sm_nop,
-		[AP_EVENT_TIMEOUT] = ap_sm_nop,
-	},
-	[AP_STATE_UNBOUND] = {
-		[AP_EVENT_POLL] = ap_sm_nop,
-		[AP_EVENT_TIMEOUT] = ap_sm_nop,
-	},
-	[AP_STATE_BORKED] = {
-		[AP_EVENT_POLL] = ap_sm_nop,
-		[AP_EVENT_TIMEOUT] = ap_sm_nop,
+	[AP_SM_STATE_QUEUE_FULL] = {
+		[AP_SM_EVENT_POLL] = ap_sm_read,
+		[AP_SM_EVENT_TIMEOUT] = ap_sm_reset,
 	},
 };
 
-enum ap_wait ap_sm_event(struct ap_queue *aq, enum ap_event event)
+enum ap_sm_wait ap_sm_event(struct ap_queue *aq, enum ap_sm_event event)
 {
-	return ap_jumptable[aq->state][event](aq);
+	if (aq->dev_state > AP_DEV_STATE_UNINITIATED)
+		return ap_jumptable[aq->sm_state][event](aq);
+	else
+		return AP_SM_WAIT_NONE;
 }
 
-enum ap_wait ap_sm_event_loop(struct ap_queue *aq, enum ap_event event)
+enum ap_sm_wait ap_sm_event_loop(struct ap_queue *aq, enum ap_sm_event event)
 {
-	enum ap_wait wait;
+	enum ap_sm_wait wait;
 
-	while ((wait = ap_sm_event(aq, event)) == AP_WAIT_AGAIN)
+	while ((wait = ap_sm_event(aq, event)) == AP_SM_WAIT_AGAIN)
 		;
 	return wait;
 }
 
 /*
- * Power management for queue devices
- */
-void ap_queue_suspend(struct ap_device *ap_dev)
-{
-	struct ap_queue *aq = to_ap_queue(&ap_dev->device);
-
-	/* Poll on the device until all requests are finished. */
-	spin_lock_bh(&aq->lock);
-	aq->state = AP_STATE_SUSPEND_WAIT;
-	while (ap_sm_event(aq, AP_EVENT_POLL) != AP_WAIT_NONE)
-		;
-	aq->state = AP_STATE_BORKED;
-	spin_unlock_bh(&aq->lock);
-}
-EXPORT_SYMBOL(ap_queue_suspend);
-
-void ap_queue_resume(struct ap_device *ap_dev)
-{
-}
-EXPORT_SYMBOL(ap_queue_resume);
-
-/*
  * AP queue related attributes.
  */
 static ssize_t request_count_show(struct device *dev,
@@ -479,12 +457,20 @@
 				  char *buf)
 {
 	struct ap_queue *aq = to_ap_queue(dev);
+	bool valid = false;
 	u64 req_cnt;
 
 	spin_lock_bh(&aq->lock);
-	req_cnt = aq->total_request_count;
+	if (aq->dev_state > AP_DEV_STATE_UNINITIATED) {
+		req_cnt = aq->total_request_count;
+		valid = true;
+	}
 	spin_unlock_bh(&aq->lock);
-	return snprintf(buf, PAGE_SIZE, "%llu\n", req_cnt);
+
+	if (valid)
+		return scnprintf(buf, PAGE_SIZE, "%llu\n", req_cnt);
+	else
+		return scnprintf(buf, PAGE_SIZE, "-\n");
 }
 
 static ssize_t request_count_store(struct device *dev,
@@ -509,9 +495,10 @@
 	unsigned int reqq_cnt = 0;
 
 	spin_lock_bh(&aq->lock);
-	reqq_cnt = aq->requestq_count;
+	if (aq->dev_state > AP_DEV_STATE_UNINITIATED)
+		reqq_cnt = aq->requestq_count;
 	spin_unlock_bh(&aq->lock);
-	return snprintf(buf, PAGE_SIZE, "%d\n", reqq_cnt);
+	return scnprintf(buf, PAGE_SIZE, "%d\n", reqq_cnt);
 }
 
 static DEVICE_ATTR_RO(requestq_count);
@@ -523,9 +510,10 @@
 	unsigned int penq_cnt = 0;
 
 	spin_lock_bh(&aq->lock);
-	penq_cnt = aq->pendingq_count;
+	if (aq->dev_state > AP_DEV_STATE_UNINITIATED)
+		penq_cnt = aq->pendingq_count;
 	spin_unlock_bh(&aq->lock);
-	return snprintf(buf, PAGE_SIZE, "%d\n", penq_cnt);
+	return scnprintf(buf, PAGE_SIZE, "%d\n", penq_cnt);
 }
 
 static DEVICE_ATTR_RO(pendingq_count);
@@ -537,17 +525,17 @@
 	int rc = 0;
 
 	spin_lock_bh(&aq->lock);
-	switch (aq->state) {
-	case AP_STATE_RESET_START:
-	case AP_STATE_RESET_WAIT:
-		rc = snprintf(buf, PAGE_SIZE, "Reset in progress.\n");
+	switch (aq->sm_state) {
+	case AP_SM_STATE_RESET_START:
+	case AP_SM_STATE_RESET_WAIT:
+		rc = scnprintf(buf, PAGE_SIZE, "Reset in progress.\n");
 		break;
-	case AP_STATE_WORKING:
-	case AP_STATE_QUEUE_FULL:
-		rc = snprintf(buf, PAGE_SIZE, "Reset Timer armed.\n");
+	case AP_SM_STATE_WORKING:
+	case AP_SM_STATE_QUEUE_FULL:
+		rc = scnprintf(buf, PAGE_SIZE, "Reset Timer armed.\n");
 		break;
 	default:
-		rc = snprintf(buf, PAGE_SIZE, "No Reset Timer set.\n");
+		rc = scnprintf(buf, PAGE_SIZE, "No Reset Timer set.\n");
 	}
 	spin_unlock_bh(&aq->lock);
 	return rc;
@@ -561,8 +549,8 @@
 
 	spin_lock_bh(&aq->lock);
 	__ap_flush_queue(aq);
-	aq->state = AP_STATE_RESET_START;
-	ap_wait(ap_sm_event(aq, AP_EVENT_POLL));
+	aq->sm_state = AP_SM_STATE_RESET_START;
+	ap_wait(ap_sm_event(aq, AP_SM_EVENT_POLL));
 	spin_unlock_bh(&aq->lock);
 
 	AP_DBF(DBF_INFO, "reset queue=%02x.%04x triggered by user\n",
@@ -580,24 +568,150 @@
 	int rc = 0;
 
 	spin_lock_bh(&aq->lock);
-	if (aq->state == AP_STATE_SETIRQ_WAIT)
-		rc = snprintf(buf, PAGE_SIZE, "Enable Interrupt pending.\n");
-	else if (aq->interrupt == AP_INTR_ENABLED)
-		rc = snprintf(buf, PAGE_SIZE, "Interrupts enabled.\n");
+	if (aq->sm_state == AP_SM_STATE_SETIRQ_WAIT)
+		rc = scnprintf(buf, PAGE_SIZE, "Enable Interrupt pending.\n");
+	else if (aq->interrupt)
+		rc = scnprintf(buf, PAGE_SIZE, "Interrupts enabled.\n");
 	else
-		rc = snprintf(buf, PAGE_SIZE, "Interrupts disabled.\n");
+		rc = scnprintf(buf, PAGE_SIZE, "Interrupts disabled.\n");
 	spin_unlock_bh(&aq->lock);
 	return rc;
 }
 
 static DEVICE_ATTR_RO(interrupt);
 
+static ssize_t config_show(struct device *dev,
+			     struct device_attribute *attr, char *buf)
+{
+	struct ap_queue *aq = to_ap_queue(dev);
+	int rc;
+
+	spin_lock_bh(&aq->lock);
+	rc = scnprintf(buf, PAGE_SIZE, "%d\n", aq->config ? 1 : 0);
+	spin_unlock_bh(&aq->lock);
+	return rc;
+}
+
+static DEVICE_ATTR_RO(config);
+
+#ifdef CONFIG_ZCRYPT_DEBUG
+static ssize_t states_show(struct device *dev,
+			   struct device_attribute *attr, char *buf)
+{
+	struct ap_queue *aq = to_ap_queue(dev);
+	int rc = 0;
+
+	spin_lock_bh(&aq->lock);
+	/* queue device state */
+	switch (aq->dev_state) {
+	case AP_DEV_STATE_UNINITIATED:
+		rc = scnprintf(buf, PAGE_SIZE, "UNINITIATED\n");
+		break;
+	case AP_DEV_STATE_OPERATING:
+		rc = scnprintf(buf, PAGE_SIZE, "OPERATING");
+		break;
+	case AP_DEV_STATE_SHUTDOWN:
+		rc = scnprintf(buf, PAGE_SIZE, "SHUTDOWN");
+		break;
+	case AP_DEV_STATE_ERROR:
+		rc = scnprintf(buf, PAGE_SIZE, "ERROR");
+		break;
+	default:
+		rc = scnprintf(buf, PAGE_SIZE, "UNKNOWN");
+	}
+	/* state machine state */
+	if (aq->dev_state) {
+		switch (aq->sm_state) {
+		case AP_SM_STATE_RESET_START:
+			rc += scnprintf(buf + rc, PAGE_SIZE - rc,
+					" [RESET_START]\n");
+			break;
+		case AP_SM_STATE_RESET_WAIT:
+			rc += scnprintf(buf + rc, PAGE_SIZE - rc,
+					" [RESET_WAIT]\n");
+			break;
+		case AP_SM_STATE_SETIRQ_WAIT:
+			rc += scnprintf(buf + rc, PAGE_SIZE - rc,
+					" [SETIRQ_WAIT]\n");
+			break;
+		case AP_SM_STATE_IDLE:
+			rc += scnprintf(buf + rc, PAGE_SIZE - rc,
+					" [IDLE]\n");
+			break;
+		case AP_SM_STATE_WORKING:
+			rc += scnprintf(buf + rc, PAGE_SIZE - rc,
+					" [WORKING]\n");
+			break;
+		case AP_SM_STATE_QUEUE_FULL:
+			rc += scnprintf(buf + rc, PAGE_SIZE - rc,
+					" [FULL]\n");
+			break;
+		default:
+			rc += scnprintf(buf + rc, PAGE_SIZE - rc,
+					" [UNKNOWN]\n");
+		}
+	}
+	spin_unlock_bh(&aq->lock);
+
+	return rc;
+}
+static DEVICE_ATTR_RO(states);
+
+static ssize_t last_err_rc_show(struct device *dev,
+				struct device_attribute *attr, char *buf)
+{
+	struct ap_queue *aq = to_ap_queue(dev);
+	int rc;
+
+	spin_lock_bh(&aq->lock);
+	rc = aq->last_err_rc;
+	spin_unlock_bh(&aq->lock);
+
+	switch (rc) {
+	case AP_RESPONSE_NORMAL:
+		return scnprintf(buf, PAGE_SIZE, "NORMAL\n");
+	case AP_RESPONSE_Q_NOT_AVAIL:
+		return scnprintf(buf, PAGE_SIZE, "Q_NOT_AVAIL\n");
+	case AP_RESPONSE_RESET_IN_PROGRESS:
+		return scnprintf(buf, PAGE_SIZE, "RESET_IN_PROGRESS\n");
+	case AP_RESPONSE_DECONFIGURED:
+		return scnprintf(buf, PAGE_SIZE, "DECONFIGURED\n");
+	case AP_RESPONSE_CHECKSTOPPED:
+		return scnprintf(buf, PAGE_SIZE, "CHECKSTOPPED\n");
+	case AP_RESPONSE_BUSY:
+		return scnprintf(buf, PAGE_SIZE, "BUSY\n");
+	case AP_RESPONSE_INVALID_ADDRESS:
+		return scnprintf(buf, PAGE_SIZE, "INVALID_ADDRESS\n");
+	case AP_RESPONSE_OTHERWISE_CHANGED:
+		return scnprintf(buf, PAGE_SIZE, "OTHERWISE_CHANGED\n");
+	case AP_RESPONSE_Q_FULL:
+		return scnprintf(buf, PAGE_SIZE, "Q_FULL/NO_PENDING_REPLY\n");
+	case AP_RESPONSE_INDEX_TOO_BIG:
+		return scnprintf(buf, PAGE_SIZE, "INDEX_TOO_BIG\n");
+	case AP_RESPONSE_NO_FIRST_PART:
+		return scnprintf(buf, PAGE_SIZE, "NO_FIRST_PART\n");
+	case AP_RESPONSE_MESSAGE_TOO_BIG:
+		return scnprintf(buf, PAGE_SIZE, "MESSAGE_TOO_BIG\n");
+	case AP_RESPONSE_REQ_FAC_NOT_INST:
+		return scnprintf(buf, PAGE_SIZE, "REQ_FAC_NOT_INST\n");
+	default:
+		return scnprintf(buf, PAGE_SIZE, "response code %d\n", rc);
+	}
+}
+static DEVICE_ATTR_RO(last_err_rc);
+#endif
+
 static struct attribute *ap_queue_dev_attrs[] = {
 	&dev_attr_request_count.attr,
 	&dev_attr_requestq_count.attr,
 	&dev_attr_pendingq_count.attr,
 	&dev_attr_reset.attr,
 	&dev_attr_interrupt.attr,
+	&dev_attr_config.attr,
+#ifdef CONFIG_ZCRYPT_DEBUG
+	&dev_attr_states.attr,
+	&dev_attr_last_err_rc.attr,
+#endif
 	NULL
 };
 
@@ -619,11 +733,10 @@
 {
 	struct ap_queue *aq = to_ap_queue(dev);
 
-	if (!list_empty(&aq->list)) {
-		spin_lock_bh(&ap_list_lock);
-		list_del_init(&aq->list);
-		spin_unlock_bh(&ap_list_lock);
-	}
+	spin_lock_bh(&ap_queues_lock);
+	hash_del(&aq->hnode);
+	spin_unlock_bh(&ap_queues_lock);
+
 	kfree(aq);
 }
 
@@ -638,10 +751,8 @@
 	aq->ap_dev.device.type = &ap_queue_type;
 	aq->ap_dev.device_type = device_type;
 	aq->qid = qid;
-	aq->state = AP_STATE_UNBOUND;
-	aq->interrupt = AP_INTR_DISABLED;
+	aq->interrupt = false;
 	spin_lock_init(&aq->lock);
-	INIT_LIST_HEAD(&aq->list);
 	INIT_LIST_HEAD(&aq->pendingq);
 	INIT_LIST_HEAD(&aq->requestq);
 	timer_setup(&aq->timeout, ap_request_timeout, 0);
@@ -654,7 +765,7 @@
 	aq->reply = reply;
 
 	spin_lock_bh(&aq->lock);
-	ap_wait(ap_sm_event(aq, AP_EVENT_POLL));
+	ap_wait(ap_sm_event(aq, AP_SM_EVENT_POLL));
 	spin_unlock_bh(&aq->lock);
 }
 EXPORT_SYMBOL(ap_queue_init_reply);
@@ -664,22 +775,30 @@
  * @aq: The AP device to queue the message to
  * @ap_msg: The message that is to be added
  */
-void ap_queue_message(struct ap_queue *aq, struct ap_message *ap_msg)
+int ap_queue_message(struct ap_queue *aq, struct ap_message *ap_msg)
 {
-	/* For asynchronous message handling a valid receive-callback
-	 * is required.
-	 */
+	int rc = 0;
+
+	/* msg needs to have a valid receive-callback */
 	BUG_ON(!ap_msg->receive);
 
 	spin_lock_bh(&aq->lock);
-	/* Queue the message. */
-	list_add_tail(&ap_msg->list, &aq->requestq);
-	aq->requestq_count++;
-	aq->total_request_count++;
-	atomic64_inc(&aq->card->total_request_count);
+
+	/* only allow new messages to be queued if device state is ok */
+	if (aq->dev_state == AP_DEV_STATE_OPERATING) {
+		list_add_tail(&ap_msg->list, &aq->requestq);
+		aq->requestq_count++;
+		aq->total_request_count++;
+		atomic64_inc(&aq->card->total_request_count);
+	} else
+		rc = -ENODEV;
+
 	/* Send/receive as many request from the queue as possible. */
-	ap_wait(ap_sm_event_loop(aq, AP_EVENT_POLL));
+	ap_wait(ap_sm_event_loop(aq, AP_SM_EVENT_POLL));
+
 	spin_unlock_bh(&aq->lock);
+
+	return rc;
 }
 EXPORT_SYMBOL(ap_queue_message);
 
@@ -750,8 +869,8 @@
 	spin_lock_bh(&aq->lock);
 	/* flush queue */
 	__ap_flush_queue(aq);
-	/* set REMOVE state to prevent new messages are queued in */
-	aq->state = AP_STATE_REMOVE;
+	/* move queue device state to SHUTDOWN in progress */
+	aq->dev_state = AP_DEV_STATE_SHUTDOWN;
 	spin_unlock_bh(&aq->lock);
 	del_timer_sync(&aq->timeout);
 }
@@ -759,23 +878,23 @@
 void ap_queue_remove(struct ap_queue *aq)
 {
 	/*
-	 * all messages have been flushed and the state is
-	 * AP_STATE_REMOVE. Now reset with zero which also
-	 * clears the irq registration and move the state
-	 * to AP_STATE_UNBOUND to signal that this queue
-	 * is not used by any driver currently.
+	 * all messages have been flushed and the device state
+	 * is SHUTDOWN. Now reset with zero which also clears
+	 * the irq registration and move the device state
+	 * to the initial value AP_DEV_STATE_UNINITIATED.
 	 */
 	spin_lock_bh(&aq->lock);
 	ap_zapq(aq->qid);
-	aq->state = AP_STATE_UNBOUND;
+	aq->dev_state = AP_DEV_STATE_UNINITIATED;
 	spin_unlock_bh(&aq->lock);
 }
 
 void ap_queue_init_state(struct ap_queue *aq)
 {
 	spin_lock_bh(&aq->lock);
-	aq->state = AP_STATE_RESET_START;
-	ap_wait(ap_sm_event(aq, AP_EVENT_POLL));
+	aq->dev_state = AP_DEV_STATE_OPERATING;
+	aq->sm_state = AP_SM_STATE_RESET_START;
+	ap_wait(ap_sm_event(aq, AP_SM_EVENT_POLL));
 	spin_unlock_bh(&aq->lock);
 }
 EXPORT_SYMBOL(ap_queue_init_state);
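
Note on the prototype change above: ap_queue_message() now returns an int (0 on success, -ENODEV when the queue device is not in the OPERATING state), so submission paths should propagate the return code instead of assuming the message was queued. A hedged caller-side sketch, with an illustrative helper name:

/* Hedged sketch of a caller honouring the new ap_queue_message() return code. */
#include "ap_bus.h"

static int example_submit(struct ap_queue *aq, struct ap_message *ap_msg)
{
	int rc = ap_queue_message(aq, ap_msg);

	if (rc)		/* e.g. -ENODEV: queue shut down or in error state */
		ap_msg->rc = rc;
	return rc;
}
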
diff --git a/drivers/s390/crypto/pkey_api.c b/drivers/s390/crypto/pkey_api.c
index 0658aa5..dd84995 100644
--- a/drivers/s390/crypto/pkey_api.c
+++ b/drivers/s390/crypto/pkey_api.c
@@ -25,13 +25,15 @@
 
 #include "zcrypt_api.h"
 #include "zcrypt_ccamisc.h"
+#include "zcrypt_ep11misc.h"
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("IBM Corporation");
 MODULE_DESCRIPTION("s390 protected key interface");
 
-#define KEYBLOBBUFSIZE 8192  /* key buffer size used for internal processing */
-#define MAXAPQNSINLIST 64    /* max 64 apqns within a apqn list */
+#define KEYBLOBBUFSIZE 8192	/* key buffer size used for internal processing */
+#define PROTKEYBLOBBUFSIZE 256	/* protected key buffer size used internal */
+#define MAXAPQNSINLIST 64	/* max 64 apqns within a apqn list */
 
 /*
  * debug feature data and functions
@@ -68,6 +70,17 @@
 	u8  protkey[MAXPROTKEYSIZE]; /* the protected key blob */
 } __packed;
 
+/* inside view of a clear key token (type 0x00 version 0x02) */
+struct clearaeskeytoken {
+	u8  type;	 /* 0x00 for PAES specific key tokens */
+	u8  res0[3];
+	u8  version;	 /* 0x02 for clear AES key token */
+	u8  res1[3];
+	u32 keytype;	 /* key type, one of the PKEY_KEYTYPE values */
+	u32 len;	 /* bytes actually stored in clearkey[] */
+	u8  clearkey[]; /* clear key value */
+} __packed;
+
 /*
  * Create a protected key from a clear key value.
  */
@@ -175,6 +188,73 @@
 }
 
 /*
+ * Construct EP11 key with given clear key value.
+ */
+static int pkey_clr2ep11key(const u8 *clrkey, size_t clrkeylen,
+			    u8 *keybuf, size_t *keybuflen)
+{
+	int i, rc;
+	u16 card, dom;
+	u32 nr_apqns, *apqns = NULL;
+
+	/* build a list of apqns suitable for ep11 keys with cpacf support */
+	rc = ep11_findcard2(&apqns, &nr_apqns, 0xFFFF, 0xFFFF,
+			    ZCRYPT_CEX7, EP11_API_V, NULL);
+	if (rc)
+		goto out;
+
+	/* go through the list of apqns and try to build an ep11 key */
+	for (rc = -ENODEV, i = 0; i < nr_apqns; i++) {
+		card = apqns[i] >> 16;
+		dom = apqns[i] & 0xFFFF;
+		rc = ep11_clr2keyblob(card, dom, clrkeylen * 8,
+				      0, clrkey, keybuf, keybuflen);
+		if (rc == 0)
+			break;
+	}
+
+out:
+	kfree(apqns);
+	if (rc)
+		DEBUG_DBG("%s failed rc=%d\n", __func__, rc);
+	return rc;
+}
+
+/*
+ * Find card and transform EP11 secure key into protected key.
+ */
+static int pkey_ep11key2pkey(const u8 *key, struct pkey_protkey *pkey)
+{
+	int i, rc;
+	u16 card, dom;
+	u32 nr_apqns, *apqns = NULL;
+	struct ep11keyblob *kb = (struct ep11keyblob *) key;
+
+	/* build a list of apqns suitable for this key */
+	rc = ep11_findcard2(&apqns, &nr_apqns, 0xFFFF, 0xFFFF,
+			    ZCRYPT_CEX7, EP11_API_V, kb->wkvp);
+	if (rc)
+		goto out;
+
+	/* go through the list of apqns and try to derive a pkey */
+	for (rc = -ENODEV, i = 0; i < nr_apqns; i++) {
+		card = apqns[i] >> 16;
+		dom = apqns[i] & 0xFFFF;
+		pkey->len = sizeof(pkey->protkey);
+		rc = ep11_kblob2protkey(card, dom, key, kb->head.len,
+					pkey->protkey, &pkey->len, &pkey->type);
+		if (rc == 0)
+			break;
+	}
+
+out:
+	kfree(apqns);
+	if (rc)
+		DEBUG_DBG("%s failed rc=%d\n", __func__, rc);
+	return rc;
+}
+
+/*
  * Verify key and give back some info about the key.
  */
 static int pkey_verifykey(const struct pkey_seckey *seckey,
@@ -307,26 +387,96 @@
 static int pkey_nonccatok2pkey(const u8 *key, u32 keylen,
 			       struct pkey_protkey *protkey)
 {
+	int rc = -EINVAL;
+	u8 *tmpbuf = NULL;
 	struct keytoken_header *hdr = (struct keytoken_header *)key;
-	struct protaeskeytoken *t;
 
 	switch (hdr->version) {
-	case TOKVER_PROTECTED_KEY:
-		if (keylen != sizeof(struct protaeskeytoken))
-			return -EINVAL;
+	case TOKVER_PROTECTED_KEY: {
+		struct protaeskeytoken *t;
 
+		if (keylen != sizeof(struct protaeskeytoken))
+			goto out;
 		t = (struct protaeskeytoken *)key;
 		protkey->len = t->len;
 		protkey->type = t->keytype;
 		memcpy(protkey->protkey, t->protkey,
 		       sizeof(protkey->protkey));
+		rc = pkey_verifyprotkey(protkey);
+		break;
+	}
+	case TOKVER_CLEAR_KEY: {
+		struct clearaeskeytoken *t;
+		struct pkey_clrkey ckey;
+		union u_tmpbuf {
+			u8 skey[SECKEYBLOBSIZE];
+			u8 ep11key[MAXEP11AESKEYBLOBSIZE];
+		};
+		size_t tmpbuflen = sizeof(union u_tmpbuf);
 
-		return pkey_verifyprotkey(protkey);
+		if (keylen < sizeof(struct clearaeskeytoken))
+			goto out;
+		t = (struct clearaeskeytoken *)key;
+		if (keylen != sizeof(*t) + t->len)
+			goto out;
+		if ((t->keytype == PKEY_KEYTYPE_AES_128 && t->len == 16)
+		    || (t->keytype == PKEY_KEYTYPE_AES_192 && t->len == 24)
+		    || (t->keytype == PKEY_KEYTYPE_AES_256 && t->len == 32))
+			memcpy(ckey.clrkey, t->clearkey, t->len);
+		else
+			goto out;
+		/* alloc temp key buffer space */
+		tmpbuf = kmalloc(tmpbuflen, GFP_ATOMIC);
+		if (!tmpbuf) {
+			rc = -ENOMEM;
+			goto out;
+		}
+		/* try direct way with the PCKMO instruction */
+		rc = pkey_clr2protkey(t->keytype, &ckey, protkey);
+		if (rc == 0)
+			break;
+		/* PCKMO failed, so try the CCA secure key way */
+		rc = cca_clr2seckey(0xFFFF, 0xFFFF, t->keytype,
+				    ckey.clrkey, tmpbuf);
+		if (rc == 0)
+			rc = pkey_skey2pkey(tmpbuf, protkey);
+		if (rc == 0)
+			break;
+		/* if the CCA way also failed, let's try via EP11 */
+		rc = pkey_clr2ep11key(ckey.clrkey, t->len,
+				      tmpbuf, &tmpbuflen);
+		if (rc == 0)
+			rc = pkey_ep11key2pkey(tmpbuf, protkey);
+		/* now we should really have a protected key */
+		DEBUG_ERR("%s unable to build protected key from clear",
+			  __func__);
+		break;
+	}
+	case TOKVER_EP11_AES: {
+		/* check whether the ep11 key is exportable as protected key */
+		rc = ep11_check_aes_key(debug_info, 3, key, keylen, 1);
+		if (rc)
+			goto out;
+		rc = pkey_ep11key2pkey(key, protkey);
+		break;
+	}
+	case TOKVER_EP11_AES_WITH_HEADER:
+		/* check whether the ep11 key with header is exportable as protected key */
+		rc = ep11_check_aes_key_with_hdr(debug_info, 3, key, keylen, 1);
+		if (rc)
+			goto out;
+		rc = pkey_ep11key2pkey(key + sizeof(struct ep11kblob_header),
+				       protkey);
+		break;
 	default:
 		DEBUG_ERR("%s unknown/unsupported non-CCA token version %d\n",
 			  __func__, hdr->version);
-		return -EINVAL;
+		rc = -EINVAL;
 	}
+
+out:
+	kfree(tmpbuf);
+	return rc;
 }
 
 /*
@@ -405,6 +555,10 @@
 		if (*keybufsize < SECKEYBLOBSIZE)
 			return -EINVAL;
 		break;
+	case PKEY_TYPE_EP11:
+		if (*keybufsize < MINEP11AESKEYBLOBSIZE)
+			return -EINVAL;
+		break;
 	default:
 		return -EINVAL;
 	}
@@ -421,7 +575,10 @@
 	for (i = 0, rc = -ENODEV; i < nr_apqns; i++) {
 		card = apqns[i].card;
 		dom = apqns[i].domain;
-		if (ktype == PKEY_TYPE_CCA_DATA) {
+		if (ktype == PKEY_TYPE_EP11) {
+			rc = ep11_genaeskey(card, dom, ksize, kflags,
+					    keybuf, keybufsize);
+		} else if (ktype == PKEY_TYPE_CCA_DATA) {
 			rc = cca_genseckey(card, dom, ksize, keybuf);
 			*keybufsize = (rc ? 0 : SECKEYBLOBSIZE);
 		} else /* TOKVER_CCA_VLSC */
@@ -452,6 +609,10 @@
 		if (*keybufsize < SECKEYBLOBSIZE)
 			return -EINVAL;
 		break;
+	case PKEY_TYPE_EP11:
+		if (*keybufsize < MINEP11AESKEYBLOBSIZE)
+			return -EINVAL;
+		break;
 	default:
 		return -EINVAL;
 	}
@@ -468,7 +629,10 @@
 	for (i = 0, rc = -ENODEV; i < nr_apqns; i++) {
 		card = apqns[i].card;
 		dom = apqns[i].domain;
-		if (ktype == PKEY_TYPE_CCA_DATA) {
+		if (ktype == PKEY_TYPE_EP11) {
+			rc = ep11_clr2keyblob(card, dom, ksize, kflags,
+					      clrkey, keybuf, keybufsize);
+		} else if (ktype == PKEY_TYPE_CCA_DATA) {
 			rc = cca_clr2seckey(card, dom, ksize,
 					    clrkey, keybuf);
 			*keybufsize = (rc ? 0 : SECKEYBLOBSIZE);
@@ -491,11 +655,11 @@
 	u32 _nr_apqns, *_apqns = NULL;
 	struct keytoken_header *hdr = (struct keytoken_header *)key;
 
-	if (keylen < sizeof(struct keytoken_header) ||
-	    hdr->type != TOKTYPE_CCA_INTERNAL)
+	if (keylen < sizeof(struct keytoken_header))
 		return -EINVAL;
 
-	if (hdr->version == TOKVER_CCA_AES) {
+	if (hdr->type == TOKTYPE_CCA_INTERNAL
+	    && hdr->version == TOKVER_CCA_AES) {
 		struct secaeskeytoken *t = (struct secaeskeytoken *)key;
 
 		rc = cca_check_secaeskeytoken(debug_info, 3, key, 0);
@@ -507,13 +671,14 @@
 			*ksize = (enum pkey_key_size) t->bitsize;
 
 		rc = cca_findcard2(&_apqns, &_nr_apqns, *cardnr, *domain,
-				   ZCRYPT_CEX3C, t->mkvp, 0, 1);
+				   ZCRYPT_CEX3C, AES_MK_SET, t->mkvp, 0, 1);
 		if (rc == 0 && flags)
 			*flags = PKEY_FLAGS_MATCH_CUR_MKVP;
 		if (rc == -ENODEV) {
 			rc = cca_findcard2(&_apqns, &_nr_apqns,
 					   *cardnr, *domain,
-					   ZCRYPT_CEX3C, 0, t->mkvp, 1);
+					   ZCRYPT_CEX3C, AES_MK_SET,
+					   0, t->mkvp, 1);
 			if (rc == 0 && flags)
 				*flags = PKEY_FLAGS_MATCH_ALT_MKVP;
 		}
@@ -523,7 +688,8 @@
 		*cardnr = ((struct pkey_apqn *)_apqns)->card;
 		*domain = ((struct pkey_apqn *)_apqns)->domain;
 
-	} else if (hdr->version == TOKVER_CCA_VLSC) {
+	} else if (hdr->type == TOKTYPE_CCA_INTERNAL
+		   && hdr->version == TOKVER_CCA_VLSC) {
 		struct cipherkeytoken *t = (struct cipherkeytoken *)key;
 
 		rc = cca_check_secaescipherkey(debug_info, 3, key, 0, 1);
@@ -542,13 +708,14 @@
 		}
 
 		rc = cca_findcard2(&_apqns, &_nr_apqns, *cardnr, *domain,
-				   ZCRYPT_CEX6, t->mkvp0, 0, 1);
+				   ZCRYPT_CEX6, AES_MK_SET, t->mkvp0, 0, 1);
 		if (rc == 0 && flags)
 			*flags = PKEY_FLAGS_MATCH_CUR_MKVP;
 		if (rc == -ENODEV) {
 			rc = cca_findcard2(&_apqns, &_nr_apqns,
 					   *cardnr, *domain,
-					   ZCRYPT_CEX6, 0, t->mkvp0, 1);
+					   ZCRYPT_CEX6, AES_MK_SET,
+					   0, t->mkvp0, 1);
 			if (rc == 0 && flags)
 				*flags = PKEY_FLAGS_MATCH_ALT_MKVP;
 		}
@@ -558,6 +725,29 @@
 		*cardnr = ((struct pkey_apqn *)_apqns)->card;
 		*domain = ((struct pkey_apqn *)_apqns)->domain;
 
+	} else if (hdr->type == TOKTYPE_NON_CCA
+		   && hdr->version == TOKVER_EP11_AES) {
+		struct ep11keyblob *kb = (struct ep11keyblob *)key;
+
+		rc = ep11_check_aes_key(debug_info, 3, key, keylen, 1);
+		if (rc)
+			goto out;
+		if (ktype)
+			*ktype = PKEY_TYPE_EP11;
+		if (ksize)
+			*ksize = kb->head.keybitlen;
+
+		rc = ep11_findcard2(&_apqns, &_nr_apqns, *cardnr, *domain,
+				    ZCRYPT_CEX7, EP11_API_V, kb->wkvp);
+		if (rc)
+			goto out;
+
+		if (flags)
+			*flags = PKEY_FLAGS_MATCH_CUR_MKVP;
+
+		*cardnr = ((struct pkey_apqn *)_apqns)->card;
+		*domain = ((struct pkey_apqn *)_apqns)->domain;
+
 	} else
 		rc = -EINVAL;
 
@@ -580,30 +770,32 @@
 	if (keylen < sizeof(struct keytoken_header))
 		return -EINVAL;
 
-	switch (hdr->type) {
-	case TOKTYPE_NON_CCA:
-		return pkey_nonccatok2pkey(key, keylen, pkey);
-	case TOKTYPE_CCA_INTERNAL:
-		switch (hdr->version) {
-		case TOKVER_CCA_AES:
+	if (hdr->type == TOKTYPE_CCA_INTERNAL) {
+		if (hdr->version == TOKVER_CCA_AES) {
 			if (keylen != sizeof(struct secaeskeytoken))
 				return -EINVAL;
 			if (cca_check_secaeskeytoken(debug_info, 3, key, 0))
 				return -EINVAL;
-			break;
-		case TOKVER_CCA_VLSC:
+		} else if (hdr->version == TOKVER_CCA_VLSC) {
 			if (keylen < hdr->len || keylen > MAXCCAVLSCTOKENSIZE)
 				return -EINVAL;
 			if (cca_check_secaescipherkey(debug_info, 3, key, 0, 1))
 				return -EINVAL;
-			break;
-		default:
+		} else {
 			DEBUG_ERR("%s unknown CCA internal token version %d\n",
 				  __func__, hdr->version);
 			return -EINVAL;
 		}
-		break;
-	default:
+	} else if (hdr->type == TOKTYPE_NON_CCA) {
+		if (hdr->version == TOKVER_EP11_AES) {
+			if (keylen < sizeof(struct ep11keyblob))
+				return -EINVAL;
+			if (ep11_check_aes_key(debug_info, 3, key, keylen, 1))
+				return -EINVAL;
+		} else {
+			return pkey_nonccatok2pkey(key, keylen, pkey);
+		}
+	} else {
 		DEBUG_ERR("%s unknown/unsupported blob type %d\n",
 			  __func__, hdr->type);
 		return -EINVAL;
@@ -613,12 +805,22 @@
 	for (i = 0, rc = -ENODEV; i < nr_apqns; i++) {
 		card = apqns[i].card;
 		dom = apqns[i].domain;
-		if (hdr->version == TOKVER_CCA_AES)
+		if (hdr->type == TOKTYPE_CCA_INTERNAL
+		    && hdr->version == TOKVER_CCA_AES)
 			rc = cca_sec2protkey(card, dom, key, pkey->protkey,
 					     &pkey->len, &pkey->type);
-		else /* TOKVER_CCA_VLSC */
+		else if (hdr->type == TOKTYPE_CCA_INTERNAL
+			 && hdr->version == TOKVER_CCA_VLSC)
 			rc = cca_cipher2protkey(card, dom, key, pkey->protkey,
 						&pkey->len, &pkey->type);
+		else { /* EP11 AES secure key blob */
+			struct ep11keyblob *kb = (struct ep11keyblob *) key;
+
+			pkey->len = sizeof(pkey->protkey);
+			rc = ep11_kblob2protkey(card, dom, key, kb->head.len,
+						pkey->protkey, &pkey->len,
+						&pkey->type);
+		}
 		if (rc == 0)
 			break;
 	}
@@ -629,16 +831,48 @@
 static int pkey_apqns4key(const u8 *key, size_t keylen, u32 flags,
 			  struct pkey_apqn *apqns, size_t *nr_apqns)
 {
-	int rc = EINVAL;
+	int rc;
 	u32 _nr_apqns, *_apqns = NULL;
 	struct keytoken_header *hdr = (struct keytoken_header *)key;
 
-	if (keylen < sizeof(struct keytoken_header) ||
-	    hdr->type != TOKTYPE_CCA_INTERNAL ||
-	    flags == 0)
+	if (keylen < sizeof(struct keytoken_header) || flags == 0)
 		return -EINVAL;
 
-	if (hdr->version == TOKVER_CCA_AES || hdr->version == TOKVER_CCA_VLSC) {
+	if (hdr->type == TOKTYPE_NON_CCA
+	    && (hdr->version == TOKVER_EP11_AES_WITH_HEADER
+		|| hdr->version == TOKVER_EP11_ECC_WITH_HEADER)
+	    && is_ep11_keyblob(key + sizeof(struct ep11kblob_header))) {
+		int minhwtype = 0, api = 0;
+		struct ep11keyblob *kb = (struct ep11keyblob *)
+			(key + sizeof(struct ep11kblob_header));
+
+		if (flags != PKEY_FLAGS_MATCH_CUR_MKVP)
+			return -EINVAL;
+		if (kb->attr & EP11_BLOB_PKEY_EXTRACTABLE) {
+			minhwtype = ZCRYPT_CEX7;
+			api = EP11_API_V;
+		}
+		rc = ep11_findcard2(&_apqns, &_nr_apqns, 0xFFFF, 0xFFFF,
+				    minhwtype, api, kb->wkvp);
+		if (rc)
+			goto out;
+	} else if (hdr->type == TOKTYPE_NON_CCA
+		   && hdr->version == TOKVER_EP11_AES
+		   && is_ep11_keyblob(key)) {
+		int minhwtype = 0, api = 0;
+		struct ep11keyblob *kb = (struct ep11keyblob *) key;
+
+		if (flags != PKEY_FLAGS_MATCH_CUR_MKVP)
+			return -EINVAL;
+		if (kb->attr & EP11_BLOB_PKEY_EXTRACTABLE) {
+			minhwtype = ZCRYPT_CEX7;
+			api = EP11_API_V;
+		}
+		rc = ep11_findcard2(&_apqns, &_nr_apqns, 0xFFFF, 0xFFFF,
+				    minhwtype, api, kb->wkvp);
+		if (rc)
+			goto out;
+	} else if (hdr->type == TOKTYPE_CCA_INTERNAL) {
 		int minhwtype = ZCRYPT_CEX3C;
 		u64 cur_mkvp = 0, old_mkvp = 0;
 
@@ -649,7 +883,7 @@
 				cur_mkvp = t->mkvp;
 			if (flags & PKEY_FLAGS_MATCH_ALT_MKVP)
 				old_mkvp = t->mkvp;
-		} else {
+		} else if (hdr->version == TOKVER_CCA_VLSC) {
 			struct cipherkeytoken *t = (struct cipherkeytoken *)key;
 
 			minhwtype = ZCRYPT_CEX6;
@@ -657,19 +891,43 @@
 				cur_mkvp = t->mkvp0;
 			if (flags & PKEY_FLAGS_MATCH_ALT_MKVP)
 				old_mkvp = t->mkvp0;
+		} else {
+			/* unknown cca internal token type */
+			return -EINVAL;
 		}
 		rc = cca_findcard2(&_apqns, &_nr_apqns, 0xFFFF, 0xFFFF,
-				   minhwtype, cur_mkvp, old_mkvp, 1);
+				   minhwtype, AES_MK_SET,
+				   cur_mkvp, old_mkvp, 1);
 		if (rc)
 			goto out;
-		if (apqns) {
-			if (*nr_apqns < _nr_apqns)
-				rc = -ENOSPC;
-			else
-				memcpy(apqns, _apqns, _nr_apqns * sizeof(u32));
+	} else if (hdr->type == TOKTYPE_CCA_INTERNAL_PKA) {
+		u64 cur_mkvp = 0, old_mkvp = 0;
+		struct eccprivkeytoken *t = (struct eccprivkeytoken *)key;
+
+		if (t->secid == 0x20) {
+			if (flags & PKEY_FLAGS_MATCH_CUR_MKVP)
+				cur_mkvp = t->mkvp;
+			if (flags & PKEY_FLAGS_MATCH_ALT_MKVP)
+				old_mkvp = t->mkvp;
+		} else {
+			/* unknown cca internal 2 token type */
+			return -EINVAL;
 		}
-		*nr_apqns = _nr_apqns;
+		rc = cca_findcard2(&_apqns, &_nr_apqns, 0xFFFF, 0xFFFF,
+				   ZCRYPT_CEX7, APKA_MK_SET,
+				   cur_mkvp, old_mkvp, 1);
+		if (rc)
+			goto out;
+	} else
+		return -EINVAL;
+
+	if (apqns) {
+		if (*nr_apqns < _nr_apqns)
+			rc = -ENOSPC;
+		else
+			memcpy(apqns, _apqns, _nr_apqns * sizeof(u32));
 	}
+	*nr_apqns = _nr_apqns;
 
 out:
 	kfree(_apqns);
@@ -680,7 +938,7 @@
 			      u8 cur_mkvp[32], u8 alt_mkvp[32], u32 flags,
 			      struct pkey_apqn *apqns, size_t *nr_apqns)
 {
-	int rc = -EINVAL;
+	int rc;
 	u32 _nr_apqns, *_apqns = NULL;
 
 	if (ktype == PKEY_TYPE_CCA_DATA || ktype == PKEY_TYPE_CCA_CIPHER) {
@@ -694,61 +952,174 @@
 		if (ktype == PKEY_TYPE_CCA_CIPHER)
 			minhwtype = ZCRYPT_CEX6;
 		rc = cca_findcard2(&_apqns, &_nr_apqns, 0xFFFF, 0xFFFF,
-				   minhwtype, cur_mkvp, old_mkvp, 1);
+				   minhwtype, AES_MK_SET,
+				   cur_mkvp, old_mkvp, 1);
 		if (rc)
 			goto out;
-		if (apqns) {
-			if (*nr_apqns < _nr_apqns)
-				rc = -ENOSPC;
-			else
-				memcpy(apqns, _apqns, _nr_apqns * sizeof(u32));
-		}
-		*nr_apqns = _nr_apqns;
+	} else if (ktype == PKEY_TYPE_CCA_ECC) {
+		u64 cur_mkvp = 0, old_mkvp = 0;
+
+		if (flags & PKEY_FLAGS_MATCH_CUR_MKVP)
+			cur_mkvp = *((u64 *) cur_mkvp);
+		if (flags & PKEY_FLAGS_MATCH_ALT_MKVP)
+			old_mkvp = *((u64 *) alt_mkvp);
+		rc = cca_findcard2(&_apqns, &_nr_apqns, 0xFFFF, 0xFFFF,
+				   ZCRYPT_CEX7, APKA_MK_SET,
+				   cur_mkvp, old_mkvp, 1);
+		if (rc)
+			goto out;
+
+	} else if (ktype == PKEY_TYPE_EP11 ||
+		   ktype == PKEY_TYPE_EP11_AES ||
+		   ktype == PKEY_TYPE_EP11_ECC) {
+		u8 *wkvp = NULL;
+
+		if (flags & PKEY_FLAGS_MATCH_CUR_MKVP)
+			wkvp = cur_mkvp;
+		rc = ep11_findcard2(&_apqns, &_nr_apqns, 0xFFFF, 0xFFFF,
+				    ZCRYPT_CEX7, EP11_API_V, wkvp);
+		if (rc)
+			goto out;
+
+	} else
+		return -EINVAL;
+
+	if (apqns) {
+		if (*nr_apqns < _nr_apqns)
+			rc = -ENOSPC;
+		else
+			memcpy(apqns, _apqns, _nr_apqns * sizeof(u32));
 	}
+	*nr_apqns = _nr_apqns;
 
 out:
 	kfree(_apqns);
 	return rc;
 }
 
+static int pkey_keyblob2pkey3(const struct pkey_apqn *apqns, size_t nr_apqns,
+			      const u8 *key, size_t keylen, u32 *protkeytype,
+			      u8 *protkey, u32 *protkeylen)
+{
+	int i, card, dom, rc;
+	struct keytoken_header *hdr = (struct keytoken_header *)key;
+
+	/* check for at least one apqn given */
+	if (!apqns || !nr_apqns)
+		return -EINVAL;
+
+	if (keylen < sizeof(struct keytoken_header))
+		return -EINVAL;
+
+	if (hdr->type == TOKTYPE_NON_CCA
+	    && hdr->version == TOKVER_EP11_AES_WITH_HEADER
+	    && is_ep11_keyblob(key + sizeof(struct ep11kblob_header))) {
+		/* EP11 AES key blob with header */
+		if (ep11_check_aes_key_with_hdr(debug_info, 3, key, keylen, 1))
+			return -EINVAL;
+	} else if (hdr->type == TOKTYPE_NON_CCA
+		   && hdr->version == TOKVER_EP11_ECC_WITH_HEADER
+		   && is_ep11_keyblob(key + sizeof(struct ep11kblob_header))) {
+		/* EP11 ECC key blob with header */
+		if (ep11_check_ecc_key_with_hdr(debug_info, 3, key, keylen, 1))
+			return -EINVAL;
+	} else if (hdr->type == TOKTYPE_NON_CCA
+		   && hdr->version == TOKVER_EP11_AES
+		   && is_ep11_keyblob(key)) {
+		/* EP11 AES key blob with header in session field */
+		if (ep11_check_aes_key(debug_info, 3, key, keylen, 1))
+			return -EINVAL;
+	} else	if (hdr->type == TOKTYPE_CCA_INTERNAL) {
+		if (hdr->version == TOKVER_CCA_AES) {
+			/* CCA AES data key */
+			if (keylen != sizeof(struct secaeskeytoken))
+				return -EINVAL;
+			if (cca_check_secaeskeytoken(debug_info, 3, key, 0))
+				return -EINVAL;
+		} else if (hdr->version == TOKVER_CCA_VLSC) {
+			/* CCA AES cipher key */
+			if (keylen < hdr->len || keylen > MAXCCAVLSCTOKENSIZE)
+				return -EINVAL;
+			if (cca_check_secaescipherkey(debug_info, 3, key, 0, 1))
+				return -EINVAL;
+		} else {
+			DEBUG_ERR("%s unknown CCA internal token version %d\n",
+				  __func__, hdr->version);
+			return -EINVAL;
+		}
+	} else if (hdr->type == TOKTYPE_CCA_INTERNAL_PKA) {
+		/* CCA ECC (private) key */
+		if (keylen < sizeof(struct eccprivkeytoken))
+			return -EINVAL;
+		if (cca_check_sececckeytoken(debug_info, 3, key, keylen, 1))
+			return -EINVAL;
+	} else if (hdr->type == TOKTYPE_NON_CCA) {
+		struct pkey_protkey pkey;
+
+		rc = pkey_nonccatok2pkey(key, keylen, &pkey);
+		if (rc)
+			return rc;
+		memcpy(protkey, pkey.protkey, pkey.len);
+		*protkeylen = pkey.len;
+		*protkeytype = pkey.type;
+		return 0;
+	} else {
+		DEBUG_ERR("%s unknown/unsupported blob type %d\n",
+			  __func__, hdr->type);
+		return -EINVAL;
+	}
+
+	/* simply try all apqns from the list */
+	for (rc = -ENODEV, i = 0; rc && i < nr_apqns; i++) {
+		card = apqns[i].card;
+		dom = apqns[i].domain;
+		if (hdr->type == TOKTYPE_NON_CCA
+		    && (hdr->version == TOKVER_EP11_AES_WITH_HEADER
+			|| hdr->version == TOKVER_EP11_ECC_WITH_HEADER)
+		    && is_ep11_keyblob(key + sizeof(struct ep11kblob_header)))
+			rc = ep11_kblob2protkey(card, dom, key, hdr->len,
+						protkey, protkeylen, protkeytype);
+		else if (hdr->type == TOKTYPE_NON_CCA
+			 && hdr->version == TOKVER_EP11_AES
+			 && is_ep11_keyblob(key))
+			rc = ep11_kblob2protkey(card, dom, key, hdr->len,
+						protkey, protkeylen, protkeytype);
+		else if (hdr->type == TOKTYPE_CCA_INTERNAL &&
+			 hdr->version == TOKVER_CCA_AES)
+			rc = cca_sec2protkey(card, dom, key, protkey,
+					     protkeylen, protkeytype);
+		else if (hdr->type == TOKTYPE_CCA_INTERNAL &&
+			 hdr->version == TOKVER_CCA_VLSC)
+			rc = cca_cipher2protkey(card, dom, key, protkey,
+						protkeylen, protkeytype);
+		else if (hdr->type == TOKTYPE_CCA_INTERNAL_PKA)
+			rc = cca_ecc2protkey(card, dom, key, protkey,
+					     protkeylen, protkeytype);
+		else
+			return -EINVAL;
+	}
+
+	return rc;
+}
+
 /*
  * File io functions
  */
 
 static void *_copy_key_from_user(void __user *ukey, size_t keylen)
 {
-	void *kkey;
-
 	if (!ukey || keylen < MINKEYBLOBSIZE || keylen > KEYBLOBBUFSIZE)
 		return ERR_PTR(-EINVAL);
-	kkey = kmalloc(keylen, GFP_KERNEL);
-	if (!kkey)
-		return ERR_PTR(-ENOMEM);
-	if (copy_from_user(kkey, ukey, keylen)) {
-		kfree(kkey);
-		return ERR_PTR(-EFAULT);
-	}
 
-	return kkey;
+	return memdup_user(ukey, keylen);
 }
 
 static void *_copy_apqns_from_user(void __user *uapqns, size_t nr_apqns)
 {
-	void *kapqns = NULL;
-	size_t nbytes;
+	if (!uapqns || nr_apqns == 0)
+		return NULL;
 
-	if (uapqns && nr_apqns > 0) {
-		nbytes = nr_apqns * sizeof(struct pkey_apqn);
-		kapqns = kmalloc(nbytes, GFP_KERNEL);
-		if (!kapqns)
-			return ERR_PTR(-ENOMEM);
-		if (copy_from_user(kapqns, uapqns, nbytes)) {
-			kfree(kapqns);
-			return ERR_PTR(-EFAULT);
-		}
-	}
-
-	return kapqns;
+	return memdup_user(uapqns, nr_apqns * sizeof(struct pkey_apqn));
 }
 
 static long pkey_unlocked_ioctl(struct file *filp, unsigned int cmd,
@@ -1131,6 +1502,55 @@
 		kfree(apqns);
 		break;
 	}
+	case PKEY_KBLOB2PROTK3: {
+		struct pkey_kblob2pkey3 __user *utp = (void __user *) arg;
+		struct pkey_kblob2pkey3 ktp;
+		struct pkey_apqn *apqns = NULL;
+		u32 protkeylen = PROTKEYBLOBBUFSIZE;
+		u8 *kkey, *protkey;
+
+		if (copy_from_user(&ktp, utp, sizeof(ktp)))
+			return -EFAULT;
+		apqns = _copy_apqns_from_user(ktp.apqns, ktp.apqn_entries);
+		if (IS_ERR(apqns))
+			return PTR_ERR(apqns);
+		kkey = _copy_key_from_user(ktp.key, ktp.keylen);
+		if (IS_ERR(kkey)) {
+			kfree(apqns);
+			return PTR_ERR(kkey);
+		}
+		protkey = kmalloc(protkeylen, GFP_KERNEL);
+		if (!protkey) {
+			kfree(apqns);
+			kfree(kkey);
+			return -ENOMEM;
+		}
+		rc = pkey_keyblob2pkey3(apqns, ktp.apqn_entries, kkey,
+					ktp.keylen, &ktp.pkeytype,
+					protkey, &protkeylen);
+		DEBUG_DBG("%s pkey_keyblob2pkey3()=%d\n", __func__, rc);
+		kfree(apqns);
+		kfree(kkey);
+		if (rc) {
+			kfree(protkey);
+			break;
+		}
+		if (ktp.pkey && ktp.pkeylen) {
+			if (protkeylen > ktp.pkeylen) {
+				kfree(protkey);
+				return -EINVAL;
+			}
+			if (copy_to_user(ktp.pkey, protkey, protkeylen)) {
+				kfree(protkey);
+				return -EFAULT;
+			}
+		}
+		kfree(protkey);
+		ktp.pkeylen = protkeylen;
+		if (copy_to_user(utp, &ktp, sizeof(ktp)))
+			return -EFAULT;
+		break;
+	}
 	default:
 		/* unknown/unsupported ioctl cmd */
 		return -ENOTTY;
@@ -1379,8 +1799,9 @@
 					    bool is_xts, char *buf, loff_t off,
 					    size_t count)
 {
-	size_t keysize;
-	int rc;
+	int i, rc, card, dom;
+	u32 nr_apqns, *apqns = NULL;
+	size_t keysize = CCACIPHERTOKENSIZE;
 
 	if (off != 0 || count < CCACIPHERTOKENSIZE)
 		return -EINVAL;
@@ -1388,22 +1809,31 @@
 		if (count < 2 * CCACIPHERTOKENSIZE)
 			return -EINVAL;
 
-	keysize = CCACIPHERTOKENSIZE;
-	rc = cca_gencipherkey(-1, -1, keybits, 0, buf, &keysize);
+	/* build a list of apqns able to generate a cipher key */
+	rc = cca_findcard2(&apqns, &nr_apqns, 0xFFFF, 0xFFFF,
+			   ZCRYPT_CEX6, 0, 0, 0, 0);
 	if (rc)
 		return rc;
-	memset(buf + keysize, 0, CCACIPHERTOKENSIZE - keysize);
+
+	memset(buf, 0, is_xts ? 2 * keysize : keysize);
+
+	/* simply try all apqns from the list */
+	for (i = 0, rc = -ENODEV; i < nr_apqns; i++) {
+		card = apqns[i] >> 16;
+		dom = apqns[i] & 0xFFFF;
+		rc = cca_gencipherkey(card, dom, keybits, 0, buf, &keysize);
+		if (rc == 0)
+			break;
+	}
+	if (rc)
+		return rc;
 
 	if (is_xts) {
 		keysize = CCACIPHERTOKENSIZE;
-		rc = cca_gencipherkey(-1, -1, keybits, 0,
-				      buf + CCACIPHERTOKENSIZE, &keysize);
-		if (rc)
-			return rc;
-		memset(buf + CCACIPHERTOKENSIZE + keysize, 0,
-		       CCACIPHERTOKENSIZE - keysize);
-
-		return 2 * CCACIPHERTOKENSIZE;
+		buf += CCACIPHERTOKENSIZE;
+		rc = cca_gencipherkey(card, dom, keybits, 0, buf, &keysize);
+		if (rc == 0)
+			return 2 * CCACIPHERTOKENSIZE;
 	}
 
 	return CCACIPHERTOKENSIZE;
@@ -1479,10 +1909,134 @@
 	.bin_attrs = ccacipher_attrs,
 };
 
+/*
+ * Sysfs attribute read function for all ep11 aes key binary attributes.
+ * The implementation cannot deal with partial reads, because a new random
+ * secure key blob is generated with each read. In case of partial reads
+ * (i.e. off != 0 or count < key blob size) -EINVAL is returned.
+ * This function and the sysfs attributes using it provide EP11 key blobs
+ * padded to the upper limit of MAXEP11AESKEYBLOBSIZE which is currently
+ * 320 bytes.
+ */
+static ssize_t pkey_ep11_aes_attr_read(enum pkey_key_size keybits,
+				       bool is_xts, char *buf, loff_t off,
+				       size_t count)
+{
+	int i, rc, card, dom;
+	u32 nr_apqns, *apqns = NULL;
+	size_t keysize = MAXEP11AESKEYBLOBSIZE;
+
+	if (off != 0 || count < MAXEP11AESKEYBLOBSIZE)
+		return -EINVAL;
+	if (is_xts)
+		if (count < 2 * MAXEP11AESKEYBLOBSIZE)
+			return -EINVAL;
+
+	/* build a list of apqns able to generate a cipher key */
+	rc = ep11_findcard2(&apqns, &nr_apqns, 0xFFFF, 0xFFFF,
+			    ZCRYPT_CEX7, EP11_API_V, NULL);
+	if (rc)
+		return rc;
+
+	memset(buf, 0, is_xts ? 2 * keysize : keysize);
+
+	/* simply try all apqns from the list */
+	for (i = 0, rc = -ENODEV; i < nr_apqns; i++) {
+		card = apqns[i] >> 16;
+		dom = apqns[i] & 0xFFFF;
+		rc = ep11_genaeskey(card, dom, keybits, 0, buf, &keysize);
+		if (rc == 0)
+			break;
+	}
+	if (rc)
+		return rc;
+
+	if (is_xts) {
+		keysize = MAXEP11AESKEYBLOBSIZE;
+		buf += MAXEP11AESKEYBLOBSIZE;
+		rc = ep11_genaeskey(card, dom, keybits, 0, buf, &keysize);
+		if (rc == 0)
+			return 2 * MAXEP11AESKEYBLOBSIZE;
+	}
+
+	return MAXEP11AESKEYBLOBSIZE;
+}
+
+static ssize_t ep11_aes_128_read(struct file *filp,
+				 struct kobject *kobj,
+				 struct bin_attribute *attr,
+				 char *buf, loff_t off,
+				 size_t count)
+{
+	return pkey_ep11_aes_attr_read(PKEY_SIZE_AES_128, false, buf,
+				       off, count);
+}
+
+static ssize_t ep11_aes_192_read(struct file *filp,
+				 struct kobject *kobj,
+				 struct bin_attribute *attr,
+				 char *buf, loff_t off,
+				 size_t count)
+{
+	return pkey_ep11_aes_attr_read(PKEY_SIZE_AES_192, false, buf,
+				       off, count);
+}
+
+static ssize_t ep11_aes_256_read(struct file *filp,
+				 struct kobject *kobj,
+				 struct bin_attribute *attr,
+				 char *buf, loff_t off,
+				 size_t count)
+{
+	return pkey_ep11_aes_attr_read(PKEY_SIZE_AES_256, false, buf,
+				       off, count);
+}
+
+static ssize_t ep11_aes_128_xts_read(struct file *filp,
+				     struct kobject *kobj,
+				     struct bin_attribute *attr,
+				     char *buf, loff_t off,
+				     size_t count)
+{
+	return pkey_ep11_aes_attr_read(PKEY_SIZE_AES_128, true, buf,
+				       off, count);
+}
+
+static ssize_t ep11_aes_256_xts_read(struct file *filp,
+				     struct kobject *kobj,
+				     struct bin_attribute *attr,
+				     char *buf, loff_t off,
+				     size_t count)
+{
+	return pkey_ep11_aes_attr_read(PKEY_SIZE_AES_256, true, buf,
+				       off, count);
+}
+
+static BIN_ATTR_RO(ep11_aes_128, MAXEP11AESKEYBLOBSIZE);
+static BIN_ATTR_RO(ep11_aes_192, MAXEP11AESKEYBLOBSIZE);
+static BIN_ATTR_RO(ep11_aes_256, MAXEP11AESKEYBLOBSIZE);
+static BIN_ATTR_RO(ep11_aes_128_xts, 2 * MAXEP11AESKEYBLOBSIZE);
+static BIN_ATTR_RO(ep11_aes_256_xts, 2 * MAXEP11AESKEYBLOBSIZE);
+
+static struct bin_attribute *ep11_attrs[] = {
+	&bin_attr_ep11_aes_128,
+	&bin_attr_ep11_aes_192,
+	&bin_attr_ep11_aes_256,
+	&bin_attr_ep11_aes_128_xts,
+	&bin_attr_ep11_aes_256_xts,
+	NULL
+};
+
+static struct attribute_group ep11_attr_group = {
+	.name	   = "ep11",
+	.bin_attrs = ep11_attrs,
+};
+
 static const struct attribute_group *pkey_attr_groups[] = {
 	&protkey_attr_group,
 	&ccadata_attr_group,
 	&ccacipher_attr_group,
+	&ep11_attr_group,
 	NULL,
 };
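
For context only (not part of the patch): the clear key token handled by the new TOKVER_CLEAR_KEY branch in pkey_nonccatok2pkey() above is a small fixed header followed by the clear key bytes. A hedged user-space sketch of building such a token follows; the PKEY_KEYTYPE_AES_256 value is assumed from the pkey uapi header, and the fixed-size clearkey[] stands in for the kernel's flexible array purely for self-containment.

/*
 * Hedged sketch: build a clear AES key token (type 0x00, version 0x02)
 * mirroring struct clearaeskeytoken from the hunk above.
 */
#include <stddef.h>
#include <stdint.h>
#include <string.h>

#define PKEY_KEYTYPE_AES_256	3	/* assumed from asm/pkey.h */

struct clearaeskeytoken {
	uint8_t  type;		/* 0x00: non-CCA key token */
	uint8_t  res0[3];
	uint8_t  version;	/* 0x02: clear AES key token */
	uint8_t  res1[3];
	uint32_t keytype;	/* one of the PKEY_KEYTYPE_* values */
	uint32_t len;		/* clear key length in bytes */
	uint8_t  clearkey[32];	/* large enough for AES-256 */
};

static size_t build_clear_aes256_token(struct clearaeskeytoken *t,
				       const uint8_t key[32])
{
	memset(t, 0, sizeof(*t));
	t->type = 0x00;
	t->version = 0x02;
	t->keytype = PKEY_KEYTYPE_AES_256;
	t->len = 32;	/* must match keytype, see the checks above */
	memcpy(t->clearkey, key, 32);
	return offsetof(struct clearaeskeytoken, clearkey) + t->len;
}

A token built this way could then be handed to the pkey ioctls (for instance PKEY_KBLOB2PROTK) as a non-CCA key blob; the kernel will try PCKMO first and fall back to the CCA and EP11 paths shown above.
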
 
diff --git a/drivers/s390/crypto/vfio_ap_ops.c b/drivers/s390/crypto/vfio_ap_ops.c
index 1ec0114..72eb8f9 100644
--- a/drivers/s390/crypto/vfio_ap_ops.c
+++ b/drivers/s390/crypto/vfio_ap_ops.c
@@ -86,7 +86,7 @@
 		case AP_RESPONSE_RESET_IN_PROGRESS:
 			if (!status.irq_enabled)
 				return;
-			/* Fall through */
+			fallthrough;
 		case AP_RESPONSE_BUSY:
 			msleep(20);
 			break;
diff --git a/drivers/s390/crypto/zcrypt_api.c b/drivers/s390/crypto/zcrypt_api.c
index ec41a8a..3b9eda3 100644
--- a/drivers/s390/crypto/zcrypt_api.c
+++ b/drivers/s390/crypto/zcrypt_api.c
@@ -25,6 +25,7 @@
 #include <linux/debugfs.h>
 #include <linux/cdev.h>
 #include <linux/ctype.h>
+#include <linux/capability.h>
 #include <asm/debug.h>
 
 #define CREATE_TRACE_POINTS
@@ -36,6 +37,7 @@
 #include "zcrypt_msgtype6.h"
 #include "zcrypt_msgtype50.h"
 #include "zcrypt_ccamisc.h"
+#include "zcrypt_ep11misc.h"
 
 /*
  * Module description.
@@ -601,13 +603,13 @@
 				       unsigned int pref_weight)
 {
 	if (!pref_zc)
-		return false;
+		return true;
 	weight += atomic_read(&zc->load);
 	pref_weight += atomic_read(&pref_zc->load);
 	if (weight == pref_weight)
-		return atomic64_read(&zc->card->total_request_count) >
+		return atomic64_read(&zc->card->total_request_count) <
 			atomic64_read(&pref_zc->card->total_request_count);
-	return weight > pref_weight;
+	return weight < pref_weight;
 }
 
 static inline bool zcrypt_queue_compare(struct zcrypt_queue *zq,
@@ -616,30 +618,39 @@
 					unsigned int pref_weight)
 {
 	if (!pref_zq)
-		return false;
+		return true;
 	weight += atomic_read(&zq->load);
 	pref_weight += atomic_read(&pref_zq->load);
 	if (weight == pref_weight)
-		return zq->queue->total_request_count >
+		return zq->queue->total_request_count <
 			pref_zq->queue->total_request_count;
-	return weight > pref_weight;
+	return weight < pref_weight;
 }
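The sense of these compare helpers is inverted here: they now return true when the candidate should become the new preference, i.e. when no preference exists yet, when the candidate's load-adjusted weight is lower, or when the weights tie and the candidate has served fewer requests; the callers below skip a device when the helper returns false. The retry penalties (TRACK_AGAIN_CARD_WEIGHT_PENALTY / TRACK_AGAIN_QUEUE_WEIGHT_PENALTY) are simply added to the candidate's weight, so a card or queue that already failed the request loses this comparison on the next attempt. A simplified stand-alone model of the decision (illustration only, not kernel code):

struct cand {
	unsigned int weight;		/* speed rating + current load (+ penalty) */
	unsigned long requests;		/* total requests served so far */
};

static int candidate_is_better(const struct cand *cand, const struct cand *pref)
{
	if (!pref)
		return 1;			/* nothing selected yet */
	if (cand->weight == pref->weight)
		return cand->requests < pref->requests;
	return cand->weight < pref->weight;	/* lower weight wins */
}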
 
 /*
  * zcrypt ioctls.
  */
 static long zcrypt_rsa_modexpo(struct ap_perms *perms,
+			       struct zcrypt_track *tr,
 			       struct ica_rsa_modexpo *mex)
 {
 	struct zcrypt_card *zc, *pref_zc;
 	struct zcrypt_queue *zq, *pref_zq;
-	unsigned int weight, pref_weight;
+	struct ap_message ap_msg;
+	unsigned int wgt = 0, pref_wgt = 0;
 	unsigned int func_code;
-	int qid = 0, rc = -ENODEV;
+	int cpen, qpen, qid = 0, rc = -ENODEV;
 	struct module *mod;
 
 	trace_s390_zcrypt_req(mex, TP_ICARSAMODEXPO);
 
+	ap_init_message(&ap_msg);
+
+#ifdef CONFIG_ZCRYPT_DEBUG
+	if (tr && tr->fi.cmd)
+		ap_msg.fi.cmd = tr->fi.cmd;
+#endif
+
 	if (mex->outputdatalength < mex->inputdatalength) {
 		func_code = 0;
 		rc = -EINVAL;
@@ -661,8 +672,9 @@
 	pref_zq = NULL;
 	spin_lock(&zcrypt_list_lock);
 	for_each_zcrypt_card(zc) {
-		/* Check for online accelarator and CCA cards */
-		if (!zc->online || !(zc->card->functions & 0x18000000))
+		/* Check for useable accelerator or CCA card */
+		if (!zc->online || !zc->card->config ||
+		    !(zc->card->functions & 0x18000000))
 			continue;
 		/* Check for size limits */
 		if (zc->min_mod_size > mex->inputdatalength ||
@@ -672,26 +684,35 @@
 		if (!zcrypt_check_card(perms, zc->card->id))
 			continue;
 		/* get weight index of the card device	*/
-		weight = zc->speed_rating[func_code];
-		if (zcrypt_card_compare(zc, pref_zc, weight, pref_weight))
+		wgt = zc->speed_rating[func_code];
+		/* penalty if this msg was previously sent via this card */
+		cpen = (tr && tr->again_counter && tr->last_qid &&
+			AP_QID_CARD(tr->last_qid) == zc->card->id) ?
+			TRACK_AGAIN_CARD_WEIGHT_PENALTY : 0;
+		if (!zcrypt_card_compare(zc, pref_zc, wgt + cpen, pref_wgt))
 			continue;
 		for_each_zcrypt_queue(zq, zc) {
-			/* check if device is online and eligible */
-			if (!zq->online || !zq->ops->rsa_modexpo)
+			/* check if device is useable and eligible */
+			if (!zq->online || !zq->ops->rsa_modexpo ||
+			    !zq->queue->config)
 				continue;
 			/* check if device node has admission for this queue */
 			if (!zcrypt_check_queue(perms,
 						AP_QID_QUEUE(zq->queue->qid)))
 				continue;
-			if (zcrypt_queue_compare(zq, pref_zq,
-						 weight, pref_weight))
+			/* penalty if the msg was previously sent at this qid */
+			qpen = (tr && tr->again_counter && tr->last_qid &&
+				tr->last_qid == zq->queue->qid) ?
+				TRACK_AGAIN_QUEUE_WEIGHT_PENALTY : 0;
+			if (!zcrypt_queue_compare(zq, pref_zq,
+						  wgt + cpen + qpen, pref_wgt))
 				continue;
 			pref_zc = zc;
 			pref_zq = zq;
-			pref_weight = weight;
+			pref_wgt = wgt + cpen + qpen;
 		}
 	}
-	pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, &mod, weight);
+	pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, &mod, wgt);
 	spin_unlock(&zcrypt_list_lock);
 
 	if (!pref_zq) {
@@ -700,30 +721,44 @@
 	}
 
 	qid = pref_zq->queue->qid;
-	rc = pref_zq->ops->rsa_modexpo(pref_zq, mex);
+	rc = pref_zq->ops->rsa_modexpo(pref_zq, mex, &ap_msg);
 
 	spin_lock(&zcrypt_list_lock);
-	zcrypt_drop_queue(pref_zc, pref_zq, mod, weight);
+	zcrypt_drop_queue(pref_zc, pref_zq, mod, wgt);
 	spin_unlock(&zcrypt_list_lock);
 
 out:
+	ap_release_message(&ap_msg);
+	if (tr) {
+		tr->last_rc = rc;
+		tr->last_qid = qid;
+	}
 	trace_s390_zcrypt_rep(mex, func_code, rc,
 			      AP_QID_CARD(qid), AP_QID_QUEUE(qid));
 	return rc;
 }
 
 static long zcrypt_rsa_crt(struct ap_perms *perms,
+			   struct zcrypt_track *tr,
 			   struct ica_rsa_modexpo_crt *crt)
 {
 	struct zcrypt_card *zc, *pref_zc;
 	struct zcrypt_queue *zq, *pref_zq;
-	unsigned int weight, pref_weight;
+	struct ap_message ap_msg;
+	unsigned int wgt = 0, pref_wgt = 0;
 	unsigned int func_code;
-	int qid = 0, rc = -ENODEV;
+	int cpen, qpen, qid = 0, rc = -ENODEV;
 	struct module *mod;
 
 	trace_s390_zcrypt_req(crt, TP_ICARSACRT);
 
+	ap_init_message(&ap_msg);
+
+#ifdef CONFIG_ZCRYPT_DEBUG
+	if (tr && tr->fi.cmd)
+		ap_msg.fi.cmd = tr->fi.cmd;
+#endif
+
 	if (crt->outputdatalength < crt->inputdatalength) {
 		func_code = 0;
 		rc = -EINVAL;
@@ -745,8 +780,9 @@
 	pref_zq = NULL;
 	spin_lock(&zcrypt_list_lock);
 	for_each_zcrypt_card(zc) {
-		/* Check for online accelarator and CCA cards */
-		if (!zc->online || !(zc->card->functions & 0x18000000))
+		/* Check for useable accelerator or CCA card */
+		if (!zc->online || !zc->card->config ||
+		    !(zc->card->functions & 0x18000000))
 			continue;
 		/* Check for size limits */
 		if (zc->min_mod_size > crt->inputdatalength ||
@@ -756,26 +792,35 @@
 		if (!zcrypt_check_card(perms, zc->card->id))
 			continue;
 		/* get weight index of the card device	*/
-		weight = zc->speed_rating[func_code];
-		if (zcrypt_card_compare(zc, pref_zc, weight, pref_weight))
+		wgt = zc->speed_rating[func_code];
+		/* penalty if this msg was previously sent via this card */
+		cpen = (tr && tr->again_counter && tr->last_qid &&
+			AP_QID_CARD(tr->last_qid) == zc->card->id) ?
+			TRACK_AGAIN_CARD_WEIGHT_PENALTY : 0;
+		if (!zcrypt_card_compare(zc, pref_zc, wgt + cpen, pref_wgt))
 			continue;
 		for_each_zcrypt_queue(zq, zc) {
-			/* check if device is online and eligible */
-			if (!zq->online || !zq->ops->rsa_modexpo_crt)
+			/* check if device is useable and eligible */
+			if (!zq->online || !zq->ops->rsa_modexpo_crt ||
+			    !zq->queue->config)
 				continue;
 			/* check if device node has admission for this queue */
 			if (!zcrypt_check_queue(perms,
 						AP_QID_QUEUE(zq->queue->qid)))
 				continue;
-			if (zcrypt_queue_compare(zq, pref_zq,
-						 weight, pref_weight))
+			/* penalty if the msg was previously sent at this qid */
+			qpen = (tr && tr->again_counter && tr->last_qid &&
+				tr->last_qid == zq->queue->qid) ?
+				TRACK_AGAIN_QUEUE_WEIGHT_PENALTY : 0;
+			if (!zcrypt_queue_compare(zq, pref_zq,
+						  wgt + cpen + qpen, pref_wgt))
 				continue;
 			pref_zc = zc;
 			pref_zq = zq;
-			pref_weight = weight;
+			pref_wgt = wgt + cpen + qpen;
 		}
 	}
-	pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, &mod, weight);
+	pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, &mod, wgt);
 	spin_unlock(&zcrypt_list_lock);
 
 	if (!pref_zq) {
@@ -784,35 +829,52 @@
 	}
 
 	qid = pref_zq->queue->qid;
-	rc = pref_zq->ops->rsa_modexpo_crt(pref_zq, crt);
+	rc = pref_zq->ops->rsa_modexpo_crt(pref_zq, crt, &ap_msg);
 
 	spin_lock(&zcrypt_list_lock);
-	zcrypt_drop_queue(pref_zc, pref_zq, mod, weight);
+	zcrypt_drop_queue(pref_zc, pref_zq, mod, wgt);
 	spin_unlock(&zcrypt_list_lock);
 
 out:
+	ap_release_message(&ap_msg);
+	if (tr) {
+		tr->last_rc = rc;
+		tr->last_qid = qid;
+	}
 	trace_s390_zcrypt_rep(crt, func_code, rc,
 			      AP_QID_CARD(qid), AP_QID_QUEUE(qid));
 	return rc;
 }
 
-static long _zcrypt_send_cprb(struct ap_perms *perms,
+static long _zcrypt_send_cprb(bool userspace, struct ap_perms *perms,
+			      struct zcrypt_track *tr,
 			      struct ica_xcRB *xcRB)
 {
 	struct zcrypt_card *zc, *pref_zc;
 	struct zcrypt_queue *zq, *pref_zq;
 	struct ap_message ap_msg;
-	unsigned int weight, pref_weight;
+	unsigned int wgt = 0, pref_wgt = 0;
 	unsigned int func_code;
 	unsigned short *domain, tdom;
-	int qid = 0, rc = -ENODEV;
+	int cpen, qpen, qid = 0, rc = -ENODEV;
 	struct module *mod;
 
 	trace_s390_zcrypt_req(xcRB, TB_ZSECSENDCPRB);
 
 	xcRB->status = 0;
 	ap_init_message(&ap_msg);
-	rc = get_cprb_fc(xcRB, &ap_msg, &func_code, &domain);
+
+#ifdef CONFIG_ZCRYPT_DEBUG
+	if (tr && tr->fi.cmd)
+		ap_msg.fi.cmd = tr->fi.cmd;
+	if (tr && tr->fi.action == AP_FI_ACTION_CCA_AGENT_FF) {
+		ZCRYPT_DBF_WARN("%s fi cmd 0x%04x: forcing invalid agent_ID 'FF'\n",
+				__func__, tr->fi.cmd);
+		xcRB->agent_ID = 0x4646;
+	}
+#endif
+
+	rc = get_cprb_fc(userspace, xcRB, &ap_msg, &func_code, &domain);
 	if (rc)
 		goto out;
 
@@ -821,7 +883,7 @@
 	 * domain but a control only domain, use the default domain as target.
 	 */
 	tdom = *domain;
-	if (tdom >= 0 && tdom < AP_DOMAINS &&
+	if (tdom < AP_DOMAINS &&
 	    !ap_test_config_usage_domain(tdom) &&
 	    ap_test_config_ctrl_domain(tdom) &&
 	    ap_domain_index >= 0)
@@ -831,8 +893,9 @@
 	pref_zq = NULL;
 	spin_lock(&zcrypt_list_lock);
 	for_each_zcrypt_card(zc) {
-		/* Check for online CCA cards */
-		if (!zc->online || !(zc->card->functions & 0x10000000))
+		/* Check for useable CCA card */
+		if (!zc->online || !zc->card->config ||
+		    !(zc->card->functions & 0x10000000))
 			continue;
 		/* Check for user selected CCA card */
 		if (xcRB->user_defined != AUTOSELECT &&
@@ -842,29 +905,38 @@
 		if (!zcrypt_check_card(perms, zc->card->id))
 			continue;
 		/* get weight index of the card device	*/
-		weight = speed_idx_cca(func_code) * zc->speed_rating[SECKEY];
-		if (zcrypt_card_compare(zc, pref_zc, weight, pref_weight))
+		wgt = speed_idx_cca(func_code) * zc->speed_rating[SECKEY];
+		/* penalty if this msg was previously sent via this card */
+		cpen = (tr && tr->again_counter && tr->last_qid &&
+			AP_QID_CARD(tr->last_qid) == zc->card->id) ?
+			TRACK_AGAIN_CARD_WEIGHT_PENALTY : 0;
+		if (!zcrypt_card_compare(zc, pref_zc, wgt + cpen, pref_wgt))
 			continue;
 		for_each_zcrypt_queue(zq, zc) {
-			/* check if device is online and eligible */
+			/* check if device is useable and eligible */
 			if (!zq->online ||
 			    !zq->ops->send_cprb ||
-			    (tdom != (unsigned short) AUTOSELECT &&
+			    !zq->queue->config ||
+			    (tdom != AUTOSEL_DOM &&
 			     tdom != AP_QID_QUEUE(zq->queue->qid)))
 				continue;
 			/* check if device node has admission for this queue */
 			if (!zcrypt_check_queue(perms,
 						AP_QID_QUEUE(zq->queue->qid)))
 				continue;
-			if (zcrypt_queue_compare(zq, pref_zq,
-						 weight, pref_weight))
+			/* penalty if the msg was previously sent at this qid */
+			qpen = (tr && tr->again_counter && tr->last_qid &&
+				tr->last_qid == zq->queue->qid) ?
+				TRACK_AGAIN_QUEUE_WEIGHT_PENALTY : 0;
+			if (!zcrypt_queue_compare(zq, pref_zq,
+						  wgt + cpen + qpen, pref_wgt))
 				continue;
 			pref_zc = zc;
 			pref_zq = zq;
-			pref_weight = weight;
+			pref_wgt = wgt + cpen + qpen;
 		}
 	}
-	pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, &mod, weight);
+	pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, &mod, wgt);
 	spin_unlock(&zcrypt_list_lock);
 
 	if (!pref_zq) {
@@ -874,17 +946,29 @@
 
 	/* in case of auto select, provide the correct domain */
 	qid = pref_zq->queue->qid;
-	if (*domain == (unsigned short) AUTOSELECT)
+	if (*domain == AUTOSEL_DOM)
 		*domain = AP_QID_QUEUE(qid);
 
-	rc = pref_zq->ops->send_cprb(pref_zq, xcRB, &ap_msg);
+#ifdef CONFIG_ZCRYPT_DEBUG
+	if (tr && tr->fi.action == AP_FI_ACTION_CCA_DOM_INVAL) {
+		ZCRYPT_DBF_WARN("%s fi cmd 0x%04x: forcing invalid domain\n",
+				__func__, tr->fi.cmd);
+		*domain = 99;
+	}
+#endif
+
+	rc = pref_zq->ops->send_cprb(userspace, pref_zq, xcRB, &ap_msg);
 
 	spin_lock(&zcrypt_list_lock);
-	zcrypt_drop_queue(pref_zc, pref_zq, mod, weight);
+	zcrypt_drop_queue(pref_zc, pref_zq, mod, wgt);
 	spin_unlock(&zcrypt_list_lock);
 
 out:
 	ap_release_message(&ap_msg);
+	if (tr) {
+		tr->last_rc = rc;
+		tr->last_qid = qid;
+	}
 	trace_s390_zcrypt_rep(xcRB, func_code, rc,
 			      AP_QID_CARD(qid), AP_QID_QUEUE(qid));
 	return rc;
@@ -892,7 +976,7 @@
 
 long zcrypt_send_cprb(struct ica_xcRB *xcRB)
 {
-	return _zcrypt_send_cprb(&ap_perms, xcRB);
+	return _zcrypt_send_cprb(false, &ap_perms, NULL, xcRB);
 }
 EXPORT_SYMBOL(zcrypt_send_cprb);
 
@@ -901,7 +985,7 @@
 				 struct ep11_target_dev *targets)
 {
 	while (target_num-- > 0) {
-		if (dev_id == targets->ap_id)
+		if (targets->ap_id == dev_id || targets->ap_id == AUTOSEL_AP)
 			return true;
 		targets++;
 	}
@@ -912,31 +996,40 @@
 				  unsigned short target_num,
 				  struct ep11_target_dev *targets)
 {
+	int card = AP_QID_CARD(dev_qid), dom = AP_QID_QUEUE(dev_qid);
+
 	while (target_num-- > 0) {
-		if (AP_MKQID(targets->ap_id, targets->dom_id) == dev_qid)
+		if ((targets->ap_id == card || targets->ap_id == AUTOSEL_AP) &&
+		    (targets->dom_id == dom || targets->dom_id == AUTOSEL_DOM))
 			return true;
 		targets++;
 	}
 	return false;
 }
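With this matching, an entry in the caller-supplied target list selects a queue when each field is either an exact match or the wildcard, so a request can now be pinned to a domain while letting any adapter serve it (and vice versa). A hypothetical caller-side sketch, assuming the AUTOSEL_AP / AUTOSEL_DOM 0xFFFF wildcards are visible to the caller through the zcrypt uapi header:

#include <asm/zcrypt.h>

/* "any adapter, domain 6": matches every eligible EP11 queue in domain 6 */
static const struct ep11_target_dev tgt = {
	.ap_id	= AUTOSEL_AP,	/* wildcard adapter */
	.dom_id	= 6,		/* fixed usage domain */
};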
 
-static long zcrypt_send_ep11_cprb(struct ap_perms *perms,
-				  struct ep11_urb *xcrb)
+static long _zcrypt_send_ep11_cprb(bool userspace, struct ap_perms *perms,
+				   struct zcrypt_track *tr,
+				   struct ep11_urb *xcrb)
 {
 	struct zcrypt_card *zc, *pref_zc;
 	struct zcrypt_queue *zq, *pref_zq;
 	struct ep11_target_dev *targets;
 	unsigned short target_num;
-	unsigned int weight, pref_weight;
+	unsigned int wgt = 0, pref_wgt = 0;
 	unsigned int func_code;
 	struct ap_message ap_msg;
-	int qid = 0, rc = -ENODEV;
+	int cpen, qpen, qid = 0, rc = -ENODEV;
 	struct module *mod;
 
 	trace_s390_zcrypt_req(xcrb, TP_ZSENDEP11CPRB);
 
 	ap_init_message(&ap_msg);
 
+#ifdef CONFIG_ZCRYPT_DEBUG
+	if (tr && tr->fi.cmd)
+		ap_msg.fi.cmd = tr->fi.cmd;
+#endif
+
 	target_num = (unsigned short) xcrb->targets_num;
 
 	/* empty list indicates autoselect (all available targets) */
@@ -952,7 +1045,7 @@
 		}
 
 		uptr = (struct ep11_target_dev __force __user *) xcrb->targets;
-		if (copy_from_user(targets, uptr,
+		if (z_copy_from_user(userspace, targets, uptr,
 				   target_num * sizeof(*targets))) {
 			func_code = 0;
 			rc = -EFAULT;
@@ -960,7 +1053,7 @@
 		}
 	}
 
-	rc = get_ep11cprb_fc(xcrb, &ap_msg, &func_code);
+	rc = get_ep11cprb_fc(userspace, xcrb, &ap_msg, &func_code);
 	if (rc)
 		goto out_free;
 
@@ -968,8 +1061,9 @@
 	pref_zq = NULL;
 	spin_lock(&zcrypt_list_lock);
 	for_each_zcrypt_card(zc) {
-		/* Check for online EP11 cards */
-		if (!zc->online || !(zc->card->functions & 0x04000000))
+		/* Check for useable EP11 card */
+		if (!zc->online || !zc->card->config ||
+		    !(zc->card->functions & 0x04000000))
 			continue;
 		/* Check for user selected EP11 card */
 		if (targets &&
@@ -979,13 +1073,18 @@
 		if (!zcrypt_check_card(perms, zc->card->id))
 			continue;
 		/* get weight index of the card device	*/
-		weight = speed_idx_ep11(func_code) * zc->speed_rating[SECKEY];
-		if (zcrypt_card_compare(zc, pref_zc, weight, pref_weight))
+		wgt = speed_idx_ep11(func_code) * zc->speed_rating[SECKEY];
+		/* penalty if this msg was previously sent via this card */
+		cpen = (tr && tr->again_counter && tr->last_qid &&
+			AP_QID_CARD(tr->last_qid) == zc->card->id) ?
+			TRACK_AGAIN_CARD_WEIGHT_PENALTY : 0;
+		if (!zcrypt_card_compare(zc, pref_zc, wgt + cpen, pref_wgt))
 			continue;
 		for_each_zcrypt_queue(zq, zc) {
-			/* check if device is online and eligible */
+			/* check if device is useable and eligible */
 			if (!zq->online ||
 			    !zq->ops->send_ep11_cprb ||
+			    !zq->queue->config ||
 			    (targets &&
 			     !is_desired_ep11_queue(zq->queue->qid,
 						    target_num, targets)))
@@ -994,15 +1093,19 @@
 			if (!zcrypt_check_queue(perms,
 						AP_QID_QUEUE(zq->queue->qid)))
 				continue;
-			if (zcrypt_queue_compare(zq, pref_zq,
-						 weight, pref_weight))
+			/* penalty if the msg was previously sent at this qid */
+			qpen = (tr && tr->again_counter && tr->last_qid &&
+				tr->last_qid == zq->queue->qid) ?
+				TRACK_AGAIN_QUEUE_WEIGHT_PENALTY : 0;
+			if (!zcrypt_queue_compare(zq, pref_zq,
+						  wgt + cpen + qpen, pref_wgt))
 				continue;
 			pref_zc = zc;
 			pref_zq = zq;
-			pref_weight = weight;
+			pref_wgt = wgt + cpen + qpen;
 		}
 	}
-	pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, &mod, weight);
+	pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, &mod, wgt);
 	spin_unlock(&zcrypt_list_lock);
 
 	if (!pref_zq) {
@@ -1011,26 +1114,36 @@
 	}
 
 	qid = pref_zq->queue->qid;
-	rc = pref_zq->ops->send_ep11_cprb(pref_zq, xcrb, &ap_msg);
+	rc = pref_zq->ops->send_ep11_cprb(userspace, pref_zq, xcrb, &ap_msg);
 
 	spin_lock(&zcrypt_list_lock);
-	zcrypt_drop_queue(pref_zc, pref_zq, mod, weight);
+	zcrypt_drop_queue(pref_zc, pref_zq, mod, wgt);
 	spin_unlock(&zcrypt_list_lock);
 
 out_free:
 	kfree(targets);
 out:
 	ap_release_message(&ap_msg);
+	if (tr) {
+		tr->last_rc = rc;
+		tr->last_qid = qid;
+	}
 	trace_s390_zcrypt_rep(xcrb, func_code, rc,
 			      AP_QID_CARD(qid), AP_QID_QUEUE(qid));
 	return rc;
 }
 
+long zcrypt_send_ep11_cprb(struct ep11_urb *xcrb)
+{
+	return _zcrypt_send_ep11_cprb(false, &ap_perms, NULL, xcrb);
+}
+EXPORT_SYMBOL(zcrypt_send_ep11_cprb);
+
 static long zcrypt_rng(char *buffer)
 {
 	struct zcrypt_card *zc, *pref_zc;
 	struct zcrypt_queue *zq, *pref_zq;
-	unsigned int weight, pref_weight;
+	unsigned int wgt = 0, pref_wgt = 0;
 	unsigned int func_code;
 	struct ap_message ap_msg;
 	unsigned int domain;
@@ -1048,26 +1161,27 @@
 	pref_zq = NULL;
 	spin_lock(&zcrypt_list_lock);
 	for_each_zcrypt_card(zc) {
-		/* Check for online CCA cards */
-		if (!zc->online || !(zc->card->functions & 0x10000000))
+		/* Check for useable CCA card */
+		if (!zc->online || !zc->card->config ||
+		    !(zc->card->functions & 0x10000000))
 			continue;
 		/* get weight index of the card device	*/
-		weight = zc->speed_rating[func_code];
-		if (zcrypt_card_compare(zc, pref_zc, weight, pref_weight))
+		wgt = zc->speed_rating[func_code];
+		if (!zcrypt_card_compare(zc, pref_zc, wgt, pref_wgt))
 			continue;
 		for_each_zcrypt_queue(zq, zc) {
-			/* check if device is online and eligible */
-			if (!zq->online || !zq->ops->rng)
+			/* check if device is useable and eligible */
+			if (!zq->online || !zq->ops->rng ||
+			    !zq->queue->config)
 				continue;
-			if (zcrypt_queue_compare(zq, pref_zq,
-						 weight, pref_weight))
+			if (!zcrypt_queue_compare(zq, pref_zq, wgt, pref_wgt))
 				continue;
 			pref_zc = zc;
 			pref_zq = zq;
-			pref_weight = weight;
+			pref_wgt = wgt;
 		}
 	}
-	pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, &mod, weight);
+	pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, &mod, wgt);
 	spin_unlock(&zcrypt_list_lock);
 
 	if (!pref_zq) {
@@ -1079,7 +1193,7 @@
 	rc = pref_zq->ops->rng(pref_zq, buffer, &ap_msg);
 
 	spin_lock(&zcrypt_list_lock);
-	zcrypt_drop_queue(pref_zc, pref_zq, mod, weight);
+	zcrypt_drop_queue(pref_zc, pref_zq, mod, wgt);
 	spin_unlock(&zcrypt_list_lock);
 
 out:
@@ -1288,6 +1402,187 @@
 	return requestq_count;
 }
 
+static int icarsamodexpo_ioctl(struct ap_perms *perms, unsigned long arg)
+{
+	int rc;
+	struct zcrypt_track tr;
+	struct ica_rsa_modexpo mex;
+	struct ica_rsa_modexpo __user *umex = (void __user *) arg;
+
+	memset(&tr, 0, sizeof(tr));
+	if (copy_from_user(&mex, umex, sizeof(mex)))
+		return -EFAULT;
+
+#ifdef CONFIG_ZCRYPT_DEBUG
+	if (mex.inputdatalength & (1U << 31)) {
+		if (!capable(CAP_SYS_ADMIN))
+			return -EPERM;
+		tr.fi.cmd = (u16)(mex.inputdatalength >> 16);
+	}
+	mex.inputdatalength &= 0x0000FFFF;
+#endif
+
+	do {
+		rc = zcrypt_rsa_modexpo(perms, &tr, &mex);
+		if (rc == -EAGAIN)
+			tr.again_counter++;
+#ifdef CONFIG_ZCRYPT_DEBUG
+		if (rc == -EAGAIN && (tr.fi.flags & AP_FI_FLAG_NO_RETRY))
+			break;
+#endif
+	} while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX);
+	/* on failure: retry once again after a requested rescan */
+	if ((rc == -ENODEV) && (zcrypt_process_rescan()))
+		do {
+			rc = zcrypt_rsa_modexpo(perms, &tr, &mex);
+			if (rc == -EAGAIN)
+				tr.again_counter++;
+		} while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX);
+	if (rc == -EAGAIN && tr.again_counter >= TRACK_AGAIN_MAX)
+		rc = -EIO;
+	if (rc) {
+		ZCRYPT_DBF(DBF_DEBUG, "ioctl ICARSAMODEXPO rc=%d\n", rc);
+		return rc;
+	}
+	return put_user(mex.outputdatalength, &umex->outputdatalength);
+}
+
+static int icarsacrt_ioctl(struct ap_perms *perms, unsigned long arg)
+{
+	int rc;
+	struct zcrypt_track tr;
+	struct ica_rsa_modexpo_crt crt;
+	struct ica_rsa_modexpo_crt __user *ucrt = (void __user *) arg;
+
+	memset(&tr, 0, sizeof(tr));
+	if (copy_from_user(&crt, ucrt, sizeof(crt)))
+		return -EFAULT;
+
+#ifdef CONFIG_ZCRYPT_DEBUG
+	if (crt.inputdatalength & (1U << 31)) {
+		if (!capable(CAP_SYS_ADMIN))
+			return -EPERM;
+		tr.fi.cmd = (u16)(crt.inputdatalength >> 16);
+	}
+	crt.inputdatalength &= 0x0000FFFF;
+#endif
+
+	do {
+		rc = zcrypt_rsa_crt(perms, &tr, &crt);
+		if (rc == -EAGAIN)
+			tr.again_counter++;
+#ifdef CONFIG_ZCRYPT_DEBUG
+		if (rc == -EAGAIN && (tr.fi.flags & AP_FI_FLAG_NO_RETRY))
+			break;
+#endif
+	} while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX);
+	/* on failure: retry once again after a requested rescan */
+	if ((rc == -ENODEV) && (zcrypt_process_rescan()))
+		do {
+			rc = zcrypt_rsa_crt(perms, &tr, &crt);
+			if (rc == -EAGAIN)
+				tr.again_counter++;
+		} while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX);
+	if (rc == -EAGAIN && tr.again_counter >= TRACK_AGAIN_MAX)
+		rc = -EIO;
+	if (rc) {
+		ZCRYPT_DBF(DBF_DEBUG, "ioctl ICARSACRT rc=%d\n", rc);
+		return rc;
+	}
+	return put_user(crt.outputdatalength, &ucrt->outputdatalength);
+}
+
+static int zsecsendcprb_ioctl(struct ap_perms *perms, unsigned long arg)
+{
+	int rc;
+	struct ica_xcRB xcRB;
+	struct zcrypt_track tr;
+	struct ica_xcRB __user *uxcRB = (void __user *) arg;
+
+	memset(&tr, 0, sizeof(tr));
+	if (copy_from_user(&xcRB, uxcRB, sizeof(xcRB)))
+		return -EFAULT;
+
+#ifdef CONFIG_ZCRYPT_DEBUG
+	if (xcRB.status & (1U << 31)) {
+		if (!capable(CAP_SYS_ADMIN))
+			return -EPERM;
+		tr.fi.cmd = (u16)(xcRB.status >> 16);
+	}
+	xcRB.status &= 0x0000FFFF;
+#endif
+
+	do {
+		rc = _zcrypt_send_cprb(true, perms, &tr, &xcRB);
+		if (rc == -EAGAIN)
+			tr.again_counter++;
+#ifdef CONFIG_ZCRYPT_DEBUG
+		if (rc == -EAGAIN && (tr.fi.flags & AP_FI_FLAG_NO_RETRY))
+			break;
+#endif
+	} while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX);
+	/* on failure: retry once again after a requested rescan */
+	if ((rc == -ENODEV) && (zcrypt_process_rescan()))
+		do {
+			rc = _zcrypt_send_cprb(true, perms, &tr, &xcRB);
+			if (rc == -EAGAIN)
+				tr.again_counter++;
+		} while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX);
+	if (rc == -EAGAIN && tr.again_counter >= TRACK_AGAIN_MAX)
+		rc = -EIO;
+	if (rc)
+		ZCRYPT_DBF(DBF_DEBUG, "ioctl ZSENDCPRB rc=%d status=0x%x\n",
+			   rc, xcRB.status);
+	if (copy_to_user(uxcRB, &xcRB, sizeof(xcRB)))
+		return -EFAULT;
+	return rc;
+}
+
+static int zsendep11cprb_ioctl(struct ap_perms *perms, unsigned long arg)
+{
+	int rc;
+	struct ep11_urb xcrb;
+	struct zcrypt_track tr;
+	struct ep11_urb __user *uxcrb = (void __user *)arg;
+
+	memset(&tr, 0, sizeof(tr));
+	if (copy_from_user(&xcrb, uxcrb, sizeof(xcrb)))
+		return -EFAULT;
+
+#ifdef CONFIG_ZCRYPT_DEBUG
+	if (xcrb.req_len & (1ULL << 63)) {
+		if (!capable(CAP_SYS_ADMIN))
+			return -EPERM;
+		tr.fi.cmd = (u16)(xcrb.req_len >> 48);
+	}
+	xcrb.req_len &= 0x0000FFFFFFFFFFFFULL;
+#endif
+
+	do {
+		rc = _zcrypt_send_ep11_cprb(true, perms, &tr, &xcrb);
+		if (rc == -EAGAIN)
+			tr.again_counter++;
+#ifdef CONFIG_ZCRYPT_DEBUG
+		if (rc == -EAGAIN && (tr.fi.flags & AP_FI_FLAG_NO_RETRY))
+			break;
+#endif
+	} while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX);
+	/* on failure: retry once again after a requested rescan */
+	if ((rc == -ENODEV) && (zcrypt_process_rescan()))
+		do {
+			rc = _zcrypt_send_ep11_cprb(true, perms, &tr, &xcrb);
+			if (rc == -EAGAIN)
+				tr.again_counter++;
+		} while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX);
+	if (rc == -EAGAIN && tr.again_counter >= TRACK_AGAIN_MAX)
+		rc = -EIO;
+	if (rc)
+		ZCRYPT_DBF(DBF_DEBUG, "ioctl ZSENDEP11CPRB rc=%d\n", rc);
+	if (copy_to_user(uxcrb, &xcrb, sizeof(xcrb)))
+		return -EFAULT;
+	return rc;
+}
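All four ioctl helpers above share the same retry scheme: up to TRACK_AGAIN_MAX attempts on -EAGAIN, tracked in struct zcrypt_track so the card/queue penalties above steer the retry to a different device, one extra round after a successful bus rescan on -ENODEV, and a final -EIO once the retry budget is spent; with CONFIG_ZCRYPT_DEBUG a fault-injection command rides in the high bits of the request's length or status field. A condensed sketch of that skeleton, where do_request() and rescan() are stand-ins for the per-ioctl request function and zcrypt_process_rescan():

static long retry_skeleton(void)
{
	struct zcrypt_track tr;
	long rc;

	memset(&tr, 0, sizeof(tr));
	do {
		rc = do_request(&tr);		/* may return -EAGAIN */
		if (rc == -EAGAIN)
			tr.again_counter++;
	} while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX);

	/* on -ENODEV: retry once more after a requested bus rescan */
	if (rc == -ENODEV && rescan())
		do {
			rc = do_request(&tr);
			if (rc == -EAGAIN)
				tr.again_counter++;
		} while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX);

	if (rc == -EAGAIN && tr.again_counter >= TRACK_AGAIN_MAX)
		rc = -EIO;			/* retry budget exhausted */
	return rc;
}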
+
 static long zcrypt_unlocked_ioctl(struct file *filp, unsigned int cmd,
 				  unsigned long arg)
 {
@@ -1300,87 +1595,14 @@
 		return rc;
 
 	switch (cmd) {
-	case ICARSAMODEXPO: {
-		struct ica_rsa_modexpo __user *umex = (void __user *) arg;
-		struct ica_rsa_modexpo mex;
-
-		if (copy_from_user(&mex, umex, sizeof(mex)))
-			return -EFAULT;
-		do {
-			rc = zcrypt_rsa_modexpo(perms, &mex);
-		} while (rc == -EAGAIN);
-		/* on failure: retry once again after a requested rescan */
-		if ((rc == -ENODEV) && (zcrypt_process_rescan()))
-			do {
-				rc = zcrypt_rsa_modexpo(perms, &mex);
-			} while (rc == -EAGAIN);
-		if (rc) {
-			ZCRYPT_DBF(DBF_DEBUG, "ioctl ICARSAMODEXPO rc=%d\n", rc);
-			return rc;
-		}
-		return put_user(mex.outputdatalength, &umex->outputdatalength);
-	}
-	case ICARSACRT: {
-		struct ica_rsa_modexpo_crt __user *ucrt = (void __user *) arg;
-		struct ica_rsa_modexpo_crt crt;
-
-		if (copy_from_user(&crt, ucrt, sizeof(crt)))
-			return -EFAULT;
-		do {
-			rc = zcrypt_rsa_crt(perms, &crt);
-		} while (rc == -EAGAIN);
-		/* on failure: retry once again after a requested rescan */
-		if ((rc == -ENODEV) && (zcrypt_process_rescan()))
-			do {
-				rc = zcrypt_rsa_crt(perms, &crt);
-			} while (rc == -EAGAIN);
-		if (rc) {
-			ZCRYPT_DBF(DBF_DEBUG, "ioctl ICARSACRT rc=%d\n", rc);
-			return rc;
-		}
-		return put_user(crt.outputdatalength, &ucrt->outputdatalength);
-	}
-	case ZSECSENDCPRB: {
-		struct ica_xcRB __user *uxcRB = (void __user *) arg;
-		struct ica_xcRB xcRB;
-
-		if (copy_from_user(&xcRB, uxcRB, sizeof(xcRB)))
-			return -EFAULT;
-		do {
-			rc = _zcrypt_send_cprb(perms, &xcRB);
-		} while (rc == -EAGAIN);
-		/* on failure: retry once again after a requested rescan */
-		if ((rc == -ENODEV) && (zcrypt_process_rescan()))
-			do {
-				rc = _zcrypt_send_cprb(perms, &xcRB);
-			} while (rc == -EAGAIN);
-		if (rc)
-			ZCRYPT_DBF(DBF_DEBUG, "ioctl ZSENDCPRB rc=%d status=0x%x\n",
-				   rc, xcRB.status);
-		if (copy_to_user(uxcRB, &xcRB, sizeof(xcRB)))
-			return -EFAULT;
-		return rc;
-	}
-	case ZSENDEP11CPRB: {
-		struct ep11_urb __user *uxcrb = (void __user *)arg;
-		struct ep11_urb xcrb;
-
-		if (copy_from_user(&xcrb, uxcrb, sizeof(xcrb)))
-			return -EFAULT;
-		do {
-			rc = zcrypt_send_ep11_cprb(perms, &xcrb);
-		} while (rc == -EAGAIN);
-		/* on failure: retry once again after a requested rescan */
-		if ((rc == -ENODEV) && (zcrypt_process_rescan()))
-			do {
-				rc = zcrypt_send_ep11_cprb(perms, &xcrb);
-			} while (rc == -EAGAIN);
-		if (rc)
-			ZCRYPT_DBF(DBF_DEBUG, "ioctl ZSENDEP11CPRB rc=%d\n", rc);
-		if (copy_to_user(uxcrb, &xcrb, sizeof(xcrb)))
-			return -EFAULT;
-		return rc;
-	}
+	case ICARSAMODEXPO:
+		return icarsamodexpo_ioctl(perms, arg);
+	case ICARSACRT:
+		return icarsacrt_ioctl(perms, arg);
+	case ZSECSENDCPRB:
+		return zsecsendcprb_ioctl(perms, arg);
+	case ZSENDEP11CPRB:
+		return zsendep11cprb_ioctl(perms, arg);
 	case ZCRYPT_DEVICE_STATUS: {
 		struct zcrypt_device_status_ext *device_status;
 		size_t total_size = MAX_ZDEV_ENTRIES_EXT
@@ -1506,8 +1728,10 @@
 	struct compat_ica_rsa_modexpo __user *umex32 = compat_ptr(arg);
 	struct compat_ica_rsa_modexpo mex32;
 	struct ica_rsa_modexpo mex64;
+	struct zcrypt_track tr;
 	long rc;
 
+	memset(&tr, 0, sizeof(tr));
 	if (copy_from_user(&mex32, umex32, sizeof(mex32)))
 		return -EFAULT;
 	mex64.inputdata = compat_ptr(mex32.inputdata);
@@ -1517,13 +1741,19 @@
 	mex64.b_key = compat_ptr(mex32.b_key);
 	mex64.n_modulus = compat_ptr(mex32.n_modulus);
 	do {
-		rc = zcrypt_rsa_modexpo(perms, &mex64);
-	} while (rc == -EAGAIN);
+		rc = zcrypt_rsa_modexpo(perms, &tr, &mex64);
+		if (rc == -EAGAIN)
+			tr.again_counter++;
+	} while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX);
 	/* on failure: retry once again after a requested rescan */
 	if ((rc == -ENODEV) && (zcrypt_process_rescan()))
 		do {
-			rc = zcrypt_rsa_modexpo(perms, &mex64);
-		} while (rc == -EAGAIN);
+			rc = zcrypt_rsa_modexpo(perms, &tr, &mex64);
+			if (rc == -EAGAIN)
+				tr.again_counter++;
+		} while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX);
+	if (rc == -EAGAIN && tr.again_counter >= TRACK_AGAIN_MAX)
+		rc = -EIO;
 	if (rc)
 		return rc;
 	return put_user(mex64.outputdatalength,
@@ -1548,8 +1778,10 @@
 	struct compat_ica_rsa_modexpo_crt __user *ucrt32 = compat_ptr(arg);
 	struct compat_ica_rsa_modexpo_crt crt32;
 	struct ica_rsa_modexpo_crt crt64;
+	struct zcrypt_track tr;
 	long rc;
 
+	memset(&tr, 0, sizeof(tr));
 	if (copy_from_user(&crt32, ucrt32, sizeof(crt32)))
 		return -EFAULT;
 	crt64.inputdata = compat_ptr(crt32.inputdata);
@@ -1562,13 +1794,19 @@
 	crt64.nq_prime = compat_ptr(crt32.nq_prime);
 	crt64.u_mult_inv = compat_ptr(crt32.u_mult_inv);
 	do {
-		rc = zcrypt_rsa_crt(perms, &crt64);
-	} while (rc == -EAGAIN);
+		rc = zcrypt_rsa_crt(perms, &tr, &crt64);
+		if (rc == -EAGAIN)
+			tr.again_counter++;
+	} while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX);
 	/* on failure: retry once again after a requested rescan */
 	if ((rc == -ENODEV) && (zcrypt_process_rescan()))
 		do {
-			rc = zcrypt_rsa_crt(perms, &crt64);
-		} while (rc == -EAGAIN);
+			rc = zcrypt_rsa_crt(perms, &tr, &crt64);
+			if (rc == -EAGAIN)
+				tr.again_counter++;
+		} while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX);
+	if (rc == -EAGAIN && tr.again_counter >= TRACK_AGAIN_MAX)
+		rc = -EIO;
 	if (rc)
 		return rc;
 	return put_user(crt64.outputdatalength,
@@ -1600,9 +1838,11 @@
 {
 	struct compat_ica_xcRB __user *uxcRB32 = compat_ptr(arg);
 	struct compat_ica_xcRB xcRB32;
+	struct zcrypt_track tr;
 	struct ica_xcRB xcRB64;
 	long rc;
 
+	memset(&tr, 0, sizeof(tr));
 	if (copy_from_user(&xcRB32, uxcRB32, sizeof(xcRB32)))
 		return -EFAULT;
 	xcRB64.agent_ID = xcRB32.agent_ID;
@@ -1626,13 +1866,19 @@
 	xcRB64.priority_window = xcRB32.priority_window;
 	xcRB64.status = xcRB32.status;
 	do {
-		rc = _zcrypt_send_cprb(perms, &xcRB64);
-	} while (rc == -EAGAIN);
+		rc = _zcrypt_send_cprb(true, perms, &tr, &xcRB64);
+		if (rc == -EAGAIN)
+			tr.again_counter++;
+	} while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX);
 	/* on failure: retry once again after a requested rescan */
 	if ((rc == -ENODEV) && (zcrypt_process_rescan()))
 		do {
-			rc = _zcrypt_send_cprb(perms, &xcRB64);
-		} while (rc == -EAGAIN);
+			rc = _zcrypt_send_cprb(true, perms, &tr, &xcRB64);
+			if (rc == -EAGAIN)
+				tr.again_counter++;
+		} while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX);
+	if (rc == -EAGAIN && tr.again_counter >= TRACK_AGAIN_MAX)
+		rc = -EIO;
 	xcRB32.reply_control_blk_length = xcRB64.reply_control_blk_length;
 	xcRB32.reply_data_length = xcRB64.reply_data_length;
 	xcRB32.status = xcRB64.status;
@@ -1888,6 +2134,7 @@
 	zcrypt_msgtype6_exit();
 	zcrypt_msgtype50_exit();
 	zcrypt_ccamisc_exit();
+	zcrypt_ep11misc_exit();
 	zcrypt_debug_exit();
 }
 
diff --git a/drivers/s390/crypto/zcrypt_api.h b/drivers/s390/crypto/zcrypt_api.h
index d464618..51c0b8b 100644
--- a/drivers/s390/crypto/zcrypt_api.h
+++ b/drivers/s390/crypto/zcrypt_api.h
@@ -55,13 +55,30 @@
 
 struct zcrypt_queue;
 
+/* struct to hold tracking information for a userspace request/response */
+struct zcrypt_track {
+	int again_counter;		/* retry attempts counter */
+	int last_qid;			/* last qid used */
+	int last_rc;			/* last return code */
+#ifdef CONFIG_ZCRYPT_DEBUG
+	struct ap_fi fi;		/* failure injection cmd */
+#endif
+};
+
+/* defines related to message tracking */
+#define TRACK_AGAIN_MAX 10
+#define TRACK_AGAIN_CARD_WEIGHT_PENALTY  1000
+#define TRACK_AGAIN_QUEUE_WEIGHT_PENALTY 10000
+
 struct zcrypt_ops {
-	long (*rsa_modexpo)(struct zcrypt_queue *, struct ica_rsa_modexpo *);
+	long (*rsa_modexpo)(struct zcrypt_queue *, struct ica_rsa_modexpo *,
+			    struct ap_message *);
 	long (*rsa_modexpo_crt)(struct zcrypt_queue *,
-				struct ica_rsa_modexpo_crt *);
-	long (*send_cprb)(struct zcrypt_queue *, struct ica_xcRB *,
+				struct ica_rsa_modexpo_crt *,
+				struct ap_message *);
+	long (*send_cprb)(bool userspace, struct zcrypt_queue *, struct ica_xcRB *,
 			  struct ap_message *);
-	long (*send_ep11_cprb)(struct zcrypt_queue *, struct ep11_urb *,
+	long (*send_ep11_cprb)(bool userspace, struct zcrypt_queue *, struct ep11_urb *,
 			       struct ap_message *);
 	long (*rng)(struct zcrypt_queue *, char *, struct ap_message *);
 	struct list_head list;		/* zcrypt ops list. */
@@ -82,7 +99,7 @@
 	int min_mod_size;		/* Min number of bits. */
 	int max_mod_size;		/* Max number of bits. */
 	int max_exp_bit_length;
-	int speed_rating[NUM_OPS];	/* Speed idx of crypto ops. */
+	const int *speed_rating;	/* Speed idx of crypto ops. */
 	atomic_t load;			/* Utilization of the crypto device */
 
 	int request_count;		/* # current requests. */
@@ -140,8 +157,31 @@
 int zcrypt_api_init(void);
 void zcrypt_api_exit(void);
 long zcrypt_send_cprb(struct ica_xcRB *xcRB);
+long zcrypt_send_ep11_cprb(struct ep11_urb *urb);
 void zcrypt_device_status_mask_ext(struct zcrypt_device_status_ext *devstatus);
 int zcrypt_device_status_ext(int card, int queue,
 			     struct zcrypt_device_status_ext *devstatus);
 
+static inline unsigned long z_copy_from_user(bool userspace,
+					     void *to,
+					     const void __user *from,
+					     unsigned long n)
+{
+	if (likely(userspace))
+		return copy_from_user(to, from, n);
+	memcpy(to, (void __force *) from, n);
+	return 0;
+}
+
+static inline unsigned long z_copy_to_user(bool userspace,
+					   void __user *to,
+					   const void *from,
+					   unsigned long n)
+{
+	if (likely(userspace))
+		return copy_to_user(to, from, n);
+	memcpy((void __force *) to, from, n);
+	return 0;
+}
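These helpers take over from the set_fs(KERNEL_DS) workaround that the zcrypt_ccamisc.c hunks below delete: in-kernel callers such as zcrypt_send_cprb() and zcrypt_send_ep11_cprb() pass userspace=false, so the __user-annotated pointers are treated as kernel memory and copied with memcpy(), while the ioctl path passes true and goes through copy_from_user()/copy_to_user(). A hypothetical usage sketch:

/* the same request-assembly code can serve the ioctl path and in-kernel users */
static long fill_request(bool userspace, void *kbuf,
			 const void __user *src, unsigned long len)
{
	if (z_copy_from_user(userspace, kbuf, src, len))
		return -EFAULT;		/* only possible for real user memory */
	return 0;
}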
+
 #endif /* _ZCRYPT_API_H_ */
diff --git a/drivers/s390/crypto/zcrypt_card.c b/drivers/s390/crypto/zcrypt_card.c
index d4f35a1..09fe6bb 100644
--- a/drivers/s390/crypto/zcrypt_card.c
+++ b/drivers/s390/crypto/zcrypt_card.c
@@ -41,7 +41,7 @@
 {
 	struct zcrypt_card *zc = to_ap_card(dev)->private;
 
-	return snprintf(buf, PAGE_SIZE, "%s\n", zc->type_string);
+	return scnprintf(buf, PAGE_SIZE, "%s\n", zc->type_string);
 }
 
 static DEVICE_ATTR_RO(type);
@@ -50,22 +50,28 @@
 			   struct device_attribute *attr,
 			   char *buf)
 {
-	struct zcrypt_card *zc = to_ap_card(dev)->private;
+	struct ap_card *ac = to_ap_card(dev);
+	struct zcrypt_card *zc = ac->private;
+	int online = ac->config && zc->online ? 1 : 0;
 
-	return snprintf(buf, PAGE_SIZE, "%d\n", zc->online);
+	return scnprintf(buf, PAGE_SIZE, "%d\n", online);
 }
 
 static ssize_t online_store(struct device *dev,
 			    struct device_attribute *attr,
 			    const char *buf, size_t count)
 {
-	struct zcrypt_card *zc = to_ap_card(dev)->private;
+	struct ap_card *ac = to_ap_card(dev);
+	struct zcrypt_card *zc = ac->private;
 	struct zcrypt_queue *zq;
 	int online, id;
 
 	if (sscanf(buf, "%d\n", &online) != 1 || online < 0 || online > 1)
 		return -EINVAL;
 
+	if (online && !ac->config)
+		return -ENODEV;
+
 	zc->online = online;
 	id = zc->card->id;
 
@@ -86,7 +92,7 @@
 {
 	struct zcrypt_card *zc = to_ap_card(dev)->private;
 
-	return snprintf(buf, PAGE_SIZE, "%d\n", atomic_read(&zc->load));
+	return scnprintf(buf, PAGE_SIZE, "%d\n", atomic_read(&zc->load));
 }
 
 static DEVICE_ATTR_RO(load);
@@ -151,11 +157,6 @@
 {
 	int rc;
 
-	rc = sysfs_create_group(&zc->card->ap_dev.device.kobj,
-				&zcrypt_card_attr_group);
-	if (rc)
-		return rc;
-
 	spin_lock(&zcrypt_list_lock);
 	list_add_tail(&zc->list, &zcrypt_card_list);
 	spin_unlock(&zcrypt_list_lock);
@@ -164,6 +165,14 @@
 
 	ZCRYPT_DBF(DBF_INFO, "card=%02x register online=1\n", zc->card->id);
 
+	rc = sysfs_create_group(&zc->card->ap_dev.device.kobj,
+				&zcrypt_card_attr_group);
+	if (rc) {
+		spin_lock(&zcrypt_list_lock);
+		list_del_init(&zc->list);
+		spin_unlock(&zcrypt_list_lock);
+	}
+
 	return rc;
 }
 EXPORT_SYMBOL(zcrypt_card_register);
@@ -183,5 +192,6 @@
 	spin_unlock(&zcrypt_list_lock);
 	sysfs_remove_group(&zc->card->ap_dev.device.kobj,
 			   &zcrypt_card_attr_group);
+	zcrypt_card_put(zc);
 }
 EXPORT_SYMBOL(zcrypt_card_unregister);
diff --git a/drivers/s390/crypto/zcrypt_ccamisc.c b/drivers/s390/crypto/zcrypt_ccamisc.c
index 03999b0..ffab935 100644
--- a/drivers/s390/crypto/zcrypt_ccamisc.c
+++ b/drivers/s390/crypto/zcrypt_ccamisc.c
@@ -173,6 +173,49 @@
 EXPORT_SYMBOL(cca_check_secaescipherkey);
 
 /*
+ * Simple check if the token is a valid CCA secure ECC private
+ * key token. Returns 0 on success or errno value on failure.
+ */
+int cca_check_sececckeytoken(debug_info_t *dbg, int dbflvl,
+			     const u8 *token, size_t keysize,
+			     int checkcpacfexport)
+{
+	struct eccprivkeytoken *t = (struct eccprivkeytoken *) token;
+
+#define DBF(...) debug_sprintf_event(dbg, dbflvl, ##__VA_ARGS__)
+
+	if (t->type != TOKTYPE_CCA_INTERNAL_PKA) {
+		if (dbg)
+			DBF("%s token check failed, type 0x%02x != 0x%02x\n",
+			    __func__, (int) t->type, TOKTYPE_CCA_INTERNAL_PKA);
+		return -EINVAL;
+	}
+	if (t->len > keysize) {
+		if (dbg)
+			DBF("%s token check failed, len %d > keysize %zu\n",
+			    __func__, (int) t->len, keysize);
+		return -EINVAL;
+	}
+	if (t->secid != 0x20) {
+		if (dbg)
+			DBF("%s token check failed, secid 0x%02x != 0x20\n",
+			    __func__, (int) t->secid);
+		return -EINVAL;
+	}
+	if (checkcpacfexport && !(t->kutc & 0x01)) {
+		if (dbg)
+			DBF("%s token check failed, XPRTCPAC bit is 0\n",
+			    __func__);
+		return -EINVAL;
+	}
+
+#undef DBF
+
+	return 0;
+}
+EXPORT_SYMBOL(cca_check_sececckeytoken);
+
+/*
  * Allocate consecutive memory for request CPRB, request param
  * block, reply CPRB and reply param block and fill in values
  * for the common fields. Returns 0 on success or errno value
@@ -205,9 +248,9 @@
 	preqcblk->rpl_msgbl = cprbplusparamblen;
 	if (paramblen) {
 		preqcblk->req_parmb =
-			((u8 *) preqcblk) + sizeof(struct CPRBX);
+			((u8 __user *) preqcblk) + sizeof(struct CPRBX);
 		preqcblk->rpl_parmb =
-			((u8 *) prepcblk) + sizeof(struct CPRBX);
+			((u8 __user *) prepcblk) + sizeof(struct CPRBX);
 	}
 
 	*pcprbmem = cprbmem;
@@ -249,24 +292,6 @@
 }
 
 /*
- * Helper function which calls zcrypt_send_cprb with
- * memory management segment adjusted to kernel space
- * so that the copy_from_user called within this
- * function do in fact copy from kernel space.
- */
-static inline int _zcrypt_send_cprb(struct ica_xcRB *xcrb)
-{
-	int rc;
-	mm_segment_t old_fs = get_fs();
-
-	set_fs(KERNEL_DS);
-	rc = zcrypt_send_cprb(xcrb);
-	set_fs(old_fs);
-
-	return rc;
-}
-
-/*
  * Generate (random) CCA AES DATA secure key.
  */
 int cca_genseckey(u16 cardnr, u16 domain,
@@ -274,7 +299,7 @@
 {
 	int i, rc, keysize;
 	int seckeysize;
-	u8 *mem;
+	u8 *mem, *ptr;
 	struct CPRBX *preqcblk, *prepcblk;
 	struct ica_xcRB xcrb;
 	struct kgreqparm {
@@ -320,7 +345,7 @@
 	preqcblk->domain = domain;
 
 	/* fill request cprb param block with KG request */
-	preqparm = (struct kgreqparm *) preqcblk->req_parmb;
+	preqparm = (struct kgreqparm __force *) preqcblk->req_parmb;
 	memcpy(preqparm->subfunc_code, "KG", 2);
 	preqparm->rule_array_len = sizeof(preqparm->rule_array_len);
 	preqparm->lv1.len = sizeof(struct lv1);
@@ -359,7 +384,7 @@
 	prep_xcrb(&xcrb, cardnr, preqcblk, prepcblk);
 
 	/* forward xcrb with request CPRB and reply CPRB to zcrypt dd */
-	rc = _zcrypt_send_cprb(&xcrb);
+	rc = zcrypt_send_cprb(&xcrb);
 	if (rc) {
 		DEBUG_ERR("%s zcrypt_send_cprb (cardnr=%d domain=%d) failed, errno %d\n",
 			  __func__, (int) cardnr, (int) domain, rc);
@@ -377,8 +402,9 @@
 	}
 
 	/* process response cprb param block */
-	prepcblk->rpl_parmb = ((u8 *) prepcblk) + sizeof(struct CPRBX);
-	prepparm = (struct kgrepparm *) prepcblk->rpl_parmb;
+	ptr =  ((u8 *) prepcblk) + sizeof(struct CPRBX);
+	prepcblk->rpl_parmb = (u8 __user *) ptr;
+	prepparm = (struct kgrepparm *) ptr;
 
 	/* check length of the returned secure key token */
 	seckeysize = prepparm->lv3.keyblock.toklen
@@ -415,7 +441,7 @@
 		   const u8 *clrkey, u8 seckey[SECKEYBLOBSIZE])
 {
 	int rc, keysize, seckeysize;
-	u8 *mem;
+	u8 *mem, *ptr;
 	struct CPRBX *preqcblk, *prepcblk;
 	struct ica_xcRB xcrb;
 	struct cmreqparm {
@@ -460,7 +486,7 @@
 	preqcblk->domain = domain;
 
 	/* fill request cprb param block with CM request */
-	preqparm = (struct cmreqparm *) preqcblk->req_parmb;
+	preqparm = (struct cmreqparm __force *) preqcblk->req_parmb;
 	memcpy(preqparm->subfunc_code, "CM", 2);
 	memcpy(preqparm->rule_array, "AES     ", 8);
 	preqparm->rule_array_len =
@@ -496,7 +522,7 @@
 	prep_xcrb(&xcrb, cardnr, preqcblk, prepcblk);
 
 	/* forward xcrb with request CPRB and reply CPRB to zcrypt dd */
-	rc = _zcrypt_send_cprb(&xcrb);
+	rc = zcrypt_send_cprb(&xcrb);
 	if (rc) {
 		DEBUG_ERR("%s zcrypt_send_cprb (cardnr=%d domain=%d) failed, rc=%d\n",
 			  __func__, (int) cardnr, (int) domain, rc);
@@ -514,8 +540,9 @@
 	}
 
 	/* process response cprb param block */
-	prepcblk->rpl_parmb = ((u8 *) prepcblk) + sizeof(struct CPRBX);
-	prepparm = (struct cmrepparm *) prepcblk->rpl_parmb;
+	ptr = ((u8 *) prepcblk) + sizeof(struct CPRBX);
+	prepcblk->rpl_parmb = (u8 __user *) ptr;
+	prepparm = (struct cmrepparm *) ptr;
 
 	/* check length of the returned secure key token */
 	seckeysize = prepparm->lv3.keyblock.toklen
@@ -554,7 +581,7 @@
 		    u8 *protkey, u32 *protkeylen, u32 *protkeytype)
 {
 	int rc;
-	u8 *mem;
+	u8 *mem, *ptr;
 	struct CPRBX *preqcblk, *prepcblk;
 	struct ica_xcRB xcrb;
 	struct uskreqparm {
@@ -592,7 +619,7 @@
 				u8  pad2[1];
 				u8  vptype;
 				u8  vp[32];  /* verification pattern */
-			} keyblock;
+			} ckb;
 		} lv3;
 	} __packed * prepparm;
 
@@ -605,7 +632,7 @@
 	preqcblk->domain = domain;
 
 	/* fill request cprb param block with USK request */
-	preqparm = (struct uskreqparm *) preqcblk->req_parmb;
+	preqparm = (struct uskreqparm __force *) preqcblk->req_parmb;
 	memcpy(preqparm->subfunc_code, "US", 2);
 	preqparm->rule_array_len = sizeof(preqparm->rule_array_len);
 	preqparm->lv1.len = sizeof(struct lv1);
@@ -622,7 +649,7 @@
 	prep_xcrb(&xcrb, cardnr, preqcblk, prepcblk);
 
 	/* forward xcrb with request CPRB and reply CPRB to zcrypt dd */
-	rc = _zcrypt_send_cprb(&xcrb);
+	rc = zcrypt_send_cprb(&xcrb);
 	if (rc) {
 		DEBUG_ERR("%s zcrypt_send_cprb (cardnr=%d domain=%d) failed, rc=%d\n",
 			  __func__, (int) cardnr, (int) domain, rc);
@@ -646,19 +673,21 @@
 	}
 
 	/* process response cprb param block */
-	prepcblk->rpl_parmb = ((u8 *) prepcblk) + sizeof(struct CPRBX);
-	prepparm = (struct uskrepparm *) prepcblk->rpl_parmb;
+	ptr = ((u8 *) prepcblk) + sizeof(struct CPRBX);
+	prepcblk->rpl_parmb = (u8 __user *) ptr;
+	prepparm = (struct uskrepparm *) ptr;
 
 	/* check the returned keyblock */
-	if (prepparm->lv3.keyblock.version != 0x01) {
-		DEBUG_ERR("%s reply param keyblock version mismatch 0x%02x != 0x01\n",
-			  __func__, (int) prepparm->lv3.keyblock.version);
+	if (prepparm->lv3.ckb.version != 0x01 &&
+	    prepparm->lv3.ckb.version != 0x02) {
+		DEBUG_ERR("%s reply param keyblock version mismatch 0x%02x\n",
+			  __func__, (int) prepparm->lv3.ckb.version);
 		rc = -EIO;
 		goto out;
 	}
 
 	/* copy the tanslated protected key */
-	switch (prepparm->lv3.keyblock.len) {
+	switch (prepparm->lv3.ckb.len) {
 	case 16+32:
 		/* AES 128 protected key */
 		if (protkeytype)
@@ -676,13 +705,13 @@
 		break;
 	default:
 		DEBUG_ERR("%s unknown/unsupported keylen %d\n",
-			  __func__, prepparm->lv3.keyblock.len);
+			  __func__, prepparm->lv3.ckb.len);
 		rc = -EIO;
 		goto out;
 	}
-	memcpy(protkey, prepparm->lv3.keyblock.key, prepparm->lv3.keyblock.len);
+	memcpy(protkey, prepparm->lv3.ckb.key, prepparm->lv3.ckb.len);
 	if (protkeylen)
-		*protkeylen = prepparm->lv3.keyblock.len;
+		*protkeylen = prepparm->lv3.ckb.len;
 
 out:
 	free_cprbmem(mem, PARMBSIZE, 0);
@@ -713,7 +742,7 @@
 		     u8 *keybuf, size_t *keybufsize)
 {
 	int rc;
-	u8 *mem;
+	u8 *mem, *ptr;
 	struct CPRBX *preqcblk, *prepcblk;
 	struct ica_xcRB xcrb;
 	struct gkreqparm {
@@ -795,7 +824,7 @@
 	preqcblk->req_parml = sizeof(struct gkreqparm);
 
 	/* prepare request param block with GK request */
-	preqparm = (struct gkreqparm *) preqcblk->req_parmb;
+	preqparm = (struct gkreqparm __force *) preqcblk->req_parmb;
 	memcpy(preqparm->subfunc_code, "GK", 2);
 	preqparm->rule_array_len =  sizeof(uint16_t) + 2 * 8;
 	memcpy(preqparm->rule_array, "AES     OP      ", 2*8);
@@ -846,7 +875,7 @@
 	prep_xcrb(&xcrb, cardnr, preqcblk, prepcblk);
 
 	/* forward xcrb with request CPRB and reply CPRB to zcrypt dd */
-	rc = _zcrypt_send_cprb(&xcrb);
+	rc = zcrypt_send_cprb(&xcrb);
 	if (rc) {
 		DEBUG_ERR(
 			"%s zcrypt_send_cprb (cardnr=%d domain=%d) failed, rc=%d\n",
@@ -866,8 +895,9 @@
 	}
 
 	/* process response cprb param block */
-	prepcblk->rpl_parmb = ((u8 *) prepcblk) + sizeof(struct CPRBX);
-	prepparm = (struct gkrepparm *) prepcblk->rpl_parmb;
+	ptr = ((u8 *) prepcblk) + sizeof(struct CPRBX);
+	prepcblk->rpl_parmb = (u8 __user *) ptr;
+	prepparm = (struct gkrepparm *) ptr;
 
 	/* do some plausibility checks on the key block */
 	if (prepparm->kb.len < 120 + 5 * sizeof(uint16_t) ||
@@ -916,7 +946,7 @@
 			   int *key_token_size)
 {
 	int rc, n;
-	u8 *mem;
+	u8 *mem, *ptr;
 	struct CPRBX *preqcblk, *prepcblk;
 	struct ica_xcRB xcrb;
 	struct rule_array_block {
@@ -973,7 +1003,7 @@
 	preqcblk->req_parml = 0;
 
 	/* prepare request param block with IP request */
-	preq_ra_block = (struct rule_array_block *) preqcblk->req_parmb;
+	preq_ra_block = (struct rule_array_block __force *) preqcblk->req_parmb;
 	memcpy(preq_ra_block->subfunc_code, "IP", 2);
 	preq_ra_block->rule_array_len =  sizeof(uint16_t) + 2 * 8;
 	memcpy(preq_ra_block->rule_array, rule_array_1, 8);
@@ -986,7 +1016,7 @@
 	}
 
 	/* prepare vud block */
-	preq_vud_block = (struct vud_block *)
+	preq_vud_block = (struct vud_block __force *)
 		(preqcblk->req_parmb + preqcblk->req_parml);
 	n = complete ? 0 : (clr_key_bit_size + 7) / 8;
 	preq_vud_block->len = sizeof(struct vud_block) + n;
@@ -1000,7 +1030,7 @@
 	preqcblk->req_parml += preq_vud_block->len;
 
 	/* prepare key block */
-	preq_key_block = (struct key_block *)
+	preq_key_block = (struct key_block __force *)
 		(preqcblk->req_parmb + preqcblk->req_parml);
 	n = *key_token_size;
 	preq_key_block->len = sizeof(struct key_block) + n;
@@ -1013,7 +1043,7 @@
 	prep_xcrb(&xcrb, cardnr, preqcblk, prepcblk);
 
 	/* forward xcrb with request CPRB and reply CPRB to zcrypt dd */
-	rc = _zcrypt_send_cprb(&xcrb);
+	rc = zcrypt_send_cprb(&xcrb);
 	if (rc) {
 		DEBUG_ERR(
 			"%s zcrypt_send_cprb (cardnr=%d domain=%d) failed, rc=%d\n",
@@ -1033,8 +1063,9 @@
 	}
 
 	/* process response cprb param block */
-	prepcblk->rpl_parmb = ((u8 *) prepcblk) + sizeof(struct CPRBX);
-	prepparm = (struct iprepparm *) prepcblk->rpl_parmb;
+	ptr = ((u8 *) prepcblk) + sizeof(struct CPRBX);
+	prepcblk->rpl_parmb = (u8 __user *) ptr;
+	prepparm = (struct iprepparm *) ptr;
 
 	/* do some plausibility checks on the key block */
 	if (prepparm->kb.len < 120 + 3 * sizeof(uint16_t) ||
@@ -1150,7 +1181,7 @@
 		       u8 *protkey, u32 *protkeylen, u32 *protkeytype)
 {
 	int rc;
-	u8 *mem;
+	u8 *mem, *ptr;
 	struct CPRBX *preqcblk, *prepcblk;
 	struct ica_xcRB xcrb;
 	struct aureqparm {
@@ -1207,7 +1238,7 @@
 	preqcblk->domain = domain;
 
 	/* fill request cprb param block with AU request */
-	preqparm = (struct aureqparm *) preqcblk->req_parmb;
+	preqparm = (struct aureqparm __force *) preqcblk->req_parmb;
 	memcpy(preqparm->subfunc_code, "AU", 2);
 	preqparm->rule_array_len =
 		sizeof(preqparm->rule_array_len)
@@ -1229,7 +1260,7 @@
 	prep_xcrb(&xcrb, cardnr, preqcblk, prepcblk);
 
 	/* forward xcrb with request CPRB and reply CPRB to zcrypt dd */
-	rc = _zcrypt_send_cprb(&xcrb);
+	rc = zcrypt_send_cprb(&xcrb);
 	if (rc) {
 		DEBUG_ERR(
 			"%s zcrypt_send_cprb (cardnr=%d domain=%d) failed, rc=%d\n",
@@ -1256,14 +1287,15 @@
 	}
 
 	/* process response cprb param block */
-	prepcblk->rpl_parmb = ((u8 *) prepcblk) + sizeof(struct CPRBX);
-	prepparm = (struct aurepparm *) prepcblk->rpl_parmb;
+	ptr = ((u8 *) prepcblk) + sizeof(struct CPRBX);
+	prepcblk->rpl_parmb = (u8 __user *) ptr;
+	prepparm = (struct aurepparm *) ptr;
 
 	/* check the returned keyblock */
-	if (prepparm->vud.ckb.version != 0x01) {
-		DEBUG_ERR(
-			"%s reply param keyblock version mismatch 0x%02x != 0x01\n",
-			__func__, (int) prepparm->vud.ckb.version);
+	if (prepparm->vud.ckb.version != 0x01 &&
+	    prepparm->vud.ckb.version != 0x02) {
+		DEBUG_ERR("%s reply param keyblock version mismatch 0x%02x\n",
+			  __func__, (int) prepparm->vud.ckb.version);
 		rc = -EIO;
 		goto out;
 	}
@@ -1309,6 +1341,156 @@
 EXPORT_SYMBOL(cca_cipher2protkey);
 
 /*
+ * Derive protected key from CCA ECC secure private key.
+ */
+int cca_ecc2protkey(u16 cardnr, u16 domain, const u8 *key,
+		    u8 *protkey, u32 *protkeylen, u32 *protkeytype)
+{
+	int rc;
+	u8 *mem, *ptr;
+	struct CPRBX *preqcblk, *prepcblk;
+	struct ica_xcRB xcrb;
+	struct aureqparm {
+		u8  subfunc_code[2];
+		u16 rule_array_len;
+		u8  rule_array[8];
+		struct {
+			u16 len;
+			u16 tk_blob_len;
+			u16 tk_blob_tag;
+			u8  tk_blob[66];
+		} vud;
+		struct {
+			u16 len;
+			u16 cca_key_token_len;
+			u16 cca_key_token_flags;
+			u8  cca_key_token[0];
+		} kb;
+	} __packed * preqparm;
+	struct aurepparm {
+		u8  subfunc_code[2];
+		u16 rule_array_len;
+		struct {
+			u16 len;
+			u16 sublen;
+			u16 tag;
+			struct cpacfkeyblock {
+				u8  version;  /* version of this struct */
+				u8  flags[2];
+				u8  algo;
+				u8  form;
+				u8  pad1[3];
+				u16 keylen;
+				u8  key[0];  /* the key (keylen bytes) */
+				u16 keyattrlen;
+				u8  keyattr[32];
+				u8  pad2[1];
+				u8  vptype;
+				u8  vp[32];  /* verification pattern */
+			} ckb;
+		} vud;
+		struct {
+			u16 len;
+		} kb;
+	} __packed * prepparm;
+	int keylen = ((struct eccprivkeytoken *)key)->len;
+
+	/* get already prepared memory for 2 cprbs with param block each */
+	rc = alloc_and_prep_cprbmem(PARMBSIZE, &mem, &preqcblk, &prepcblk);
+	if (rc)
+		return rc;
+
+	/* fill request cprb struct */
+	preqcblk->domain = domain;
+
+	/* fill request cprb param block with AU request */
+	preqparm = (struct aureqparm __force *) preqcblk->req_parmb;
+	memcpy(preqparm->subfunc_code, "AU", 2);
+	preqparm->rule_array_len =
+		sizeof(preqparm->rule_array_len)
+		+ sizeof(preqparm->rule_array);
+	memcpy(preqparm->rule_array, "EXPT-SK ", 8);
+	/* vud, tk blob */
+	preqparm->vud.len = sizeof(preqparm->vud);
+	preqparm->vud.tk_blob_len = sizeof(preqparm->vud.tk_blob)
+		+ 2 * sizeof(uint16_t);
+	preqparm->vud.tk_blob_tag = 0x00C2;
+	/* kb, cca token */
+	preqparm->kb.len = keylen + 3 * sizeof(uint16_t);
+	preqparm->kb.cca_key_token_len = keylen + 2 * sizeof(uint16_t);
+	memcpy(preqparm->kb.cca_key_token, key, keylen);
+	/* now fill length of param block into cprb */
+	preqcblk->req_parml = sizeof(struct aureqparm) + keylen;
+
+	/* fill xcrb struct */
+	prep_xcrb(&xcrb, cardnr, preqcblk, prepcblk);
+
+	/* forward xcrb with request CPRB and reply CPRB to zcrypt dd */
+	rc = zcrypt_send_cprb(&xcrb);
+	if (rc) {
+		DEBUG_ERR(
+			"%s zcrypt_send_cprb (cardnr=%d domain=%d) failed, rc=%d\n",
+			__func__, (int) cardnr, (int) domain, rc);
+		goto out;
+	}
+
+	/* check response returncode and reasoncode */
+	if (prepcblk->ccp_rtcode != 0) {
+		DEBUG_ERR(
+			"%s unwrap secure key failure, card response %d/%d\n",
+			__func__,
+			(int) prepcblk->ccp_rtcode,
+			(int) prepcblk->ccp_rscode);
+		rc = -EIO;
+		goto out;
+	}
+	if (prepcblk->ccp_rscode != 0) {
+		DEBUG_WARN(
+			"%s unwrap secure key warning, card response %d/%d\n",
+			__func__,
+			(int) prepcblk->ccp_rtcode,
+			(int) prepcblk->ccp_rscode);
+	}
+
+	/* process response cprb param block */
+	ptr = ((u8 *) prepcblk) + sizeof(struct CPRBX);
+	prepcblk->rpl_parmb = (u8 __user *) ptr;
+	prepparm = (struct aurepparm *) ptr;
+
+	/* check the returned keyblock */
+	if (prepparm->vud.ckb.version != 0x02) {
+		DEBUG_ERR("%s reply param keyblock version mismatch 0x%02x != 0x02\n",
+			  __func__, (int) prepparm->vud.ckb.version);
+		rc = -EIO;
+		goto out;
+	}
+	if (prepparm->vud.ckb.algo != 0x81) {
+		DEBUG_ERR(
+			"%s reply param keyblock algo mismatch 0x%02x != 0x81\n",
+			__func__, (int) prepparm->vud.ckb.algo);
+		rc = -EIO;
+		goto out;
+	}
+
+	/* copy the translated protected key */
+	if (prepparm->vud.ckb.keylen > *protkeylen) {
+		DEBUG_ERR("%s prot keylen mismatch %d > buffersize %u\n",
+			  __func__, prepparm->vud.ckb.keylen, *protkeylen);
+		rc = -EIO;
+		goto out;
+	}
+	memcpy(protkey, prepparm->vud.ckb.key, prepparm->vud.ckb.keylen);
+	*protkeylen = prepparm->vud.ckb.keylen;
+	if (protkeytype)
+		*protkeytype = PKEY_KEYTYPE_ECC;
+
+out:
+	free_cprbmem(mem, PARMBSIZE, 0);
+	return rc;
+}
+EXPORT_SYMBOL(cca_ecc2protkey);
+
+/*
  * query cryptographic facility from CCA adapter
  */
 int cca_query_crypto_facility(u16 cardnr, u16 domain,
@@ -1346,7 +1528,7 @@
 	preqcblk->domain = domain;
 
 	/* fill request cprb param block with FQ request */
-	preqparm = (struct fqreqparm *) preqcblk->req_parmb;
+	preqparm = (struct fqreqparm __force *) preqcblk->req_parmb;
 	memcpy(preqparm->subfunc_code, "FQ", 2);
 	memcpy(preqparm->rule_array, keyword, sizeof(preqparm->rule_array));
 	preqparm->rule_array_len =
@@ -1359,7 +1541,7 @@
 	prep_xcrb(&xcrb, cardnr, preqcblk, prepcblk);
 
 	/* forward xcrb with request CPRB and reply CPRB to zcrypt dd */
-	rc = _zcrypt_send_cprb(&xcrb);
+	rc = zcrypt_send_cprb(&xcrb);
 	if (rc) {
 		DEBUG_ERR("%s zcrypt_send_cprb (cardnr=%d domain=%d) failed, rc=%d\n",
 			  __func__, (int) cardnr, (int) domain, rc);
@@ -1377,8 +1559,9 @@
 	}
 
 	/* process response cprb param block */
-	prepcblk->rpl_parmb = ((u8 *) prepcblk) + sizeof(struct CPRBX);
-	prepparm = (struct fqrepparm *) prepcblk->rpl_parmb;
+	ptr = ((u8 *) prepcblk) + sizeof(struct CPRBX);
+	prepcblk->rpl_parmb = (u8 __user *) ptr;
+	prepparm = (struct fqrepparm *) ptr;
 	ptr = prepparm->lvdata;
 
 	/* check and possibly copy reply rule array */
@@ -1516,21 +1699,38 @@
 				       rarray, &rlen, varray, &vlen);
 	if (rc == 0 && rlen >= 10*8 && vlen >= 204) {
 		memcpy(ci->serial, rarray, 8);
-		ci->new_mk_state = (char) rarray[7*8];
-		ci->cur_mk_state = (char) rarray[8*8];
-		ci->old_mk_state = (char) rarray[9*8];
-		if (ci->old_mk_state == '2')
-			memcpy(&ci->old_mkvp, varray + 172, 8);
-		if (ci->cur_mk_state == '2')
-			memcpy(&ci->cur_mkvp, varray + 184, 8);
-		if (ci->new_mk_state == '3')
-			memcpy(&ci->new_mkvp, varray + 196, 8);
-		found = 1;
+		ci->new_aes_mk_state = (char) rarray[7*8];
+		ci->cur_aes_mk_state = (char) rarray[8*8];
+		ci->old_aes_mk_state = (char) rarray[9*8];
+		if (ci->old_aes_mk_state == '2')
+			memcpy(&ci->old_aes_mkvp, varray + 172, 8);
+		if (ci->cur_aes_mk_state == '2')
+			memcpy(&ci->cur_aes_mkvp, varray + 184, 8);
+		if (ci->new_aes_mk_state == '3')
+			memcpy(&ci->new_aes_mkvp, varray + 196, 8);
+		found++;
+	}
+	if (!found)
+		goto out;
+	rlen = vlen = PAGE_SIZE/2;
+	rc = cca_query_crypto_facility(cardnr, domain, "STATICSB",
+				       rarray, &rlen, varray, &vlen);
+	if (rc == 0 && rlen >= 13*8 && vlen >= 240) {
+		ci->new_apka_mk_state = (char) rarray[10*8];
+		ci->cur_apka_mk_state = (char) rarray[11*8];
+		ci->old_apka_mk_state = (char) rarray[12*8];
+		if (ci->old_apka_mk_state == '2')
+			memcpy(&ci->old_apka_mkvp, varray + 208, 8);
+		if (ci->cur_apka_mk_state == '2')
+			memcpy(&ci->cur_apka_mkvp, varray + 220, 8);
+		if (ci->new_apka_mk_state == '3')
+			memcpy(&ci->new_apka_mkvp, varray + 232, 8);
+		found++;
 	}
 
+out:
 	free_page((unsigned long) pg);
-
-	return found ? 0 : -ENOENT;
+	return found == 2 ? 0 : -ENOENT;
 }
 
 /*
@@ -1568,9 +1768,9 @@
 		return -EINVAL;
 
 	/* fetch status of all crypto cards */
-	device_status = kmalloc_array(MAX_ZDEV_ENTRIES_EXT,
-				      sizeof(struct zcrypt_device_status_ext),
-				      GFP_KERNEL);
+	device_status = kvmalloc_array(MAX_ZDEV_ENTRIES_EXT,
+				       sizeof(struct zcrypt_device_status_ext),
+				       GFP_KERNEL);
 	if (!device_status)
 		return -ENOMEM;
 	zcrypt_device_status_mask_ext(device_status);
@@ -1584,16 +1784,16 @@
 			/* enabled CCA card, check current mkvp from cache */
 			if (cca_info_cache_fetch(card, dom, &ci) == 0 &&
 			    ci.hwtype >= minhwtype &&
-			    ci.cur_mk_state == '2' &&
-			    ci.cur_mkvp == mkvp) {
+			    ci.cur_aes_mk_state == '2' &&
+			    ci.cur_aes_mkvp == mkvp) {
 				if (!verify)
 					break;
 				/* verify: refresh card info */
 				if (fetch_cca_info(card, dom, &ci) == 0) {
 					cca_info_cache_update(card, dom, &ci);
 					if (ci.hwtype >= minhwtype &&
-					    ci.cur_mk_state == '2' &&
-					    ci.cur_mkvp == mkvp)
+					    ci.cur_aes_mk_state == '2' &&
+					    ci.cur_aes_mkvp == mkvp)
 						break;
 				}
 			}
@@ -1615,12 +1815,12 @@
 			if (fetch_cca_info(card, dom, &ci) == 0) {
 				cca_info_cache_update(card, dom, &ci);
 				if (ci.hwtype >= minhwtype &&
-				    ci.cur_mk_state == '2' &&
-				    ci.cur_mkvp == mkvp)
+				    ci.cur_aes_mk_state == '2' &&
+				    ci.cur_aes_mkvp == mkvp)
 					break;
 				if (ci.hwtype >= minhwtype &&
-				    ci.old_mk_state == '2' &&
-				    ci.old_mkvp == mkvp &&
+				    ci.old_aes_mk_state == '2' &&
+				    ci.old_aes_mkvp == mkvp &&
 				    oi < 0)
 					oi = i;
 			}
@@ -1640,7 +1840,7 @@
 	} else
 		rc = -ENODEV;
 
-	kfree(device_status);
+	kvfree(device_status);
 	return rc;
 }
 
@@ -1674,15 +1874,14 @@
 EXPORT_SYMBOL(cca_findcard);
 
 int cca_findcard2(u32 **apqns, u32 *nr_apqns, u16 cardnr, u16 domain,
-		  int minhwtype, u64 cur_mkvp, u64 old_mkvp, int verify)
+		  int minhwtype, int mktype, u64 cur_mkvp, u64 old_mkvp,
+		  int verify)
 {
 	struct zcrypt_device_status_ext *device_status;
-	int i, n, card, dom, curmatch, oldmatch, rc = 0;
+	u32 *_apqns = NULL, _nr_apqns = 0;
+	int i, card, dom, curmatch, oldmatch, rc = 0;
 	struct cca_info ci;
 
-	*apqns = NULL;
-	*nr_apqns = 0;
-
 	/* fetch status of all crypto cards */
 	device_status = kvmalloc_array(MAX_ZDEV_ENTRIES_EXT,
 				       sizeof(struct zcrypt_device_status_ext),
@@ -1691,67 +1890,73 @@
 		return -ENOMEM;
 	zcrypt_device_status_mask_ext(device_status);
 
-	/* loop two times: first gather eligible apqns, then store them */
-	while (1) {
-		n = 0;
-		/* walk through all the crypto cards */
-		for (i = 0; i < MAX_ZDEV_ENTRIES_EXT; i++) {
-			card = AP_QID_CARD(device_status[i].qid);
-			dom = AP_QID_QUEUE(device_status[i].qid);
-			/* check online state */
-			if (!device_status[i].online)
-				continue;
-			/* check for cca functions */
-			if (!(device_status[i].functions & 0x04))
-				continue;
-			/* check cardnr */
-			if (cardnr != 0xFFFF && card != cardnr)
-				continue;
-			/* check domain */
-			if (domain != 0xFFFF && dom != domain)
-				continue;
-			/* get cca info on this apqn */
-			if (cca_get_info(card, dom, &ci, verify))
-				continue;
-			/* current master key needs to be valid */
-			if (ci.cur_mk_state != '2')
-				continue;
-			/* check min hardware type */
-			if (minhwtype > 0 && minhwtype > ci.hwtype)
-				continue;
-			if (cur_mkvp || old_mkvp) {
-				/* check mkvps */
-				curmatch = oldmatch = 0;
-				if (cur_mkvp && cur_mkvp == ci.cur_mkvp)
+	/* allocate 1k space for up to 256 apqns */
+	_apqns = kmalloc_array(256, sizeof(u32), GFP_KERNEL);
+	if (!_apqns) {
+		kvfree(device_status);
+		return -ENOMEM;
+	}
+
+	/* walk through all the crypto apqns */
+	for (i = 0; i < MAX_ZDEV_ENTRIES_EXT; i++) {
+		card = AP_QID_CARD(device_status[i].qid);
+		dom = AP_QID_QUEUE(device_status[i].qid);
+		/* check online state */
+		if (!device_status[i].online)
+			continue;
+		/* check for cca functions */
+		if (!(device_status[i].functions & 0x04))
+			continue;
+		/* check cardnr */
+		if (cardnr != 0xFFFF && card != cardnr)
+			continue;
+		/* check domain */
+		if (domain != 0xFFFF && dom != domain)
+			continue;
+		/* get cca info on this apqn */
+		if (cca_get_info(card, dom, &ci, verify))
+			continue;
+		/* current master key needs to be valid */
+		if (mktype == AES_MK_SET && ci.cur_aes_mk_state != '2')
+			continue;
+		if (mktype == APKA_MK_SET && ci.cur_apka_mk_state != '2')
+			continue;
+		/* check min hardware type */
+		if (minhwtype > 0 && minhwtype > ci.hwtype)
+			continue;
+		if (cur_mkvp || old_mkvp) {
+			/* check mkvps */
+			curmatch = oldmatch = 0;
+			if (mktype == AES_MK_SET) {
+				if (cur_mkvp && cur_mkvp == ci.cur_aes_mkvp)
 					curmatch = 1;
-				if (old_mkvp && ci.old_mk_state == '2' &&
-				    old_mkvp == ci.old_mkvp)
+				if (old_mkvp && ci.old_aes_mk_state == '2' &&
+				    old_mkvp == ci.old_aes_mkvp)
 					oldmatch = 1;
-				if ((cur_mkvp || old_mkvp) &&
-				    (curmatch + oldmatch < 1))
-					continue;
+			} else {
+				if (cur_mkvp && cur_mkvp == ci.cur_apka_mkvp)
+					curmatch = 1;
+				if (old_mkvp && ci.old_apka_mk_state == '2' &&
+				    old_mkvp == ci.old_apka_mkvp)
+					oldmatch = 1;
 			}
-			/* apqn passed all filtering criterons */
-			if (*apqns && n < *nr_apqns)
-				(*apqns)[n] = (((u16)card) << 16) | ((u16) dom);
-			n++;
+			if (curmatch + oldmatch < 1)
+				continue;
 		}
-		/* loop 2nd time: array has been filled */
-		if (*apqns)
-			break;
-		/* loop 1st time: have # of eligible apqns in n */
-		if (!n) {
-			rc = -ENODEV; /* no eligible apqns found */
-			break;
-		}
-		*nr_apqns = n;
-		/* allocate array to store n apqns into */
-		*apqns = kmalloc_array(n, sizeof(u32), GFP_KERNEL);
-		if (!*apqns) {
-			rc = -ENOMEM;
-			break;
-		}
-		verify = 0;
+		/* apqn passed all filtering criteria, add to the array */
+		if (_nr_apqns < 256)
+			_apqns[_nr_apqns++] = (((u16)card) << 16) | ((u16) dom);
+	}
+
+	/* nothing found? */
+	if (!_nr_apqns) {
+		kfree(_apqns);
+		rc = -ENODEV;
+	} else {
+		/* no re-allocation, simply return the _apqns array */
+		*apqns = _apqns;
+		*nr_apqns = _nr_apqns;
+		rc = 0;
 	}
 
 	kvfree(device_status);
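
As a minimal usage sketch of the reworked cca_findcard2() above (hypothetical caller code, not part of this patch; mkvp is assumed to hold the wanted AES master key verification pattern), the single-pass variant hands back a kmalloc'ed array that the caller owns and must free:

	u32 *apqns = NULL, nr_apqns = 0, i;
	int rc;

	/* any card/domain (0xFFFF = wildcard), no min hwtype, AES MK set */
	rc = cca_findcard2(&apqns, &nr_apqns, 0xFFFF, 0xFFFF,
			   0, AES_MK_SET, mkvp, 0, 0);
	if (rc)
		return rc;	/* -ENODEV if no eligible apqn was found */
	for (i = 0; i < nr_apqns; i++)
		pr_debug("apqn %04x.%04x\n",
			 apqns[i] >> 16, apqns[i] & 0xFFFF);
	kfree(apqns);		/* returned array is owned by the caller */
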
diff --git a/drivers/s390/crypto/zcrypt_ccamisc.h b/drivers/s390/crypto/zcrypt_ccamisc.h
index 77b6cc7..e710544 100644
--- a/drivers/s390/crypto/zcrypt_ccamisc.h
+++ b/drivers/s390/crypto/zcrypt_ccamisc.h
@@ -14,11 +14,13 @@
 #include <asm/pkey.h>
 
 /* Key token types */
-#define TOKTYPE_NON_CCA		0x00 /* Non-CCA key token */
-#define TOKTYPE_CCA_INTERNAL	0x01 /* CCA internal key token */
+#define TOKTYPE_NON_CCA		 0x00 /* Non-CCA key token */
+#define TOKTYPE_CCA_INTERNAL	 0x01 /* CCA internal sym key token */
+#define TOKTYPE_CCA_INTERNAL_PKA 0x1f /* CCA internal asym key token */
 
 /* For TOKTYPE_NON_CCA: */
 #define TOKVER_PROTECTED_KEY	0x01 /* Protected key token */
+#define TOKVER_CLEAR_KEY	0x02 /* Clear key token */
 
 /* For TOKTYPE_CCA_INTERNAL: */
 #define TOKVER_CCA_AES		0x04 /* CCA AES key token */
@@ -89,7 +91,32 @@
 	u16 kmf1;     /* key management field 1 */
 	u16 kmf2;     /* key management field 2 */
 	u16 kmf3;     /* key management field 3 */
-	u8  vdata[0]; /* variable part data follows */
+	u8  vdata[]; /* variable part data follows */
+} __packed;
+
+/* inside view of a CCA secure ECC private key */
+struct eccprivkeytoken {
+	u8  type;     /* 0x1f for internal asym key token */
+	u8  version;  /* should be 0x00 */
+	u16 len;      /* total key token length in bytes */
+	u8  res1[4];
+	u8  secid;    /* 0x20 for ECC priv key section marker */
+	u8  secver;   /* section version */
+	u16 seclen;   /* section length */
+	u8  wtype;    /* wrapping method, 0x00 clear, 0x01 AES */
+	u8  htype;    /* hash method, 0x02 for SHA-256 */
+	u8  res2[2];
+	u8  kutc;     /* key usage and translation control */
+	u8  ctype;    /* curve type */
+	u8  kfs;      /* key format and security */
+	u8  ksrc;     /* key source */
+	u16 pbitlen;  /* length of prime p in bits */
+	u16 ibmadlen; /* IBM associated data length in bytes */
+	u64 mkvp;     /* master key verification pattern */
+	u8  opk[48];  /* encrypted object protection key data */
+	u16 adatalen; /* associated data length in bytes */
+	u16 fseclen;  /* formatted section length in bytes */
+	u8  more_data[]; /* more data follows */
 } __packed;
 
 /* Some defines for the CCA AES cipherkeytoken kmf1 field */
@@ -122,6 +149,14 @@
 			      int checkcpacfexport);
 
 /*
+ * Simple check if the token is a valid CCA secure ECC private
+ * key token. Returns 0 on success or errno value on failure.
+ */
+int cca_check_sececckeytoken(debug_info_t *dbg, int dbflvl,
+			     const u8 *token, size_t keysize,
+			     int checkcpacfexport);
+
+/*
  * Generate (random) CCA AES DATA secure key.
  */
 int cca_genseckey(u16 cardnr, u16 domain, u32 keybitsize, u8 *seckey);
@@ -158,6 +193,12 @@
 		      const u8 *clrkey, u8 *keybuf, size_t *keybufsize);
 
 /*
+ * Derive protected key from CCA ECC secure private key.
+ */
+int cca_ecc2protkey(u16 cardnr, u16 domain, const u8 *key,
+		    u8 *protkey, u32 *protkeylen, u32 *protkeytype);
+
+/*
  * Query cryptographic facility from CCA adapter
  */
 int cca_query_crypto_facility(u16 cardnr, u16 domain,
@@ -185,6 +226,8 @@
  * - if verify is enabled and a cur_mkvp and/or old_mkvp
  *   value is given, then refetch the cca_info and make sure the current
  *   cur_mkvp or old_mkvp values of the apqn are used.
+ * The mktype determines which set of master keys to use:
+ *   0 = AES_MK_SET - AES MK set, 1 = APKA_MK_SET - APKA MK set
  * The array of apqn entries is allocated with kmalloc and returned in *apqns;
  * the number of apqns stored into the list is returned in *nr_apqns. One apqn
 * entry is simply a 32 bit value with 16 bit cardnr and 16 bit domain nr and
@@ -193,18 +236,28 @@
  * -ENODEV is returned.
  */
 int cca_findcard2(u32 **apqns, u32 *nr_apqns, u16 cardnr, u16 domain,
-		  int minhwtype, u64 cur_mkvp, u64 old_mkvp, int verify);
+		  int minhwtype, int mktype, u64 cur_mkvp, u64 old_mkvp,
+		  int verify);
+
+#define AES_MK_SET  0
+#define APKA_MK_SET 1
 
 /* struct to hold info for each CCA queue */
 struct cca_info {
-	int  hwtype;	    /* one of the defined AP_DEVICE_TYPE_* */
-	char new_mk_state;  /* '1' empty, '2' partially full, '3' full */
-	char cur_mk_state;  /* '1' invalid, '2' valid */
-	char old_mk_state;  /* '1' invalid, '2' valid */
-	u64  new_mkvp;	    /* truncated sha256 hash of new master key */
-	u64  cur_mkvp;	    /* truncated sha256 hash of current master key */
-	u64  old_mkvp;	    /* truncated sha256 hash of old master key */
-	char serial[9];     /* serial number string (8 ascii numbers + 0x00) */
+	int  hwtype;		/* one of the defined AP_DEVICE_TYPE_* */
+	char new_aes_mk_state;	/* '1' empty, '2' partially full, '3' full */
+	char cur_aes_mk_state;	/* '1' invalid, '2' valid */
+	char old_aes_mk_state;	/* '1' invalid, '2' valid */
+	char new_apka_mk_state; /* '1' empty, '2' partially full, '3' full */
+	char cur_apka_mk_state; /* '1' invalid, '2' valid */
+	char old_apka_mk_state; /* '1' invalid, '2' valid */
+	u64  new_aes_mkvp;	/* truncated sha256 of new aes master key */
+	u64  cur_aes_mkvp;	/* truncated sha256 of current aes master key */
+	u64  old_aes_mkvp;	/* truncated sha256 of old aes master key */
+	u64  new_apka_mkvp;	/* truncated sha256 of new apka master key */
+	u64  cur_apka_mkvp;	/* truncated sha256 of current apka mk */
+	u64  old_apka_mkvp;	/* truncated sha256 of old apka mk */
+	char serial[9];		/* serial number (8 ascii numbers + 0x00) */
 };
 
 /*
diff --git a/drivers/s390/crypto/zcrypt_cex2a.c b/drivers/s390/crypto/zcrypt_cex2a.c
index 7cbb384..226a561 100644
--- a/drivers/s390/crypto/zcrypt_cex2a.c
+++ b/drivers/s390/crypto/zcrypt_cex2a.c
@@ -94,8 +94,7 @@
 	if (ac->ap_dev.device_type == AP_DEVICE_TYPE_CEX2A) {
 		zc->min_mod_size = CEX2A_MIN_MOD_SIZE;
 		zc->max_mod_size = CEX2A_MAX_MOD_SIZE;
-		memcpy(zc->speed_rating, CEX2A_SPEED_IDX,
-		       sizeof(CEX2A_SPEED_IDX));
+		zc->speed_rating = CEX2A_SPEED_IDX;
 		zc->max_exp_bit_length = CEX2A_MAX_MOD_SIZE;
 		zc->type_string = "CEX2A";
 		zc->user_space_type = ZCRYPT_CEX2A;
@@ -108,8 +107,7 @@
 			zc->max_mod_size = CEX3A_MAX_MOD_SIZE;
 			zc->max_exp_bit_length = CEX3A_MAX_MOD_SIZE;
 		}
-		memcpy(zc->speed_rating, CEX3A_SPEED_IDX,
-		       sizeof(CEX3A_SPEED_IDX));
+		zc->speed_rating = CEX3A_SPEED_IDX;
 		zc->type_string = "CEX3A";
 		zc->user_space_type = ZCRYPT_CEX3A;
 	} else {
@@ -204,8 +202,6 @@
 static struct ap_driver zcrypt_cex2a_queue_driver = {
 	.probe = zcrypt_cex2a_queue_probe,
 	.remove = zcrypt_cex2a_queue_remove,
-	.suspend = ap_queue_suspend,
-	.resume = ap_queue_resume,
 	.ids = zcrypt_cex2a_queue_ids,
 	.flags = AP_DRIVER_FLAG_DEFAULT,
 };
diff --git a/drivers/s390/crypto/zcrypt_cex2c.c b/drivers/s390/crypto/zcrypt_cex2c.c
index c78c0d1..7a8cbdb 100644
--- a/drivers/s390/crypto/zcrypt_cex2c.c
+++ b/drivers/s390/crypto/zcrypt_cex2c.c
@@ -25,6 +25,7 @@
 #include "zcrypt_msgtype6.h"
 #include "zcrypt_cex2c.h"
 #include "zcrypt_cca_key.h"
+#include "zcrypt_ccamisc.h"
 
 #define CEX2C_MIN_MOD_SIZE	 16	/*  128 bits	*/
 #define CEX2C_MAX_MOD_SIZE	256	/* 2048 bits	*/
@@ -58,6 +59,118 @@
 
 MODULE_DEVICE_TABLE(ap, zcrypt_cex2c_queue_ids);
 
+/*
+ * CCA card additional device attributes
+ */
+static ssize_t cca_serialnr_show(struct device *dev,
+				 struct device_attribute *attr,
+				 char *buf)
+{
+	struct cca_info ci;
+	struct ap_card *ac = to_ap_card(dev);
+	struct zcrypt_card *zc = ac->private;
+
+	memset(&ci, 0, sizeof(ci));
+
+	if (ap_domain_index >= 0)
+		cca_get_info(ac->id, ap_domain_index, &ci, zc->online);
+
+	return scnprintf(buf, PAGE_SIZE, "%s\n", ci.serial);
+}
+
+static struct device_attribute dev_attr_cca_serialnr =
+	__ATTR(serialnr, 0444, cca_serialnr_show, NULL);
+
+static struct attribute *cca_card_attrs[] = {
+	&dev_attr_cca_serialnr.attr,
+	NULL,
+};
+
+static const struct attribute_group cca_card_attr_grp = {
+	.attrs = cca_card_attrs,
+};
+
+ /*
+  * CCA queue additional device attributes
+  */
+static ssize_t cca_mkvps_show(struct device *dev,
+			      struct device_attribute *attr,
+			      char *buf)
+{
+	int n = 0;
+	struct cca_info ci;
+	struct zcrypt_queue *zq = to_ap_queue(dev)->private;
+	static const char * const cao_state[] = { "invalid", "valid" };
+	static const char * const new_state[] = { "empty", "partial", "full" };
+
+	memset(&ci, 0, sizeof(ci));
+
+	cca_get_info(AP_QID_CARD(zq->queue->qid),
+		     AP_QID_QUEUE(zq->queue->qid),
+		     &ci, zq->online);
+
+	if (ci.new_aes_mk_state >= '1' && ci.new_aes_mk_state <= '3')
+		n = scnprintf(buf, PAGE_SIZE, "AES NEW: %s 0x%016llx\n",
+			      new_state[ci.new_aes_mk_state - '1'],
+			      ci.new_aes_mkvp);
+	else
+		n = scnprintf(buf, PAGE_SIZE, "AES NEW: - -\n");
+
+	if (ci.cur_aes_mk_state >= '1' && ci.cur_aes_mk_state <= '2')
+		n += scnprintf(buf + n, PAGE_SIZE - n,
+			       "AES CUR: %s 0x%016llx\n",
+			       cao_state[ci.cur_aes_mk_state - '1'],
+			       ci.cur_aes_mkvp);
+	else
+		n += scnprintf(buf + n, PAGE_SIZE - n, "AES CUR: - -\n");
+
+	if (ci.old_aes_mk_state >= '1' && ci.old_aes_mk_state <= '2')
+		n += scnprintf(buf + n, PAGE_SIZE - n,
+			       "AES OLD: %s 0x%016llx\n",
+			       cao_state[ci.old_aes_mk_state - '1'],
+			       ci.old_aes_mkvp);
+	else
+		n += scnprintf(buf + n, PAGE_SIZE - n, "AES OLD: - -\n");
+
+	if (ci.new_apka_mk_state >= '1' && ci.new_apka_mk_state <= '3')
+		n += scnprintf(buf + n, PAGE_SIZE - n,
+			       "APKA NEW: %s 0x%016llx\n",
+			       new_state[ci.new_apka_mk_state - '1'],
+			       ci.new_apka_mkvp);
+	else
+		n += scnprintf(buf + n, PAGE_SIZE - n, "APKA NEW: - -\n");
+
+	if (ci.cur_apka_mk_state >= '1' && ci.cur_apka_mk_state <= '2')
+		n += scnprintf(buf + n, PAGE_SIZE - n,
+			       "APKA CUR: %s 0x%016llx\n",
+			       cao_state[ci.cur_apka_mk_state - '1'],
+			       ci.cur_apka_mkvp);
+	else
+		n += scnprintf(buf + n, PAGE_SIZE - n, "APKA CUR: - -\n");
+
+	if (ci.old_apka_mk_state >= '1' && ci.old_apka_mk_state <= '2')
+		n += scnprintf(buf + n, PAGE_SIZE - n,
+			       "APKA OLD: %s 0x%016llx\n",
+			       cao_state[ci.old_apka_mk_state - '1'],
+			       ci.old_apka_mkvp);
+	else
+		n += scnprintf(buf + n, PAGE_SIZE - n, "APKA OLD: - -\n");
+
+	return n;
+}
+
+static struct device_attribute dev_attr_cca_mkvps =
+	__ATTR(mkvps, 0444, cca_mkvps_show, NULL);
+
+static struct attribute *cca_queue_attrs[] = {
+	&dev_attr_cca_mkvps.attr,
+	NULL,
+};
+
+static const struct attribute_group cca_queue_attr_grp = {
+	.attrs = cca_queue_attrs,
+};
+
 /**
  * Large random number detection function. It sends a message to a CEX2C/CEX3C
  * card to find out if large random numbers are supported.
@@ -87,24 +200,23 @@
 	int rc, i;
 
 	ap_init_message(&ap_msg);
-	ap_msg.message = (void *) get_zeroed_page(GFP_KERNEL);
-	if (!ap_msg.message)
+	ap_msg.msg = (void *) get_zeroed_page(GFP_KERNEL);
+	if (!ap_msg.msg)
 		return -ENOMEM;
 
 	rng_type6CPRB_msgX(&ap_msg, 4, &domain);
 
-	msg = ap_msg.message;
+	msg = ap_msg.msg;
 	msg->cprbx.domain = AP_QID_QUEUE(aq->qid);
 
-	rc = ap_send(aq->qid, 0x0102030405060708ULL, ap_msg.message,
-		     ap_msg.length);
+	rc = ap_send(aq->qid, 0x0102030405060708ULL, ap_msg.msg, ap_msg.len);
 	if (rc)
 		goto out_free;
 
 	/* Wait for the test message to complete. */
 	for (i = 0; i < 2 * HZ; i++) {
 		msleep(1000 / HZ);
-		rc = ap_recv(aq->qid, &psmid, ap_msg.message, 4096);
+		rc = ap_recv(aq->qid, &psmid, ap_msg.msg, 4096);
 		if (rc == 0 && psmid == 0x0102030405060708ULL)
 			break;
 	}
@@ -115,13 +227,13 @@
 		goto out_free;
 	}
 
-	reply = ap_msg.message;
+	reply = ap_msg.msg;
 	if (reply->cprbx.ccp_rtcode == 0 && reply->cprbx.ccp_rscode == 0)
 		rc = 1;
 	else
 		rc = 0;
 out_free:
-	free_page((unsigned long) ap_msg.message);
+	free_page((unsigned long) ap_msg.msg);
 	return rc;
 }
 
@@ -154,8 +266,7 @@
 	case AP_DEVICE_TYPE_CEX2C:
 		zc->user_space_type = ZCRYPT_CEX2C;
 		zc->type_string = "CEX2C";
-		memcpy(zc->speed_rating, CEX2C_SPEED_IDX,
-		       sizeof(CEX2C_SPEED_IDX));
+		zc->speed_rating = CEX2C_SPEED_IDX;
 		zc->min_mod_size = CEX2C_MIN_MOD_SIZE;
 		zc->max_mod_size = CEX2C_MAX_MOD_SIZE;
 		zc->max_exp_bit_length = CEX2C_MAX_MOD_SIZE;
@@ -163,8 +274,7 @@
 	case AP_DEVICE_TYPE_CEX3C:
 		zc->user_space_type = ZCRYPT_CEX3C;
 		zc->type_string = "CEX3C";
-		memcpy(zc->speed_rating, CEX3C_SPEED_IDX,
-		       sizeof(CEX3C_SPEED_IDX));
+		zc->speed_rating = CEX3C_SPEED_IDX;
 		zc->min_mod_size = CEX3C_MIN_MOD_SIZE;
 		zc->max_mod_size = CEX3C_MAX_MOD_SIZE;
 		zc->max_exp_bit_length = CEX3C_MAX_MOD_SIZE;
@@ -179,6 +289,17 @@
 	if (rc) {
 		ac->private = NULL;
 		zcrypt_card_free(zc);
+		return rc;
+	}
+
+	if (ap_test_bit(&ac->functions, AP_FUNC_COPRO)) {
+		rc = sysfs_create_group(&ap_dev->device.kobj,
+					&cca_card_attr_grp);
+		if (rc) {
+			zcrypt_card_unregister(zc);
+			ac->private = NULL;
+			zcrypt_card_free(zc);
+		}
 	}
 
 	return rc;
@@ -190,8 +311,11 @@
  */
 static void zcrypt_cex2c_card_remove(struct ap_device *ap_dev)
 {
+	struct ap_card *ac = to_ap_card(&ap_dev->device);
 	struct zcrypt_card *zc = to_ap_card(&ap_dev->device)->private;
 
+	if (ap_test_bit(&ac->functions, AP_FUNC_COPRO))
+		sysfs_remove_group(&ap_dev->device.kobj, &cca_card_attr_grp);
 	if (zc)
 		zcrypt_card_unregister(zc);
 }
@@ -240,7 +364,19 @@
 	if (rc) {
 		aq->private = NULL;
 		zcrypt_queue_free(zq);
+		return rc;
 	}
+
+	if (ap_test_bit(&aq->card->functions, AP_FUNC_COPRO)) {
+		rc = sysfs_create_group(&ap_dev->device.kobj,
+					&cca_queue_attr_grp);
+		if (rc) {
+			zcrypt_queue_unregister(zq);
+			aq->private = NULL;
+			zcrypt_queue_free(zq);
+		}
+	}
+
 	return rc;
 }
 
@@ -253,6 +389,8 @@
 	struct ap_queue *aq = to_ap_queue(&ap_dev->device);
 	struct zcrypt_queue *zq = aq->private;
 
+	if (ap_test_bit(&aq->card->functions, AP_FUNC_COPRO))
+		sysfs_remove_group(&ap_dev->device.kobj, &cca_queue_attr_grp);
 	if (zq)
 		zcrypt_queue_unregister(zq);
 }
@@ -260,8 +398,6 @@
 static struct ap_driver zcrypt_cex2c_queue_driver = {
 	.probe = zcrypt_cex2c_queue_probe,
 	.remove = zcrypt_cex2c_queue_remove,
-	.suspend = ap_queue_suspend,
-	.resume = ap_queue_resume,
 	.ids = zcrypt_cex2c_queue_ids,
 	.flags = AP_DRIVER_FLAG_DEFAULT,
 };
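
The new per-queue mkvps attribute exposes both the AES and the APKA master key registers. Reading it through sysfs yields six lines in the format assembled above; the verification pattern values shown here are made-up placeholders for illustration only:

	AES NEW: empty 0x0000000000000000
	AES CUR: valid 0x1122334455667788
	AES OLD: - -
	APKA NEW: empty 0x0000000000000000
	APKA CUR: valid 0x99aabbccddeeff00
	APKA OLD: - -
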
diff --git a/drivers/s390/crypto/zcrypt_cex4.c b/drivers/s390/crypto/zcrypt_cex4.c
index 6fabc90..f5195bc 100644
--- a/drivers/s390/crypto/zcrypt_cex4.c
+++ b/drivers/s390/crypto/zcrypt_cex4.c
@@ -19,6 +19,7 @@
 #include "zcrypt_error.h"
 #include "zcrypt_cex4.h"
 #include "zcrypt_ccamisc.h"
+#include "zcrypt_ep11misc.h"
 
 #define CEX4A_MIN_MOD_SIZE	  1	/*    8 bits	*/
 #define CEX4A_MAX_MOD_SIZE_2K	256	/* 2048 bits	*/
@@ -71,11 +72,11 @@
 MODULE_DEVICE_TABLE(ap, zcrypt_cex4_queue_ids);
 
 /*
- * CCA card addditional device attributes
+ * CCA card additional device attributes
  */
-static ssize_t serialnr_show(struct device *dev,
-			     struct device_attribute *attr,
-			     char *buf)
+static ssize_t cca_serialnr_show(struct device *dev,
+				 struct device_attribute *attr,
+				 char *buf)
 {
 	struct cca_info ci;
 	struct ap_card *ac = to_ap_card(dev);
@@ -86,25 +87,27 @@
 	if (ap_domain_index >= 0)
 		cca_get_info(ac->id, ap_domain_index, &ci, zc->online);
 
-	return snprintf(buf, PAGE_SIZE, "%s\n", ci.serial);
+	return scnprintf(buf, PAGE_SIZE, "%s\n", ci.serial);
 }
-static DEVICE_ATTR_RO(serialnr);
+
+static struct device_attribute dev_attr_cca_serialnr =
+	__ATTR(serialnr, 0444, cca_serialnr_show, NULL);
 
 static struct attribute *cca_card_attrs[] = {
-	&dev_attr_serialnr.attr,
+	&dev_attr_cca_serialnr.attr,
 	NULL,
 };
 
-static const struct attribute_group cca_card_attr_group = {
+static const struct attribute_group cca_card_attr_grp = {
 	.attrs = cca_card_attrs,
 };
 
-/*
- * CCA queue addditional device attributes
- */
-static ssize_t mkvps_show(struct device *dev,
-			  struct device_attribute *attr,
-			  char *buf)
+ /*
+  * CCA queue additional device attributes
+  */
+static ssize_t cca_mkvps_show(struct device *dev,
+			      struct device_attribute *attr,
+			      char *buf)
 {
 	int n = 0;
 	struct cca_info ci;
@@ -118,37 +121,282 @@
 		     AP_QID_QUEUE(zq->queue->qid),
 		     &ci, zq->online);
 
-	if (ci.new_mk_state >= '1' && ci.new_mk_state <= '3')
-		n = snprintf(buf, PAGE_SIZE, "AES NEW: %s 0x%016llx\n",
-			     new_state[ci.new_mk_state - '1'], ci.new_mkvp);
+	if (ci.new_aes_mk_state >= '1' && ci.new_aes_mk_state <= '3')
+		n = scnprintf(buf, PAGE_SIZE, "AES NEW: %s 0x%016llx\n",
+			      new_state[ci.new_aes_mk_state - '1'],
+			      ci.new_aes_mkvp);
 	else
-		n = snprintf(buf, PAGE_SIZE, "AES NEW: - -\n");
+		n = scnprintf(buf, PAGE_SIZE, "AES NEW: - -\n");
 
-	if (ci.cur_mk_state >= '1' && ci.cur_mk_state <= '2')
-		n += snprintf(buf + n, PAGE_SIZE - n, "AES CUR: %s 0x%016llx\n",
-			      cao_state[ci.cur_mk_state - '1'], ci.cur_mkvp);
+	if (ci.cur_aes_mk_state >= '1' && ci.cur_aes_mk_state <= '2')
+		n += scnprintf(buf + n, PAGE_SIZE - n,
+			       "AES CUR: %s 0x%016llx\n",
+			       cao_state[ci.cur_aes_mk_state - '1'],
+			       ci.cur_aes_mkvp);
 	else
-		n += snprintf(buf + n, PAGE_SIZE - n, "AES CUR: - -\n");
+		n += scnprintf(buf + n, PAGE_SIZE - n, "AES CUR: - -\n");
 
-	if (ci.old_mk_state >= '1' && ci.old_mk_state <= '2')
-		n += snprintf(buf + n, PAGE_SIZE - n, "AES OLD: %s 0x%016llx\n",
-			      cao_state[ci.old_mk_state - '1'], ci.old_mkvp);
+	if (ci.old_aes_mk_state >= '1' && ci.old_aes_mk_state <= '2')
+		n += scnprintf(buf + n, PAGE_SIZE - n,
+			       "AES OLD: %s 0x%016llx\n",
+			       cao_state[ci.old_aes_mk_state - '1'],
+			       ci.old_aes_mkvp);
 	else
-		n += snprintf(buf + n, PAGE_SIZE - n, "AES OLD: - -\n");
+		n += scnprintf(buf + n, PAGE_SIZE - n, "AES OLD: - -\n");
+
+	if (ci.new_apka_mk_state >= '1' && ci.new_apka_mk_state <= '3')
+		n += scnprintf(buf + n, PAGE_SIZE - n,
+			       "APKA NEW: %s 0x%016llx\n",
+			       new_state[ci.new_apka_mk_state - '1'],
+			       ci.new_apka_mkvp);
+	else
+		n += scnprintf(buf + n, PAGE_SIZE - n, "APKA NEW: - -\n");
+
+	if (ci.cur_apka_mk_state >= '1' && ci.cur_apka_mk_state <= '2')
+		n += scnprintf(buf + n, PAGE_SIZE - n,
+			       "APKA CUR: %s 0x%016llx\n",
+			       cao_state[ci.cur_apka_mk_state - '1'],
+			       ci.cur_apka_mkvp);
+	else
+		n += scnprintf(buf + n, PAGE_SIZE - n, "APKA CUR: - -\n");
+
+	if (ci.old_apka_mk_state >= '1' && ci.old_apka_mk_state <= '2')
+		n += scnprintf(buf + n, PAGE_SIZE - n,
+			       "APKA OLD: %s 0x%016llx\n",
+			       cao_state[ci.old_apka_mk_state - '1'],
+			       ci.old_apka_mkvp);
+	else
+		n += scnprintf(buf + n, PAGE_SIZE - n, "APKA OLD: - -\n");
 
 	return n;
 }
-static DEVICE_ATTR_RO(mkvps);
+
+static struct device_attribute dev_attr_cca_mkvps =
+	__ATTR(mkvps, 0444, cca_mkvps_show, NULL);
 
 static struct attribute *cca_queue_attrs[] = {
-	&dev_attr_mkvps.attr,
+	&dev_attr_cca_mkvps.attr,
 	NULL,
 };
 
-static const struct attribute_group cca_queue_attr_group = {
+static const struct attribute_group cca_queue_attr_grp = {
 	.attrs = cca_queue_attrs,
 };
 
+/*
+ * EP11 card additional device attributes
+ */
+static ssize_t ep11_api_ordinalnr_show(struct device *dev,
+				       struct device_attribute *attr,
+				       char *buf)
+{
+	struct ep11_card_info ci;
+	struct ap_card *ac = to_ap_card(dev);
+	struct zcrypt_card *zc = ac->private;
+
+	memset(&ci, 0, sizeof(ci));
+
+	ep11_get_card_info(ac->id, &ci, zc->online);
+
+	if (ci.API_ord_nr > 0)
+		return scnprintf(buf, PAGE_SIZE, "%u\n", ci.API_ord_nr);
+	else
+		return scnprintf(buf, PAGE_SIZE, "\n");
+}
+
+static struct device_attribute dev_attr_ep11_api_ordinalnr =
+	__ATTR(API_ordinalnr, 0444, ep11_api_ordinalnr_show, NULL);
+
+static ssize_t ep11_fw_version_show(struct device *dev,
+				    struct device_attribute *attr,
+				    char *buf)
+{
+	struct ep11_card_info ci;
+	struct ap_card *ac = to_ap_card(dev);
+	struct zcrypt_card *zc = ac->private;
+
+	memset(&ci, 0, sizeof(ci));
+
+	ep11_get_card_info(ac->id, &ci, zc->online);
+
+	if (ci.FW_version > 0)
+		return scnprintf(buf, PAGE_SIZE, "%d.%d\n",
+				 (int)(ci.FW_version >> 8),
+				 (int)(ci.FW_version & 0xFF));
+	else
+		return scnprintf(buf, PAGE_SIZE, "\n");
+}
+
+static struct device_attribute dev_attr_ep11_fw_version =
+	__ATTR(FW_version, 0444, ep11_fw_version_show, NULL);
+
+static ssize_t ep11_serialnr_show(struct device *dev,
+				  struct device_attribute *attr,
+				  char *buf)
+{
+	struct ep11_card_info ci;
+	struct ap_card *ac = to_ap_card(dev);
+	struct zcrypt_card *zc = ac->private;
+
+	memset(&ci, 0, sizeof(ci));
+
+	ep11_get_card_info(ac->id, &ci, zc->online);
+
+	if (ci.serial[0])
+		return scnprintf(buf, PAGE_SIZE, "%16.16s\n", ci.serial);
+	else
+		return scnprintf(buf, PAGE_SIZE, "\n");
+}
+
+static struct device_attribute dev_attr_ep11_serialnr =
+	__ATTR(serialnr, 0444, ep11_serialnr_show, NULL);
+
+static const struct {
+	int	    mode_bit;
+	const char *mode_txt;
+} ep11_op_modes[] = {
+	{ 0, "FIPS2009" },
+	{ 1, "BSI2009" },
+	{ 2, "FIPS2011" },
+	{ 3, "BSI2011" },
+	{ 6, "BSICC2017" },
+	{ 0, NULL }
+};
+
+static ssize_t ep11_card_op_modes_show(struct device *dev,
+				       struct device_attribute *attr,
+				       char *buf)
+{
+	int i, n = 0;
+	struct ep11_card_info ci;
+	struct ap_card *ac = to_ap_card(dev);
+	struct zcrypt_card *zc = ac->private;
+
+	memset(&ci, 0, sizeof(ci));
+
+	ep11_get_card_info(ac->id, &ci, zc->online);
+
+	for (i = 0; ep11_op_modes[i].mode_txt; i++) {
+		if (ci.op_mode & (1ULL << ep11_op_modes[i].mode_bit)) {
+			if (n > 0)
+				buf[n++] = ' ';
+			n += scnprintf(buf + n, PAGE_SIZE - n,
+				       "%s", ep11_op_modes[i].mode_txt);
+		}
+	}
+	n += scnprintf(buf + n, PAGE_SIZE - n, "\n");
+
+	return n;
+}
+
+static struct device_attribute dev_attr_ep11_card_op_modes =
+	__ATTR(op_modes, 0444, ep11_card_op_modes_show, NULL);
+
+static struct attribute *ep11_card_attrs[] = {
+	&dev_attr_ep11_api_ordinalnr.attr,
+	&dev_attr_ep11_fw_version.attr,
+	&dev_attr_ep11_serialnr.attr,
+	&dev_attr_ep11_card_op_modes.attr,
+	NULL,
+};
+
+static const struct attribute_group ep11_card_attr_grp = {
+	.attrs = ep11_card_attrs,
+};
+
+/*
+ * EP11 queue additional device attributes
+ */
+
+static ssize_t ep11_mkvps_show(struct device *dev,
+			       struct device_attribute *attr,
+			       char *buf)
+{
+	int n = 0;
+	struct ep11_domain_info di;
+	struct zcrypt_queue *zq = to_ap_queue(dev)->private;
+	static const char * const cwk_state[] = { "invalid", "valid" };
+	static const char * const nwk_state[] = { "empty", "uncommitted",
+						  "committed" };
+
+	memset(&di, 0, sizeof(di));
+
+	if (zq->online)
+		ep11_get_domain_info(AP_QID_CARD(zq->queue->qid),
+				     AP_QID_QUEUE(zq->queue->qid),
+				     &di);
+
+	if (di.cur_wk_state == '0') {
+		n = scnprintf(buf, PAGE_SIZE, "WK CUR: %s -\n",
+			      cwk_state[di.cur_wk_state - '0']);
+	} else if (di.cur_wk_state == '1') {
+		n = scnprintf(buf, PAGE_SIZE, "WK CUR: %s 0x",
+			      cwk_state[di.cur_wk_state - '0']);
+		bin2hex(buf + n, di.cur_wkvp, sizeof(di.cur_wkvp));
+		n += 2 * sizeof(di.cur_wkvp);
+		n += scnprintf(buf + n, PAGE_SIZE - n, "\n");
+	} else
+		n = scnprintf(buf, PAGE_SIZE, "WK CUR: - -\n");
+
+	if (di.new_wk_state == '0') {
+		n += scnprintf(buf + n, PAGE_SIZE - n, "WK NEW: %s -\n",
+			       nwk_state[di.new_wk_state - '0']);
+	} else if (di.new_wk_state >= '1' && di.new_wk_state <= '2') {
+		n += scnprintf(buf + n, PAGE_SIZE - n, "WK NEW: %s 0x",
+			       nwk_state[di.new_wk_state - '0']);
+		bin2hex(buf + n, di.new_wkvp, sizeof(di.new_wkvp));
+		n += 2 * sizeof(di.new_wkvp);
+		n += scnprintf(buf + n, PAGE_SIZE - n, "\n");
+	} else
+		n += scnprintf(buf + n, PAGE_SIZE - n, "WK NEW: - -\n");
+
+	return n;
+}
+
+static struct device_attribute dev_attr_ep11_mkvps =
+	__ATTR(mkvps, 0444, ep11_mkvps_show, NULL);
+
+static ssize_t ep11_queue_op_modes_show(struct device *dev,
+					struct device_attribute *attr,
+					char *buf)
+{
+	int i, n = 0;
+	struct ep11_domain_info di;
+	struct zcrypt_queue *zq = to_ap_queue(dev)->private;
+
+	memset(&di, 0, sizeof(di));
+
+	if (zq->online)
+		ep11_get_domain_info(AP_QID_CARD(zq->queue->qid),
+				     AP_QID_QUEUE(zq->queue->qid),
+				     &di);
+
+	for (i = 0; ep11_op_modes[i].mode_txt; i++) {
+		if (di.op_mode & (1ULL << ep11_op_modes[i].mode_bit)) {
+			if (n > 0)
+				buf[n++] = ' ';
+			n += scnprintf(buf + n, PAGE_SIZE - n,
+				       "%s", ep11_op_modes[i].mode_txt);
+		}
+	}
+	n += scnprintf(buf + n, PAGE_SIZE - n, "\n");
+
+	return n;
+}
+
+static struct device_attribute dev_attr_ep11_queue_op_modes =
+	__ATTR(op_modes, 0444, ep11_queue_op_modes_show, NULL);
+
+static struct attribute *ep11_queue_attrs[] = {
+	&dev_attr_ep11_mkvps.attr,
+	&dev_attr_ep11_queue_op_modes.attr,
+	NULL,
+};
+
+static const struct attribute_group ep11_queue_attr_grp = {
+	.attrs = ep11_queue_attrs,
+};
+
 /**
  * Probe function for CEX4/CEX5/CEX6/CEX7 card device. It always
  * accepts the AP device since the bus_match already checked
@@ -161,31 +409,31 @@
 	 * Normalized speed ratings per crypto adapter
 	 * MEX_1k, MEX_2k, MEX_4k, CRT_1k, CRT_2k, CRT_4k, RNG, SECKEY
 	 */
-	static const int CEX4A_SPEED_IDX[] = {
+	static const int CEX4A_SPEED_IDX[NUM_OPS] = {
 		 14,  19, 249, 42, 228, 1458, 0, 0};
-	static const int CEX5A_SPEED_IDX[] = {
+	static const int CEX5A_SPEED_IDX[NUM_OPS] = {
 		  8,   9,  20, 18,  66,	 458, 0, 0};
-	static const int CEX6A_SPEED_IDX[] = {
+	static const int CEX6A_SPEED_IDX[NUM_OPS] = {
 		  6,   9,  20, 17,  65,	 438, 0, 0};
-	static const int CEX7A_SPEED_IDX[] = {
+	static const int CEX7A_SPEED_IDX[NUM_OPS] = {
 		  6,   8,  17, 15,  54,	 362, 0, 0};
 
-	static const int CEX4C_SPEED_IDX[] = {
+	static const int CEX4C_SPEED_IDX[NUM_OPS] = {
 		 59,  69, 308, 83, 278, 2204, 209, 40};
 	static const int CEX5C_SPEED_IDX[] = {
 		 24,  31,  50, 37,  90,	 479,  27, 10};
-	static const int CEX6C_SPEED_IDX[] = {
+	static const int CEX6C_SPEED_IDX[NUM_OPS] = {
 		 16,  20,  32, 27,  77,	 455,  24,  9};
-	static const int CEX7C_SPEED_IDX[] = {
+	static const int CEX7C_SPEED_IDX[NUM_OPS] = {
 		 14,  16,  26, 23,  64,	 376,  23,  8};
 
-	static const int CEX4P_SPEED_IDX[] = {
+	static const int CEX4P_SPEED_IDX[NUM_OPS] = {
 		  0,   0,   0,	 0,   0,   0,	0,  50};
-	static const int CEX5P_SPEED_IDX[] = {
+	static const int CEX5P_SPEED_IDX[NUM_OPS] = {
 		  0,   0,   0,	 0,   0,   0,	0,  10};
-	static const int CEX6P_SPEED_IDX[] = {
+	static const int CEX6P_SPEED_IDX[NUM_OPS] = {
 		  0,   0,   0,	 0,   0,   0,	0,   9};
-	static const int CEX7P_SPEED_IDX[] = {
+	static const int CEX7P_SPEED_IDX[NUM_OPS] = {
 		  0,   0,   0,	 0,   0,   0,	0,   8};
 
 	struct ap_card *ac = to_ap_card(&ap_dev->device);
@@ -201,26 +449,22 @@
 		if (ac->ap_dev.device_type == AP_DEVICE_TYPE_CEX4) {
 			zc->type_string = "CEX4A";
 			zc->user_space_type = ZCRYPT_CEX4;
-			memcpy(zc->speed_rating, CEX4A_SPEED_IDX,
-			       sizeof(CEX4A_SPEED_IDX));
+			zc->speed_rating = CEX4A_SPEED_IDX;
 		} else if (ac->ap_dev.device_type == AP_DEVICE_TYPE_CEX5) {
 			zc->type_string = "CEX5A";
 			zc->user_space_type = ZCRYPT_CEX5;
-			memcpy(zc->speed_rating, CEX5A_SPEED_IDX,
-			       sizeof(CEX5A_SPEED_IDX));
+			zc->speed_rating = CEX5A_SPEED_IDX;
 		} else if (ac->ap_dev.device_type == AP_DEVICE_TYPE_CEX6) {
 			zc->type_string = "CEX6A";
 			zc->user_space_type = ZCRYPT_CEX6;
-			memcpy(zc->speed_rating, CEX6A_SPEED_IDX,
-			       sizeof(CEX6A_SPEED_IDX));
+			zc->speed_rating = CEX6A_SPEED_IDX;
 		} else {
 			zc->type_string = "CEX7A";
 			/* wrong user space type, just for compatibility
 			 * with the ZCRYPT_STATUS_MASK ioctl.
 			 */
 			zc->user_space_type = ZCRYPT_CEX6;
-			memcpy(zc->speed_rating, CEX7A_SPEED_IDX,
-			       sizeof(CEX7A_SPEED_IDX));
+			zc->speed_rating = CEX7A_SPEED_IDX;
 		}
 		zc->min_mod_size = CEX4A_MIN_MOD_SIZE;
 		if (ap_test_bit(&ac->functions, AP_FUNC_MEX4K) &&
@@ -240,32 +484,28 @@
 			 * just keep it for cca compatibility
 			 */
 			zc->user_space_type = ZCRYPT_CEX3C;
-			memcpy(zc->speed_rating, CEX4C_SPEED_IDX,
-			       sizeof(CEX4C_SPEED_IDX));
+			zc->speed_rating = CEX4C_SPEED_IDX;
 		} else if (ac->ap_dev.device_type == AP_DEVICE_TYPE_CEX5) {
 			zc->type_string = "CEX5C";
 			/* wrong user space type, must be CEX5
 			 * just keep it for cca compatibility
 			 */
 			zc->user_space_type = ZCRYPT_CEX3C;
-			memcpy(zc->speed_rating, CEX5C_SPEED_IDX,
-			       sizeof(CEX5C_SPEED_IDX));
+			zc->speed_rating = CEX5C_SPEED_IDX;
 		} else if (ac->ap_dev.device_type == AP_DEVICE_TYPE_CEX6) {
 			zc->type_string = "CEX6C";
 			/* wrong user space type, must be CEX6
 			 * just keep it for cca compatibility
 			 */
 			zc->user_space_type = ZCRYPT_CEX3C;
-			memcpy(zc->speed_rating, CEX6C_SPEED_IDX,
-			       sizeof(CEX6C_SPEED_IDX));
+			zc->speed_rating = CEX6C_SPEED_IDX;
 		} else {
 			zc->type_string = "CEX7C";
 			/* wrong user space type, must be CEX7
 			 * just keep it for cca compatibility
 			 */
 			zc->user_space_type = ZCRYPT_CEX3C;
-			memcpy(zc->speed_rating, CEX7C_SPEED_IDX,
-			       sizeof(CEX7C_SPEED_IDX));
+			zc->speed_rating = CEX7C_SPEED_IDX;
 		}
 		zc->min_mod_size = CEX4C_MIN_MOD_SIZE;
 		zc->max_mod_size = CEX4C_MAX_MOD_SIZE;
@@ -274,26 +514,22 @@
 		if (ac->ap_dev.device_type == AP_DEVICE_TYPE_CEX4) {
 			zc->type_string = "CEX4P";
 			zc->user_space_type = ZCRYPT_CEX4;
-			memcpy(zc->speed_rating, CEX4P_SPEED_IDX,
-			       sizeof(CEX4P_SPEED_IDX));
+			zc->speed_rating = CEX4P_SPEED_IDX;
 		} else if (ac->ap_dev.device_type == AP_DEVICE_TYPE_CEX5) {
 			zc->type_string = "CEX5P";
 			zc->user_space_type = ZCRYPT_CEX5;
-			memcpy(zc->speed_rating, CEX5P_SPEED_IDX,
-			       sizeof(CEX5P_SPEED_IDX));
+			zc->speed_rating = CEX5P_SPEED_IDX;
 		} else if (ac->ap_dev.device_type == AP_DEVICE_TYPE_CEX6) {
 			zc->type_string = "CEX6P";
 			zc->user_space_type = ZCRYPT_CEX6;
-			memcpy(zc->speed_rating, CEX6P_SPEED_IDX,
-			       sizeof(CEX6P_SPEED_IDX));
+			zc->speed_rating = CEX6P_SPEED_IDX;
 		} else {
 			zc->type_string = "CEX7P";
 			/* wrong user space type, just for compatibility
 			 * with the ZCRYPT_STATUS_MASK ioctl.
 			 */
 			zc->user_space_type = ZCRYPT_CEX6;
-			memcpy(zc->speed_rating, CEX7P_SPEED_IDX,
-			       sizeof(CEX7P_SPEED_IDX));
+			zc->speed_rating = CEX7P_SPEED_IDX;
 		}
 		zc->min_mod_size = CEX4C_MIN_MOD_SIZE;
 		zc->max_mod_size = CEX4C_MAX_MOD_SIZE;
@@ -308,17 +544,27 @@
 	if (rc) {
 		ac->private = NULL;
 		zcrypt_card_free(zc);
-		goto out;
+		return rc;
 	}
 
 	if (ap_test_bit(&ac->functions, AP_FUNC_COPRO)) {
 		rc = sysfs_create_group(&ap_dev->device.kobj,
-					&cca_card_attr_group);
-		if (rc)
+					&cca_card_attr_grp);
+		if (rc) {
 			zcrypt_card_unregister(zc);
+			ac->private = NULL;
+			zcrypt_card_free(zc);
+		}
+	} else if (ap_test_bit(&ac->functions, AP_FUNC_EP11)) {
+		rc = sysfs_create_group(&ap_dev->device.kobj,
+					&ep11_card_attr_grp);
+		if (rc) {
+			zcrypt_card_unregister(zc);
+			ac->private = NULL;
+			zcrypt_card_free(zc);
+		}
 	}
 
-out:
 	return rc;
 }
 
@@ -332,7 +578,9 @@
 	struct zcrypt_card *zc = ac->private;
 
 	if (ap_test_bit(&ac->functions, AP_FUNC_COPRO))
-		sysfs_remove_group(&ap_dev->device.kobj, &cca_card_attr_group);
+		sysfs_remove_group(&ap_dev->device.kobj, &cca_card_attr_grp);
+	else if (ap_test_bit(&ac->functions, AP_FUNC_EP11))
+		sysfs_remove_group(&ap_dev->device.kobj, &ep11_card_attr_grp);
 	if (zc)
 		zcrypt_card_unregister(zc);
 }
@@ -389,17 +637,27 @@
 	if (rc) {
 		aq->private = NULL;
 		zcrypt_queue_free(zq);
-		goto out;
+		return rc;
 	}
 
 	if (ap_test_bit(&aq->card->functions, AP_FUNC_COPRO)) {
 		rc = sysfs_create_group(&ap_dev->device.kobj,
-					&cca_queue_attr_group);
-		if (rc)
+					&cca_queue_attr_grp);
+		if (rc) {
 			zcrypt_queue_unregister(zq);
+			aq->private = NULL;
+			zcrypt_queue_free(zq);
+		}
+	} else if (ap_test_bit(&aq->card->functions, AP_FUNC_EP11)) {
+		rc = sysfs_create_group(&ap_dev->device.kobj,
+					&ep11_queue_attr_grp);
+		if (rc) {
+			zcrypt_queue_unregister(zq);
+			aq->private = NULL;
+			zcrypt_queue_free(zq);
+		}
 	}
 
-out:
 	return rc;
 }
 
@@ -413,7 +671,9 @@
 	struct zcrypt_queue *zq = aq->private;
 
 	if (ap_test_bit(&aq->card->functions, AP_FUNC_COPRO))
-		sysfs_remove_group(&ap_dev->device.kobj, &cca_queue_attr_group);
+		sysfs_remove_group(&ap_dev->device.kobj, &cca_queue_attr_grp);
+	else if (ap_test_bit(&aq->card->functions, AP_FUNC_EP11))
+		sysfs_remove_group(&ap_dev->device.kobj, &ep11_queue_attr_grp);
 	if (zq)
 		zcrypt_queue_unregister(zq);
 }
@@ -421,8 +681,6 @@
 static struct ap_driver zcrypt_cex4_queue_driver = {
 	.probe = zcrypt_cex4_queue_probe,
 	.remove = zcrypt_cex4_queue_remove,
-	.suspend = ap_queue_suspend,
-	.resume = ap_queue_resume,
 	.ids = zcrypt_cex4_queue_ids,
 	.flags = AP_DRIVER_FLAG_DEFAULT,
 };
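
A brief sketch of how the EP11 card info behind these attributes can be consumed elsewhere (hypothetical caller code, not part of this patch; cardnr is an assumed u16): testing the op_mode bit that the op_modes attribute above reports as "BSICC2017" could look like this:

	struct ep11_card_info ci;

	memset(&ci, 0, sizeof(ci));
	/* verify=1 forces a fresh module info query instead of the cache */
	if (ep11_get_card_info(cardnr, &ci, 1) == 0 &&
	    (ci.op_mode & (1ULL << 6)))	/* bit 6 = "BSICC2017" */
		pr_info("card %d runs in BSICC2017 mode\n", (int) cardnr);
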
diff --git a/drivers/s390/crypto/zcrypt_debug.h b/drivers/s390/crypto/zcrypt_debug.h
index 241dbb5..3225489 100644
--- a/drivers/s390/crypto/zcrypt_debug.h
+++ b/drivers/s390/crypto/zcrypt_debug.h
@@ -21,6 +21,14 @@
 
 #define ZCRYPT_DBF(...)					\
 	debug_sprintf_event(zcrypt_dbf_info, ##__VA_ARGS__)
+#define ZCRYPT_DBF_ERR(...)					\
+	debug_sprintf_event(zcrypt_dbf_info, DBF_ERR, ##__VA_ARGS__)
+#define ZCRYPT_DBF_WARN(...)					\
+	debug_sprintf_event(zcrypt_dbf_info, DBF_WARN, ##__VA_ARGS__)
+#define ZCRYPT_DBF_INFO(...)					\
+	debug_sprintf_event(zcrypt_dbf_info, DBF_INFO, ##__VA_ARGS__)
+#define ZCRYPT_DBF_DBG(...)					\
+	debug_sprintf_event(zcrypt_dbf_info, DBF_DEBUG, ##__VA_ARGS__)
 
 extern debug_info_t *zcrypt_dbf_info;
 
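
The new level-specific wrappers save callers from passing the debug level explicitly. A one-line usage sketch (message text and the rc variable are illustrative, not taken from this patch):

	ZCRYPT_DBF_WARN("%s no eligible queue found, rc=%d\n", __func__, rc);
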
diff --git a/drivers/s390/crypto/zcrypt_ep11misc.c b/drivers/s390/crypto/zcrypt_ep11misc.c
new file mode 100644
index 0000000..9ce5a71
--- /dev/null
+++ b/drivers/s390/crypto/zcrypt_ep11misc.c
@@ -0,0 +1,1470 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ *  Copyright IBM Corp. 2019
+ *  Author(s): Harald Freudenberger <freude@linux.ibm.com>
+ *
+ *  Collection of EP11 misc functions used by zcrypt and pkey
+ */
+
+#define KMSG_COMPONENT "zcrypt"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/random.h>
+#include <asm/zcrypt.h>
+#include <asm/pkey.h>
+#include <crypto/aes.h>
+
+#include "ap_bus.h"
+#include "zcrypt_api.h"
+#include "zcrypt_debug.h"
+#include "zcrypt_msgtype6.h"
+#include "zcrypt_ep11misc.h"
+#include "zcrypt_ccamisc.h"
+
+#define DEBUG_DBG(...)	ZCRYPT_DBF(DBF_DEBUG, ##__VA_ARGS__)
+#define DEBUG_INFO(...) ZCRYPT_DBF(DBF_INFO, ##__VA_ARGS__)
+#define DEBUG_WARN(...) ZCRYPT_DBF(DBF_WARN, ##__VA_ARGS__)
+#define DEBUG_ERR(...)	ZCRYPT_DBF(DBF_ERR, ##__VA_ARGS__)
+
+/* default iv used here */
+static const u8 def_iv[16] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77,
+			       0x88, 0x99, 0xaa, 0xbb, 0xcc, 0xdd, 0xee, 0xff };
+
+/* ep11 card info cache */
+struct card_list_entry {
+	struct list_head list;
+	u16 cardnr;
+	struct ep11_card_info info;
+};
+static LIST_HEAD(card_list);
+static DEFINE_SPINLOCK(card_list_lock);
+
+static int card_cache_fetch(u16 cardnr, struct ep11_card_info *ci)
+{
+	int rc = -ENOENT;
+	struct card_list_entry *ptr;
+
+	spin_lock_bh(&card_list_lock);
+	list_for_each_entry(ptr, &card_list, list) {
+		if (ptr->cardnr == cardnr) {
+			memcpy(ci, &ptr->info, sizeof(*ci));
+			rc = 0;
+			break;
+		}
+	}
+	spin_unlock_bh(&card_list_lock);
+
+	return rc;
+}
+
+static void card_cache_update(u16 cardnr, const struct ep11_card_info *ci)
+{
+	int found = 0;
+	struct card_list_entry *ptr;
+
+	spin_lock_bh(&card_list_lock);
+	list_for_each_entry(ptr, &card_list, list) {
+		if (ptr->cardnr == cardnr) {
+			memcpy(&ptr->info, ci, sizeof(*ci));
+			found = 1;
+			break;
+		}
+	}
+	if (!found) {
+		ptr = kmalloc(sizeof(*ptr), GFP_ATOMIC);
+		if (!ptr) {
+			spin_unlock_bh(&card_list_lock);
+			return;
+		}
+		ptr->cardnr = cardnr;
+		memcpy(&ptr->info, ci, sizeof(*ci));
+		list_add(&ptr->list, &card_list);
+	}
+	spin_unlock_bh(&card_list_lock);
+}
+
+static void card_cache_scrub(u16 cardnr)
+{
+	struct card_list_entry *ptr;
+
+	spin_lock_bh(&card_list_lock);
+	list_for_each_entry(ptr, &card_list, list) {
+		if (ptr->cardnr == cardnr) {
+			list_del(&ptr->list);
+			kfree(ptr);
+			break;
+		}
+	}
+	spin_unlock_bh(&card_list_lock);
+}
+
+static void __exit card_cache_free(void)
+{
+	struct card_list_entry *ptr, *pnext;
+
+	spin_lock_bh(&card_list_lock);
+	list_for_each_entry_safe(ptr, pnext, &card_list, list) {
+		list_del(&ptr->list);
+		kfree(ptr);
+	}
+	spin_unlock_bh(&card_list_lock);
+}
+
+/*
+ * Simple check if the key blob is a valid EP11 AES key blob with header.
+ */
+int ep11_check_aes_key_with_hdr(debug_info_t *dbg, int dbflvl,
+				const u8 *key, size_t keylen, int checkcpacfexp)
+{
+	struct ep11kblob_header *hdr = (struct ep11kblob_header *) key;
+	struct ep11keyblob *kb = (struct ep11keyblob *) (key + sizeof(*hdr));
+
+#define DBF(...) debug_sprintf_event(dbg, dbflvl, ##__VA_ARGS__)
+
+	if (keylen < sizeof(*hdr) + sizeof(*kb)) {
+		DBF("%s key check failed, keylen %zu < %zu\n",
+		    __func__, keylen, sizeof(*hdr) + sizeof(*kb));
+		return -EINVAL;
+	}
+
+	if (hdr->type != TOKTYPE_NON_CCA) {
+		if (dbg)
+			DBF("%s key check failed, type 0x%02x != 0x%02x\n",
+			    __func__, (int) hdr->type, TOKTYPE_NON_CCA);
+		return -EINVAL;
+	}
+	if (hdr->hver != 0x00) {
+		if (dbg)
+			DBF("%s key check failed, header version 0x%02x != 0x00\n",
+			    __func__, (int) hdr->hver);
+		return -EINVAL;
+	}
+	if (hdr->version != TOKVER_EP11_AES_WITH_HEADER) {
+		if (dbg)
+			DBF("%s key check failed, version 0x%02x != 0x%02x\n",
+			    __func__, (int) hdr->version, TOKVER_EP11_AES_WITH_HEADER);
+		return -EINVAL;
+	}
+	if (hdr->len > keylen) {
+		if (dbg)
+			DBF("%s key check failed, header len %d keylen %zu mismatch\n",
+			    __func__, (int) hdr->len, keylen);
+		return -EINVAL;
+	}
+	if (hdr->len < sizeof(*hdr) + sizeof(*kb)) {
+		if (dbg)
+			DBF("%s key check failed, header len %d < %zu\n",
+			    __func__, (int) hdr->len, sizeof(*hdr) + sizeof(*kb));
+		return -EINVAL;
+	}
+
+	if (kb->version != EP11_STRUCT_MAGIC) {
+		if (dbg)
+			DBF("%s key check failed, blob magic 0x%04x != 0x%04x\n",
+			    __func__, (int) kb->version, EP11_STRUCT_MAGIC);
+		return -EINVAL;
+	}
+	if (checkcpacfexp && !(kb->attr & EP11_BLOB_PKEY_EXTRACTABLE)) {
+		if (dbg)
+			DBF("%s key check failed, PKEY_EXTRACTABLE is off\n",
+			    __func__);
+		return -EINVAL;
+	}
+
+#undef DBF
+
+	return 0;
+}
+EXPORT_SYMBOL(ep11_check_aes_key_with_hdr);
+
+/*
+ * Simple check if the key blob is a valid EP11 ECC key blob with header.
+ */
+int ep11_check_ecc_key_with_hdr(debug_info_t *dbg, int dbflvl,
+				const u8 *key, size_t keylen, int checkcpacfexp)
+{
+	struct ep11kblob_header *hdr = (struct ep11kblob_header *) key;
+	struct ep11keyblob *kb = (struct ep11keyblob *) (key + sizeof(*hdr));
+
+#define DBF(...) debug_sprintf_event(dbg, dbflvl, ##__VA_ARGS__)
+
+	if (keylen < sizeof(*hdr) + sizeof(*kb)) {
+		DBF("%s key check failed, keylen %zu < %zu\n",
+		    __func__, keylen, sizeof(*hdr) + sizeof(*kb));
+		return -EINVAL;
+	}
+
+	if (hdr->type != TOKTYPE_NON_CCA) {
+		if (dbg)
+			DBF("%s key check failed, type 0x%02x != 0x%02x\n",
+			    __func__, (int) hdr->type, TOKTYPE_NON_CCA);
+		return -EINVAL;
+	}
+	if (hdr->hver != 0x00) {
+		if (dbg)
+			DBF("%s key check failed, header version 0x%02x != 0x00\n",
+			    __func__, (int) hdr->hver);
+		return -EINVAL;
+	}
+	if (hdr->version != TOKVER_EP11_ECC_WITH_HEADER) {
+		if (dbg)
+			DBF("%s key check failed, version 0x%02x != 0x%02x\n",
+			    __func__, (int) hdr->version, TOKVER_EP11_ECC_WITH_HEADER);
+		return -EINVAL;
+	}
+	if (hdr->len > keylen) {
+		if (dbg)
+			DBF("%s key check failed, header len %d keylen %zu mismatch\n",
+			    __func__, (int) hdr->len, keylen);
+		return -EINVAL;
+	}
+	if (hdr->len < sizeof(*hdr) + sizeof(*kb)) {
+		if (dbg)
+			DBF("%s key check failed, header len %d < %zu\n",
+			    __func__, (int) hdr->len, sizeof(*hdr) + sizeof(*kb));
+		return -EINVAL;
+	}
+
+	if (kb->version != EP11_STRUCT_MAGIC) {
+		if (dbg)
+			DBF("%s key check failed, blob magic 0x%04x != 0x%04x\n",
+			    __func__, (int) kb->version, EP11_STRUCT_MAGIC);
+		return -EINVAL;
+	}
+	if (checkcpacfexp && !(kb->attr & EP11_BLOB_PKEY_EXTRACTABLE)) {
+		if (dbg)
+			DBF("%s key check failed, PKEY_EXTRACTABLE is off\n",
+			    __func__);
+		return -EINVAL;
+	}
+
+#undef DBF
+
+	return 0;
+}
+EXPORT_SYMBOL(ep11_check_ecc_key_with_hdr);
+
+/*
+ * Simple check if the key blob is a valid EP11 AES key blob with
+ * the header in the session field (old style EP11 AES key).
+ */
+int ep11_check_aes_key(debug_info_t *dbg, int dbflvl,
+		       const u8 *key, size_t keylen, int checkcpacfexp)
+{
+	struct ep11keyblob *kb = (struct ep11keyblob *) key;
+
+#define DBF(...) debug_sprintf_event(dbg, dbflvl, ##__VA_ARGS__)
+
+	if (keylen < sizeof(*kb)) {
+		DBF("%s key check failed, keylen %zu < %zu\n",
+		    __func__, keylen, sizeof(*kb));
+		return -EINVAL;
+	}
+
+	if (kb->head.type != TOKTYPE_NON_CCA) {
+		if (dbg)
+			DBF("%s key check failed, type 0x%02x != 0x%02x\n",
+			    __func__, (int) kb->head.type, TOKTYPE_NON_CCA);
+		return -EINVAL;
+	}
+	if (kb->head.version != TOKVER_EP11_AES) {
+		if (dbg)
+			DBF("%s key check failed, version 0x%02x != 0x%02x\n",
+			    __func__, (int) kb->head.version, TOKVER_EP11_AES);
+		return -EINVAL;
+	}
+	if (kb->head.len > keylen) {
+		if (dbg)
+			DBF("%s key check failed, header len %d keylen %zu mismatch\n",
+			    __func__, (int) kb->head.len, keylen);
+		return -EINVAL;
+	}
+	if (kb->head.len < sizeof(*kb)) {
+		if (dbg)
+			DBF("%s key check failed, header len %d < %zu\n",
+			    __func__, (int) kb->head.len, sizeof(*kb));
+		return -EINVAL;
+	}
+
+	if (kb->version != EP11_STRUCT_MAGIC) {
+		if (dbg)
+			DBF("%s key check failed, blob magic 0x%04x != 0x%04x\n",
+			    __func__, (int) kb->version, EP11_STRUCT_MAGIC);
+		return -EINVAL;
+	}
+	if (checkcpacfexp && !(kb->attr & EP11_BLOB_PKEY_EXTRACTABLE)) {
+		if (dbg)
+			DBF("%s key check failed, PKEY_EXTRACTABLE is off\n",
+			    __func__);
+		return -EINVAL;
+	}
+
+#undef DBF
+
+	return 0;
+}
+EXPORT_SYMBOL(ep11_check_aes_key);
+
+/*
+ * Allocate and prepare ep11 cprb plus additional payload.
+ */
+static inline struct ep11_cprb *alloc_cprb(size_t payload_len)
+{
+	size_t len = sizeof(struct ep11_cprb) + payload_len;
+	struct ep11_cprb *cprb;
+
+	cprb = kzalloc(len, GFP_KERNEL);
+	if (!cprb)
+		return NULL;
+
+	cprb->cprb_len = sizeof(struct ep11_cprb);
+	cprb->cprb_ver_id = 0x04;
+	memcpy(cprb->func_id, "T4", 2);
+	cprb->ret_code = 0xFFFFFFFF;
+	cprb->payload_len = payload_len;
+
+	return cprb;
+}
+
+/*
+ * Some helper functions related to ASN1 encoding.
+ * Limited to length info <= 2 bytes.
+ */
+
+#define ASN1TAGLEN(x) (2 + (x) + ((x) > 127 ? 1 : 0) + ((x) > 255 ? 1 : 0))
+
+static int asn1tag_write(u8 *ptr, u8 tag, const u8 *pvalue, u16 valuelen)
+{
+	ptr[0] = tag;
+	if (valuelen > 255) {
+		ptr[1] = 0x82;
+		*((u16 *)(ptr + 2)) = valuelen;
+		memcpy(ptr + 4, pvalue, valuelen);
+		return 4 + valuelen;
+	}
+	if (valuelen > 127) {
+		ptr[1] = 0x81;
+		ptr[2] = (u8) valuelen;
+		memcpy(ptr + 3, pvalue, valuelen);
+		return 3 + valuelen;
+	}
+	ptr[1] = (u8) valuelen;
+	memcpy(ptr + 2, pvalue, valuelen);
+	return 2 + valuelen;
+}
+
+/* EP11 payload > 127 bytes starts with this struct */
+struct pl_head {
+	u8  tag;
+	u8  lenfmt;
+	u16 len;
+	u8  func_tag;
+	u8  func_len;
+	u32 func;
+	u8  dom_tag;
+	u8  dom_len;
+	u32 dom;
+} __packed;
+
+/* prep ep11 payload head helper function */
+static inline void prep_head(struct pl_head *h,
+			     size_t pl_size, int api, int func)
+{
+	h->tag = 0x30;
+	h->lenfmt = 0x82;
+	h->len = pl_size - 4;
+	h->func_tag = 0x04;
+	h->func_len = sizeof(u32);
+	h->func = (api << 16) + func;
+	h->dom_tag = 0x04;
+	h->dom_len = sizeof(u32);
+}
+
+/* prep urb helper function */
+static inline void prep_urb(struct ep11_urb *u,
+			    struct ep11_target_dev *t, int nt,
+			    struct ep11_cprb *req, size_t req_len,
+			    struct ep11_cprb *rep, size_t rep_len)
+{
+	u->targets = (u8 __user *) t;
+	u->targets_num = nt;
+	u->req = (u8 __user *) req;
+	u->req_len = req_len;
+	u->resp = (u8 __user *) rep;
+	u->resp_len = rep_len;
+}
+
+/* Check ep11 reply payload, return 0 or suggested errno value. */
+static int check_reply_pl(const u8 *pl, const char *func)
+{
+	int len;
+	u32 ret;
+
+	/* start tag */
+	if (*pl++ != 0x30) {
+		DEBUG_ERR("%s reply start tag mismatch\n", func);
+		return -EIO;
+	}
+
+	/* payload length format */
+	if (*pl < 127) {
+		len = *pl;
+		pl++;
+	} else if (*pl == 0x81) {
+		pl++;
+		len = *pl;
+		pl++;
+	} else if (*pl == 0x82) {
+		pl++;
+		len = *((u16 *)pl);
+		pl += 2;
+	} else {
+		DEBUG_ERR("%s reply start tag lenfmt mismatch 0x%02hhx\n",
+			  func, *pl);
+		return -EIO;
+	}
+
+	/* len should cover at least 3 fields with 32 bit value each */
+	if (len < 3 * 6) {
+		DEBUG_ERR("%s reply length %d too small\n", func, len);
+		return -EIO;
+	}
+
+	/* function tag, length and value */
+	if (pl[0] != 0x04 || pl[1] != 0x04) {
+		DEBUG_ERR("%s function tag or length mismatch\n", func);
+		return -EIO;
+	}
+	pl += 6;
+
+	/* dom tag, length and value */
+	if (pl[0] != 0x04 || pl[1] != 0x04) {
+		DEBUG_ERR("%s dom tag or length mismatch\n", func);
+		return -EIO;
+	}
+	pl += 6;
+
+	/* return value tag, length and value */
+	if (pl[0] != 0x04 || pl[1] != 0x04) {
+		DEBUG_ERR("%s return value tag or length mismatch\n", func);
+		return -EIO;
+	}
+	pl += 2;
+	ret = *((u32 *)pl);
+	if (ret != 0) {
+		DEBUG_ERR("%s return value 0x%04x != 0\n", func, ret);
+		return -EIO;
+	}
+
+	return 0;
+}
+
+
+/*
+ * Helper function which does an ep11 query with given query type.
+ */
+static int ep11_query_info(u16 cardnr, u16 domain, u32 query_type,
+			   size_t buflen, u8 *buf)
+{
+	struct ep11_info_req_pl {
+		struct pl_head head;
+		u8  query_type_tag;
+		u8  query_type_len;
+		u32 query_type;
+		u8  query_subtype_tag;
+		u8  query_subtype_len;
+		u32 query_subtype;
+	} __packed * req_pl;
+	struct ep11_info_rep_pl {
+		struct pl_head head;
+		u8  rc_tag;
+		u8  rc_len;
+		u32 rc;
+		u8  data_tag;
+		u8  data_lenfmt;
+		u16 data_len;
+	} __packed * rep_pl;
+	struct ep11_cprb *req = NULL, *rep = NULL;
+	struct ep11_target_dev target;
+	struct ep11_urb *urb = NULL;
+	int api = 1, rc = -ENOMEM;
+
+	/* request cprb and payload */
+	req = alloc_cprb(sizeof(struct ep11_info_req_pl));
+	if (!req)
+		goto out;
+	req_pl = (struct ep11_info_req_pl *) (((u8 *) req) + sizeof(*req));
+	prep_head(&req_pl->head, sizeof(*req_pl), api, 38); /* get xcp info */
+	req_pl->query_type_tag = 0x04;
+	req_pl->query_type_len = sizeof(u32);
+	req_pl->query_type = query_type;
+	req_pl->query_subtype_tag = 0x04;
+	req_pl->query_subtype_len = sizeof(u32);
+
+	/* reply cprb and payload */
+	rep = alloc_cprb(sizeof(struct ep11_info_rep_pl) + buflen);
+	if (!rep)
+		goto out;
+	rep_pl = (struct ep11_info_rep_pl *) (((u8 *) rep) + sizeof(*rep));
+
+	/* urb and target */
+	urb = kmalloc(sizeof(struct ep11_urb), GFP_KERNEL);
+	if (!urb)
+		goto out;
+	target.ap_id = cardnr;
+	target.dom_id = domain;
+	prep_urb(urb, &target, 1,
+		 req, sizeof(*req) + sizeof(*req_pl),
+		 rep, sizeof(*rep) + sizeof(*rep_pl) + buflen);
+
+	rc = zcrypt_send_ep11_cprb(urb);
+	if (rc) {
+		DEBUG_ERR(
+			"%s zcrypt_send_ep11_cprb(card=%d dom=%d) failed, rc=%d\n",
+			__func__, (int) cardnr, (int) domain, rc);
+		goto out;
+	}
+
+	rc = check_reply_pl((u8 *)rep_pl, __func__);
+	if (rc)
+		goto out;
+	if (rep_pl->data_tag != 0x04 || rep_pl->data_lenfmt != 0x82) {
+		DEBUG_ERR("%s unknown reply data format\n", __func__);
+		rc = -EIO;
+		goto out;
+	}
+	if (rep_pl->data_len > buflen) {
+		DEBUG_ERR("%s mismatch between reply data len and buffer len\n",
+			  __func__);
+		rc = -ENOSPC;
+		goto out;
+	}
+
+	memcpy(buf, ((u8 *) rep_pl) + sizeof(*rep_pl), rep_pl->data_len);
+
+out:
+	kfree(req);
+	kfree(rep);
+	kfree(urb);
+	return rc;
+}
+
+/*
+ * Provide information about an EP11 card.
+ */
+int ep11_get_card_info(u16 card, struct ep11_card_info *info, int verify)
+{
+	int rc;
+	struct ep11_module_query_info {
+		u32 API_ord_nr;
+		u32 firmware_id;
+		u8  FW_major_vers;
+		u8  FW_minor_vers;
+		u8  CSP_major_vers;
+		u8  CSP_minor_vers;
+		u8  fwid[32];
+		u8  xcp_config_hash[32];
+		u8  CSP_config_hash[32];
+		u8  serial[16];
+		u8  module_date_time[16];
+		u64 op_mode;
+		u32 PKCS11_flags;
+		u32 ext_flags;
+		u32 domains;
+		u32 sym_state_bytes;
+		u32 digest_state_bytes;
+		u32 pin_blob_bytes;
+		u32 SPKI_bytes;
+		u32 priv_key_blob_bytes;
+		u32 sym_blob_bytes;
+		u32 max_payload_bytes;
+		u32 CP_profile_bytes;
+		u32 max_CP_index;
+	} __packed * pmqi = NULL;
+
+	rc = card_cache_fetch(card, info);
+	if (rc || verify) {
+		pmqi = kmalloc(sizeof(*pmqi), GFP_KERNEL);
+		if (!pmqi)
+			return -ENOMEM;
+		rc = ep11_query_info(card, AUTOSEL_DOM,
+				     0x01 /* module info query */,
+				     sizeof(*pmqi), (u8 *) pmqi);
+		if (rc) {
+			if (rc == -ENODEV)
+				card_cache_scrub(card);
+			goto out;
+		}
+		memset(info, 0, sizeof(*info));
+		info->API_ord_nr = pmqi->API_ord_nr;
+		info->FW_version =
+			(pmqi->FW_major_vers << 8) + pmqi->FW_minor_vers;
+		memcpy(info->serial, pmqi->serial, sizeof(info->serial));
+		info->op_mode = pmqi->op_mode;
+		card_cache_update(card, info);
+	}
+
+out:
+	kfree(pmqi);
+	return rc;
+}
+EXPORT_SYMBOL(ep11_get_card_info);
+
+/*
+ * Provide information about a domain within an EP11 card.
+ */
+int ep11_get_domain_info(u16 card, u16 domain, struct ep11_domain_info *info)
+{
+	int rc;
+	struct ep11_domain_query_info {
+		u32 dom_index;
+		u8  cur_WK_VP[32];
+		u8  new_WK_VP[32];
+		u32 dom_flags;
+		u64 op_mode;
+	} __packed * p_dom_info;
+
+	p_dom_info = kmalloc(sizeof(*p_dom_info), GFP_KERNEL);
+	if (!p_dom_info)
+		return -ENOMEM;
+
+	rc = ep11_query_info(card, domain, 0x03 /* domain info query */,
+			     sizeof(*p_dom_info), (u8 *) p_dom_info);
+	if (rc)
+		goto out;
+
+	memset(info, 0, sizeof(*info));
+	info->cur_wk_state = '0';
+	info->new_wk_state = '0';
+	if (p_dom_info->dom_flags & 0x10 /* left imprint mode */) {
+		if (p_dom_info->dom_flags & 0x02 /* cur wk valid */) {
+			info->cur_wk_state = '1';
+			memcpy(info->cur_wkvp, p_dom_info->cur_WK_VP, 32);
+		}
+		if (p_dom_info->dom_flags & 0x04 /* new wk present */
+		    || p_dom_info->dom_flags & 0x08 /* new wk committed */) {
+			info->new_wk_state =
+				p_dom_info->dom_flags & 0x08 ? '2' : '1';
+			memcpy(info->new_wkvp, p_dom_info->new_WK_VP, 32);
+		}
+	}
+	info->op_mode = p_dom_info->op_mode;
+
+out:
+	kfree(p_dom_info);
+	return rc;
+}
+EXPORT_SYMBOL(ep11_get_domain_info);
+
+/*
+ * Default EP11 AES key generate attributes, used when no keygenflags given:
+ * XCP_BLOB_ENCRYPT | XCP_BLOB_DECRYPT | XCP_BLOB_PROTKEY_EXTRACTABLE
+ */
+#define KEY_ATTR_DEFAULTS 0x00200c00
+
+int ep11_genaeskey(u16 card, u16 domain, u32 keybitsize, u32 keygenflags,
+		   u8 *keybuf, size_t *keybufsize)
+{
+	struct keygen_req_pl {
+		struct pl_head head;
+		u8  var_tag;
+		u8  var_len;
+		u32 var;
+		u8  keybytes_tag;
+		u8  keybytes_len;
+		u32 keybytes;
+		u8  mech_tag;
+		u8  mech_len;
+		u32 mech;
+		u8  attr_tag;
+		u8  attr_len;
+		u32 attr_header;
+		u32 attr_bool_mask;
+		u32 attr_bool_bits;
+		u32 attr_val_len_type;
+		u32 attr_val_len_value;
+		u8  pin_tag;
+		u8  pin_len;
+	} __packed * req_pl;
+	struct keygen_rep_pl {
+		struct pl_head head;
+		u8  rc_tag;
+		u8  rc_len;
+		u32 rc;
+		u8  data_tag;
+		u8  data_lenfmt;
+		u16 data_len;
+		u8  data[512];
+	} __packed * rep_pl;
+	struct ep11_cprb *req = NULL, *rep = NULL;
+	struct ep11_target_dev target;
+	struct ep11_urb *urb = NULL;
+	struct ep11keyblob *kb;
+	int api, rc = -ENOMEM;
+
+	switch (keybitsize) {
+	case 128:
+	case 192:
+	case 256:
+		break;
+	default:
+		DEBUG_ERR(
+			"%s unknown/unsupported keybitsize %d\n",
+			__func__, keybitsize);
+		rc = -EINVAL;
+		goto out;
+	}
+
+	/* request cprb and payload */
+	req = alloc_cprb(sizeof(struct keygen_req_pl));
+	if (!req)
+		goto out;
+	req_pl = (struct keygen_req_pl *) (((u8 *) req) + sizeof(*req));
+	api = (!keygenflags || keygenflags & 0x00200000) ? 4 : 1;
+	prep_head(&req_pl->head, sizeof(*req_pl), api, 21); /* GenerateKey */
+	req_pl->var_tag = 0x04;
+	req_pl->var_len = sizeof(u32);
+	req_pl->keybytes_tag = 0x04;
+	req_pl->keybytes_len = sizeof(u32);
+	req_pl->keybytes = keybitsize / 8;
+	req_pl->mech_tag = 0x04;
+	req_pl->mech_len = sizeof(u32);
+	req_pl->mech = 0x00001080; /* CKM_AES_KEY_GEN */
+	req_pl->attr_tag = 0x04;
+	req_pl->attr_len = 5 * sizeof(u32);
+	req_pl->attr_header = 0x10010000;
+	req_pl->attr_bool_mask = keygenflags ? keygenflags : KEY_ATTR_DEFAULTS;
+	req_pl->attr_bool_bits = keygenflags ? keygenflags : KEY_ATTR_DEFAULTS;
+	req_pl->attr_val_len_type = 0x00000161; /* CKA_VALUE_LEN */
+	req_pl->attr_val_len_value = keybitsize / 8;
+	req_pl->pin_tag = 0x04;
+
+	/* reply cprb and payload */
+	rep = alloc_cprb(sizeof(struct keygen_rep_pl));
+	if (!rep)
+		goto out;
+	rep_pl = (struct keygen_rep_pl *) (((u8 *) rep) + sizeof(*rep));
+
+	/* urb and target */
+	urb = kmalloc(sizeof(struct ep11_urb), GFP_KERNEL);
+	if (!urb)
+		goto out;
+	target.ap_id = card;
+	target.dom_id = domain;
+	prep_urb(urb, &target, 1,
+		 req, sizeof(*req) + sizeof(*req_pl),
+		 rep, sizeof(*rep) + sizeof(*rep_pl));
+
+	rc = zcrypt_send_ep11_cprb(urb);
+	if (rc) {
+		DEBUG_ERR(
+			"%s zcrypt_send_ep11_cprb(card=%d dom=%d) failed, rc=%d\n",
+			__func__, (int) card, (int) domain, rc);
+		goto out;
+	}
+
+	rc = check_reply_pl((u8 *)rep_pl, __func__);
+	if (rc)
+		goto out;
+	if (rep_pl->data_tag != 0x04 || rep_pl->data_lenfmt != 0x82) {
+		DEBUG_ERR("%s unknown reply data format\n", __func__);
+		rc = -EIO;
+		goto out;
+	}
+	if (rep_pl->data_len > *keybufsize) {
+		DEBUG_ERR("%s mismatch reply data len / key buffer len\n",
+			  __func__);
+		rc = -ENOSPC;
+		goto out;
+	}
+
+	/* copy key blob and set header values */
+	memcpy(keybuf, rep_pl->data, rep_pl->data_len);
+	*keybufsize = rep_pl->data_len;
+	kb = (struct ep11keyblob *) keybuf;
+	kb->head.type = TOKTYPE_NON_CCA;
+	kb->head.len = rep_pl->data_len;
+	kb->head.version = TOKVER_EP11_AES;
+	kb->head.keybitlen = keybitsize;
+
+out:
+	kfree(req);
+	kfree(rep);
+	kfree(urb);
+	return rc;
+}
+EXPORT_SYMBOL(ep11_genaeskey);
+
+static int ep11_cryptsingle(u16 card, u16 domain,
+			    u16 mode, u32 mech, const u8 *iv,
+			    const u8 *key, size_t keysize,
+			    const u8 *inbuf, size_t inbufsize,
+			    u8 *outbuf, size_t *outbufsize)
+{
+	struct crypt_req_pl {
+		struct pl_head head;
+		u8  var_tag;
+		u8  var_len;
+		u32 var;
+		u8  mech_tag;
+		u8  mech_len;
+		u32 mech;
+		/*
+		 * maybe followed by iv data
+		 * followed by key tag + key blob
+		 * followed by plaintext tag + plaintext
+		 */
+	} __packed * req_pl;
+	struct crypt_rep_pl {
+		struct pl_head head;
+		u8  rc_tag;
+		u8  rc_len;
+		u32 rc;
+		u8  data_tag;
+		u8  data_lenfmt;
+		/* data follows */
+	} __packed * rep_pl;
+	struct ep11_cprb *req = NULL, *rep = NULL;
+	struct ep11_target_dev target;
+	struct ep11_urb *urb = NULL;
+	size_t req_pl_size, rep_pl_size;
+	int n, api = 1, rc = -ENOMEM;
+	u8 *p;
+
+	/* the simple asn1 coding used has length limits */
+	if (keysize > 0xFFFF || inbufsize > 0xFFFF)
+		return -EINVAL;
+
+	/* request cprb and payload */
+	req_pl_size = sizeof(struct crypt_req_pl) + (iv ? 16 : 0)
+		+ ASN1TAGLEN(keysize) + ASN1TAGLEN(inbufsize);
+	req = alloc_cprb(req_pl_size);
+	if (!req)
+		goto out;
+	req_pl = (struct crypt_req_pl *) (((u8 *) req) + sizeof(*req));
+	prep_head(&req_pl->head, req_pl_size, api, (mode ? 20 : 19));
+	req_pl->var_tag = 0x04;
+	req_pl->var_len = sizeof(u32);
+	/* mech is mech + mech params (iv here) */
+	req_pl->mech_tag = 0x04;
+	req_pl->mech_len = sizeof(u32) + (iv ? 16 : 0);
+	req_pl->mech = (mech ? mech : 0x00001085); /* CKM_AES_CBC_PAD */
+	p = ((u8 *) req_pl) + sizeof(*req_pl);
+	if (iv) {
+		memcpy(p, iv, 16);
+		p += 16;
+	}
+	/* key and input data */
+	p += asn1tag_write(p, 0x04, key, keysize);
+	p += asn1tag_write(p, 0x04, inbuf, inbufsize);
+
+	/* reply cprb and payload, assume out data size <= in data size + 32 */
+	rep_pl_size = sizeof(struct crypt_rep_pl) + ASN1TAGLEN(inbufsize + 32);
+	rep = alloc_cprb(rep_pl_size);
+	if (!rep)
+		goto out;
+	rep_pl = (struct crypt_rep_pl *) (((u8 *) rep) + sizeof(*rep));
+
+	/* urb and target */
+	urb = kmalloc(sizeof(struct ep11_urb), GFP_KERNEL);
+	if (!urb)
+		goto out;
+	target.ap_id = card;
+	target.dom_id = domain;
+	prep_urb(urb, &target, 1,
+		 req, sizeof(*req) + req_pl_size,
+		 rep, sizeof(*rep) + rep_pl_size);
+
+	rc = zcrypt_send_ep11_cprb(urb);
+	if (rc) {
+		DEBUG_ERR(
+			"%s zcrypt_send_ep11_cprb(card=%d dom=%d) failed, rc=%d\n",
+			__func__, (int) card, (int) domain, rc);
+		goto out;
+	}
+
+	rc = check_reply_pl((u8 *)rep_pl, __func__);
+	if (rc)
+		goto out;
+	if (rep_pl->data_tag != 0x04) {
+		DEBUG_ERR("%s unknown reply data format\n", __func__);
+		rc = -EIO;
+		goto out;
+	}
+	p = ((u8 *) rep_pl) + sizeof(*rep_pl);
+	if (rep_pl->data_lenfmt <= 127)
+		n = rep_pl->data_lenfmt;
+	else if (rep_pl->data_lenfmt == 0x81)
+		n = *p++;
+	else if (rep_pl->data_lenfmt == 0x82) {
+		n = *((u16 *) p);
+		p += 2;
+	} else {
+		DEBUG_ERR("%s unknown reply data length format 0x%02hhx\n",
+			  __func__, rep_pl->data_lenfmt);
+		rc = -EIO;
+		goto out;
+	}
+	if (n > *outbufsize) {
+		DEBUG_ERR("%s mismatch reply data len %d / output buffer %zu\n",
+			  __func__, n, *outbufsize);
+		rc = -ENOSPC;
+		goto out;
+	}
+
+	memcpy(outbuf, p, n);
+	*outbufsize = n;
+
+out:
+	kfree(req);
+	kfree(rep);
+	kfree(urb);
+	return rc;
+}
+
+static int ep11_unwrapkey(u16 card, u16 domain,
+			  const u8 *kek, size_t keksize,
+			  const u8 *enckey, size_t enckeysize,
+			  u32 mech, const u8 *iv,
+			  u32 keybitsize, u32 keygenflags,
+			  u8 *keybuf, size_t *keybufsize)
+{
+	struct uw_req_pl {
+		struct pl_head head;
+		u8  attr_tag;
+		u8  attr_len;
+		u32 attr_header;
+		u32 attr_bool_mask;
+		u32 attr_bool_bits;
+		u32 attr_key_type;
+		u32 attr_key_type_value;
+		u32 attr_val_len;
+		u32 attr_val_len_value;
+		u8  mech_tag;
+		u8  mech_len;
+		u32 mech;
+		/*
+		 * maybe followed by iv data
+		 * followed by kek tag + kek blob
+		 * followed by empty mac tag
+		 * followed by empty pin tag
+		 * followed by encrypted key tag + bytes
+		 */
+	} __packed * req_pl;
+	struct uw_rep_pl {
+		struct pl_head head;
+		u8  rc_tag;
+		u8  rc_len;
+		u32 rc;
+		u8  data_tag;
+		u8  data_lenfmt;
+		u16 data_len;
+		u8  data[512];
+	} __packed * rep_pl;
+	struct ep11_cprb *req = NULL, *rep = NULL;
+	struct ep11_target_dev target;
+	struct ep11_urb *urb = NULL;
+	struct ep11keyblob *kb;
+	size_t req_pl_size;
+	int api, rc = -ENOMEM;
+	u8 *p;
+
+	/* request cprb and payload */
+	req_pl_size = sizeof(struct uw_req_pl) + (iv ? 16 : 0)
+		+ ASN1TAGLEN(keksize) + 4 + ASN1TAGLEN(enckeysize);
+	req = alloc_cprb(req_pl_size);
+	if (!req)
+		goto out;
+	req_pl = (struct uw_req_pl *) (((u8 *) req) + sizeof(*req));
+	api = (!keygenflags || keygenflags & 0x00200000) ? 4 : 1;
+	prep_head(&req_pl->head, req_pl_size, api, 34); /* UnwrapKey */
+	req_pl->attr_tag = 0x04;
+	req_pl->attr_len = 7 * sizeof(u32);
+	req_pl->attr_header = 0x10020000;
+	req_pl->attr_bool_mask = keygenflags ? keygenflags : KEY_ATTR_DEFAULTS;
+	req_pl->attr_bool_bits = keygenflags ? keygenflags : KEY_ATTR_DEFAULTS;
+	req_pl->attr_key_type = 0x00000100; /* CKA_KEY_TYPE */
+	req_pl->attr_key_type_value = 0x0000001f; /* CKK_AES */
+	req_pl->attr_val_len = 0x00000161; /* CKA_VALUE_LEN */
+	req_pl->attr_val_len_value = keybitsize / 8;
+	/* mech is mech + mech params (iv here) */
+	req_pl->mech_tag = 0x04;
+	req_pl->mech_len = sizeof(u32) + (iv ? 16 : 0);
+	req_pl->mech = (mech ? mech : 0x00001085); /* CKM_AES_CBC_PAD */
+	p = ((u8 *) req_pl) + sizeof(*req_pl);
+	if (iv) {
+		memcpy(p, iv, 16);
+		p += 16;
+	}
+	/* kek */
+	p += asn1tag_write(p, 0x04, kek, keksize);
+	/* empty mac key tag */
+	*p++ = 0x04;
+	*p++ = 0;
+	/* empty pin tag */
+	*p++ = 0x04;
+	*p++ = 0;
+	/* encrypted key value tag and bytes */
+	p += asn1tag_write(p, 0x04, enckey, enckeysize);
+
+	/* reply cprb and payload */
+	rep = alloc_cprb(sizeof(struct uw_rep_pl));
+	if (!rep)
+		goto out;
+	rep_pl = (struct uw_rep_pl *) (((u8 *) rep) + sizeof(*rep));
+
+	/* urb and target */
+	urb = kmalloc(sizeof(struct ep11_urb), GFP_KERNEL);
+	if (!urb)
+		goto out;
+	target.ap_id = card;
+	target.dom_id = domain;
+	prep_urb(urb, &target, 1,
+		 req, sizeof(*req) + req_pl_size,
+		 rep, sizeof(*rep) + sizeof(*rep_pl));
+
+	rc = zcrypt_send_ep11_cprb(urb);
+	if (rc) {
+		DEBUG_ERR(
+			"%s zcrypt_send_ep11_cprb(card=%d dom=%d) failed, rc=%d\n",
+			__func__, (int) card, (int) domain, rc);
+		goto out;
+	}
+
+	rc = check_reply_pl((u8 *)rep_pl, __func__);
+	if (rc)
+		goto out;
+	if (rep_pl->data_tag != 0x04 || rep_pl->data_lenfmt != 0x82) {
+		DEBUG_ERR("%s unknown reply data format\n", __func__);
+		rc = -EIO;
+		goto out;
+	}
+	if (rep_pl->data_len > *keybufsize) {
+		DEBUG_ERR("%s mismatch reply data len / key buffer len\n",
+			  __func__);
+		rc = -ENOSPC;
+		goto out;
+	}
+
+	/* copy key blob and set header values */
+	memcpy(keybuf, rep_pl->data, rep_pl->data_len);
+	*keybufsize = rep_pl->data_len;
+	kb = (struct ep11keyblob *) keybuf;
+	kb->head.type = TOKTYPE_NON_CCA;
+	kb->head.len = rep_pl->data_len;
+	kb->head.version = TOKVER_EP11_AES;
+	kb->head.keybitlen = keybitsize;
+
+out:
+	kfree(req);
+	kfree(rep);
+	kfree(urb);
+	return rc;
+}
+
+static int ep11_wrapkey(u16 card, u16 domain,
+			const u8 *key, size_t keysize,
+			u32 mech, const u8 *iv,
+			u8 *databuf, size_t *datasize)
+{
+	struct wk_req_pl {
+		struct pl_head head;
+		u8  var_tag;
+		u8  var_len;
+		u32 var;
+		u8  mech_tag;
+		u8  mech_len;
+		u32 mech;
+		/*
+		 * followed by iv data
+		 * followed by key tag + key blob
+		 * followed by dummy kek param
+		 * followed by dummy mac param
+		 */
+	} __packed * req_pl;
+	struct wk_rep_pl {
+		struct pl_head head;
+		u8  rc_tag;
+		u8  rc_len;
+		u32 rc;
+		u8  data_tag;
+		u8  data_lenfmt;
+		u16 data_len;
+		u8  data[1024];
+	} __packed * rep_pl;
+	struct ep11_cprb *req = NULL, *rep = NULL;
+	struct ep11_target_dev target;
+	struct ep11_urb *urb = NULL;
+	struct ep11keyblob *kb;
+	size_t req_pl_size;
+	int api, rc = -ENOMEM;
+	bool has_header = false;
+	u8 *p;
+
+	/* maybe the session field holds a header with key info */
+	kb = (struct ep11keyblob *) key;
+	if (kb->head.type == TOKTYPE_NON_CCA &&
+	    kb->head.version == TOKVER_EP11_AES) {
+		has_header = true;
+		keysize = kb->head.len < keysize ? kb->head.len : keysize;
+	}
+
+	/* request cprb and payload */
+	req_pl_size = sizeof(struct wk_req_pl) + (iv ? 16 : 0)
+		+ ASN1TAGLEN(keysize) + 4;
+	req = alloc_cprb(req_pl_size);
+	if (!req)
+		goto out;
+	if (!mech || mech == 0x80060001)
+		req->flags |= 0x20; /* CPACF_WRAP needs special bit */
+	req_pl = (struct wk_req_pl *) (((u8 *) req) + sizeof(*req));
+	api = (!mech || mech == 0x80060001) ? 4 : 1; /* CKM_IBM_CPACF_WRAP */
+	prep_head(&req_pl->head, req_pl_size, api, 33); /* WrapKey */
+	req_pl->var_tag = 0x04;
+	req_pl->var_len = sizeof(u32);
+	/* mech is mech + mech params (iv here) */
+	req_pl->mech_tag = 0x04;
+	req_pl->mech_len = sizeof(u32) + (iv ? 16 : 0);
+	req_pl->mech = (mech ? mech : 0x80060001); /* CKM_IBM_CPACF_WRAP */
+	p = ((u8 *) req_pl) + sizeof(*req_pl);
+	if (iv) {
+		memcpy(p, iv, 16);
+		p += 16;
+	}
+	/* key blob */
+	p += asn1tag_write(p, 0x04, key, keysize);
+	/* maybe the key argument needs the head data cleaned out */
+	if (has_header) {
+		kb = (struct ep11keyblob *)(p - keysize);
+		memset(&kb->head, 0, sizeof(kb->head));
+	}
+	/* empty kek tag */
+	*p++ = 0x04;
+	*p++ = 0;
+	/* empty mac tag */
+	*p++ = 0x04;
+	*p++ = 0;
+
+	/* reply cprb and payload */
+	rep = alloc_cprb(sizeof(struct wk_rep_pl));
+	if (!rep)
+		goto out;
+	rep_pl = (struct wk_rep_pl *) (((u8 *) rep) + sizeof(*rep));
+
+	/* urb and target */
+	urb = kmalloc(sizeof(struct ep11_urb), GFP_KERNEL);
+	if (!urb)
+		goto out;
+	target.ap_id = card;
+	target.dom_id = domain;
+	prep_urb(urb, &target, 1,
+		 req, sizeof(*req) + req_pl_size,
+		 rep, sizeof(*rep) + sizeof(*rep_pl));
+
+	rc = zcrypt_send_ep11_cprb(urb);
+	if (rc) {
+		DEBUG_ERR(
+			"%s zcrypt_send_ep11_cprb(card=%d dom=%d) failed, rc=%d\n",
+			__func__, (int) card, (int) domain, rc);
+		goto out;
+	}
+
+	rc = check_reply_pl((u8 *)rep_pl, __func__);
+	if (rc)
+		goto out;
+	if (rep_pl->data_tag != 0x04 || rep_pl->data_lenfmt != 0x82) {
+		DEBUG_ERR("%s unknown reply data format\n", __func__);
+		rc = -EIO;
+		goto out;
+	}
+	if (rep_pl->data_len > *datasize) {
+		DEBUG_ERR("%s mismatch reply data len / data buffer len\n",
+			  __func__);
+		rc = -ENOSPC;
+		goto out;
+	}
+
+	/* copy the data from the cprb to the data buffer */
+	memcpy(databuf, rep_pl->data, rep_pl->data_len);
+	*datasize = rep_pl->data_len;
+
+out:
+	kfree(req);
+	kfree(rep);
+	kfree(urb);
+	return rc;
+}
+
+int ep11_clr2keyblob(u16 card, u16 domain, u32 keybitsize, u32 keygenflags,
+		     const u8 *clrkey, u8 *keybuf, size_t *keybufsize)
+{
+	int rc;
+	struct ep11keyblob *kb;
+	u8 encbuf[64], *kek = NULL;
+	size_t clrkeylen, keklen, encbuflen = sizeof(encbuf);
+
+	if (keybitsize == 128 || keybitsize == 192 || keybitsize == 256)
+		clrkeylen = keybitsize / 8;
+	else {
+		DEBUG_ERR(
+			"%s unknown/unsupported keybitsize %d\n",
+			__func__, keybitsize);
+		return -EINVAL;
+	}
+
+	/* allocate memory for the temp kek */
+	keklen = MAXEP11AESKEYBLOBSIZE;
+	kek = kmalloc(keklen, GFP_ATOMIC);
+	if (!kek) {
+		rc = -ENOMEM;
+		goto out;
+	}
+
+	/* Step 1: generate AES 256 bit random kek key */
+	rc = ep11_genaeskey(card, domain, 256,
+			    0x00006c00, /* EN/DECRYPT, WRAP/UNWRAP */
+			    kek, &keklen);
+	if (rc) {
+		DEBUG_ERR(
+			"%s generate kek key failed, rc=%d\n",
+			__func__, rc);
+		goto out;
+	}
+	kb = (struct ep11keyblob *) kek;
+	memset(&kb->head, 0, sizeof(kb->head));
+
+	/* Step 2: encrypt clear key value with the kek key */
+	rc = ep11_cryptsingle(card, domain, 0, 0, def_iv, kek, keklen,
+			      clrkey, clrkeylen, encbuf, &encbuflen);
+	if (rc) {
+		DEBUG_ERR(
+			"%s encrypting key value with kek key failed, rc=%d\n",
+			__func__, rc);
+		goto out;
+	}
+
+	/* Step 3: import the encrypted key value as a new key */
+	rc = ep11_unwrapkey(card, domain, kek, keklen,
+			    encbuf, encbuflen, 0, def_iv,
+			    keybitsize, 0, keybuf, keybufsize);
+	if (rc) {
+		DEBUG_ERR(
+			"%s importing key value as new key failed, rc=%d\n",
+			__func__, rc);
+		goto out;
+	}
+
+out:
+	kfree(kek);
+	return rc;
+}
+EXPORT_SYMBOL(ep11_clr2keyblob);
+
+int ep11_kblob2protkey(u16 card, u16 dom, const u8 *keyblob, size_t keybloblen,
+		       u8 *protkey, u32 *protkeylen, u32 *protkeytype)
+{
+	int rc = -EIO;
+	u8 *wkbuf = NULL;
+	size_t wkbuflen, keylen;
+	struct wk_info {
+		u16 version;
+		u8  res1[16];
+		u32 pkeytype;
+		u32 pkeybitsize;
+		u64 pkeysize;
+		u8  res2[8];
+		u8  pkey[0];
+	} __packed * wki;
+	const u8 *key;
+	struct ep11kblob_header *hdr;
+
+	/* key with or without header ? */
+	hdr = (struct ep11kblob_header *) keyblob;
+	if (hdr->type == TOKTYPE_NON_CCA
+	    && (hdr->version == TOKVER_EP11_AES_WITH_HEADER
+		|| hdr->version == TOKVER_EP11_ECC_WITH_HEADER)
+	    && is_ep11_keyblob(keyblob + sizeof(struct ep11kblob_header))) {
+		/* EP11 AES or ECC key with header */
+		key = keyblob + sizeof(struct ep11kblob_header);
+		keylen = hdr->len - sizeof(struct ep11kblob_header);
+	} else if (hdr->type == TOKTYPE_NON_CCA
+		   && hdr->version == TOKVER_EP11_AES
+		   && is_ep11_keyblob(keyblob)) {
+		/* EP11 AES key (old style) */
+		key = keyblob;
+		keylen = hdr->len;
+	} else if (is_ep11_keyblob(keyblob)) {
+		/* raw EP11 key blob */
+		key = keyblob;
+		keylen = keybloblen;
+	} else
+		return -EINVAL;
+
+	/* alloc temp working buffer */
+	wkbuflen = (keylen + AES_BLOCK_SIZE) & (~(AES_BLOCK_SIZE - 1));
+	wkbuf = kmalloc(wkbuflen, GFP_ATOMIC);
+	if (!wkbuf)
+		return -ENOMEM;
+
+	/* ep11 secure key -> protected key + info */
+	rc = ep11_wrapkey(card, dom, key, keylen,
+			  0, def_iv, wkbuf, &wkbuflen);
+	if (rc) {
+		DEBUG_ERR(
+			"%s rewrapping ep11 key to pkey failed, rc=%d\n",
+			__func__, rc);
+		goto out;
+	}
+	wki = (struct wk_info *) wkbuf;
+
+	/* check struct version and pkey type */
+	if (wki->version != 1 || wki->pkeytype < 1 || wki->pkeytype > 5) {
+		DEBUG_ERR("%s wk info version %d or pkeytype %d mismatch.\n",
+			  __func__, (int) wki->version, (int) wki->pkeytype);
+		rc = -EIO;
+		goto out;
+	}
+
+	/* check protected key type field */
+	switch (wki->pkeytype) {
+	case 1: /* AES */
+		switch (wki->pkeysize) {
+		case 16+32:
+			/* AES 128 protected key */
+			if (protkeytype)
+				*protkeytype = PKEY_KEYTYPE_AES_128;
+			break;
+		case 24+32:
+			/* AES 192 protected key */
+			if (protkeytype)
+				*protkeytype = PKEY_KEYTYPE_AES_192;
+			break;
+		case 32+32:
+			/* AES 256 protected key */
+			if (protkeytype)
+				*protkeytype = PKEY_KEYTYPE_AES_256;
+			break;
+		default:
+			DEBUG_ERR("%s unknown/unsupported AES pkeysize %d\n",
+				  __func__, (int) wki->pkeysize);
+			rc = -EIO;
+			goto out;
+		}
+		break;
+	case 3: /* EC-P */
+	case 4: /* EC-ED */
+	case 5: /* EC-BP */
+		if (protkeytype)
+			*protkeytype = PKEY_KEYTYPE_ECC;
+		break;
+	case 2: /* TDES */
+	default:
+		DEBUG_ERR("%s unknown/unsupported key type %d\n",
+			  __func__, (int) wki->pkeytype);
+		rc = -EIO;
+		goto out;
+	}
+
+	/* copy the translated protected key */
+	if (wki->pkeysize > *protkeylen) {
+		DEBUG_ERR("%s wk info pkeysize %llu > protkeysize %u\n",
+			  __func__, wki->pkeysize, *protkeylen);
+		rc = -EINVAL;
+		goto out;
+	}
+	memcpy(protkey, wki->pkey, wki->pkeysize);
+	*protkeylen = wki->pkeysize;
+
+out:
+	kfree(wkbuf);
+	return rc;
+}
+EXPORT_SYMBOL(ep11_kblob2protkey);
+
+int ep11_findcard2(u32 **apqns, u32 *nr_apqns, u16 cardnr, u16 domain,
+		   int minhwtype, int minapi, const u8 *wkvp)
+{
+	struct zcrypt_device_status_ext *device_status;
+	u32 *_apqns = NULL, _nr_apqns = 0;
+	int i, card, dom, rc = -ENOMEM;
+	struct ep11_domain_info edi;
+	struct ep11_card_info eci;
+
+	/* fetch status of all crypto cards */
+	device_status = kvmalloc_array(MAX_ZDEV_ENTRIES_EXT,
+				       sizeof(struct zcrypt_device_status_ext),
+				       GFP_KERNEL);
+	if (!device_status)
+		return -ENOMEM;
+	zcrypt_device_status_mask_ext(device_status);
+
+	/* allocate 1k space for up to 256 apqns */
+	_apqns = kmalloc_array(256, sizeof(u32), GFP_KERNEL);
+	if (!_apqns) {
+		kvfree(device_status);
+		return -ENOMEM;
+	}
+
+	/* walk through all the crypto apqns */
+	for (i = 0; i < MAX_ZDEV_ENTRIES_EXT; i++) {
+		card = AP_QID_CARD(device_status[i].qid);
+		dom = AP_QID_QUEUE(device_status[i].qid);
+		/* check online state */
+		if (!device_status[i].online)
+			continue;
+		/* check for ep11 functions */
+		if (!(device_status[i].functions & 0x01))
+			continue;
+		/* check cardnr */
+		if (cardnr != 0xFFFF && card != cardnr)
+			continue;
+		/* check domain */
+		if (domain != 0xFFFF && dom != domain)
+			continue;
+		/* check min hardware type */
+		if (minhwtype && device_status[i].hwtype < minhwtype)
+			continue;
+		/* check min api version if given */
+		if (minapi > 0) {
+			if (ep11_get_card_info(card, &eci, 0))
+				continue;
+			if (minapi > eci.API_ord_nr)
+				continue;
+		}
+		/* check wkvp if given */
+		if (wkvp) {
+			if (ep11_get_domain_info(card, dom, &edi))
+				continue;
+			if (edi.cur_wk_state != '1')
+				continue;
+			if (memcmp(wkvp, edi.cur_wkvp, 16))
+				continue;
+		}
+		/* apqn passed all filtering criteria, add to the array */
+		if (_nr_apqns < 256)
+			_apqns[_nr_apqns++] = (((u16)card) << 16) | ((u16) dom);
+	}
+
+	/* nothing found ? */
+	if (!_nr_apqns) {
+		kfree(_apqns);
+		rc = -ENODEV;
+	} else {
+		/* no re-allocation, simply return the _apqns array */
+		*apqns = _apqns;
+		*nr_apqns = _nr_apqns;
+		rc = 0;
+	}
+
+	kvfree(device_status);
+	return rc;
+}
+EXPORT_SYMBOL(ep11_findcard2);
+
+void __exit zcrypt_ep11misc_exit(void)
+{
+	card_cache_free();
+}
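
For reference, the reply-length handling that ep11_cryptsingle() applies to the response payload above can be summarized in a small standalone sketch. This is illustrative only and not part of the patch: the helper name is made up, and the two-octet case is assembled byte by byte because the in-kernel *((u16 *) p) read relies on the big-endian byte order of s390.

/*
 * Illustrative sketch (not part of this patch): decoding of the simple
 * ASN.1 definite length formats accepted by ep11_cryptsingle().
 */
#include <stdint.h>

static int parse_ep11_reply_len(const uint8_t **pp, uint8_t lenfmt)
{
	const uint8_t *p = *pp;
	int n;

	if (lenfmt <= 127) {
		/* short form: the length is encoded in the format octet */
		n = lenfmt;
	} else if (lenfmt == 0x81) {
		/* long form with one following length octet */
		n = *p++;
	} else if (lenfmt == 0x82) {
		/* long form with two following length octets (big-endian) */
		n = (p[0] << 8) | p[1];
		p += 2;
	} else {
		/* anything else is rejected by the driver with -EIO */
		return -1;
	}

	*pp = p;
	return n;
}
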
diff --git a/drivers/s390/crypto/zcrypt_ep11misc.h b/drivers/s390/crypto/zcrypt_ep11misc.h
new file mode 100644
index 0000000..1e02b19
--- /dev/null
+++ b/drivers/s390/crypto/zcrypt_ep11misc.h
@@ -0,0 +1,155 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ *  Copyright IBM Corp. 2019
+ *  Author(s): Harald Freudenberger <freude@linux.ibm.com>
+ *
+ *  Collection of EP11 misc functions used by zcrypt and pkey
+ */
+
+#ifndef _ZCRYPT_EP11MISC_H_
+#define _ZCRYPT_EP11MISC_H_
+
+#include <asm/zcrypt.h>
+#include <asm/pkey.h>
+
+#define EP11_API_V 4  /* highest known and supported EP11 API version */
+#define EP11_STRUCT_MAGIC 0x1234
+#define EP11_BLOB_PKEY_EXTRACTABLE 0x00200000
+
+/*
+ * Internally used values for the version field of the key header.
+ * Should match to the enum pkey_key_type in pkey.h.
+ */
+#define TOKVER_EP11_AES  0x03  /* EP11 AES key blob (old style) */
+#define TOKVER_EP11_AES_WITH_HEADER 0x06 /* EP11 AES key blob with header */
+#define TOKVER_EP11_ECC_WITH_HEADER 0x07 /* EP11 ECC key blob with header */
+
+/* inside view of an EP11 secure key blob */
+struct ep11keyblob {
+	union {
+		u8 session[32];
+		/* only used for PKEY_TYPE_EP11: */
+		struct {
+			u8  type;      /* 0x00 (TOKTYPE_NON_CCA) */
+			u8  res0;      /* unused */
+			u16 len;       /* total length in bytes of this blob */
+			u8  version;   /* 0x03 (TOKVER_EP11_AES) */
+			u8  res1;      /* unused */
+			u16 keybitlen; /* clear key bit len, 0 for unknown */
+		} head;
+	};
+	u8  wkvp[16];  /* wrapping key verification pattern */
+	u64 attr;      /* boolean key attributes */
+	u64 mode;      /* mode bits */
+	u16 version;   /* 0x1234, EP11_STRUCT_MAGIC */
+	u8  iv[14];
+	u8  encrypted_key_data[144];
+	u8  mac[32];
+} __packed;
+
+/* check ep11 key magic to find out if this is an ep11 key blob */
+static inline bool is_ep11_keyblob(const u8 *key)
+{
+	struct ep11keyblob *kb = (struct ep11keyblob *) key;
+
+	return (kb->version == EP11_STRUCT_MAGIC);
+}
+
+/*
+ * Simple check if the key blob is a valid EP11 AES key blob with header.
+ * If checkcpacfexport is enabled, the key is also checked for the
+ * attributes needed to export this key for CPACF use.
+ * Returns 0 on success or errno value on failure.
+ */
+int ep11_check_aes_key_with_hdr(debug_info_t *dbg, int dbflvl,
+				const u8 *key, size_t keylen, int checkcpacfexp);
+
+/*
+ * Simple check if the key blob is a valid EP11 ECC key blob with header.
+ * If checkcpacfexport is enabled, the key is also checked for the
+ * attributes needed to export this key for CPACF use.
+ * Returns 0 on success or errno value on failure.
+ */
+int ep11_check_ecc_key_with_hdr(debug_info_t *dbg, int dbflvl,
+				const u8 *key, size_t keylen, int checkcpacfexp);
+
+/*
+ * Simple check if the key blob is a valid EP11 AES key blob with
+ * the header in the session field (old style EP11 AES key).
+ * If checkcpacfexport is enabled, the key is also checked for the
+ * attributes needed to export this key for CPACF use.
+ * Returns 0 on success or errno value on failure.
+ */
+int ep11_check_aes_key(debug_info_t *dbg, int dbflvl,
+		       const u8 *key, size_t keylen, int checkcpacfexp);
+
+/* EP11 card info struct */
+struct ep11_card_info {
+	u32  API_ord_nr;    /* API ordinal number */
+	u16  FW_version;    /* Firmware major and minor version */
+	char serial[16];    /* serial number string (16 ascii, no 0x00 !) */
+	u64  op_mode;	    /* card operational mode(s) */
+};
+
+/* EP11 domain info struct */
+struct ep11_domain_info {
+	char cur_wk_state;  /* '0' invalid, '1' valid */
+	char new_wk_state;  /* '0' empty, '1' uncommitted, '2' committed */
+	u8   cur_wkvp[32];  /* current wrapping key verification pattern */
+	u8   new_wkvp[32];  /* new wrapping key verification pattern */
+	u64  op_mode;	    /* domain operational mode(s) */
+};
+
+/*
+ * Provide information about an EP11 card.
+ */
+int ep11_get_card_info(u16 card, struct ep11_card_info *info, int verify);
+
+/*
+ * Provide information about a domain within an EP11 card.
+ */
+int ep11_get_domain_info(u16 card, u16 domain, struct ep11_domain_info *info);
+
+/*
+ * Generate (random) EP11 AES secure key.
+ */
+int ep11_genaeskey(u16 card, u16 domain, u32 keybitsize, u32 keygenflags,
+		   u8 *keybuf, size_t *keybufsize);
+
+/*
+ * Generate EP11 AES secure key with given clear key value.
+ */
+int ep11_clr2keyblob(u16 cardnr, u16 domain, u32 keybitsize, u32 keygenflags,
+		     const u8 *clrkey, u8 *keybuf, size_t *keybufsize);
+
+/*
+ * Build a list of ep11 apqns meeting the following constraints:
+ * - apqn is online and is in fact an EP11 apqn
+ * - if cardnr is not FFFF only apqns with this cardnr
+ * - if domain is not FFFF only apqns with this domainnr
+ * - if minhwtype > 0 only apqns with hwtype >= minhwtype
+ * - if minapi > 0 only apqns with API_ord_nr >= minapi
+ * - if wkvp != NULL only apqns where the wkvp (EP11_WKVPLEN bytes) matches
+ *   to the first EP11_WKVPLEN bytes of the wkvp of the current wrapping
+ *   key for this domain. When a wkvp is given there will always be a re-fetch
+ *   of the domain info for the potential apqn - so this triggers a
+ *   request/reply cycle to each eligible apqn.
+ * The array of apqn entries is allocated with kmalloc and returned in *apqns;
+ * the number of apqns stored into the list is returned in *nr_apqns. One apqn
+ * entry is simply a 32 bit value with 16 bit cardnr and 16 bit domain nr and
+ * may be cast to struct pkey_apqn. The return value is either 0 for success
+ * or a negative errno value. If no apqn meeting the criteria is found,
+ * -ENODEV is returned.
+ */
+int ep11_findcard2(u32 **apqns, u32 *nr_apqns, u16 cardnr, u16 domain,
+		   int minhwtype, int minapi, const u8 *wkvp);
+
+/*
+ * Derive protected key from EP11 key blob (AES and ECC keys).
+ */
+int ep11_kblob2protkey(u16 card, u16 dom, const u8 *key, size_t keylen,
+		       u8 *protkey, u32 *protkeylen, u32 *protkeytype);
+
+void zcrypt_ep11misc_exit(void);
+
+#endif /* _ZCRYPT_EP11MISC_H_ */
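
As a usage illustration of the interface declared above, the following out-of-tree sketch shows how a caller such as the pkey module might combine ep11_findcard2() and ep11_kblob2protkey(): first collect the eligible APQNs, then try them until one derives the protected key. The wrapper function is hypothetical, error handling is reduced to the essentials, and the usual kernel environment is assumed (kfree() from <linux/slab.h>).

/* Hypothetical caller-side sketch, not part of this patch. */
static int example_ep11_blob_to_protkey(const u8 *keyblob, size_t keybloblen,
					u8 *protkey, u32 *protkeylen,
					u32 *protkeytype)
{
	u32 *apqns = NULL, nr_apqns = 0;
	int i, rc;

	/* any online EP11 APQN; no card, domain, hwtype, api or wkvp filter */
	rc = ep11_findcard2(&apqns, &nr_apqns, 0xFFFF, 0xFFFF, 0, 0, NULL);
	if (rc)
		return rc;	/* -ENODEV if nothing matched */

	/* try the candidates until one derives the protected key */
	for (i = 0; i < nr_apqns; i++) {
		u16 card = apqns[i] >> 16;
		u16 dom = apqns[i] & 0xFFFF;

		rc = ep11_kblob2protkey(card, dom, keyblob, keybloblen,
					protkey, protkeylen, protkeytype);
		if (rc == 0)
			break;
	}

	kfree(apqns);	/* array was kmalloc'ed by ep11_findcard2() */
	return rc;
}
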
diff --git a/drivers/s390/crypto/zcrypt_error.h b/drivers/s390/crypto/zcrypt_error.h
index 4f4dd9d..39e626e 100644
--- a/drivers/s390/crypto/zcrypt_error.h
+++ b/drivers/s390/crypto/zcrypt_error.h
@@ -52,7 +52,6 @@
 #define REP82_ERROR_INVALID_COMMAND	    0x30
 #define REP82_ERROR_MALFORMED_MSG	    0x40
 #define REP82_ERROR_INVALID_SPECIAL_CMD	    0x41
-#define REP82_ERROR_INVALID_DOMAIN_PRECHECK 0x42
 #define REP82_ERROR_RESERVED_FIELDO	    0x50 /* old value	*/
 #define REP82_ERROR_WORD_ALIGNMENT	    0x60
 #define REP82_ERROR_MESSAGE_LENGTH	    0x80
@@ -67,7 +66,6 @@
 #define REP82_ERROR_ZERO_BUFFER_LEN	    0xB0
 
 #define REP88_ERROR_MODULE_FAILURE	    0x10
-
 #define REP88_ERROR_MESSAGE_TYPE	    0x20
 #define REP88_ERROR_MESSAGE_MALFORMD	    0x22
 #define REP88_ERROR_MESSAGE_LENGTH	    0x23
@@ -80,83 +78,61 @@
 static inline int convert_error(struct zcrypt_queue *zq,
 				struct ap_message *reply)
 {
-	struct error_hdr *ehdr = reply->message;
+	struct error_hdr *ehdr = reply->msg;
 	int card = AP_QID_CARD(zq->queue->qid);
 	int queue = AP_QID_QUEUE(zq->queue->qid);
 
 	switch (ehdr->reply_code) {
-	case REP82_ERROR_OPERAND_INVALID:
-	case REP82_ERROR_OPERAND_SIZE:
-	case REP82_ERROR_EVEN_MOD_IN_OPND:
-	case REP88_ERROR_MESSAGE_MALFORMD:
-	case REP82_ERROR_INVALID_DOMAIN_PRECHECK:
-	case REP82_ERROR_INVALID_DOMAIN_PENDING:
-	case REP82_ERROR_INVALID_SPECIAL_CMD:
-	case REP82_ERROR_FILTERED_BY_HYPERVISOR:
-	//   REP88_ERROR_INVALID_KEY		// '82' CEX2A
-	//   REP88_ERROR_OPERAND		// '84' CEX2A
-	//   REP88_ERROR_OPERAND_EVEN_MOD	// '85' CEX2A
-		/* Invalid input data. */
+	case REP82_ERROR_INVALID_MSG_LEN:	 /* 0x23 */
+	case REP82_ERROR_RESERVD_FIELD:		 /* 0x24 */
+	case REP82_ERROR_FORMAT_FIELD:		 /* 0x29 */
+	case REP82_ERROR_MALFORMED_MSG:		 /* 0x40 */
+	case REP82_ERROR_INVALID_SPECIAL_CMD:	 /* 0x41 */
+	case REP82_ERROR_MESSAGE_LENGTH:	 /* 0x80 */
+	case REP82_ERROR_OPERAND_INVALID:	 /* 0x82 */
+	case REP82_ERROR_OPERAND_SIZE:		 /* 0x84 */
+	case REP82_ERROR_EVEN_MOD_IN_OPND:	 /* 0x85 */
+	case REP82_ERROR_INVALID_DOMAIN_PENDING: /* 0x8A */
+	case REP82_ERROR_FILTERED_BY_HYPERVISOR: /* 0x8B */
+	case REP82_ERROR_PACKET_TRUNCATED:	 /* 0xA0 */
+	case REP88_ERROR_MESSAGE_MALFORMD:	 /* 0x22 */
+	case REP88_ERROR_KEY_TYPE:		 /* 0x34 */
+		/* RY indicates malformed request */
 		ZCRYPT_DBF(DBF_WARN,
-			   "device=%02x.%04x reply=0x%02x => rc=EINVAL\n",
+			   "dev=%02x.%04x RY=0x%02x => rc=EINVAL\n",
 			   card, queue, ehdr->reply_code);
 		return -EINVAL;
-	case REP82_ERROR_MESSAGE_TYPE:
-	//   REP88_ERROR_MESSAGE_TYPE		// '20' CEX2A
+	case REP82_ERROR_MACHINE_FAILURE:	 /* 0x10 */
+	case REP82_ERROR_MESSAGE_TYPE:		 /* 0x20 */
+	case REP82_ERROR_TRANSPORT_FAIL:	 /* 0x90 */
 		/*
-		 * To sent a message of the wrong type is a bug in the
-		 * device driver. Send error msg, disable the device
-		 * and then repeat the request.
+		 * Msg to wrong type or card/infrastructure failure.
+		 * Trigger rescan of the ap bus, trigger retry request.
 		 */
 		atomic_set(&zcrypt_rescan_req, 1);
-		zq->online = 0;
-		pr_err("Cryptographic device %02x.%04x failed and was set offline\n",
-		       card, queue);
-		ZCRYPT_DBF(DBF_ERR,
-			   "device=%02x.%04x reply=0x%02x => online=0 rc=EAGAIN\n",
-			   card, queue, ehdr->reply_code);
-		return -EAGAIN;
-	case REP82_ERROR_TRANSPORT_FAIL:
-		/* Card or infrastructure failure, disable card */
-		atomic_set(&zcrypt_rescan_req, 1);
-		zq->online = 0;
-		pr_err("Cryptographic device %02x.%04x failed and was set offline\n",
-		       card, queue);
 		/* For type 86 response show the apfs value (failure reason) */
-		if (ehdr->type == TYPE86_RSP_CODE) {
+		if (ehdr->reply_code == REP82_ERROR_TRANSPORT_FAIL &&
+		    ehdr->type == TYPE86_RSP_CODE) {
 			struct {
 				struct type86_hdr hdr;
 				struct type86_fmt2_ext fmt2;
-			} __packed * head = reply->message;
+			} __packed * head = reply->msg;
 			unsigned int apfs = *((u32 *)head->fmt2.apfs);
 
-			ZCRYPT_DBF(DBF_ERR,
-				   "device=%02x.%04x reply=0x%02x apfs=0x%x => online=0 rc=EAGAIN\n",
-				   card, queue, apfs, ehdr->reply_code);
+			ZCRYPT_DBF(DBF_WARN,
+				   "dev=%02x.%04x RY=0x%02x apfs=0x%x => bus rescan, rc=EAGAIN\n",
+				   card, queue, ehdr->reply_code, apfs);
 		} else
-			ZCRYPT_DBF(DBF_ERR,
-				   "device=%02x.%04x reply=0x%02x => online=0 rc=EAGAIN\n",
+			ZCRYPT_DBF(DBF_WARN,
+				   "dev=%02x.%04x RY=0x%02x => bus rescan, rc=EAGAIN\n",
 				   card, queue, ehdr->reply_code);
 		return -EAGAIN;
-	case REP82_ERROR_MACHINE_FAILURE:
-	//   REP88_ERROR_MODULE_FAILURE		// '10' CEX2A
-		/* If a card fails disable it and repeat the request. */
-		atomic_set(&zcrypt_rescan_req, 1);
-		zq->online = 0;
-		pr_err("Cryptographic device %02x.%04x failed and was set offline\n",
-		       card, queue);
-		ZCRYPT_DBF(DBF_ERR,
-			   "device=%02x.%04x reply=0x%02x => online=0 rc=EAGAIN\n",
+	default:
+		/* Assume request is valid and a retry will be worth it */
+		ZCRYPT_DBF(DBF_WARN,
+			   "dev=%02x.%04x RY=0x%02x => rc=EAGAIN\n",
 			   card, queue, ehdr->reply_code);
 		return -EAGAIN;
-	default:
-		zq->online = 0;
-		pr_err("Cryptographic device %02x.%04x failed and was set offline\n",
-		       card, queue);
-		ZCRYPT_DBF(DBF_ERR,
-			   "device=%02x.%04x reply=0x%02x => online=0 rc=EAGAIN\n",
-			   card, queue, ehdr->reply_code);
-		return -EAGAIN;	/* repeat the request on a different device. */
 	}
 }
 
diff --git a/drivers/s390/crypto/zcrypt_msgtype50.c b/drivers/s390/crypto/zcrypt_msgtype50.c
index fc4295b..bf14ee4 100644
--- a/drivers/s390/crypto/zcrypt_msgtype50.c
+++ b/drivers/s390/crypto/zcrypt_msgtype50.c
@@ -207,10 +207,10 @@
 	mod_len = mex->inputdatalength;
 
 	if (mod_len <= 128) {
-		struct type50_meb1_msg *meb1 = ap_msg->message;
+		struct type50_meb1_msg *meb1 = ap_msg->msg;
 
 		memset(meb1, 0, sizeof(*meb1));
-		ap_msg->length = sizeof(*meb1);
+		ap_msg->len = sizeof(*meb1);
 		meb1->header.msg_type_code = TYPE50_TYPE_CODE;
 		meb1->header.msg_len = sizeof(*meb1);
 		meb1->keyblock_type = TYPE50_MEB1_FMT;
@@ -218,10 +218,10 @@
 		exp = meb1->exponent + sizeof(meb1->exponent) - mod_len;
 		inp = meb1->message + sizeof(meb1->message) - mod_len;
 	} else if (mod_len <= 256) {
-		struct type50_meb2_msg *meb2 = ap_msg->message;
+		struct type50_meb2_msg *meb2 = ap_msg->msg;
 
 		memset(meb2, 0, sizeof(*meb2));
-		ap_msg->length = sizeof(*meb2);
+		ap_msg->len = sizeof(*meb2);
 		meb2->header.msg_type_code = TYPE50_TYPE_CODE;
 		meb2->header.msg_len = sizeof(*meb2);
 		meb2->keyblock_type = TYPE50_MEB2_FMT;
@@ -229,10 +229,10 @@
 		exp = meb2->exponent + sizeof(meb2->exponent) - mod_len;
 		inp = meb2->message + sizeof(meb2->message) - mod_len;
 	} else if (mod_len <= 512) {
-		struct type50_meb3_msg *meb3 = ap_msg->message;
+		struct type50_meb3_msg *meb3 = ap_msg->msg;
 
 		memset(meb3, 0, sizeof(*meb3));
-		ap_msg->length = sizeof(*meb3);
+		ap_msg->len = sizeof(*meb3);
 		meb3->header.msg_type_code = TYPE50_TYPE_CODE;
 		meb3->header.msg_len = sizeof(*meb3);
 		meb3->keyblock_type = TYPE50_MEB3_FMT;
@@ -246,6 +246,12 @@
 	    copy_from_user(exp, mex->b_key, mod_len) ||
 	    copy_from_user(inp, mex->inputdata, mod_len))
 		return -EFAULT;
+
+#ifdef CONFIG_ZCRYPT_DEBUG
+	if (ap_msg->fi.flags & AP_FI_FLAG_TOGGLE_SPECIAL)
+		ap_msg->flags ^= AP_MSG_FLAG_SPECIAL;
+#endif
+
 	return 0;
 }
 
@@ -275,10 +281,10 @@
 	 * 512 byte modulus (4k keys).
 	 */
 	if (mod_len <= 128) {		/* up to 1024 bit key size */
-		struct type50_crb1_msg *crb1 = ap_msg->message;
+		struct type50_crb1_msg *crb1 = ap_msg->msg;
 
 		memset(crb1, 0, sizeof(*crb1));
-		ap_msg->length = sizeof(*crb1);
+		ap_msg->len = sizeof(*crb1);
 		crb1->header.msg_type_code = TYPE50_TYPE_CODE;
 		crb1->header.msg_len = sizeof(*crb1);
 		crb1->keyblock_type = TYPE50_CRB1_FMT;
@@ -289,10 +295,10 @@
 		u = crb1->u + sizeof(crb1->u) - short_len;
 		inp = crb1->message + sizeof(crb1->message) - mod_len;
 	} else if (mod_len <= 256) {	/* up to 2048 bit key size */
-		struct type50_crb2_msg *crb2 = ap_msg->message;
+		struct type50_crb2_msg *crb2 = ap_msg->msg;
 
 		memset(crb2, 0, sizeof(*crb2));
-		ap_msg->length = sizeof(*crb2);
+		ap_msg->len = sizeof(*crb2);
 		crb2->header.msg_type_code = TYPE50_TYPE_CODE;
 		crb2->header.msg_len = sizeof(*crb2);
 		crb2->keyblock_type = TYPE50_CRB2_FMT;
@@ -304,10 +310,10 @@
 		inp = crb2->message + sizeof(crb2->message) - mod_len;
 	} else if ((mod_len <= 512) &&	/* up to 4096 bit key size */
 		   (zq->zcard->max_mod_size == CEX3A_MAX_MOD_SIZE)) {
-		struct type50_crb3_msg *crb3 = ap_msg->message;
+		struct type50_crb3_msg *crb3 = ap_msg->msg;
 
 		memset(crb3, 0, sizeof(*crb3));
-		ap_msg->length = sizeof(*crb3);
+		ap_msg->len = sizeof(*crb3);
 		crb3->header.msg_type_code = TYPE50_TYPE_CODE;
 		crb3->header.msg_len = sizeof(*crb3);
 		crb3->keyblock_type = TYPE50_CRB3_FMT;
@@ -332,6 +338,11 @@
 	    copy_from_user(inp, crt->inputdata, mod_len))
 		return -EFAULT;
 
+#ifdef CONFIG_ZCRYPT_DEBUG
+	if (ap_msg->fi.flags & AP_FI_FLAG_TOGGLE_SPECIAL)
+		ap_msg->flags ^= AP_MSG_FLAG_SPECIAL;
+#endif
+
 	return 0;
 }
 
@@ -350,39 +361,39 @@
 			  char __user *outputdata,
 			  unsigned int outputdatalength)
 {
-	struct type80_hdr *t80h = reply->message;
+	struct type80_hdr *t80h = reply->msg;
 	unsigned char *data;
 
 	if (t80h->len < sizeof(*t80h) + outputdatalength) {
 		/* The result is too short, the CEXxA card may not do that.. */
 		zq->online = 0;
-		pr_err("Cryptographic device %02x.%04x failed and was set offline\n",
+		pr_err("Crypto dev=%02x.%04x code=0x%02x => online=0 rc=EAGAIN\n",
 		       AP_QID_CARD(zq->queue->qid),
-		       AP_QID_QUEUE(zq->queue->qid));
-		ZCRYPT_DBF(DBF_ERR,
-			   "device=%02x.%04x code=0x%02x => online=0 rc=EAGAIN\n",
-			   AP_QID_CARD(zq->queue->qid),
-			   AP_QID_QUEUE(zq->queue->qid),
-			   t80h->code);
-		return -EAGAIN;	/* repeat the request on a different device. */
+		       AP_QID_QUEUE(zq->queue->qid),
+		       t80h->code);
+		ZCRYPT_DBF_ERR("dev=%02x.%04x code=0x%02x => online=0 rc=EAGAIN\n",
+			       AP_QID_CARD(zq->queue->qid),
+			       AP_QID_QUEUE(zq->queue->qid),
+			       t80h->code);
+		return -EAGAIN;
 	}
 	if (zq->zcard->user_space_type == ZCRYPT_CEX2A)
 		BUG_ON(t80h->len > CEX2A_MAX_RESPONSE_SIZE);
 	else
 		BUG_ON(t80h->len > CEX3A_MAX_RESPONSE_SIZE);
-	data = reply->message + t80h->len - outputdatalength;
+	data = reply->msg + t80h->len - outputdatalength;
 	if (copy_to_user(outputdata, data, outputdatalength))
 		return -EFAULT;
 	return 0;
 }
 
-static int convert_response(struct zcrypt_queue *zq,
-			    struct ap_message *reply,
-			    char __user *outputdata,
-			    unsigned int outputdatalength)
+static int convert_response_cex2a(struct zcrypt_queue *zq,
+				  struct ap_message *reply,
+				  char __user *outputdata,
+				  unsigned int outputdatalength)
 {
 	/* Response type byte is the second byte in the response. */
-	unsigned char rtype = ((unsigned char *) reply->message)[1];
+	unsigned char rtype = ((unsigned char *) reply->msg)[1];
 
 	switch (rtype) {
 	case TYPE82_RSP_CODE:
@@ -393,15 +404,15 @@
 				      outputdata, outputdatalength);
 	default: /* Unknown response type, this should NEVER EVER happen */
 		zq->online = 0;
-		pr_err("Cryptographic device %02x.%04x failed and was set offline\n",
+		pr_err("Crypto dev=%02x.%04x unknown response type 0x%02x => online=0 rc=EAGAIN\n",
 		       AP_QID_CARD(zq->queue->qid),
-		       AP_QID_QUEUE(zq->queue->qid));
-		ZCRYPT_DBF(DBF_ERR,
-			   "device=%02x.%04x rtype=0x%02x => online=0 rc=EAGAIN\n",
-			   AP_QID_CARD(zq->queue->qid),
-			   AP_QID_QUEUE(zq->queue->qid),
-			   (unsigned int) rtype);
-		return -EAGAIN;	/* repeat the request on a different device. */
+		       AP_QID_QUEUE(zq->queue->qid),
+		       (int) rtype);
+		ZCRYPT_DBF_ERR("dev=%02x.%04x unknown response type 0x%02x => online=0 rc=EAGAIN\n",
+			       AP_QID_CARD(zq->queue->qid),
+			       AP_QID_QUEUE(zq->queue->qid),
+			       (int) rtype);
+		return -EAGAIN;
 	}
 }
 
@@ -422,22 +433,20 @@
 		.reply_code = REP82_ERROR_MACHINE_FAILURE,
 	};
 	struct type80_hdr *t80h;
-	int length;
+	int len;
 
 	/* Copy the reply message to the request message buffer. */
 	if (!reply)
 		goto out;	/* ap_msg->rc indicates the error */
-	t80h = reply->message;
+	t80h = reply->msg;
 	if (t80h->type == TYPE80_RSP_CODE) {
 		if (aq->ap_dev.device_type == AP_DEVICE_TYPE_CEX2A)
-			length = min_t(int,
-				       CEX2A_MAX_RESPONSE_SIZE, t80h->len);
+			len = min_t(int, CEX2A_MAX_RESPONSE_SIZE, t80h->len);
 		else
-			length = min_t(int,
-				       CEX3A_MAX_RESPONSE_SIZE, t80h->len);
-		memcpy(msg->message, reply->message, length);
+			len = min_t(int, CEX3A_MAX_RESPONSE_SIZE, t80h->len);
+		memcpy(msg->msg, reply->msg, len);
 	} else
-		memcpy(msg->message, reply->message, sizeof(error_reply));
+		memcpy(msg->msg, reply->msg, sizeof(error_reply));
 out:
 	complete((struct completion *) msg->private);
 }
@@ -452,41 +461,41 @@
  * @mex: pointer to the modexpo request buffer
  */
 static long zcrypt_cex2a_modexpo(struct zcrypt_queue *zq,
-				 struct ica_rsa_modexpo *mex)
+				 struct ica_rsa_modexpo *mex,
+				 struct ap_message *ap_msg)
 {
-	struct ap_message ap_msg;
 	struct completion work;
 	int rc;
 
-	ap_init_message(&ap_msg);
 	if (zq->zcard->user_space_type == ZCRYPT_CEX2A)
-		ap_msg.message = kmalloc(MSGTYPE50_CRB2_MAX_MSG_SIZE,
-					 GFP_KERNEL);
+		ap_msg->msg = kmalloc(MSGTYPE50_CRB2_MAX_MSG_SIZE, GFP_KERNEL);
 	else
-		ap_msg.message = kmalloc(MSGTYPE50_CRB3_MAX_MSG_SIZE,
-					 GFP_KERNEL);
-	if (!ap_msg.message)
+		ap_msg->msg = kmalloc(MSGTYPE50_CRB3_MAX_MSG_SIZE, GFP_KERNEL);
+	if (!ap_msg->msg)
 		return -ENOMEM;
-	ap_msg.receive = zcrypt_cex2a_receive;
-	ap_msg.psmid = (((unsigned long long) current->pid) << 32) +
-				atomic_inc_return(&zcrypt_step);
-	ap_msg.private = &work;
-	rc = ICAMEX_msg_to_type50MEX_msg(zq, &ap_msg, mex);
+	ap_msg->receive = zcrypt_cex2a_receive;
+	ap_msg->psmid = (((unsigned long long) current->pid) << 32) +
+		atomic_inc_return(&zcrypt_step);
+	ap_msg->private = &work;
+	rc = ICAMEX_msg_to_type50MEX_msg(zq, ap_msg, mex);
 	if (rc)
-		goto out_free;
+		goto out;
 	init_completion(&work);
-	ap_queue_message(zq->queue, &ap_msg);
+	rc = ap_queue_message(zq->queue, ap_msg);
+	if (rc)
+		goto out;
 	rc = wait_for_completion_interruptible(&work);
 	if (rc == 0) {
-		rc = ap_msg.rc;
+		rc = ap_msg->rc;
 		if (rc == 0)
-			rc = convert_response(zq, &ap_msg, mex->outputdata,
-					      mex->outputdatalength);
+			rc = convert_response_cex2a(zq, ap_msg,
+						    mex->outputdata,
+						    mex->outputdatalength);
 	} else
 		/* Signal pending. */
-		ap_cancel_message(zq->queue, &ap_msg);
-out_free:
-	kfree(ap_msg.message);
+		ap_cancel_message(zq->queue, ap_msg);
+out:
+	ap_msg->private = NULL;
 	return rc;
 }
 
@@ -498,41 +507,41 @@
  * @crt: pointer to the modexpoc_crt request buffer
  */
 static long zcrypt_cex2a_modexpo_crt(struct zcrypt_queue *zq,
-				     struct ica_rsa_modexpo_crt *crt)
+				     struct ica_rsa_modexpo_crt *crt,
+				     struct ap_message *ap_msg)
 {
-	struct ap_message ap_msg;
 	struct completion work;
 	int rc;
 
-	ap_init_message(&ap_msg);
 	if (zq->zcard->user_space_type == ZCRYPT_CEX2A)
-		ap_msg.message = kmalloc(MSGTYPE50_CRB2_MAX_MSG_SIZE,
-					 GFP_KERNEL);
+		ap_msg->msg = kmalloc(MSGTYPE50_CRB2_MAX_MSG_SIZE, GFP_KERNEL);
 	else
-		ap_msg.message = kmalloc(MSGTYPE50_CRB3_MAX_MSG_SIZE,
-					 GFP_KERNEL);
-	if (!ap_msg.message)
+		ap_msg->msg = kmalloc(MSGTYPE50_CRB3_MAX_MSG_SIZE, GFP_KERNEL);
+	if (!ap_msg->msg)
 		return -ENOMEM;
-	ap_msg.receive = zcrypt_cex2a_receive;
-	ap_msg.psmid = (((unsigned long long) current->pid) << 32) +
-				atomic_inc_return(&zcrypt_step);
-	ap_msg.private = &work;
-	rc = ICACRT_msg_to_type50CRT_msg(zq, &ap_msg, crt);
+	ap_msg->receive = zcrypt_cex2a_receive;
+	ap_msg->psmid = (((unsigned long long) current->pid) << 32) +
+		atomic_inc_return(&zcrypt_step);
+	ap_msg->private = &work;
+	rc = ICACRT_msg_to_type50CRT_msg(zq, ap_msg, crt);
 	if (rc)
-		goto out_free;
+		goto out;
 	init_completion(&work);
-	ap_queue_message(zq->queue, &ap_msg);
+	rc = ap_queue_message(zq->queue, ap_msg);
+	if (rc)
+		goto out;
 	rc = wait_for_completion_interruptible(&work);
 	if (rc == 0) {
-		rc = ap_msg.rc;
+		rc = ap_msg->rc;
 		if (rc == 0)
-			rc = convert_response(zq, &ap_msg, crt->outputdata,
-					      crt->outputdatalength);
+			rc = convert_response_cex2a(zq, ap_msg,
+						    crt->outputdata,
+						    crt->outputdatalength);
 	} else
 		/* Signal pending. */
-		ap_cancel_message(zq->queue, &ap_msg);
-out_free:
-	kfree(ap_msg.message);
+		ap_cancel_message(zq->queue, ap_msg);
+out:
+	ap_msg->private = NULL;
 	return rc;
 }
 
diff --git a/drivers/s390/crypto/zcrypt_msgtype6.c b/drivers/s390/crypto/zcrypt_msgtype6.c
index a36251d..307f906 100644
--- a/drivers/s390/crypto/zcrypt_msgtype6.c
+++ b/drivers/s390/crypto/zcrypt_msgtype6.c
@@ -266,7 +266,7 @@
 		struct function_and_rules_block fr;
 		unsigned short length;
 		char text[0];
-	} __packed * msg = ap_msg->message;
+	} __packed * msg = ap_msg->msg;
 	int size;
 
 	/*
@@ -301,7 +301,7 @@
 
 	msg->cprbx.req_parml = size - sizeof(msg->hdr) - sizeof(msg->cprbx);
 
-	ap_msg->length = size;
+	ap_msg->len = size;
 	return 0;
 }
 
@@ -336,7 +336,7 @@
 		struct function_and_rules_block fr;
 		unsigned short length;
 		char text[0];
-	} __packed * msg = ap_msg->message;
+	} __packed * msg = ap_msg->msg;
 	int size;
 
 	/*
@@ -370,7 +370,7 @@
 
 	msg->fr = static_pkd_fnr;
 
-	ap_msg->length = size;
+	ap_msg->len = size;
 	return 0;
 }
 
@@ -388,7 +388,7 @@
 	struct type86_fmt2_ext fmt2;
 } __packed;
 
-static int XCRB_msg_to_type6CPRB_msgX(struct ap_message *ap_msg,
+static int XCRB_msg_to_type6CPRB_msgX(bool userspace, struct ap_message *ap_msg,
 				      struct ica_xcRB *xcRB,
 				      unsigned int *fcode,
 				      unsigned short **dom)
@@ -400,11 +400,11 @@
 	struct {
 		struct type6_hdr hdr;
 		struct CPRBX cprbx;
-	} __packed * msg = ap_msg->message;
+	} __packed * msg = ap_msg->msg;
 
 	int rcblen = CEIL4(xcRB->request_control_blk_length);
 	int replylen, req_sumlen, resp_sumlen;
-	char *req_data = ap_msg->message + sizeof(struct type6_hdr) + rcblen;
+	char *req_data = ap_msg->msg + sizeof(struct type6_hdr) + rcblen;
 	char *function_code;
 
 	if (CEIL4(xcRB->request_control_blk_length) <
@@ -412,10 +412,10 @@
 		return -EINVAL; /* overflow after alignment*/
 
 	/* length checks */
-	ap_msg->length = sizeof(struct type6_hdr) +
+	ap_msg->len = sizeof(struct type6_hdr) +
 		CEIL4(xcRB->request_control_blk_length) +
 		xcRB->request_data_length;
-	if (ap_msg->length > MSGTYPE06_MAX_MSG_SIZE)
+	if (ap_msg->len > MSGTYPE06_MAX_MSG_SIZE)
 		return -EINVAL;
 
 	/*
@@ -465,8 +465,8 @@
 	msg->hdr.FromCardLen2 = xcRB->reply_data_length;
 
 	/* prepare CPRB */
-	if (copy_from_user(&(msg->cprbx), xcRB->request_control_blk_addr,
-		    xcRB->request_control_blk_length))
+	if (z_copy_from_user(userspace, &(msg->cprbx), xcRB->request_control_blk_addr,
+			     xcRB->request_control_blk_length))
 		return -EFAULT;
 	if (msg->cprbx.cprb_len + sizeof(msg->hdr.function_code) >
 	    xcRB->request_control_blk_length)
@@ -480,22 +480,25 @@
 
 	if (memcmp(function_code, "US", 2) == 0
 	    || memcmp(function_code, "AU", 2) == 0)
-		ap_msg->special = 1;
-	else
-		ap_msg->special = 0;
+		ap_msg->flags |= AP_MSG_FLAG_SPECIAL;
+
+#ifdef CONFIG_ZCRYPT_DEBUG
+	if (ap_msg->fi.flags & AP_FI_FLAG_TOGGLE_SPECIAL)
+		ap_msg->flags ^= AP_MSG_FLAG_SPECIAL;
+#endif
 
 	/* copy data block */
 	if (xcRB->request_data_length &&
-	    copy_from_user(req_data, xcRB->request_data_address,
-		xcRB->request_data_length))
+	    z_copy_from_user(userspace, req_data, xcRB->request_data_address,
+			     xcRB->request_data_length))
 		return -EFAULT;
 
 	return 0;
 }
 
-static int xcrb_msg_to_type6_ep11cprb_msgx(struct ap_message *ap_msg,
-				       struct ep11_urb *xcRB,
-				       unsigned int *fcode)
+static int xcrb_msg_to_type6_ep11cprb_msgx(bool userspace, struct ap_message *ap_msg,
+					   struct ep11_urb *xcRB,
+					   unsigned int *fcode)
 {
 	unsigned int lfmt;
 	static struct type6_hdr static_type6_ep11_hdr = {
@@ -512,7 +515,7 @@
 		struct ep11_cprb cprbx;
 		unsigned char	pld_tag;	/* fixed value 0x30 */
 		unsigned char	pld_lenfmt;	/* payload length format */
-	} __packed * msg = ap_msg->message;
+	} __packed * msg = ap_msg->msg;
 
 	struct pld_hdr {
 		unsigned char	func_tag;	/* fixed value 0x4 */
@@ -527,7 +530,7 @@
 		return -EINVAL; /* overflow after alignment*/
 
 	/* length checks */
-	ap_msg->length = sizeof(struct type6_hdr) + xcRB->req_len;
+	ap_msg->len = sizeof(struct type6_hdr) + xcRB->req_len;
 	if (CEIL4(xcRB->req_len) > MSGTYPE06_MAX_MSG_SIZE -
 				   (sizeof(struct type6_hdr)))
 		return -EINVAL;
@@ -545,8 +548,8 @@
 	msg->hdr.FromCardLen1 = xcRB->resp_len;
 
 	/* Import CPRB data from the ioctl input parameter */
-	if (copy_from_user(&(msg->cprbx.cprb_len),
-			   (char __force __user *)xcRB->req, xcRB->req_len)) {
+	if (z_copy_from_user(userspace, &(msg->cprbx.cprb_len),
+			     (char __force __user *)xcRB->req, xcRB->req_len)) {
 		return -EFAULT;
 	}
 
@@ -569,7 +572,12 @@
 
 	/* enable special processing based on the cprbs flags special bit */
 	if (msg->cprbx.flags & 0x20)
-		ap_msg->special = 1;
+		ap_msg->flags |= AP_MSG_FLAG_SPECIAL;
+
+#ifdef CONFIG_ZCRYPT_DEBUG
+	if (ap_msg->fi.flags & AP_FI_FLAG_TOGGLE_SPECIAL)
+		ap_msg->flags ^= AP_MSG_FLAG_SPECIAL;
+#endif
 
 	return 0;
 }
@@ -590,7 +598,7 @@
 	struct CPRBX cprbx;
 	unsigned char pad[4];	/* 4 byte function code/rules block ? */
 	unsigned short length;
-	char text[0];
+	char text[];
 } __packed;
 
 struct type86_ep11_reply {
@@ -639,7 +647,7 @@
 		0x35, 0x9D, 0xD3, 0xD3, 0xA7, 0x9D, 0x5D, 0x41,
 		0x6F, 0x65, 0x1B, 0xCF, 0xA9, 0x87, 0x91, 0x09
 	};
-	struct type86x_reply *msg = reply->message;
+	struct type86x_reply *msg = reply->msg;
 	unsigned short service_rc, service_rs;
 	unsigned int reply_len, pad_len;
 	char *data;
@@ -652,23 +660,22 @@
 		    (service_rc == 8 && service_rs == 72) ||
 		    (service_rc == 8 && service_rs == 770) ||
 		    (service_rc == 12 && service_rs == 769)) {
-			ZCRYPT_DBF(DBF_DEBUG,
-				   "device=%02x.%04x rc/rs=%d/%d => rc=EINVAL\n",
-				   AP_QID_CARD(zq->queue->qid),
-				   AP_QID_QUEUE(zq->queue->qid),
-				   (int) service_rc, (int) service_rs);
+			ZCRYPT_DBF_WARN("dev=%02x.%04x rc/rs=%d/%d => rc=EINVAL\n",
+					AP_QID_CARD(zq->queue->qid),
+					AP_QID_QUEUE(zq->queue->qid),
+					(int) service_rc, (int) service_rs);
 			return -EINVAL;
 		}
 		zq->online = 0;
-		pr_err("Cryptographic device %02x.%04x failed and was set offline\n",
+		pr_err("Crypto dev=%02x.%04x rc/rs=%d/%d online=0 rc=EAGAIN\n",
 		       AP_QID_CARD(zq->queue->qid),
-		       AP_QID_QUEUE(zq->queue->qid));
-		ZCRYPT_DBF(DBF_ERR,
-			   "device=%02x.%04x rc/rs=%d/%d => online=0 rc=EAGAIN\n",
-			   AP_QID_CARD(zq->queue->qid),
-			   AP_QID_QUEUE(zq->queue->qid),
-			   (int) service_rc, (int) service_rs);
-		return -EAGAIN;	/* repeat the request on a different device. */
+		       AP_QID_QUEUE(zq->queue->qid),
+		       (int) service_rc, (int) service_rs);
+		ZCRYPT_DBF_ERR("dev=%02x.%04x rc/rs=%d/%d => online=0 rc=EAGAIN\n",
+			       AP_QID_CARD(zq->queue->qid),
+			       AP_QID_QUEUE(zq->queue->qid),
+			       (int) service_rc, (int) service_rs);
+		return -EAGAIN;
 	}
 	data = msg->text;
 	reply_len = msg->length - 2;
@@ -709,23 +716,23 @@
  *
  * Returns 0 on success or -EINVAL, -EFAULT, -EAGAIN in case of an error.
  */
-static int convert_type86_xcrb(struct zcrypt_queue *zq,
+static int convert_type86_xcrb(bool userspace, struct zcrypt_queue *zq,
 			       struct ap_message *reply,
 			       struct ica_xcRB *xcRB)
 {
-	struct type86_fmt2_msg *msg = reply->message;
-	char *data = reply->message;
+	struct type86_fmt2_msg *msg = reply->msg;
+	char *data = reply->msg;
 
 	/* Copy CPRB to user */
-	if (copy_to_user(xcRB->reply_control_blk_addr,
-		data + msg->fmt2.offset1, msg->fmt2.count1))
+	if (z_copy_to_user(userspace, xcRB->reply_control_blk_addr,
+			   data + msg->fmt2.offset1, msg->fmt2.count1))
 		return -EFAULT;
 	xcRB->reply_control_blk_length = msg->fmt2.count1;
 
 	/* Copy data buffer to user */
 	if (msg->fmt2.count2)
-		if (copy_to_user(xcRB->reply_data_addr,
-			data + msg->fmt2.offset2, msg->fmt2.count2))
+		if (z_copy_to_user(userspace, xcRB->reply_data_addr,
+				   data + msg->fmt2.offset2, msg->fmt2.count2))
 			return -EFAULT;
 	xcRB->reply_data_length = msg->fmt2.count2;
 	return 0;
@@ -740,19 +747,19 @@
  *
  * Returns 0 on success or -EINVAL, -EFAULT, -EAGAIN in case of an error.
  */
-static int convert_type86_ep11_xcrb(struct zcrypt_queue *zq,
+static int convert_type86_ep11_xcrb(bool userspace, struct zcrypt_queue *zq,
 				    struct ap_message *reply,
 				    struct ep11_urb *xcRB)
 {
-	struct type86_fmt2_msg *msg = reply->message;
-	char *data = reply->message;
+	struct type86_fmt2_msg *msg = reply->msg;
+	char *data = reply->msg;
 
 	if (xcRB->resp_len < msg->fmt2.count1)
 		return -EINVAL;
 
 	/* Copy response CPRB to user */
-	if (copy_to_user((char __force __user *)xcRB->resp,
-			 data + msg->fmt2.offset1, msg->fmt2.count1))
+	if (z_copy_to_user(userspace, (char __force __user *)xcRB->resp,
+			   data + msg->fmt2.offset1, msg->fmt2.count1))
 		return -EFAULT;
 	xcRB->resp_len = msg->fmt2.count1;
 	return 0;
@@ -766,8 +773,8 @@
 		struct type86_hdr hdr;
 		struct type86_fmt2_ext fmt2;
 		struct CPRBX cprbx;
-	} __packed * msg = reply->message;
-	char *data = reply->message;
+	} __packed * msg = reply->msg;
+	char *data = reply->msg;
 
 	if (msg->cprbx.ccp_rtcode != 0 || msg->cprbx.ccp_rscode != 0)
 		return -EINVAL;
@@ -780,7 +787,7 @@
 			    char __user *outputdata,
 			    unsigned int outputdatalength)
 {
-	struct type86x_reply *msg = reply->message;
+	struct type86x_reply *msg = reply->msg;
 
 	switch (msg->hdr.type) {
 	case TYPE82_RSP_CODE:
@@ -801,26 +808,27 @@
 		if (msg->cprbx.cprb_ver_id == 0x02)
 			return convert_type86_ica(zq, reply,
 						  outputdata, outputdatalength);
-		/* fall through - wrong cprb version is an unknown response */
-	default: /* Unknown response type, this should NEVER EVER happen */
+		fallthrough;	/* wrong cprb version is an unknown response */
+	default:
+		/* Unknown response type, this should NEVER EVER happen */
 		zq->online = 0;
-		pr_err("Cryptographic device %02x.%04x failed and was set offline\n",
+		pr_err("Crypto dev=%02x.%04x unknown response type 0x%02x => online=0 rc=EAGAIN\n",
 		       AP_QID_CARD(zq->queue->qid),
-		       AP_QID_QUEUE(zq->queue->qid));
-		ZCRYPT_DBF(DBF_ERR,
-			   "device=%02x.%04x rtype=0x%02x => online=0 rc=EAGAIN\n",
-			   AP_QID_CARD(zq->queue->qid),
-			   AP_QID_QUEUE(zq->queue->qid),
-			   (int) msg->hdr.type);
-		return -EAGAIN;	/* repeat the request on a different device. */
+		       AP_QID_QUEUE(zq->queue->qid),
+		       (int) msg->hdr.type);
+		ZCRYPT_DBF_ERR("dev=%02x.%04x unknown response type 0x%02x => online=0 rc=EAGAIN\n",
+			       AP_QID_CARD(zq->queue->qid),
+			       AP_QID_QUEUE(zq->queue->qid),
+			       (int) msg->hdr.type);
+		return -EAGAIN;
 	}
 }
 
-static int convert_response_xcrb(struct zcrypt_queue *zq,
-			    struct ap_message *reply,
-			    struct ica_xcRB *xcRB)
+static int convert_response_xcrb(bool userspace, struct zcrypt_queue *zq,
+				 struct ap_message *reply,
+				 struct ica_xcRB *xcRB)
 {
-	struct type86x_reply *msg = reply->message;
+	struct type86x_reply *msg = reply->msg;
 
 	switch (msg->hdr.type) {
 	case TYPE82_RSP_CODE:
@@ -833,27 +841,27 @@
 			return convert_error(zq, reply);
 		}
 		if (msg->cprbx.cprb_ver_id == 0x02)
-			return convert_type86_xcrb(zq, reply, xcRB);
-		/* fall through - wrong cprb version is an unknown response */
+			return convert_type86_xcrb(userspace, zq, reply, xcRB);
+		fallthrough;	/* wrong cprb version is an unknown response */
 	default: /* Unknown response type, this should NEVER EVER happen */
 		xcRB->status = 0x0008044DL; /* HDD_InvalidParm */
 		zq->online = 0;
-		pr_err("Cryptographic device %02x.%04x failed and was set offline\n",
+		pr_err("Crypto dev=%02x.%04x unknown response type 0x%02x => online=0 rc=EAGAIN\n",
 		       AP_QID_CARD(zq->queue->qid),
-		       AP_QID_QUEUE(zq->queue->qid));
-		ZCRYPT_DBF(DBF_ERR,
-			   "device=%02x.%04x rtype=0x%02x => online=0 rc=EAGAIN\n",
-			   AP_QID_CARD(zq->queue->qid),
-			   AP_QID_QUEUE(zq->queue->qid),
-			   (int) msg->hdr.type);
-		return -EAGAIN;	/* repeat the request on a different device. */
+		       AP_QID_QUEUE(zq->queue->qid),
+		       (int) msg->hdr.type);
+		ZCRYPT_DBF_ERR("dev=%02x.%04x unknown response type 0x%02x => online=0 rc=EAGAIN\n",
+			       AP_QID_CARD(zq->queue->qid),
+			       AP_QID_QUEUE(zq->queue->qid),
+			       (int) msg->hdr.type);
+		return -EAGAIN;
 	}
 }
 
-static int convert_response_ep11_xcrb(struct zcrypt_queue *zq,
-	struct ap_message *reply, struct ep11_urb *xcRB)
+static int convert_response_ep11_xcrb(bool userspace, struct zcrypt_queue *zq,
+				      struct ap_message *reply, struct ep11_urb *xcRB)
 {
-	struct type86_ep11_reply *msg = reply->message;
+	struct type86_ep11_reply *msg = reply->msg;
 
 	switch (msg->hdr.type) {
 	case TYPE82_RSP_CODE:
@@ -863,19 +871,19 @@
 		if (msg->hdr.reply_code)
 			return convert_error(zq, reply);
 		if (msg->cprbx.cprb_ver_id == 0x04)
-			return convert_type86_ep11_xcrb(zq, reply, xcRB);
-		/* fall through - wrong cprb version is an unknown resp */
+			return convert_type86_ep11_xcrb(userspace, zq, reply, xcRB);
+		fallthrough;	/* wrong cprb version is an unknown resp */
 	default: /* Unknown response type, this should NEVER EVER happen */
 		zq->online = 0;
-		pr_err("Cryptographic device %02x.%04x failed and was set offline\n",
+		pr_err("Crypto dev=%02x.%04x unknown response type 0x%02x => online=0 rc=EAGAIN\n",
 		       AP_QID_CARD(zq->queue->qid),
-		       AP_QID_QUEUE(zq->queue->qid));
-		ZCRYPT_DBF(DBF_ERR,
-			   "device=%02x.%04x rtype=0x%02x => online=0 rc=EAGAIN\n",
-			   AP_QID_CARD(zq->queue->qid),
-			   AP_QID_QUEUE(zq->queue->qid),
-			   (int) msg->hdr.type);
-		return -EAGAIN; /* repeat the request on a different device. */
+		       AP_QID_QUEUE(zq->queue->qid),
+		       (int) msg->hdr.type);
+		ZCRYPT_DBF_ERR("dev=%02x.%04x unknown response type 0x%02x => online=0 rc=EAGAIN\n",
+			       AP_QID_CARD(zq->queue->qid),
+			       AP_QID_QUEUE(zq->queue->qid),
+			       (int) msg->hdr.type);
+		return -EAGAIN;
 	}
 }
 
@@ -883,7 +891,7 @@
 				 struct ap_message *reply,
 				 char *data)
 {
-	struct type86x_reply *msg = reply->message;
+	struct type86x_reply *msg = reply->msg;
 
 	switch (msg->hdr.type) {
 	case TYPE82_RSP_CODE:
@@ -894,18 +902,18 @@
 			return -EINVAL;
 		if (msg->cprbx.cprb_ver_id == 0x02)
 			return convert_type86_rng(zq, reply, data);
-		/* fall through - wrong cprb version is an unknown response */
+		fallthrough;	/* wrong cprb version is an unknown response */
 	default: /* Unknown response type, this should NEVER EVER happen */
 		zq->online = 0;
-		pr_err("Cryptographic device %02x.%04x failed and was set offline\n",
+		pr_err("Crypto dev=%02x.%04x unknown response type 0x%02x => online=0 rc=EAGAIN\n",
 		       AP_QID_CARD(zq->queue->qid),
-		       AP_QID_QUEUE(zq->queue->qid));
-		ZCRYPT_DBF(DBF_ERR,
-			   "device=%02x.%04x rtype=0x%02x => online=0 rc=EAGAIN\n",
-			   AP_QID_CARD(zq->queue->qid),
-			   AP_QID_QUEUE(zq->queue->qid),
-			   (int) msg->hdr.type);
-		return -EAGAIN;	/* repeat the request on a different device. */
+		       AP_QID_QUEUE(zq->queue->qid),
+		       (int) msg->hdr.type);
+		ZCRYPT_DBF_ERR("dev=%02x.%04x unknown response type 0x%02x => online=0 rc=EAGAIN\n",
+			       AP_QID_CARD(zq->queue->qid),
+			       AP_QID_QUEUE(zq->queue->qid),
+			       (int) msg->hdr.type);
+		return -EAGAIN;
 	}
 }
 
@@ -928,32 +936,30 @@
 	struct response_type *resp_type =
 		(struct response_type *) msg->private;
 	struct type86x_reply *t86r;
-	int length;
+	int len;
 
 	/* Copy the reply message to the request message buffer. */
 	if (!reply)
 		goto out;	/* ap_msg->rc indicates the error */
-	t86r = reply->message;
+	t86r = reply->msg;
 	if (t86r->hdr.type == TYPE86_RSP_CODE &&
 		 t86r->cprbx.cprb_ver_id == 0x02) {
 		switch (resp_type->type) {
 		case CEXXC_RESPONSE_TYPE_ICA:
-			length = sizeof(struct type86x_reply)
-				+ t86r->length - 2;
-			length = min(CEXXC_MAX_ICA_RESPONSE_SIZE, length);
-			memcpy(msg->message, reply->message, length);
+			len = sizeof(struct type86x_reply) + t86r->length - 2;
+			len = min_t(int, CEXXC_MAX_ICA_RESPONSE_SIZE, len);
+			memcpy(msg->msg, reply->msg, len);
 			break;
 		case CEXXC_RESPONSE_TYPE_XCRB:
-			length = t86r->fmt2.offset2 + t86r->fmt2.count2;
-			length = min(MSGTYPE06_MAX_MSG_SIZE, length);
-			memcpy(msg->message, reply->message, length);
+			len = t86r->fmt2.offset2 + t86r->fmt2.count2;
+			len = min_t(int, MSGTYPE06_MAX_MSG_SIZE, len);
+			memcpy(msg->msg, reply->msg, len);
 			break;
 		default:
-			memcpy(msg->message, &error_reply,
-			       sizeof(error_reply));
+			memcpy(msg->msg, &error_reply, sizeof(error_reply));
 		}
 	} else
-		memcpy(msg->message, reply->message, sizeof(error_reply));
+		memcpy(msg->msg, reply->msg, sizeof(error_reply));
 out:
 	complete(&(resp_type->work));
 }
@@ -977,25 +983,25 @@
 	struct response_type *resp_type =
 		(struct response_type *)msg->private;
 	struct type86_ep11_reply *t86r;
-	int length;
+	int len;
 
 	/* Copy the reply message to the request message buffer. */
 	if (!reply)
 		goto out;	/* ap_msg->rc indicates the error */
-	t86r = reply->message;
+	t86r = reply->msg;
 	if (t86r->hdr.type == TYPE86_RSP_CODE &&
 	    t86r->cprbx.cprb_ver_id == 0x04) {
 		switch (resp_type->type) {
 		case CEXXC_RESPONSE_TYPE_EP11:
-			length = t86r->fmt2.offset1 + t86r->fmt2.count1;
-			length = min(MSGTYPE06_MAX_MSG_SIZE, length);
-			memcpy(msg->message, reply->message, length);
+			len = t86r->fmt2.offset1 + t86r->fmt2.count1;
+			len = min_t(int, MSGTYPE06_MAX_MSG_SIZE, len);
+			memcpy(msg->msg, reply->msg, len);
 			break;
 		default:
-			memcpy(msg->message, &error_reply, sizeof(error_reply));
+			memcpy(msg->msg, &error_reply, sizeof(error_reply));
 		}
 	} else {
-		memcpy(msg->message, reply->message, sizeof(error_reply));
+		memcpy(msg->msg, reply->msg, sizeof(error_reply));
 	}
 out:
 	complete(&(resp_type->work));
@@ -1011,39 +1017,42 @@
  * @mex: pointer to the modexpo request buffer
  */
 static long zcrypt_msgtype6_modexpo(struct zcrypt_queue *zq,
-				  struct ica_rsa_modexpo *mex)
+				    struct ica_rsa_modexpo *mex,
+				    struct ap_message *ap_msg)
 {
-	struct ap_message ap_msg;
 	struct response_type resp_type = {
 		.type = CEXXC_RESPONSE_TYPE_ICA,
 	};
 	int rc;
 
-	ap_init_message(&ap_msg);
-	ap_msg.message = (void *) get_zeroed_page(GFP_KERNEL);
-	if (!ap_msg.message)
+	ap_msg->msg = (void *) get_zeroed_page(GFP_KERNEL);
+	if (!ap_msg->msg)
 		return -ENOMEM;
-	ap_msg.receive = zcrypt_msgtype6_receive;
-	ap_msg.psmid = (((unsigned long long) current->pid) << 32) +
-				atomic_inc_return(&zcrypt_step);
-	ap_msg.private = &resp_type;
-	rc = ICAMEX_msg_to_type6MEX_msgX(zq, &ap_msg, mex);
+	ap_msg->receive = zcrypt_msgtype6_receive;
+	ap_msg->psmid = (((unsigned long long) current->pid) << 32) +
+		atomic_inc_return(&zcrypt_step);
+	ap_msg->private = &resp_type;
+	rc = ICAMEX_msg_to_type6MEX_msgX(zq, ap_msg, mex);
 	if (rc)
 		goto out_free;
 	init_completion(&resp_type.work);
-	ap_queue_message(zq->queue, &ap_msg);
+	rc = ap_queue_message(zq->queue, ap_msg);
+	if (rc)
+		goto out_free;
 	rc = wait_for_completion_interruptible(&resp_type.work);
 	if (rc == 0) {
-		rc = ap_msg.rc;
+		rc = ap_msg->rc;
 		if (rc == 0)
-			rc = convert_response_ica(zq, &ap_msg,
+			rc = convert_response_ica(zq, ap_msg,
 						  mex->outputdata,
 						  mex->outputdatalength);
 	} else
 		/* Signal pending. */
-		ap_cancel_message(zq->queue, &ap_msg);
+		ap_cancel_message(zq->queue, ap_msg);
 out_free:
-	free_page((unsigned long) ap_msg.message);
+	free_page((unsigned long) ap_msg->msg);
+	ap_msg->private = NULL;
+	ap_msg->msg = NULL;
 	return rc;
 }
 
@@ -1055,40 +1064,43 @@
  * @crt: pointer to the modexpoc_crt request buffer
  */
 static long zcrypt_msgtype6_modexpo_crt(struct zcrypt_queue *zq,
-				      struct ica_rsa_modexpo_crt *crt)
+					struct ica_rsa_modexpo_crt *crt,
+					struct ap_message *ap_msg)
 {
-	struct ap_message ap_msg;
 	struct response_type resp_type = {
 		.type = CEXXC_RESPONSE_TYPE_ICA,
 	};
 	int rc;
 
-	ap_init_message(&ap_msg);
-	ap_msg.message = (void *) get_zeroed_page(GFP_KERNEL);
-	if (!ap_msg.message)
+	ap_msg->msg = (void *) get_zeroed_page(GFP_KERNEL);
+	if (!ap_msg->msg)
 		return -ENOMEM;
-	ap_msg.receive = zcrypt_msgtype6_receive;
-	ap_msg.psmid = (((unsigned long long) current->pid) << 32) +
-				atomic_inc_return(&zcrypt_step);
-	ap_msg.private = &resp_type;
-	rc = ICACRT_msg_to_type6CRT_msgX(zq, &ap_msg, crt);
+	ap_msg->receive = zcrypt_msgtype6_receive;
+	ap_msg->psmid = (((unsigned long long) current->pid) << 32) +
+		atomic_inc_return(&zcrypt_step);
+	ap_msg->private = &resp_type;
+	rc = ICACRT_msg_to_type6CRT_msgX(zq, ap_msg, crt);
 	if (rc)
 		goto out_free;
 	init_completion(&resp_type.work);
-	ap_queue_message(zq->queue, &ap_msg);
+	rc = ap_queue_message(zq->queue, ap_msg);
+	if (rc)
+		goto out_free;
 	rc = wait_for_completion_interruptible(&resp_type.work);
 	if (rc == 0) {
-		rc = ap_msg.rc;
+		rc = ap_msg->rc;
 		if (rc == 0)
-			rc = convert_response_ica(zq, &ap_msg,
+			rc = convert_response_ica(zq, ap_msg,
 						  crt->outputdata,
 						  crt->outputdatalength);
 	} else {
 		/* Signal pending. */
-		ap_cancel_message(zq->queue, &ap_msg);
+		ap_cancel_message(zq->queue, ap_msg);
 	}
 out_free:
-	free_page((unsigned long) ap_msg.message);
+	free_page((unsigned long) ap_msg->msg);
+	ap_msg->private = NULL;
+	ap_msg->msg = NULL;
 	return rc;
 }
 
@@ -1099,16 +1111,16 @@
  * by the caller with ap_init_message(). Also the caller has to
  * make sure ap_release_message() is always called even on failure.
  */
-unsigned int get_cprb_fc(struct ica_xcRB *xcRB,
-				struct ap_message *ap_msg,
-				unsigned int *func_code, unsigned short **dom)
+unsigned int get_cprb_fc(bool userspace, struct ica_xcRB *xcRB,
+			 struct ap_message *ap_msg,
+			 unsigned int *func_code, unsigned short **dom)
 {
 	struct response_type resp_type = {
 		.type = CEXXC_RESPONSE_TYPE_XCRB,
 	};
 
-	ap_msg->message = kmalloc(MSGTYPE06_MAX_MSG_SIZE, GFP_KERNEL);
-	if (!ap_msg->message)
+	ap_msg->msg = kmalloc(MSGTYPE06_MAX_MSG_SIZE, GFP_KERNEL);
+	if (!ap_msg->msg)
 		return -ENOMEM;
 	ap_msg->receive = zcrypt_msgtype6_receive;
 	ap_msg->psmid = (((unsigned long long) current->pid) << 32) +
@@ -1116,7 +1128,7 @@
 	ap_msg->private = kmemdup(&resp_type, sizeof(resp_type), GFP_KERNEL);
 	if (!ap_msg->private)
 		return -ENOMEM;
-	return XCRB_msg_to_type6CPRB_msgX(ap_msg, xcRB, func_code, dom);
+	return XCRB_msg_to_type6CPRB_msgX(userspace, ap_msg, xcRB, func_code, dom);
 }
 
 /**
@@ -1126,24 +1138,26 @@
  *	CEXxC device to the request distributor
  * @xcRB: pointer to the send_cprb request buffer
  */
-static long zcrypt_msgtype6_send_cprb(struct zcrypt_queue *zq,
-				    struct ica_xcRB *xcRB,
-				    struct ap_message *ap_msg)
+static long zcrypt_msgtype6_send_cprb(bool userspace, struct zcrypt_queue *zq,
+				      struct ica_xcRB *xcRB,
+				      struct ap_message *ap_msg)
 {
 	int rc;
 	struct response_type *rtype = (struct response_type *)(ap_msg->private);
 
 	init_completion(&rtype->work);
-	ap_queue_message(zq->queue, ap_msg);
+	rc = ap_queue_message(zq->queue, ap_msg);
+	if (rc)
+		goto out;
 	rc = wait_for_completion_interruptible(&rtype->work);
 	if (rc == 0) {
 		rc = ap_msg->rc;
 		if (rc == 0)
-			rc = convert_response_xcrb(zq, ap_msg, xcRB);
+			rc = convert_response_xcrb(userspace, zq, ap_msg, xcRB);
 	} else
 		/* Signal pending. */
 		ap_cancel_message(zq->queue, ap_msg);
-
+out:
 	return rc;
 }
 
@@ -1154,16 +1168,16 @@
  * by the caller with ap_init_message(). Also the caller has to
  * make sure ap_release_message() is always called even on failure.
  */
-unsigned int get_ep11cprb_fc(struct ep11_urb *xcrb,
-				    struct ap_message *ap_msg,
-				    unsigned int *func_code)
+unsigned int get_ep11cprb_fc(bool userspace, struct ep11_urb *xcrb,
+			     struct ap_message *ap_msg,
+			     unsigned int *func_code)
 {
 	struct response_type resp_type = {
 		.type = CEXXC_RESPONSE_TYPE_EP11,
 	};
 
-	ap_msg->message = kmalloc(MSGTYPE06_MAX_MSG_SIZE, GFP_KERNEL);
-	if (!ap_msg->message)
+	ap_msg->msg = kmalloc(MSGTYPE06_MAX_MSG_SIZE, GFP_KERNEL);
+	if (!ap_msg->msg)
 		return -ENOMEM;
 	ap_msg->receive = zcrypt_msgtype6_receive_ep11;
 	ap_msg->psmid = (((unsigned long long) current->pid) << 32) +
@@ -1171,7 +1185,7 @@
 	ap_msg->private = kmemdup(&resp_type, sizeof(resp_type), GFP_KERNEL);
 	if (!ap_msg->private)
 		return -ENOMEM;
-	return xcrb_msg_to_type6_ep11cprb_msgx(ap_msg, xcrb, func_code);
+	return xcrb_msg_to_type6_ep11cprb_msgx(userspace, ap_msg, xcrb, func_code);
 }
 
 /**
@@ -1181,7 +1195,7 @@
  *	  CEX4P device to the request distributor
  * @xcRB: pointer to the ep11 user request block
  */
-static long zcrypt_msgtype6_send_ep11_cprb(struct zcrypt_queue *zq,
+static long zcrypt_msgtype6_send_ep11_cprb(bool userspace, struct zcrypt_queue *zq,
 					   struct ep11_urb *xcrb,
 					   struct ap_message *ap_msg)
 {
@@ -1193,7 +1207,7 @@
 		struct ep11_cprb cprbx;
 		unsigned char	pld_tag;	/* fixed value 0x30 */
 		unsigned char	pld_lenfmt;	/* payload length format */
-	} __packed * msg = ap_msg->message;
+	} __packed * msg = ap_msg->msg;
 	struct pld_hdr {
 		unsigned char	func_tag;	/* fixed value 0x4 */
 		unsigned char	func_len;	/* fixed value 0x4 */
@@ -1236,16 +1250,18 @@
 	}
 
 	init_completion(&rtype->work);
-	ap_queue_message(zq->queue, ap_msg);
+	rc = ap_queue_message(zq->queue, ap_msg);
+	if (rc)
+		goto out;
 	rc = wait_for_completion_interruptible(&rtype->work);
 	if (rc == 0) {
 		rc = ap_msg->rc;
 		if (rc == 0)
-			rc = convert_response_ep11_xcrb(zq, ap_msg, xcrb);
+			rc = convert_response_ep11_xcrb(userspace, zq, ap_msg, xcrb);
 	} else
 		/* Signal pending. */
 		ap_cancel_message(zq->queue, ap_msg);
-
+out:
 	return rc;
 }
 
@@ -1256,8 +1272,8 @@
 		.type = CEXXC_RESPONSE_TYPE_XCRB,
 	};
 
-	ap_msg->message = kmalloc(MSGTYPE06_MAX_MSG_SIZE, GFP_KERNEL);
-	if (!ap_msg->message)
+	ap_msg->msg = kmalloc(MSGTYPE06_MAX_MSG_SIZE, GFP_KERNEL);
+	if (!ap_msg->msg)
 		return -ENOMEM;
 	ap_msg->receive = zcrypt_msgtype6_receive;
 	ap_msg->psmid = (((unsigned long long) current->pid) << 32) +
@@ -1290,14 +1306,16 @@
 		char rule[8];
 		short int verb_length;
 		short int key_length;
-	} __packed * msg = ap_msg->message;
+	} __packed * msg = ap_msg->msg;
 	struct response_type *rtype = (struct response_type *)(ap_msg->private);
 	int rc;
 
 	msg->cprbx.domain = AP_QID_QUEUE(zq->queue->qid);
 
 	init_completion(&rtype->work);
-	ap_queue_message(zq->queue, ap_msg);
+	rc = ap_queue_message(zq->queue, ap_msg);
+	if (rc)
+		goto out;
 	rc = wait_for_completion_interruptible(&rtype->work);
 	if (rc == 0) {
 		rc = ap_msg->rc;
@@ -1306,7 +1324,7 @@
 	} else
 		/* Signal pending. */
 		ap_cancel_message(zq->queue, ap_msg);
-
+out:
 	return rc;
 }
 
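For reference, the comment blocks above for get_cprb_fc() and get_ep11cprb_fc() spell out the lifecycle contract the reworked entry points rely on: the caller now owns the struct ap_message, initializes it with ap_init_message() before use, and must call ap_release_message() on every exit path, including failures. The following is a minimal, hypothetical caller sketch of that contract, not part of this patch; the real dispatch in zcrypt_api.c goes through the per-queue ops table, and example_send_cprb() is an assumed name used only for illustration.

/* Hypothetical caller sketch -- not part of this patch. */
#include "ap_bus.h"
#include "zcrypt_api.h"
#include "zcrypt_msgtype6.h"

static long example_send_cprb(bool userspace, struct zcrypt_queue *zq,
			      struct ica_xcRB *xcrb)
{
	struct ap_message ap_msg;
	unsigned int func_code;
	unsigned short *dom;
	long rc;

	ap_init_message(&ap_msg);	/* caller initializes the message */
	rc = get_cprb_fc(userspace, xcrb, &ap_msg, &func_code, &dom);
	if (rc)
		goto out;
	/* hand the prepared message to the queue's msgtype ops */
	rc = zq->ops->send_cprb(userspace, zq, xcrb, &ap_msg);
out:
	ap_release_message(&ap_msg);	/* always released, even on failure */
	return rc;
}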
diff --git a/drivers/s390/crypto/zcrypt_msgtype6.h b/drivers/s390/crypto/zcrypt_msgtype6.h
index 41a0df5..0a0bf07 100644
--- a/drivers/s390/crypto/zcrypt_msgtype6.h
+++ b/drivers/s390/crypto/zcrypt_msgtype6.h
@@ -96,9 +96,9 @@
 	unsigned int	  offset4;	/* 0x00000000			*/
 } __packed;
 
-unsigned int get_cprb_fc(struct ica_xcRB *, struct ap_message *,
+unsigned int get_cprb_fc(bool userspace, struct ica_xcRB *, struct ap_message *,
 			 unsigned int *, unsigned short **);
-unsigned int get_ep11cprb_fc(struct ep11_urb *, struct ap_message *,
+unsigned int get_ep11cprb_fc(bool userspace, struct ep11_urb *, struct ap_message *,
 			     unsigned int *);
 unsigned int get_rng_fc(struct ap_message *, int *, unsigned int *);
 
@@ -127,7 +127,7 @@
 		char rule[8];
 		short int verb_length;
 		short int key_length;
-	} __packed * msg = ap_msg->message;
+	} __packed * msg = ap_msg->msg;
 	static struct type6_hdr static_type6_hdrX = {
 		.type		= 0x06,
 		.offset1	= 0x00000058,
@@ -154,7 +154,7 @@
 	memcpy(msg->rule, "RANDOM  ", 8);
 	msg->verb_length = 0x02;
 	msg->key_length = 0x02;
-	ap_msg->length = sizeof(*msg);
+	ap_msg->len = sizeof(*msg);
 	*domain = (unsigned short)msg->cprbx.domain;
 }
 
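Running through all of the hunks above is the rename of the struct ap_message buffer fields. As a reading aid only, here is a trimmed, assumed view of the members this patch actually touches, with the old names noted; the authoritative definition lives in ap_bus.h in the same tree and contains more fields than shown.

/* Trimmed sketch of struct ap_message as used above -- see ap_bus.h. */
struct ap_message {
	unsigned long long psmid;	/* message id, built as (pid << 32) + counter above */
	void *msg;			/* request/reply buffer (was ->message) */
	unsigned int len;		/* buffer length in bytes (was ->length) */
	int rc;				/* internal return code checked after completion */
	void *private;			/* per-request data, e.g. struct response_type */
	/* receive callback and list linkage omitted in this sketch */
};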
diff --git a/drivers/s390/crypto/zcrypt_queue.c b/drivers/s390/crypto/zcrypt_queue.c
index 522c4bc..c3ffbd2 100644
--- a/drivers/s390/crypto/zcrypt_queue.c
+++ b/drivers/s390/crypto/zcrypt_queue.c
@@ -40,22 +40,27 @@
 			   struct device_attribute *attr,
 			   char *buf)
 {
-	struct zcrypt_queue *zq = to_ap_queue(dev)->private;
+	struct ap_queue *aq = to_ap_queue(dev);
+	struct zcrypt_queue *zq = aq->private;
+	int online = aq->config && zq->online ? 1 : 0;
 
-	return snprintf(buf, PAGE_SIZE, "%d\n", zq->online);
+	return scnprintf(buf, PAGE_SIZE, "%d\n", online);
 }
 
 static ssize_t online_store(struct device *dev,
 			    struct device_attribute *attr,
 			    const char *buf, size_t count)
 {
-	struct zcrypt_queue *zq = to_ap_queue(dev)->private;
+	struct ap_queue *aq = to_ap_queue(dev);
+	struct zcrypt_queue *zq = aq->private;
 	struct zcrypt_card *zc = zq->zcard;
 	int online;
 
 	if (sscanf(buf, "%d\n", &online) != 1 || online < 0 || online > 1)
 		return -EINVAL;
 
+	if (online && (!aq->config || !aq->card->config))
+		return -ENODEV;
 	if (online && !zc->online)
 		return -EINVAL;
 	zq->online = online;
@@ -78,7 +83,7 @@
 {
 	struct zcrypt_queue *zq = to_ap_queue(dev)->private;
 
-	return snprintf(buf, PAGE_SIZE, "%d\n", atomic_read(&zq->load));
+	return scnprintf(buf, PAGE_SIZE, "%d\n", atomic_read(&zq->load));
 }
 
 static DEVICE_ATTR_RO(load);
@@ -107,10 +112,10 @@
 	zq = kzalloc(sizeof(struct zcrypt_queue), GFP_KERNEL);
 	if (!zq)
 		return NULL;
-	zq->reply.message = kmalloc(max_response_size, GFP_KERNEL);
-	if (!zq->reply.message)
+	zq->reply.msg = kmalloc(max_response_size, GFP_KERNEL);
+	if (!zq->reply.msg)
 		goto out_free;
-	zq->reply.length = max_response_size;
+	zq->reply.len = max_response_size;
 	INIT_LIST_HEAD(&zq->list);
 	kref_init(&zq->refcount);
 	return zq;
@@ -123,7 +128,7 @@
 
 void zcrypt_queue_free(struct zcrypt_queue *zq)
 {
-	kfree(zq->reply.message);
+	kfree(zq->reply.msg);
 	kfree(zq);
 }
 EXPORT_SYMBOL(zcrypt_queue_free);
@@ -175,7 +180,6 @@
 				&zcrypt_queue_attr_group);
 	if (rc)
 		goto out;
-	get_device(&zq->queue->ap_dev.device);
 
 	if (zq->ops->rng) {
 		rc = zcrypt_rng_device_add();
@@ -187,7 +191,6 @@
 out_unregister:
 	sysfs_remove_group(&zq->queue->ap_dev.device.kobj,
 			   &zcrypt_queue_attr_group);
-	put_device(&zq->queue->ap_dev.device);
 out:
 	spin_lock(&zcrypt_list_lock);
 	list_del_init(&zq->list);
@@ -215,12 +218,11 @@
 	list_del_init(&zq->list);
 	zcrypt_device_count--;
 	spin_unlock(&zcrypt_list_lock);
-	zcrypt_card_put(zc);
 	if (zq->ops->rng)
 		zcrypt_rng_device_remove();
 	sysfs_remove_group(&zq->queue->ap_dev.device.kobj,
 			   &zcrypt_queue_attr_group);
-	put_device(&zq->queue->ap_dev.device);
+	zcrypt_card_put(zc);
 	zcrypt_queue_put(zq);
 }
 EXPORT_SYMBOL(zcrypt_queue_unregister);
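The zcrypt_queue.c hunks change the sysfs online semantics: online_show() now reports 1 only when the AP queue is configured and the zcrypt online flag is set, and online_store() refuses to switch a queue on while the queue or its card is deconfigured. Condensed below into two hypothetical helpers purely as a summary; the helper names are assumptions and do not appear in the patch.

/* Hypothetical summary helpers -- not part of this patch. */
#include <linux/errno.h>
#include "ap_bus.h"
#include "zcrypt_api.h"

static int zq_effective_online(struct ap_queue *aq, struct zcrypt_queue *zq)
{
	/* mirrors online_show(): hardware config state gates the zcrypt flag */
	return aq->config && zq->online ? 1 : 0;
}

static int zq_check_set_online(struct ap_queue *aq, struct zcrypt_queue *zq)
{
	/* mirrors online_store(): a deconfigured queue or card cannot go online */
	if (!aq->config || !aq->card->config)
		return -ENODEV;
	/* and the owning zcrypt card must itself be online */
	if (!zq->zcard->online)
		return -EINVAL;
	return 0;
}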