v4.19.13 snapshot.
diff --git a/include/crypto/acompress.h b/include/crypto/acompress.h
new file mode 100644
index 0000000..e328b52
--- /dev/null
+++ b/include/crypto/acompress.h
@@ -0,0 +1,269 @@
+/*
+ * Asynchronous Compression operations
+ *
+ * Copyright (c) 2016, Intel Corporation
+ * Authors: Weigang Li <weigang.li@intel.com>
+ *          Giovanni Cabiddu <giovanni.cabiddu@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ */
+#ifndef _CRYPTO_ACOMP_H
+#define _CRYPTO_ACOMP_H
+#include <linux/crypto.h>
+
+#define CRYPTO_ACOMP_ALLOC_OUTPUT	0x00000001
+
+/**
+ * struct acomp_req - asynchronous (de)compression request
+ *
+ * @base:	Common attributes for asynchronous crypto requests
+ * @src:	Source Data
+ * @dst:	Destination data
+ * @slen:	Size of the input buffer
+ * @dlen:	Size of the output buffer; updated with the number of bytes produced
+ * @flags:	Internal flags
+ * @__ctx:	Start of private context data
+ */
+struct acomp_req {
+	struct crypto_async_request base;
+	struct scatterlist *src;
+	struct scatterlist *dst;
+	unsigned int slen;
+	unsigned int dlen;
+	u32 flags;
+	void *__ctx[] CRYPTO_MINALIGN_ATTR;
+};
+
+/**
+ * struct crypto_acomp - user-instantiated objects which encapsulate
+ * algorithms and core processing logic
+ *
+ * @compress:		Function performs a compress operation
+ * @decompress:		Function performs a de-compress operation
+ * @dst_free:		Frees destination buffer if allocated inside the
+ *			algorithm
+ * @reqsize:		Context size for (de)compression requests
+ * @base:		Common crypto API algorithm data structure
+ */
+struct crypto_acomp {
+	int (*compress)(struct acomp_req *req);
+	int (*decompress)(struct acomp_req *req);
+	void (*dst_free)(struct scatterlist *dst);
+	unsigned int reqsize;
+	struct crypto_tfm base;
+};
+
+/**
+ * struct acomp_alg - asynchronous compression algorithm
+ *
+ * @compress:	Function performs a compress operation
+ * @decompress:	Function performs a de-compress operation
+ * @dst_free:	Frees destination buffer if allocated inside the algorithm
+ * @init:	Initialize the cryptographic transformation object.
+ *		This function is used to initialize the cryptographic
+ *		transformation object. This function is called only once at
+ *		the instantiation time, right after the transformation context
+ *		was allocated. In case the cryptographic hardware has some
+ *		special requirements which need to be handled by software, this
+ *		function shall check for the precise requirement of the
+ *		transformation and put any software fallbacks in place.
+ * @exit:	Deinitialize the cryptographic transformation object. This is a
+ *		counterpart to @init, used to remove various changes set in
+ *		@init.
+ *
+ * @reqsize:	Context size for (de)compression requests
+ * @base:	Common crypto API algorithm data structure
+ */
+struct acomp_alg {
+	int (*compress)(struct acomp_req *req);
+	int (*decompress)(struct acomp_req *req);
+	void (*dst_free)(struct scatterlist *dst);
+	int (*init)(struct crypto_acomp *tfm);
+	void (*exit)(struct crypto_acomp *tfm);
+	unsigned int reqsize;
+	struct crypto_alg base;
+};
+
+/**
+ * DOC: Asynchronous Compression API
+ *
+ * The Asynchronous Compression API is used with the algorithms of type
+ * CRYPTO_ALG_TYPE_ACOMPRESS (listed as type "acomp" in /proc/crypto)
+ */
+
+/**
+ * crypto_alloc_acomp() -- allocate ACOMPRESS tfm handle
+ * @alg_name:	is the cra_name / name or cra_driver_name / driver name of the
+ *		compression algorithm e.g. "deflate"
+ * @type:	specifies the type of the algorithm
+ * @mask:	specifies the mask for the algorithm
+ *
+ * Allocate a handle for a compression algorithm. The returned struct
+ * crypto_acomp is the handle that is required for any subsequent
+ * API invocation for the compression operations.
+ *
+ * Return:	allocated handle in case of success; IS_ERR() is true in case
+ *		of an error, PTR_ERR() returns the error code.
+ */
+struct crypto_acomp *crypto_alloc_acomp(const char *alg_name, u32 type,
+					u32 mask);
+
+static inline struct crypto_tfm *crypto_acomp_tfm(struct crypto_acomp *tfm)
+{
+	return &tfm->base;
+}
+
+static inline struct acomp_alg *__crypto_acomp_alg(struct crypto_alg *alg)
+{
+	return container_of(alg, struct acomp_alg, base);
+}
+
+static inline struct crypto_acomp *__crypto_acomp_tfm(struct crypto_tfm *tfm)
+{
+	return container_of(tfm, struct crypto_acomp, base);
+}
+
+static inline struct acomp_alg *crypto_acomp_alg(struct crypto_acomp *tfm)
+{
+	return __crypto_acomp_alg(crypto_acomp_tfm(tfm)->__crt_alg);
+}
+
+static inline unsigned int crypto_acomp_reqsize(struct crypto_acomp *tfm)
+{
+	return tfm->reqsize;
+}
+
+static inline void acomp_request_set_tfm(struct acomp_req *req,
+					 struct crypto_acomp *tfm)
+{
+	req->base.tfm = crypto_acomp_tfm(tfm);
+}
+
+static inline struct crypto_acomp *crypto_acomp_reqtfm(struct acomp_req *req)
+{
+	return __crypto_acomp_tfm(req->base.tfm);
+}
+
+/**
+ * crypto_free_acomp() -- free ACOMPRESS tfm handle
+ *
+ * @tfm:	ACOMPRESS tfm handle allocated with crypto_alloc_acomp()
+ */
+static inline void crypto_free_acomp(struct crypto_acomp *tfm)
+{
+	crypto_destroy_tfm(tfm, crypto_acomp_tfm(tfm));
+}
+
+static inline int crypto_has_acomp(const char *alg_name, u32 type, u32 mask)
+{
+	type &= ~CRYPTO_ALG_TYPE_MASK;
+	type |= CRYPTO_ALG_TYPE_ACOMPRESS;
+	mask |= CRYPTO_ALG_TYPE_MASK;
+
+	return crypto_has_alg(alg_name, type, mask);
+}
+
+/**
+ * acomp_request_alloc() -- allocates asynchronous (de)compression request
+ *
+ * @tfm:	ACOMPRESS tfm handle allocated with crypto_alloc_acomp()
+ *
+ * Return:	allocated handle in case of success or NULL in case of an error
+ */
+struct acomp_req *acomp_request_alloc(struct crypto_acomp *tfm);
+
+/**
+ * acomp_request_free() -- zeroize and free asynchronous (de)compression
+ *			   request as well as the output buffer if allocated
+ *			   inside the algorithm
+ *
+ * @req:	request to free
+ */
+void acomp_request_free(struct acomp_req *req);
+
+/**
+ * acomp_request_set_callback() -- Sets an asynchronous callback
+ *
+ * Callback will be called when an asynchronous operation on a given
+ * request is finished.
+ *
+ * @req:	request that the callback will be set for
+ * @flgs:	specify for instance if the operation may backlog
+ * @cmpl:	callback which will be called
+ * @data:	private data used by the caller
+ */
+static inline void acomp_request_set_callback(struct acomp_req *req,
+					      u32 flgs,
+					      crypto_completion_t cmpl,
+					      void *data)
+{
+	req->base.complete = cmpl;
+	req->base.data = data;
+	req->base.flags = flgs;
+}
+
+/**
+ * acomp_request_set_params() -- Sets request parameters
+ *
+ * Sets parameters required by an acomp operation
+ *
+ * @req:	asynchronous compress request
+ * @src:	pointer to input buffer scatterlist
+ * @dst:	pointer to output buffer scatterlist. If this is NULL, the
+ *		acomp layer will allocate the output memory
+ * @slen:	size of the input buffer
+ * @dlen:	size of the output buffer. If dst is NULL, this can be used by
+ *		the user to specify the maximum amount of memory to allocate
+ */
+static inline void acomp_request_set_params(struct acomp_req *req,
+					    struct scatterlist *src,
+					    struct scatterlist *dst,
+					    unsigned int slen,
+					    unsigned int dlen)
+{
+	req->src = src;
+	req->dst = dst;
+	req->slen = slen;
+	req->dlen = dlen;
+
+	if (!req->dst)
+		req->flags |= CRYPTO_ACOMP_ALLOC_OUTPUT;
+}
+
+/**
+ * crypto_acomp_compress() -- Invoke asynchronous compress operation
+ *
+ * Function invokes the asynchronous compress operation
+ *
+ * @req:	asynchronous compress request
+ *
+ * Return:	zero on success; error code in case of error
+ */
+static inline int crypto_acomp_compress(struct acomp_req *req)
+{
+	struct crypto_acomp *tfm = crypto_acomp_reqtfm(req);
+
+	return tfm->compress(req);
+}
+
+/**
+ * crypto_acomp_decompress() -- Invoke asynchronous decompress operation
+ *
+ * Function invokes the asynchronous decompress operation
+ *
+ * @req:	asynchronous compress request
+ *
+ * Return:	zero on success; error code in case of error
+ */
+static inline int crypto_acomp_decompress(struct acomp_req *req)
+{
+	struct crypto_acomp *tfm = crypto_acomp_reqtfm(req);
+
+	return tfm->decompress(req);
+}
+
+#endif
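
A minimal usage sketch for the acomp API above (not part of the header):
allocate a transform and a request, attach source/destination scatterlists,
run the operation, and read the produced length back from req->dlen. It
assumes the synchronous crypto_wait_req()/DECLARE_CRYPTO_WAIT helpers from
<linux/crypto.h>; the function name and error unwinding are illustrative only.

#include <crypto/acompress.h>
#include <linux/scatterlist.h>

static int example_deflate_compress(const void *in, unsigned int inlen,
				    void *out, unsigned int *outlen)
{
	struct scatterlist src, dst;
	struct crypto_acomp *tfm;
	struct acomp_req *req;
	DECLARE_CRYPTO_WAIT(wait);
	int err;

	tfm = crypto_alloc_acomp("deflate", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	req = acomp_request_alloc(tfm);
	if (!req) {
		crypto_free_acomp(tfm);
		return -ENOMEM;
	}

	sg_init_one(&src, in, inlen);
	sg_init_one(&dst, out, *outlen);
	acomp_request_set_params(req, &src, &dst, inlen, *outlen);
	acomp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   crypto_req_done, &wait);

	err = crypto_wait_req(crypto_acomp_compress(req), &wait);
	if (!err)
		*outlen = req->dlen;	/* bytes actually produced */

	acomp_request_free(req);
	crypto_free_acomp(tfm);
	return err;
}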
diff --git a/include/crypto/aead.h b/include/crypto/aead.h
new file mode 100644
index 0000000..1e26f79
--- /dev/null
+++ b/include/crypto/aead.h
@@ -0,0 +1,532 @@
+/*
+ * AEAD: Authenticated Encryption with Associated Data
+ * 
+ * Copyright (c) 2007-2015 Herbert Xu <herbert@gondor.apana.org.au>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option) 
+ * any later version.
+ *
+ */
+
+#ifndef _CRYPTO_AEAD_H
+#define _CRYPTO_AEAD_H
+
+#include <linux/crypto.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+
+/**
+ * DOC: Authenticated Encryption With Associated Data (AEAD) Cipher API
+ *
+ * The AEAD cipher API is used with the ciphers of type CRYPTO_ALG_TYPE_AEAD
+ * (listed as type "aead" in /proc/crypto)
+ *
+ * The most prominent examples of this type of encryption are GCM and CCM.
+ * However, the kernel supports other types of AEAD ciphers which are defined
+ * with the following cipher string:
+ *
+ *	authenc(keyed message digest, block cipher)
+ *
+ * For example: authenc(hmac(sha256), cbc(aes))
+ *
+ * The example code provided for the symmetric key cipher operation
+ * applies here as well. Naturally, all *skcipher* symbols must be
+ * exchanged with the *aead* counterparts discussed in the following. In
+ * addition, for the AEAD
+ * operation, the aead_request_set_ad function must be used to set the
+ * pointer to the associated data memory location before performing the
+ * encryption or decryption operation. In case of an encryption, the associated
+ * data memory is filled during the encryption operation. For decryption, the
+ * associated data memory must contain data that is used to verify the integrity
+ * of the decrypted data. Another deviation from the asynchronous block cipher
+ * operation is that the caller should explicitly check for -EBADMSG of the
+ * crypto_aead_decrypt. That error indicates an authentication error, i.e.
+ * a breach in the integrity of the message. In essence, that -EBADMSG error
+ * code is the key bonus an AEAD cipher has over "standard" block chaining
+ * modes.
+ *
+ * Memory Structure:
+ *
+ * To support the needs of the most prominent user of AEAD ciphers, namely
+ * IPSEC, the AEAD ciphers have a special memory layout the caller must adhere
+ * to.
+ *
+ * The scatter list pointing to the input data must contain:
+ *
+ * * for RFC4106 ciphers, the concatenation of
+ *   associated authentication data || IV || plaintext or ciphertext. Note, the
+ *   same IV (buffer) is also set with the aead_request_set_crypt call. Note,
+ *   the API call of aead_request_set_ad must provide the length of the AAD and
+ *   the IV. The API call of aead_request_set_crypt only points to the size of
+ *   the input plaintext or ciphertext.
+ *
+ * * for "normal" AEAD ciphers, the concatenation of
+ *   associated authentication data || plaintext or ciphertext.
+ *
+ * It is important to note that if multiple scatter gather list entries form
+ * the input data mentioned above, the first entry must not point to a NULL
+ * buffer. If there is any possibility that the AAD buffer could be NULL, the
+ * calling code must take precautions to ensure that this does not result
+ * in the first scatter gather list entry pointing to a NULL buffer.
+ */
+
+struct crypto_aead;
+
+/**
+ *	struct aead_request - AEAD request
+ *	@base: Common attributes for async crypto requests
+ *	@assoclen: Length in bytes of associated data for authentication
+ *	@cryptlen: Length of data to be encrypted or decrypted
+ *	@iv: Initialisation vector
+ *	@src: Source data
+ *	@dst: Destination data
+ *	@__ctx: Start of private context data
+ */
+struct aead_request {
+	struct crypto_async_request base;
+
+	unsigned int assoclen;
+	unsigned int cryptlen;
+
+	u8 *iv;
+
+	struct scatterlist *src;
+	struct scatterlist *dst;
+
+	void *__ctx[] CRYPTO_MINALIGN_ATTR;
+};
+
+/**
+ * struct aead_alg - AEAD cipher definition
+ * @maxauthsize: Set the maximum authentication tag size supported by the
+ *		 transformation. A transformation may support smaller tag sizes.
+ *		 As the authentication tag is a message digest to ensure the
+ *		 integrity of the encrypted data, a consumer typically wants the
+ *		 largest authentication tag possible as defined by this
+ *		 variable.
+ * @setauthsize: Set authentication size for the AEAD transformation. This
+ *		 function is used to specify the consumer requested size of the
+ * 		 authentication tag to be either generated by the transformation
+ *		 during encryption or the size of the authentication tag to be
+ *		 supplied during the decryption operation. This function is also
+ *		 responsible for checking the authentication tag size for
+ *		 validity.
+ * @setkey: see struct skcipher_alg
+ * @encrypt: see struct skcipher_alg
+ * @decrypt: see struct skcipher_alg
+ * @geniv: see struct skcipher_alg
+ * @ivsize: see struct skcipher_alg
+ * @chunksize: see struct skcipher_alg
+ * @init: Initialize the cryptographic transformation object. This function
+ *	  is used to initialize the cryptographic transformation object.
+ *	  This function is called only once at the instantiation time, right
+ *	  after the transformation context was allocated. In case the
+ *	  cryptographic hardware has some special requirements which need to
+ *	  be handled by software, this function shall check for the precise
+ *	  requirement of the transformation and put any software fallbacks
+ *	  in place.
+ * @exit: Deinitialize the cryptographic transformation object. This is a
+ *	  counterpart to @init, used to remove various changes set in
+ *	  @init.
+ * @base: Definition of a generic crypto cipher algorithm.
+ *
+ * All fields except @ivsize are mandatory and must be filled.
+ */
+struct aead_alg {
+	int (*setkey)(struct crypto_aead *tfm, const u8 *key,
+	              unsigned int keylen);
+	int (*setauthsize)(struct crypto_aead *tfm, unsigned int authsize);
+	int (*encrypt)(struct aead_request *req);
+	int (*decrypt)(struct aead_request *req);
+	int (*init)(struct crypto_aead *tfm);
+	void (*exit)(struct crypto_aead *tfm);
+
+	const char *geniv;
+
+	unsigned int ivsize;
+	unsigned int maxauthsize;
+	unsigned int chunksize;
+
+	struct crypto_alg base;
+};
+
+struct crypto_aead {
+	unsigned int authsize;
+	unsigned int reqsize;
+
+	struct crypto_tfm base;
+};
+
+static inline struct crypto_aead *__crypto_aead_cast(struct crypto_tfm *tfm)
+{
+	return container_of(tfm, struct crypto_aead, base);
+}
+
+/**
+ * crypto_alloc_aead() - allocate AEAD cipher handle
+ * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
+ *	     AEAD cipher
+ * @type: specifies the type of the cipher
+ * @mask: specifies the mask for the cipher
+ *
+ * Allocate a cipher handle for an AEAD. The returned struct
+ * crypto_aead is the cipher handle that is required for any subsequent
+ * API invocation for that AEAD.
+ *
+ * Return: allocated cipher handle in case of success; IS_ERR() is true in case
+ *	   of an error, PTR_ERR() returns the error code.
+ */
+struct crypto_aead *crypto_alloc_aead(const char *alg_name, u32 type, u32 mask);
+
+static inline struct crypto_tfm *crypto_aead_tfm(struct crypto_aead *tfm)
+{
+	return &tfm->base;
+}
+
+/**
+ * crypto_free_aead() - zeroize and free aead handle
+ * @tfm: cipher handle to be freed
+ */
+static inline void crypto_free_aead(struct crypto_aead *tfm)
+{
+	crypto_destroy_tfm(tfm, crypto_aead_tfm(tfm));
+}
+
+static inline struct aead_alg *crypto_aead_alg(struct crypto_aead *tfm)
+{
+	return container_of(crypto_aead_tfm(tfm)->__crt_alg,
+			    struct aead_alg, base);
+}
+
+static inline unsigned int crypto_aead_alg_ivsize(struct aead_alg *alg)
+{
+	return alg->ivsize;
+}
+
+/**
+ * crypto_aead_ivsize() - obtain IV size
+ * @tfm: cipher handle
+ *
+ * The size of the IV for the aead referenced by the cipher handle is
+ * returned. This IV size may be zero if the cipher does not need an IV.
+ *
+ * Return: IV size in bytes
+ */
+static inline unsigned int crypto_aead_ivsize(struct crypto_aead *tfm)
+{
+	return crypto_aead_alg_ivsize(crypto_aead_alg(tfm));
+}
+
+/**
+ * crypto_aead_authsize() - obtain maximum authentication data size
+ * @tfm: cipher handle
+ *
+ * The maximum size of the authentication data for the AEAD cipher referenced
+ * by the AEAD cipher handle is returned. The authentication data size may be
+ * zero if the cipher implements a hard-coded maximum.
+ *
+ * The authentication data may also be known as "tag value".
+ *
+ * Return: authentication data size / tag size in bytes
+ */
+static inline unsigned int crypto_aead_authsize(struct crypto_aead *tfm)
+{
+	return tfm->authsize;
+}
+
+/**
+ * crypto_aead_blocksize() - obtain block size of cipher
+ * @tfm: cipher handle
+ *
+ * The block size for the AEAD referenced with the cipher handle is returned.
+ * The caller may use that information to allocate appropriate memory for the
+ * data returned by the encryption or decryption operation
+ *
+ * Return: block size of cipher
+ */
+static inline unsigned int crypto_aead_blocksize(struct crypto_aead *tfm)
+{
+	return crypto_tfm_alg_blocksize(crypto_aead_tfm(tfm));
+}
+
+static inline unsigned int crypto_aead_alignmask(struct crypto_aead *tfm)
+{
+	return crypto_tfm_alg_alignmask(crypto_aead_tfm(tfm));
+}
+
+static inline u32 crypto_aead_get_flags(struct crypto_aead *tfm)
+{
+	return crypto_tfm_get_flags(crypto_aead_tfm(tfm));
+}
+
+static inline void crypto_aead_set_flags(struct crypto_aead *tfm, u32 flags)
+{
+	crypto_tfm_set_flags(crypto_aead_tfm(tfm), flags);
+}
+
+static inline void crypto_aead_clear_flags(struct crypto_aead *tfm, u32 flags)
+{
+	crypto_tfm_clear_flags(crypto_aead_tfm(tfm), flags);
+}
+
+/**
+ * crypto_aead_setkey() - set key for cipher
+ * @tfm: cipher handle
+ * @key: buffer holding the key
+ * @keylen: length of the key in bytes
+ *
+ * The caller provided key is set for the AEAD referenced by the cipher
+ * handle.
+ *
+ * Note, the key length determines the cipher variant. Many block ciphers
+ * support several key sizes, such as AES-128 vs. AES-192 vs. AES-256. When
+ * providing a 16-byte key for an AES cipher handle, AES-128 is performed.
+ *
+ * Return: 0 if the setting of the key was successful; < 0 if an error occurred
+ */
+int crypto_aead_setkey(struct crypto_aead *tfm,
+		       const u8 *key, unsigned int keylen);
+
+/**
+ * crypto_aead_setauthsize() - set authentication data size
+ * @tfm: cipher handle
+ * @authsize: size of the authentication data / tag in bytes
+ *
+ * Set the authentication data size / tag size. AEAD requires an authentication
+ * tag (or MAC) in addition to the associated data.
+ *
+ * Return: 0 if the setting of the key was successful; < 0 if an error occurred
+ */
+int crypto_aead_setauthsize(struct crypto_aead *tfm, unsigned int authsize);
+
+static inline struct crypto_aead *crypto_aead_reqtfm(struct aead_request *req)
+{
+	return __crypto_aead_cast(req->base.tfm);
+}
+
+/**
+ * crypto_aead_encrypt() - encrypt plaintext
+ * @req: reference to the aead_request handle that holds all information
+ *	 needed to perform the cipher operation
+ *
+ * Encrypt plaintext data using the aead_request handle. That data structure
+ * and how it is filled with data is discussed with the aead_request_*
+ * functions.
+ *
+ * IMPORTANT NOTE The encryption operation creates the authentication data /
+ *		  tag. That data is concatenated with the created ciphertext.
+ *		  The ciphertext memory size is therefore the given number of
+ *		  block cipher blocks + the size defined by the
+ *		  crypto_aead_setauthsize invocation. The caller must ensure
+ *		  that sufficient memory is available for the ciphertext and
+ *		  the authentication tag.
+ *
+ * Return: 0 if the cipher operation was successful; < 0 if an error occurred
+ */
+static inline int crypto_aead_encrypt(struct aead_request *req)
+{
+	struct crypto_aead *aead = crypto_aead_reqtfm(req);
+
+	if (crypto_aead_get_flags(aead) & CRYPTO_TFM_NEED_KEY)
+		return -ENOKEY;
+
+	return crypto_aead_alg(aead)->encrypt(req);
+}
+
+/**
+ * crypto_aead_decrypt() - decrypt ciphertext
+ * @req: reference to the aead_request handle that holds all information
+ *	 needed to perform the cipher operation
+ *
+ * Decrypt ciphertext data using the aead_request handle. That data structure
+ * and how it is filled with data is discussed with the aead_request_*
+ * functions.
+ *
+ * IMPORTANT NOTE The caller must concatenate the ciphertext followed by the
+ *		  authentication data / tag. That authentication data / tag
+ *		  must have the size defined by the crypto_aead_setauthsize
+ *		  invocation.
+ *
+ *
+ * Return: 0 if the cipher operation was successful; -EBADMSG: The AEAD
+ *	   cipher operation performs the authentication of the data during the
+ *	   decryption operation. Therefore, the function returns this error if
+ *	   the authentication of the ciphertext was unsuccessful (i.e. the
+ *	   integrity of the ciphertext or the associated data was violated);
+ *	   < 0 if an error occurred.
+ */
+static inline int crypto_aead_decrypt(struct aead_request *req)
+{
+	struct crypto_aead *aead = crypto_aead_reqtfm(req);
+
+	if (crypto_aead_get_flags(aead) & CRYPTO_TFM_NEED_KEY)
+		return -ENOKEY;
+
+	if (req->cryptlen < crypto_aead_authsize(aead))
+		return -EINVAL;
+
+	return crypto_aead_alg(aead)->decrypt(req);
+}
+
+/**
+ * DOC: Asynchronous AEAD Request Handle
+ *
+ * The aead_request data structure contains all pointers to data required for
+ * the AEAD cipher operation. This includes the cipher handle (which can be
+ * used by multiple aead_request instances), pointer to plaintext and
+ * ciphertext, asynchronous callback function, etc. It acts as a handle to the
+ * aead_request_* API calls in a similar way as AEAD handle to the
+ * crypto_aead_* API calls.
+ */
+
+/**
+ * crypto_aead_reqsize() - obtain size of the request data structure
+ * @tfm: cipher handle
+ *
+ * Return: number of bytes
+ */
+static inline unsigned int crypto_aead_reqsize(struct crypto_aead *tfm)
+{
+	return tfm->reqsize;
+}
+
+/**
+ * aead_request_set_tfm() - update cipher handle reference in request
+ * @req: request handle to be modified
+ * @tfm: cipher handle that shall be added to the request handle
+ *
+ * Allow the caller to replace the existing aead handle in the request
+ * data structure with a different one.
+ */
+static inline void aead_request_set_tfm(struct aead_request *req,
+					struct crypto_aead *tfm)
+{
+	req->base.tfm = crypto_aead_tfm(tfm);
+}
+
+/**
+ * aead_request_alloc() - allocate request data structure
+ * @tfm: cipher handle to be registered with the request
+ * @gfp: memory allocation flag that is handed to kmalloc by the API call.
+ *
+ * Allocate the request data structure that must be used with the AEAD
+ * encrypt and decrypt API calls. During the allocation, the provided aead
+ * handle is registered in the request data structure.
+ *
+ * Return: allocated request handle in case of success, or NULL if out of memory
+ */
+static inline struct aead_request *aead_request_alloc(struct crypto_aead *tfm,
+						      gfp_t gfp)
+{
+	struct aead_request *req;
+
+	req = kmalloc(sizeof(*req) + crypto_aead_reqsize(tfm), gfp);
+
+	if (likely(req))
+		aead_request_set_tfm(req, tfm);
+
+	return req;
+}
+
+/**
+ * aead_request_free() - zeroize and free request data structure
+ * @req: request data structure cipher handle to be freed
+ */
+static inline void aead_request_free(struct aead_request *req)
+{
+	kzfree(req);
+}
+
+/**
+ * aead_request_set_callback() - set asynchronous callback function
+ * @req: request handle
+ * @flags: specify zero or an ORing of the flags
+ *	   CRYPTO_TFM_REQ_MAY_BACKLOG the request queue may backlog and
+ *	   increase the wait queue beyond the initial maximum size;
+ *	   CRYPTO_TFM_REQ_MAY_SLEEP the request processing may sleep
+ * @compl: callback function pointer to be registered with the request handle
+ * @data: The data pointer refers to memory that is not used by the kernel
+ *	  crypto API, but provided to the callback function for it to use. Here,
+ *	  the caller can provide a reference to memory the callback function can
+ *	  operate on. As the callback function is invoked asynchronously to the
+ *	  related functionality, it may need to access data structures of the
+ *	  related functionality which can be referenced using this pointer. The
+ *	  callback function can access the memory via the "data" field in the
+ *	  crypto_async_request data structure provided to the callback function.
+ *
+ * Setting the callback function that is triggered once the cipher operation
+ * completes.
+ *
+ * The callback function is registered with the aead_request handle and
+ * must comply with the following template::
+ *
+ *	void callback_function(struct crypto_async_request *req, int error)
+ */
+static inline void aead_request_set_callback(struct aead_request *req,
+					     u32 flags,
+					     crypto_completion_t compl,
+					     void *data)
+{
+	req->base.complete = compl;
+	req->base.data = data;
+	req->base.flags = flags;
+}
+
+/**
+ * aead_request_set_crypt - set data buffers
+ * @req: request handle
+ * @src: source scatter / gather list
+ * @dst: destination scatter / gather list
+ * @cryptlen: number of bytes to process from @src
+ * @iv: IV for the cipher operation which must comply with the IV size defined
+ *      by crypto_aead_ivsize()
+ *
+ * Setting the source data and destination data scatter / gather lists which
+ * hold the associated data concatenated with the plaintext or ciphertext. See
+ * below for the authentication tag.
+ *
+ * For encryption, the source is treated as the plaintext and the
+ * destination is the ciphertext. For a decryption operation, the use is
+ * reversed - the source is the ciphertext and the destination is the plaintext.
+ *
+ * The memory structure for cipher operation has the following structure:
+ *
+ * - AEAD encryption input:  assoc data || plaintext
+ * - AEAD encryption output: assoc data || ciphertext || auth tag
+ * - AEAD decryption input:  assoc data || ciphertext || auth tag
+ * - AEAD decryption output: assoc data || plaintext
+ *
+ * Although the kernel requires the presence of the AAD buffer, it does
+ * not fill the AAD buffer in the output case. If the caller wants to have
+ * that data buffer filled, the caller must use an in-place cipher
+ * operation (i.e. the same memory location for input and output).
+ */
+static inline void aead_request_set_crypt(struct aead_request *req,
+					  struct scatterlist *src,
+					  struct scatterlist *dst,
+					  unsigned int cryptlen, u8 *iv)
+{
+	req->src = src;
+	req->dst = dst;
+	req->cryptlen = cryptlen;
+	req->iv = iv;
+}
+
+/**
+ * aead_request_set_ad - set associated data information
+ * @req: request handle
+ * @assoclen: number of bytes in associated data
+ *
+ * Setting the AD information.  This function sets the length of
+ * the associated data.
+ */
+static inline void aead_request_set_ad(struct aead_request *req,
+				       unsigned int assoclen)
+{
+	req->assoclen = assoclen;
+}
+
+#endif	/* _CRYPTO_AEAD_H */
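
Tying the above together, here is a hypothetical in-place gcm(aes) encryption
(not part of the header): the buffer is laid out as assoc data || plaintext
with room for the 16-byte tag at the end, matching the memory structure
described for aead_request_set_crypt(). It assumes the synchronous
crypto_wait_req()/DECLARE_CRYPTO_WAIT helpers from <linux/crypto.h>; sizing
checks and error unwinding are abbreviated.

#include <crypto/aead.h>
#include <linux/scatterlist.h>

static int example_gcm_encrypt(u8 *buf, unsigned int assoclen,
			       unsigned int ptlen, u8 *iv,
			       const u8 *key, unsigned int keylen)
{
	struct crypto_aead *tfm;
	struct aead_request *req;
	struct scatterlist sg;
	DECLARE_CRYPTO_WAIT(wait);
	int err;

	tfm = crypto_alloc_aead("gcm(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_aead_setkey(tfm, key, keylen);
	if (err)
		goto out_tfm;
	err = crypto_aead_setauthsize(tfm, 16);
	if (err)
		goto out_tfm;

	req = aead_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		err = -ENOMEM;
		goto out_tfm;
	}

	/* buf must hold assoclen + ptlen + 16 tag bytes; iv must be
	 * crypto_aead_ivsize(tfm) bytes (12 for GCM).
	 */
	sg_init_one(&sg, buf, assoclen + ptlen + 16);
	aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP,
				  crypto_req_done, &wait);
	aead_request_set_ad(req, assoclen);
	aead_request_set_crypt(req, &sg, &sg, ptlen, iv);

	err = crypto_wait_req(crypto_aead_encrypt(req), &wait);

	aead_request_free(req);
out_tfm:
	crypto_free_aead(tfm);
	return err;
}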
diff --git a/include/crypto/aes.h b/include/crypto/aes.h
new file mode 100644
index 0000000..852eaa9
--- /dev/null
+++ b/include/crypto/aes.h
@@ -0,0 +1,40 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Common values for AES algorithms
+ */
+
+#ifndef _CRYPTO_AES_H
+#define _CRYPTO_AES_H
+
+#include <linux/types.h>
+#include <linux/crypto.h>
+
+#define AES_MIN_KEY_SIZE	16
+#define AES_MAX_KEY_SIZE	32
+#define AES_KEYSIZE_128		16
+#define AES_KEYSIZE_192		24
+#define AES_KEYSIZE_256		32
+#define AES_BLOCK_SIZE		16
+#define AES_MAX_KEYLENGTH	(15 * 16)
+#define AES_MAX_KEYLENGTH_U32	(AES_MAX_KEYLENGTH / sizeof(u32))
+
+/*
+ * Please ensure that the first two fields are 16-byte aligned
+ * relative to the start of the structure, i.e., don't move them!
+ */
+struct crypto_aes_ctx {
+	u32 key_enc[AES_MAX_KEYLENGTH_U32];
+	u32 key_dec[AES_MAX_KEYLENGTH_U32];
+	u32 key_length;
+};
+
+extern const u32 crypto_ft_tab[4][256];
+extern const u32 crypto_fl_tab[4][256];
+extern const u32 crypto_it_tab[4][256];
+extern const u32 crypto_il_tab[4][256];
+
+int crypto_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
+		unsigned int key_len);
+int crypto_aes_expand_key(struct crypto_aes_ctx *ctx, const u8 *in_key,
+		unsigned int key_len);
+#endif
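
As a brief illustration of the declarations above (a hypothetical sketch, not
part of the header), a caller expands a raw key into the round-key schedules
held in struct crypto_aes_ctx like this:

#include <crypto/aes.h>

static int example_expand_aes128(struct crypto_aes_ctx *ctx, const u8 *raw_key)
{
	int err;

	err = crypto_aes_expand_key(ctx, raw_key, AES_KEYSIZE_128);
	if (err)
		return err;	/* e.g. -EINVAL for an invalid key length */

	/* ctx->key_enc[] and ctx->key_dec[] now hold the encryption and
	 * decryption round keys; ctx->key_length records the raw size (16).
	 */
	return 0;
}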
diff --git a/include/crypto/akcipher.h b/include/crypto/akcipher.h
new file mode 100644
index 0000000..b5e11de
--- /dev/null
+++ b/include/crypto/akcipher.h
@@ -0,0 +1,387 @@
+/*
+ * Public Key Encryption
+ *
+ * Copyright (c) 2015, Intel Corporation
+ * Authors: Tadeusz Struk <tadeusz.struk@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ */
+#ifndef _CRYPTO_AKCIPHER_H
+#define _CRYPTO_AKCIPHER_H
+#include <linux/crypto.h>
+
+/**
+ * struct akcipher_request - public key request
+ *
+ * @base:	Common attributes for async crypto requests
+ * @src:	Source data
+ * @dst:	Destination data
+ * @src_len:	Size of the input buffer
+ * @dst_len:	Size of the output buffer. It needs to be at least
+ *		as big as the expected result, depending on the operation.
+ *		After the operation it will be updated with the actual size
+ *		of the result.
+ *		In case of error where the dst sgl size was insufficient,
+ *		it will be updated to the size required for the operation.
+ * @__ctx:	Start of private context data
+ */
+struct akcipher_request {
+	struct crypto_async_request base;
+	struct scatterlist *src;
+	struct scatterlist *dst;
+	unsigned int src_len;
+	unsigned int dst_len;
+	void *__ctx[] CRYPTO_MINALIGN_ATTR;
+};
+
+/**
+ * struct crypto_akcipher - user-instantiated objects which encapsulate
+ * algorithms and core processing logic
+ *
+ * @base:	Common crypto API algorithm data structure
+ */
+struct crypto_akcipher {
+	struct crypto_tfm base;
+};
+
+/**
+ * struct akcipher_alg - generic public key algorithm
+ *
+ * @sign:	Function performs a sign operation as defined by public key
+ *		algorithm. In case of error, where the dst_len was insufficient,
+ *		the req->dst_len will be updated to the size required for the
+ *		operation
+ * @verify:	Function performs a verify operation as defined by public key
+ *		algorithm. In case of error, where the dst_len was insufficient,
+ *		the req->dst_len will be updated to the size required for the
+ *		operation
+ * @encrypt:	Function performs an encrypt operation as defined by public key
+ *		algorithm. In case of error, where the dst_len was insufficient,
+ *		the req->dst_len will be updated to the size required for the
+ *		operation
+ * @decrypt:	Function performs a decrypt operation as defined by public key
+ *		algorithm. In case of error, where the dst_len was insufficient,
+ *		the req->dst_len will be updated to the size required for the
+ *		operation
+ * @set_pub_key: Function invokes the algorithm specific set public key
+ *		function, which knows how to decode and interpret
+ *		the BER encoded public key
+ * @set_priv_key: Function invokes the algorithm specific set private key
+ *		function, which knows how to decode and interpret
+ *		the BER encoded private key
+ * @max_size:	Function returns dest buffer size required for a given key.
+ * @init:	Initialize the cryptographic transformation object.
+ *		This function is used to initialize the cryptographic
+ *		transformation object. This function is called only once at
+ *		the instantiation time, right after the transformation context
+ *		was allocated. In case the cryptographic hardware has some
+ *		special requirements which need to be handled by software, this
+ *		function shall check for the precise requirement of the
+ *		transformation and put any software fallbacks in place.
+ * @exit:	Deinitialize the cryptographic transformation object. This is a
+ *		counterpart to @init, used to remove various changes set in
+ *		@init.
+ *
+ * @reqsize:	Request context size required by algorithm implementation
+ * @base:	Common crypto API algorithm data structure
+ */
+struct akcipher_alg {
+	int (*sign)(struct akcipher_request *req);
+	int (*verify)(struct akcipher_request *req);
+	int (*encrypt)(struct akcipher_request *req);
+	int (*decrypt)(struct akcipher_request *req);
+	int (*set_pub_key)(struct crypto_akcipher *tfm, const void *key,
+			   unsigned int keylen);
+	int (*set_priv_key)(struct crypto_akcipher *tfm, const void *key,
+			    unsigned int keylen);
+	unsigned int (*max_size)(struct crypto_akcipher *tfm);
+	int (*init)(struct crypto_akcipher *tfm);
+	void (*exit)(struct crypto_akcipher *tfm);
+
+	unsigned int reqsize;
+	struct crypto_alg base;
+};
+
+/**
+ * DOC: Generic Public Key API
+ *
+ * The Public Key API is used with the algorithms of type
+ * CRYPTO_ALG_TYPE_AKCIPHER (listed as type "akcipher" in /proc/crypto)
+ */
+
+/**
+ * crypto_alloc_akcipher() - allocate AKCIPHER tfm handle
+ * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
+ *	      public key algorithm e.g. "rsa"
+ * @type: specifies the type of the algorithm
+ * @mask: specifies the mask for the algorithm
+ *
+ * Allocate a handle for public key algorithm. The returned struct
+ * crypto_akcipher is the handle that is required for any subsequent
+ * API invocation for the public key operations.
+ *
+ * Return: allocated handle in case of success; IS_ERR() is true in case
+ *	   of an error, PTR_ERR() returns the error code.
+ */
+struct crypto_akcipher *crypto_alloc_akcipher(const char *alg_name, u32 type,
+					      u32 mask);
+
+static inline struct crypto_tfm *crypto_akcipher_tfm(
+	struct crypto_akcipher *tfm)
+{
+	return &tfm->base;
+}
+
+static inline struct akcipher_alg *__crypto_akcipher_alg(struct crypto_alg *alg)
+{
+	return container_of(alg, struct akcipher_alg, base);
+}
+
+static inline struct crypto_akcipher *__crypto_akcipher_tfm(
+	struct crypto_tfm *tfm)
+{
+	return container_of(tfm, struct crypto_akcipher, base);
+}
+
+static inline struct akcipher_alg *crypto_akcipher_alg(
+	struct crypto_akcipher *tfm)
+{
+	return __crypto_akcipher_alg(crypto_akcipher_tfm(tfm)->__crt_alg);
+}
+
+static inline unsigned int crypto_akcipher_reqsize(struct crypto_akcipher *tfm)
+{
+	return crypto_akcipher_alg(tfm)->reqsize;
+}
+
+static inline void akcipher_request_set_tfm(struct akcipher_request *req,
+					    struct crypto_akcipher *tfm)
+{
+	req->base.tfm = crypto_akcipher_tfm(tfm);
+}
+
+static inline struct crypto_akcipher *crypto_akcipher_reqtfm(
+	struct akcipher_request *req)
+{
+	return __crypto_akcipher_tfm(req->base.tfm);
+}
+
+/**
+ * crypto_free_akcipher() - free AKCIPHER tfm handle
+ *
+ * @tfm: AKCIPHER tfm handle allocated with crypto_alloc_akcipher()
+ */
+static inline void crypto_free_akcipher(struct crypto_akcipher *tfm)
+{
+	crypto_destroy_tfm(tfm, crypto_akcipher_tfm(tfm));
+}
+
+/**
+ * akcipher_request_alloc() - allocates public key request
+ *
+ * @tfm:	AKCIPHER tfm handle allocated with crypto_alloc_akcipher()
+ * @gfp:	allocation flags
+ *
+ * Return: allocated handle in case of success or NULL in case of an error.
+ */
+static inline struct akcipher_request *akcipher_request_alloc(
+	struct crypto_akcipher *tfm, gfp_t gfp)
+{
+	struct akcipher_request *req;
+
+	req = kmalloc(sizeof(*req) + crypto_akcipher_reqsize(tfm), gfp);
+	if (likely(req))
+		akcipher_request_set_tfm(req, tfm);
+
+	return req;
+}
+
+/**
+ * akcipher_request_free() - zeroize and free public key request
+ *
+ * @req:	request to free
+ */
+static inline void akcipher_request_free(struct akcipher_request *req)
+{
+	kzfree(req);
+}
+
+/**
+ * akcipher_request_set_callback() - Sets an asynchronous callback.
+ *
+ * Callback will be called when an asynchronous operation on a given
+ * request is finished.
+ *
+ * @req:	request that the callback will be set for
+ * @flgs:	specify for instance if the operation may backlog
+ * @cmpl:	callback which will be called
+ * @data:	private data used by the caller
+ */
+static inline void akcipher_request_set_callback(struct akcipher_request *req,
+						 u32 flgs,
+						 crypto_completion_t cmpl,
+						 void *data)
+{
+	req->base.complete = cmpl;
+	req->base.data = data;
+	req->base.flags = flgs;
+}
+
+/**
+ * akcipher_request_set_crypt() - Sets request parameters
+ *
+ * Sets parameters required by crypto operation
+ *
+ * @req:	public key request
+ * @src:	ptr to input scatter list
+ * @dst:	ptr to output scatter list
+ * @src_len:	size of the src input scatter list to be processed
+ * @dst_len:	size of the dst output scatter list
+ */
+static inline void akcipher_request_set_crypt(struct akcipher_request *req,
+					      struct scatterlist *src,
+					      struct scatterlist *dst,
+					      unsigned int src_len,
+					      unsigned int dst_len)
+{
+	req->src = src;
+	req->dst = dst;
+	req->src_len = src_len;
+	req->dst_len = dst_len;
+}
+
+/**
+ * crypto_akcipher_maxsize() - Get len for output buffer
+ *
+ * Function returns the dest buffer size required for a given key.
+ * Function assumes that the key is already set in the transformation. If this
+ * function is called without a setkey or with a failed setkey, it will
+ * result in a NULL pointer dereference.
+ *
+ * @tfm:	AKCIPHER tfm handle allocated with crypto_alloc_akcipher()
+ */
+static inline unsigned int crypto_akcipher_maxsize(struct crypto_akcipher *tfm)
+{
+	struct akcipher_alg *alg = crypto_akcipher_alg(tfm);
+
+	return alg->max_size(tfm);
+}
+
+/**
+ * crypto_akcipher_encrypt() - Invoke public key encrypt operation
+ *
+ * Function invokes the specific public key encrypt operation for a given
+ * public key algorithm
+ *
+ * @req:	asymmetric key request
+ *
+ * Return: zero on success; error code in case of error
+ */
+static inline int crypto_akcipher_encrypt(struct akcipher_request *req)
+{
+	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
+	struct akcipher_alg *alg = crypto_akcipher_alg(tfm);
+
+	return alg->encrypt(req);
+}
+
+/**
+ * crypto_akcipher_decrypt() - Invoke public key decrypt operation
+ *
+ * Function invokes the specific public key decrypt operation for a given
+ * public key algorithm
+ *
+ * @req:	asymmetric key request
+ *
+ * Return: zero on success; error code in case of error
+ */
+static inline int crypto_akcipher_decrypt(struct akcipher_request *req)
+{
+	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
+	struct akcipher_alg *alg = crypto_akcipher_alg(tfm);
+
+	return alg->decrypt(req);
+}
+
+/**
+ * crypto_akcipher_sign() - Invoke public key sign operation
+ *
+ * Function invokes the specific public key sign operation for a given
+ * public key algorithm
+ *
+ * @req:	asymmetric key request
+ *
+ * Return: zero on success; error code in case of error
+ */
+static inline int crypto_akcipher_sign(struct akcipher_request *req)
+{
+	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
+	struct akcipher_alg *alg = crypto_akcipher_alg(tfm);
+
+	return alg->sign(req);
+}
+
+/**
+ * crypto_akcipher_verify() - Invoke public key verify operation
+ *
+ * Function invokes the specific public key verify operation for a given
+ * public key algorithm
+ *
+ * @req:	asymmetric key request
+ *
+ * Return: zero on success; error code in case of error
+ */
+static inline int crypto_akcipher_verify(struct akcipher_request *req)
+{
+	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
+	struct akcipher_alg *alg = crypto_akcipher_alg(tfm);
+
+	return alg->verify(req);
+}
+
+/**
+ * crypto_akcipher_set_pub_key() - Invoke set public key operation
+ *
+ * Function invokes the algorithm specific set key function, which knows
+ * how to decode and interpret the encoded key
+ *
+ * @tfm:	tfm handle
+ * @key:	BER encoded public key
+ * @keylen:	length of the key
+ *
+ * Return: zero on success; error code in case of error
+ */
+static inline int crypto_akcipher_set_pub_key(struct crypto_akcipher *tfm,
+					      const void *key,
+					      unsigned int keylen)
+{
+	struct akcipher_alg *alg = crypto_akcipher_alg(tfm);
+
+	return alg->set_pub_key(tfm, key, keylen);
+}
+
+/**
+ * crypto_akcipher_set_priv_key() - Invoke set private key operation
+ *
+ * Function invokes the algorithm specific set key function, which knows
+ * how to decode and interpret the encoded key
+ *
+ * @tfm:	tfm handle
+ * @key:	BER encoded private key
+ * @keylen:	length of the key
+ *
+ * Return: zero on success; error code in case of error
+ */
+static inline int crypto_akcipher_set_priv_key(struct crypto_akcipher *tfm,
+					       const void *key,
+					       unsigned int keylen)
+{
+	struct akcipher_alg *alg = crypto_akcipher_alg(tfm);
+
+	return alg->set_priv_key(tfm, key, keylen);
+}
+#endif
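
A hypothetical RSA encryption with the API above (not part of the header). It
assumes a BER-encoded public key blob, the synchronous crypto_wait_req()
helpers from <linux/crypto.h>, and an output buffer of at least
crypto_akcipher_maxsize() bytes. Note that bare "rsa" is raw modular
exponentiation; real users typically request a padded variant such as
"pkcs1pad(rsa,sha256)".

#include <crypto/akcipher.h>
#include <linux/scatterlist.h>

static int example_rsa_encrypt(const void *pub, unsigned int publen,
			       void *in, unsigned int inlen,
			       void *out, unsigned int outlen)
{
	struct crypto_akcipher *tfm;
	struct akcipher_request *req;
	struct scatterlist src, dst;
	DECLARE_CRYPTO_WAIT(wait);
	int err;

	tfm = crypto_alloc_akcipher("rsa", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_akcipher_set_pub_key(tfm, pub, publen);
	if (err)
		goto out_tfm;

	req = akcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		err = -ENOMEM;
		goto out_tfm;
	}

	sg_init_one(&src, in, inlen);
	sg_init_one(&dst, out, outlen);	/* >= crypto_akcipher_maxsize(tfm) */
	akcipher_request_set_crypt(req, &src, &dst, inlen, outlen);
	akcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP,
				      crypto_req_done, &wait);

	err = crypto_wait_req(crypto_akcipher_encrypt(req), &wait);
	/* on success, req->dst_len holds the actual result size */

	akcipher_request_free(req);
out_tfm:
	crypto_free_akcipher(tfm);
	return err;
}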
diff --git a/include/crypto/algapi.h b/include/crypto/algapi.h
new file mode 100644
index 0000000..bd5e8cc
--- /dev/null
+++ b/include/crypto/algapi.h
@@ -0,0 +1,428 @@
+/*
+ * Cryptographic API for algorithms (i.e., low-level API).
+ *
+ * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option) 
+ * any later version.
+ *
+ */
+#ifndef _CRYPTO_ALGAPI_H
+#define _CRYPTO_ALGAPI_H
+
+#include <linux/crypto.h>
+#include <linux/list.h>
+#include <linux/kernel.h>
+#include <linux/skbuff.h>
+
+/*
+ * Maximum values for blocksize and alignmask, used to allocate
+ * static buffers that are big enough for any combination of
+ * ciphers and architectures.
+ */
+#define MAX_CIPHER_BLOCKSIZE		16
+#define MAX_CIPHER_ALIGNMASK		15
+
+struct crypto_aead;
+struct crypto_instance;
+struct module;
+struct rtattr;
+struct seq_file;
+
+struct crypto_type {
+	unsigned int (*ctxsize)(struct crypto_alg *alg, u32 type, u32 mask);
+	unsigned int (*extsize)(struct crypto_alg *alg);
+	int (*init)(struct crypto_tfm *tfm, u32 type, u32 mask);
+	int (*init_tfm)(struct crypto_tfm *tfm);
+	void (*show)(struct seq_file *m, struct crypto_alg *alg);
+	int (*report)(struct sk_buff *skb, struct crypto_alg *alg);
+	void (*free)(struct crypto_instance *inst);
+
+	unsigned int type;
+	unsigned int maskclear;
+	unsigned int maskset;
+	unsigned int tfmsize;
+};
+
+struct crypto_instance {
+	struct crypto_alg alg;
+
+	struct crypto_template *tmpl;
+	struct hlist_node list;
+
+	void *__ctx[] CRYPTO_MINALIGN_ATTR;
+};
+
+struct crypto_template {
+	struct list_head list;
+	struct hlist_head instances;
+	struct module *module;
+
+	struct crypto_instance *(*alloc)(struct rtattr **tb);
+	void (*free)(struct crypto_instance *inst);
+	int (*create)(struct crypto_template *tmpl, struct rtattr **tb);
+
+	char name[CRYPTO_MAX_ALG_NAME];
+};
+
+struct crypto_spawn {
+	struct list_head list;
+	struct crypto_alg *alg;
+	struct crypto_instance *inst;
+	const struct crypto_type *frontend;
+	u32 mask;
+};
+
+struct crypto_queue {
+	struct list_head list;
+	struct list_head *backlog;
+
+	unsigned int qlen;
+	unsigned int max_qlen;
+};
+
+struct scatter_walk {
+	struct scatterlist *sg;
+	unsigned int offset;
+};
+
+struct blkcipher_walk {
+	union {
+		struct {
+			struct page *page;
+			unsigned long offset;
+		} phys;
+
+		struct {
+			u8 *page;
+			u8 *addr;
+		} virt;
+	} src, dst;
+
+	struct scatter_walk in;
+	unsigned int nbytes;
+
+	struct scatter_walk out;
+	unsigned int total;
+
+	void *page;
+	u8 *buffer;
+	u8 *iv;
+	unsigned int ivsize;
+
+	int flags;
+	unsigned int walk_blocksize;
+	unsigned int cipher_blocksize;
+	unsigned int alignmask;
+};
+
+struct ablkcipher_walk {
+	struct {
+		struct page *page;
+		unsigned int offset;
+	} src, dst;
+
+	struct scatter_walk	in;
+	unsigned int		nbytes;
+	struct scatter_walk	out;
+	unsigned int		total;
+	struct list_head	buffers;
+	u8			*iv_buffer;
+	u8			*iv;
+	int			flags;
+	unsigned int		blocksize;
+};
+
+extern const struct crypto_type crypto_ablkcipher_type;
+extern const struct crypto_type crypto_blkcipher_type;
+
+void crypto_mod_put(struct crypto_alg *alg);
+
+int crypto_register_template(struct crypto_template *tmpl);
+void crypto_unregister_template(struct crypto_template *tmpl);
+struct crypto_template *crypto_lookup_template(const char *name);
+
+int crypto_register_instance(struct crypto_template *tmpl,
+			     struct crypto_instance *inst);
+int crypto_unregister_instance(struct crypto_instance *inst);
+
+int crypto_init_spawn(struct crypto_spawn *spawn, struct crypto_alg *alg,
+		      struct crypto_instance *inst, u32 mask);
+int crypto_init_spawn2(struct crypto_spawn *spawn, struct crypto_alg *alg,
+		       struct crypto_instance *inst,
+		       const struct crypto_type *frontend);
+int crypto_grab_spawn(struct crypto_spawn *spawn, const char *name,
+		      u32 type, u32 mask);
+
+void crypto_drop_spawn(struct crypto_spawn *spawn);
+struct crypto_tfm *crypto_spawn_tfm(struct crypto_spawn *spawn, u32 type,
+				    u32 mask);
+void *crypto_spawn_tfm2(struct crypto_spawn *spawn);
+
+static inline void crypto_set_spawn(struct crypto_spawn *spawn,
+				    struct crypto_instance *inst)
+{
+	spawn->inst = inst;
+}
+
+struct crypto_attr_type *crypto_get_attr_type(struct rtattr **tb);
+int crypto_check_attr_type(struct rtattr **tb, u32 type);
+const char *crypto_attr_alg_name(struct rtattr *rta);
+struct crypto_alg *crypto_attr_alg2(struct rtattr *rta,
+				    const struct crypto_type *frontend,
+				    u32 type, u32 mask);
+
+static inline struct crypto_alg *crypto_attr_alg(struct rtattr *rta,
+						 u32 type, u32 mask)
+{
+	return crypto_attr_alg2(rta, NULL, type, mask);
+}
+
+int crypto_attr_u32(struct rtattr *rta, u32 *num);
+int crypto_inst_setname(struct crypto_instance *inst, const char *name,
+			struct crypto_alg *alg);
+void *crypto_alloc_instance2(const char *name, struct crypto_alg *alg,
+			     unsigned int head);
+struct crypto_instance *crypto_alloc_instance(const char *name,
+					      struct crypto_alg *alg);
+
+void crypto_init_queue(struct crypto_queue *queue, unsigned int max_qlen);
+int crypto_enqueue_request(struct crypto_queue *queue,
+			   struct crypto_async_request *request);
+struct crypto_async_request *crypto_dequeue_request(struct crypto_queue *queue);
+int crypto_tfm_in_queue(struct crypto_queue *queue, struct crypto_tfm *tfm);
+static inline unsigned int crypto_queue_len(struct crypto_queue *queue)
+{
+	return queue->qlen;
+}
+
+void crypto_inc(u8 *a, unsigned int size);
+void __crypto_xor(u8 *dst, const u8 *src1, const u8 *src2, unsigned int size);
+
+static inline void crypto_xor(u8 *dst, const u8 *src, unsigned int size)
+{
+	if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) &&
+	    __builtin_constant_p(size) &&
+	    (size % sizeof(unsigned long)) == 0) {
+		unsigned long *d = (unsigned long *)dst;
+		unsigned long *s = (unsigned long *)src;
+
+		while (size > 0) {
+			*d++ ^= *s++;
+			size -= sizeof(unsigned long);
+		}
+	} else {
+		__crypto_xor(dst, dst, src, size);
+	}
+}
+
+static inline void crypto_xor_cpy(u8 *dst, const u8 *src1, const u8 *src2,
+				  unsigned int size)
+{
+	if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) &&
+	    __builtin_constant_p(size) &&
+	    (size % sizeof(unsigned long)) == 0) {
+		unsigned long *d = (unsigned long *)dst;
+		unsigned long *s1 = (unsigned long *)src1;
+		unsigned long *s2 = (unsigned long *)src2;
+
+		while (size > 0) {
+			*d++ = *s1++ ^ *s2++;
+			size -= sizeof(unsigned long);
+		}
+	} else {
+		__crypto_xor(dst, src1, src2, size);
+	}
+}
+
+int blkcipher_walk_done(struct blkcipher_desc *desc,
+			struct blkcipher_walk *walk, int err);
+int blkcipher_walk_virt(struct blkcipher_desc *desc,
+			struct blkcipher_walk *walk);
+int blkcipher_walk_phys(struct blkcipher_desc *desc,
+			struct blkcipher_walk *walk);
+int blkcipher_walk_virt_block(struct blkcipher_desc *desc,
+			      struct blkcipher_walk *walk,
+			      unsigned int blocksize);
+int blkcipher_aead_walk_virt_block(struct blkcipher_desc *desc,
+				   struct blkcipher_walk *walk,
+				   struct crypto_aead *tfm,
+				   unsigned int blocksize);
+
+int ablkcipher_walk_done(struct ablkcipher_request *req,
+			 struct ablkcipher_walk *walk, int err);
+int ablkcipher_walk_phys(struct ablkcipher_request *req,
+			 struct ablkcipher_walk *walk);
+void __ablkcipher_walk_complete(struct ablkcipher_walk *walk);
+
+static inline void *crypto_tfm_ctx_aligned(struct crypto_tfm *tfm)
+{
+	return PTR_ALIGN(crypto_tfm_ctx(tfm),
+			 crypto_tfm_alg_alignmask(tfm) + 1);
+}
+
+static inline struct crypto_instance *crypto_tfm_alg_instance(
+	struct crypto_tfm *tfm)
+{
+	return container_of(tfm->__crt_alg, struct crypto_instance, alg);
+}
+
+static inline void *crypto_instance_ctx(struct crypto_instance *inst)
+{
+	return inst->__ctx;
+}
+
+static inline struct ablkcipher_alg *crypto_ablkcipher_alg(
+	struct crypto_ablkcipher *tfm)
+{
+	return &crypto_ablkcipher_tfm(tfm)->__crt_alg->cra_ablkcipher;
+}
+
+static inline void *crypto_ablkcipher_ctx(struct crypto_ablkcipher *tfm)
+{
+	return crypto_tfm_ctx(&tfm->base);
+}
+
+static inline void *crypto_ablkcipher_ctx_aligned(struct crypto_ablkcipher *tfm)
+{
+	return crypto_tfm_ctx_aligned(&tfm->base);
+}
+
+static inline struct crypto_blkcipher *crypto_spawn_blkcipher(
+	struct crypto_spawn *spawn)
+{
+	u32 type = CRYPTO_ALG_TYPE_BLKCIPHER;
+	u32 mask = CRYPTO_ALG_TYPE_MASK;
+
+	return __crypto_blkcipher_cast(crypto_spawn_tfm(spawn, type, mask));
+}
+
+static inline void *crypto_blkcipher_ctx(struct crypto_blkcipher *tfm)
+{
+	return crypto_tfm_ctx(&tfm->base);
+}
+
+static inline void *crypto_blkcipher_ctx_aligned(struct crypto_blkcipher *tfm)
+{
+	return crypto_tfm_ctx_aligned(&tfm->base);
+}
+
+static inline struct crypto_cipher *crypto_spawn_cipher(
+	struct crypto_spawn *spawn)
+{
+	u32 type = CRYPTO_ALG_TYPE_CIPHER;
+	u32 mask = CRYPTO_ALG_TYPE_MASK;
+
+	return __crypto_cipher_cast(crypto_spawn_tfm(spawn, type, mask));
+}
+
+static inline struct cipher_alg *crypto_cipher_alg(struct crypto_cipher *tfm)
+{
+	return &crypto_cipher_tfm(tfm)->__crt_alg->cra_cipher;
+}
+
+static inline void blkcipher_walk_init(struct blkcipher_walk *walk,
+				       struct scatterlist *dst,
+				       struct scatterlist *src,
+				       unsigned int nbytes)
+{
+	walk->in.sg = src;
+	walk->out.sg = dst;
+	walk->total = nbytes;
+}
+
+static inline void ablkcipher_walk_init(struct ablkcipher_walk *walk,
+					struct scatterlist *dst,
+					struct scatterlist *src,
+					unsigned int nbytes)
+{
+	walk->in.sg = src;
+	walk->out.sg = dst;
+	walk->total = nbytes;
+	INIT_LIST_HEAD(&walk->buffers);
+}
+
+static inline void ablkcipher_walk_complete(struct ablkcipher_walk *walk)
+{
+	if (unlikely(!list_empty(&walk->buffers)))
+		__ablkcipher_walk_complete(walk);
+}
+
+static inline struct crypto_async_request *crypto_get_backlog(
+	struct crypto_queue *queue)
+{
+	return queue->backlog == &queue->list ? NULL :
+	       container_of(queue->backlog, struct crypto_async_request, list);
+}
+
+static inline int ablkcipher_enqueue_request(struct crypto_queue *queue,
+					     struct ablkcipher_request *request)
+{
+	return crypto_enqueue_request(queue, &request->base);
+}
+
+static inline struct ablkcipher_request *ablkcipher_dequeue_request(
+	struct crypto_queue *queue)
+{
+	return ablkcipher_request_cast(crypto_dequeue_request(queue));
+}
+
+static inline void *ablkcipher_request_ctx(struct ablkcipher_request *req)
+{
+	return req->__ctx;
+}
+
+static inline int ablkcipher_tfm_in_queue(struct crypto_queue *queue,
+					  struct crypto_ablkcipher *tfm)
+{
+	return crypto_tfm_in_queue(queue, crypto_ablkcipher_tfm(tfm));
+}
+
+static inline struct crypto_alg *crypto_get_attr_alg(struct rtattr **tb,
+						     u32 type, u32 mask)
+{
+	return crypto_attr_alg(tb[1], type, mask);
+}
+
+static inline int crypto_requires_off(u32 type, u32 mask, u32 off)
+{
+	return (type ^ off) & mask & off;
+}
+
+/*
+ * Returns CRYPTO_ALG_ASYNC if type/mask requires the use of sync algorithms.
+ * Otherwise returns zero.
+ */
+static inline int crypto_requires_sync(u32 type, u32 mask)
+{
+	return crypto_requires_off(type, mask, CRYPTO_ALG_ASYNC);
+}
+
+noinline unsigned long __crypto_memneq(const void *a, const void *b, size_t size);
+
+/**
+ * crypto_memneq - Compare two areas of memory without leaking
+ *		   timing information.
+ *
+ * @a: One area of memory
+ * @b: Another area of memory
+ * @size: The size of the area.
+ *
+ * Returns 0 when data is equal, 1 otherwise.
+ */
+static inline int crypto_memneq(const void *a, const void *b, size_t size)
+{
+	return __crypto_memneq(a, b, size) != 0UL ? 1 : 0;
+}
+
+static inline void crypto_yield(u32 flags)
+{
+#if !defined(CONFIG_PREEMPT) || defined(CONFIG_PREEMPT_VOLUNTARY)
+	if (flags & CRYPTO_TFM_REQ_MAY_SLEEP)
+		cond_resched();
+#endif
+}
+
+#endif	/* _CRYPTO_ALGAPI_H */
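
Three of the helpers above do most of the work in block-mode implementations:
crypto_inc() steps a big-endian block counter (as CTR mode does), crypto_xor()
combines a keystream with data, and crypto_memneq() compares authentication
tags without leaking timing information. A hypothetical sketch (not part of
the header):

static int example_ctr_step(u8 *data, const u8 *keystream, u8 *counter,
			    const u8 *tag, const u8 *expected_tag)
{
	crypto_xor(data, keystream, 16);	/* data ^= keystream */
	crypto_inc(counter, 16);		/* big-endian counter++ */

	/* never memcmp() tags: a byte-wise early exit leaks timing */
	return crypto_memneq(tag, expected_tag, 16) ? -EBADMSG : 0;
}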
diff --git a/include/crypto/authenc.h b/include/crypto/authenc.h
new file mode 100644
index 0000000..6775059
--- /dev/null
+++ b/include/crypto/authenc.h
@@ -0,0 +1,37 @@
+/*
+ * Authenc: Simple AEAD wrapper for IPsec
+ *
+ * Copyright (c) 2007 Herbert Xu <herbert@gondor.apana.org.au>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ */
+#ifndef _CRYPTO_AUTHENC_H
+#define _CRYPTO_AUTHENC_H
+
+#include <linux/types.h>
+
+enum {
+	CRYPTO_AUTHENC_KEYA_UNSPEC,
+	CRYPTO_AUTHENC_KEYA_PARAM,
+};
+
+struct crypto_authenc_key_param {
+	__be32 enckeylen;
+};
+
+struct crypto_authenc_keys {
+	const u8 *authkey;
+	const u8 *enckey;
+
+	unsigned int authkeylen;
+	unsigned int enckeylen;
+};
+
+int crypto_authenc_extractkeys(struct crypto_authenc_keys *keys, const u8 *key,
+			       unsigned int keylen);
+
+#endif	/* _CRYPTO_AUTHENC_H */
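
The key blob that crypto_authenc_extractkeys() parses is not a flat byte
string: it starts with an rtattr of type CRYPTO_AUTHENC_KEYA_PARAM carrying
the big-endian encryption key length, followed by authentication key ||
encryption key. A hypothetical builder for such a blob, mirroring what the
IPsec code does (illustrative only):

#include <crypto/authenc.h>
#include <linux/rtnetlink.h>
#include <linux/string.h>

static unsigned int example_build_authenc_key(u8 *buf,
					      const u8 *authkey,
					      unsigned int alen,
					      const u8 *enckey,
					      unsigned int elen)
{
	struct rtattr *rta = (struct rtattr *)buf;
	struct crypto_authenc_key_param *param;

	rta->rta_type = CRYPTO_AUTHENC_KEYA_PARAM;
	rta->rta_len = RTA_LENGTH(sizeof(*param));
	param = RTA_DATA(rta);
	param->enckeylen = cpu_to_be32(elen);

	memcpy(buf + RTA_SPACE(sizeof(*param)), authkey, alen);
	memcpy(buf + RTA_SPACE(sizeof(*param)) + alen, enckey, elen);

	return RTA_SPACE(sizeof(*param)) + alen + elen;	/* total keylen */
}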
diff --git a/include/crypto/b128ops.h b/include/crypto/b128ops.h
new file mode 100644
index 0000000..0b8e6bc
--- /dev/null
+++ b/include/crypto/b128ops.h
@@ -0,0 +1,80 @@
+/* b128ops.h - common 128-bit block operations
+ *
+ * Copyright (c) 2003, Dr Brian Gladman, Worcester, UK.
+ * Copyright (c) 2006, Rik Snel <rsnel@cube.dyndns.org>
+ *
+ * Based on Dr Brian Gladman's (GPL'd) work published at
+ * http://fp.gladman.plus.com/cryptography_technology/index.htm
+ * See the original copyright notice below.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ */
+/*
+ ---------------------------------------------------------------------------
+ Copyright (c) 2003, Dr Brian Gladman, Worcester, UK.   All rights reserved.
+
+ LICENSE TERMS
+
+ The free distribution and use of this software in both source and binary
+ form is allowed (with or without changes) provided that:
+
+   1. distributions of this source code include the above copyright
+      notice, this list of conditions and the following disclaimer;
+
+   2. distributions in binary form include the above copyright
+      notice, this list of conditions and the following disclaimer
+      in the documentation and/or other associated materials;
+
+   3. the copyright holder's name is not used to endorse products
+      built using this software without specific written permission.
+
+ ALTERNATIVELY, provided that this notice is retained in full, this product
+ may be distributed under the terms of the GNU General Public License (GPL),
+ in which case the provisions of the GPL apply INSTEAD OF those given above.
+
+ DISCLAIMER
+
+ This software is provided 'as is' with no explicit or implied warranties
+ in respect of its properties, including, but not limited to, correctness
+ and/or fitness for purpose.
+ ---------------------------------------------------------------------------
+ Issue Date: 13/06/2006
+*/
+
+#ifndef _CRYPTO_B128OPS_H
+#define _CRYPTO_B128OPS_H
+
+#include <linux/types.h>
+
+typedef struct {
+	u64 a, b;
+} u128;
+
+typedef struct {
+	__be64 a, b;
+} be128;
+
+typedef struct {
+	__le64 b, a;
+} le128;
+
+static inline void u128_xor(u128 *r, const u128 *p, const u128 *q)
+{
+	r->a = p->a ^ q->a;
+	r->b = p->b ^ q->b;
+}
+
+static inline void be128_xor(be128 *r, const be128 *p, const be128 *q)
+{
+	u128_xor((u128 *)r, (u128 *)p, (u128 *)q);
+}
+
+static inline void le128_xor(le128 *r, const le128 *p, const le128 *q)
+{
+	u128_xor((u128 *)r, (u128 *)p, (u128 *)q);
+}
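+
+/*
+ * Example (illustrative sketch): a block cipher mode XORing a data
+ * block with a chaining value, both held as be128:
+ *
+ *	be128 blk, iv;
+ *
+ *	be128_xor(&blk, &blk, &iv);
+ *
+ * computes blk ^= iv; the destination may alias either source.
+ */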
+
+#endif /* _CRYPTO_B128OPS_H */
diff --git a/include/crypto/blowfish.h b/include/crypto/blowfish.h
new file mode 100644
index 0000000..9b38467
--- /dev/null
+++ b/include/crypto/blowfish.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Common values for blowfish algorithms
+ */
+
+#ifndef _CRYPTO_BLOWFISH_H
+#define _CRYPTO_BLOWFISH_H
+
+#include <linux/types.h>
+#include <linux/crypto.h>
+
+#define BF_BLOCK_SIZE 8
+#define BF_MIN_KEY_SIZE 4
+#define BF_MAX_KEY_SIZE 56
+
+struct bf_ctx {
+	u32 p[18];
+	u32 s[1024];
+};
+
+int blowfish_setkey(struct crypto_tfm *tfm, const u8 *key,
+		    unsigned int key_len);
+
+#endif
diff --git a/include/crypto/cast5.h b/include/crypto/cast5.h
new file mode 100644
index 0000000..3d4ed4e
--- /dev/null
+++ b/include/crypto/cast5.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _CRYPTO_CAST5_H
+#define _CRYPTO_CAST5_H
+
+#include <linux/types.h>
+#include <linux/crypto.h>
+#include <crypto/cast_common.h>
+
+#define CAST5_BLOCK_SIZE 8
+#define CAST5_MIN_KEY_SIZE 5
+#define CAST5_MAX_KEY_SIZE 16
+
+struct cast5_ctx {
+	u32 Km[16];
+	u8 Kr[16];
+	int rr;	/* rr ? rounds = 12 : rounds = 16; (rfc 2144) */
+};
+
+int cast5_setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen);
+
+void __cast5_encrypt(struct cast5_ctx *ctx, u8 *dst, const u8 *src);
+void __cast5_decrypt(struct cast5_ctx *ctx, u8 *dst, const u8 *src);
+
+#endif
diff --git a/include/crypto/cast6.h b/include/crypto/cast6.h
new file mode 100644
index 0000000..c71f6ef
--- /dev/null
+++ b/include/crypto/cast6.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _CRYPTO_CAST6_H
+#define _CRYPTO_CAST6_H
+
+#include <linux/types.h>
+#include <linux/crypto.h>
+#include <crypto/cast_common.h>
+
+#define CAST6_BLOCK_SIZE 16
+#define CAST6_MIN_KEY_SIZE 16
+#define CAST6_MAX_KEY_SIZE 32
+
+struct cast6_ctx {
+	u32 Km[12][4];
+	u8 Kr[12][4];
+};
+
+int __cast6_setkey(struct cast6_ctx *ctx, const u8 *key,
+		   unsigned int keylen, u32 *flags);
+int cast6_setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen);
+
+void __cast6_encrypt(struct cast6_ctx *ctx, u8 *dst, const u8 *src);
+void __cast6_decrypt(struct cast6_ctx *ctx, u8 *dst, const u8 *src);
+
+#endif
diff --git a/include/crypto/cast_common.h b/include/crypto/cast_common.h
new file mode 100644
index 0000000..b900902
--- /dev/null
+++ b/include/crypto/cast_common.h
@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _CRYPTO_CAST_COMMON_H
+#define _CRYPTO_CAST_COMMON_H
+
+extern const u32 cast_s1[256];
+extern const u32 cast_s2[256];
+extern const u32 cast_s3[256];
+extern const u32 cast_s4[256];
+
+#endif
diff --git a/include/crypto/cbc.h b/include/crypto/cbc.h
new file mode 100644
index 0000000..f5b8bfc
--- /dev/null
+++ b/include/crypto/cbc.h
@@ -0,0 +1,146 @@
+/*
+ * CBC: Cipher Block Chaining mode
+ *
+ * Copyright (c) 2016 Herbert Xu <herbert@gondor.apana.org.au>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ */
+
+#ifndef _CRYPTO_CBC_H
+#define _CRYPTO_CBC_H
+
+#include <crypto/internal/skcipher.h>
+#include <linux/string.h>
+#include <linux/types.h>
+
+static inline int crypto_cbc_encrypt_segment(
+	struct skcipher_walk *walk, struct crypto_skcipher *tfm,
+	void (*fn)(struct crypto_skcipher *, const u8 *, u8 *))
+{
+	unsigned int bsize = crypto_skcipher_blocksize(tfm);
+	unsigned int nbytes = walk->nbytes;
+	u8 *src = walk->src.virt.addr;
+	u8 *dst = walk->dst.virt.addr;
+	u8 *iv = walk->iv;
+
+	do {
+		crypto_xor(iv, src, bsize);
+		fn(tfm, iv, dst);
+		memcpy(iv, dst, bsize);
+
+		src += bsize;
+		dst += bsize;
+	} while ((nbytes -= bsize) >= bsize);
+
+	return nbytes;
+}
+
+static inline int crypto_cbc_encrypt_inplace(
+	struct skcipher_walk *walk, struct crypto_skcipher *tfm,
+	void (*fn)(struct crypto_skcipher *, const u8 *, u8 *))
+{
+	unsigned int bsize = crypto_skcipher_blocksize(tfm);
+	unsigned int nbytes = walk->nbytes;
+	u8 *src = walk->src.virt.addr;
+	u8 *iv = walk->iv;
+
+	do {
+		crypto_xor(src, iv, bsize);
+		fn(tfm, src, src);
+		iv = src;
+
+		src += bsize;
+	} while ((nbytes -= bsize) >= bsize);
+
+	memcpy(walk->iv, iv, bsize);
+
+	return nbytes;
+}
+
+static inline int crypto_cbc_encrypt_walk(struct skcipher_request *req,
+					  void (*fn)(struct crypto_skcipher *,
+						     const u8 *, u8 *))
+{
+	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+	struct skcipher_walk walk;
+	int err;
+
+	err = skcipher_walk_virt(&walk, req, false);
+
+	while (walk.nbytes) {
+		if (walk.src.virt.addr == walk.dst.virt.addr)
+			err = crypto_cbc_encrypt_inplace(&walk, tfm, fn);
+		else
+			err = crypto_cbc_encrypt_segment(&walk, tfm, fn);
+		err = skcipher_walk_done(&walk, err);
+	}
+
+	return err;
+}
+
+static inline int crypto_cbc_decrypt_segment(
+	struct skcipher_walk *walk, struct crypto_skcipher *tfm,
+	void (*fn)(struct crypto_skcipher *, const u8 *, u8 *))
+{
+	unsigned int bsize = crypto_skcipher_blocksize(tfm);
+	unsigned int nbytes = walk->nbytes;
+	u8 *src = walk->src.virt.addr;
+	u8 *dst = walk->dst.virt.addr;
+	u8 *iv = walk->iv;
+
+	do {
+		fn(tfm, src, dst);
+		crypto_xor(dst, iv, bsize);
+		iv = src;
+
+		src += bsize;
+		dst += bsize;
+	} while ((nbytes -= bsize) >= bsize);
+
+	memcpy(walk->iv, iv, bsize);
+
+	return nbytes;
+}
+
+static inline int crypto_cbc_decrypt_inplace(
+	struct skcipher_walk *walk, struct crypto_skcipher *tfm,
+	void (*fn)(struct crypto_skcipher *, const u8 *, u8 *))
+{
+	unsigned int bsize = crypto_skcipher_blocksize(tfm);
+	unsigned int nbytes = walk->nbytes;
+	u8 *src = walk->src.virt.addr;
+	u8 last_iv[bsize];
+
+	/* Start of the last block. */
+	src += nbytes - (nbytes & (bsize - 1)) - bsize;
+	memcpy(last_iv, src, bsize);
+
+	for (;;) {
+		fn(tfm, src, src);
+		if ((nbytes -= bsize) < bsize)
+			break;
+		crypto_xor(src, src - bsize, bsize);
+		src -= bsize;
+	}
+
+	crypto_xor(src, walk->iv, bsize);
+	memcpy(walk->iv, last_iv, bsize);
+
+	return nbytes;
+}
+
+static inline int crypto_cbc_decrypt_blocks(
+	struct skcipher_walk *walk, struct crypto_skcipher *tfm,
+	void (*fn)(struct crypto_skcipher *, const u8 *, u8 *))
+{
+	if (walk->src.virt.addr == walk->dst.virt.addr)
+		return crypto_cbc_decrypt_inplace(walk, tfm, fn);
+	else
+		return crypto_cbc_decrypt_segment(walk, tfm, fn);
+}
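+
+/*
+ * Example (illustrative sketch): a driver whose primitive encrypts
+ * exactly one block can implement its skcipher .encrypt hook entirely
+ * with the helpers above. one_block_encrypt() and my_cbc_encrypt()
+ * are hypothetical names.
+ *
+ *	static void one_block_encrypt(struct crypto_skcipher *tfm,
+ *				      const u8 *src, u8 *dst)
+ *	{
+ *		... encrypt one block from src to dst ...
+ *	}
+ *
+ *	static int my_cbc_encrypt(struct skcipher_request *req)
+ *	{
+ *		return crypto_cbc_encrypt_walk(req, one_block_encrypt);
+ *	}
+ *
+ * The decrypt side pairs skcipher_walk_virt() with
+ * crypto_cbc_decrypt_blocks() in the same fashion.
+ */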
+
+#endif	/* _CRYPTO_CBC_H */
diff --git a/include/crypto/chacha20.h b/include/crypto/chacha20.h
new file mode 100644
index 0000000..b83d660
--- /dev/null
+++ b/include/crypto/chacha20.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Common values for the ChaCha20 algorithm
+ */
+
+#ifndef _CRYPTO_CHACHA20_H
+#define _CRYPTO_CHACHA20_H
+
+#include <crypto/skcipher.h>
+#include <linux/types.h>
+#include <linux/crypto.h>
+
+#define CHACHA20_IV_SIZE	16
+#define CHACHA20_KEY_SIZE	32
+#define CHACHA20_BLOCK_SIZE	64
+#define CHACHA20_BLOCK_WORDS	(CHACHA20_BLOCK_SIZE / sizeof(u32))
+
+struct chacha20_ctx {
+	u32 key[8];
+};
+
+void chacha20_block(u32 *state, u32 *stream);
+void crypto_chacha20_init(u32 *state, struct chacha20_ctx *ctx, u8 *iv);
+int crypto_chacha20_setkey(struct crypto_skcipher *tfm, const u8 *key,
+			   unsigned int keysize);
+int crypto_chacha20_crypt(struct skcipher_request *req);
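+
+/*
+ * Example (illustrative sketch): expanding one 64-byte keystream
+ * block. The caller owns the 16-word state; chacha20_block() emits
+ * CHACHA20_BLOCK_WORDS words of keystream and advances the block
+ * counter held in the state.
+ *
+ *	u32 state[16];
+ *	u32 stream[CHACHA20_BLOCK_WORDS];
+ *
+ *	crypto_chacha20_init(state, ctx, iv);
+ *	chacha20_block(state, stream);
+ */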
+
+#endif
diff --git a/include/crypto/cryptd.h b/include/crypto/cryptd.h
new file mode 100644
index 0000000..1e64f35
--- /dev/null
+++ b/include/crypto/cryptd.h
@@ -0,0 +1,87 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Software async crypto daemon
+ *
+ * Added AEAD support to cryptd.
+ *    Authors: Tadeusz Struk (tadeusz.struk@intel.com)
+ *             Adrian Hoban <adrian.hoban@intel.com>
+ *             Gabriele Paoloni <gabriele.paoloni@intel.com>
+ *             Aidan O'Mahony (aidan.o.mahony@intel.com)
+ *    Copyright (c) 2010, Intel Corporation.
+ */
+
+#ifndef _CRYPTO_CRYPT_H
+#define _CRYPTO_CRYPT_H
+
+#include <linux/kernel.h>
+#include <crypto/aead.h>
+#include <crypto/hash.h>
+#include <crypto/skcipher.h>
+
+struct cryptd_ablkcipher {
+	struct crypto_ablkcipher base;
+};
+
+static inline struct cryptd_ablkcipher *__cryptd_ablkcipher_cast(
+	struct crypto_ablkcipher *tfm)
+{
+	return (struct cryptd_ablkcipher *)tfm;
+}
+
+/* alg_name should be the name of the algorithm to be wrapped by cryptd */
+struct cryptd_ablkcipher *cryptd_alloc_ablkcipher(const char *alg_name,
+						  u32 type, u32 mask);
+struct crypto_blkcipher *cryptd_ablkcipher_child(struct cryptd_ablkcipher *tfm);
+bool cryptd_ablkcipher_queued(struct cryptd_ablkcipher *tfm);
+void cryptd_free_ablkcipher(struct cryptd_ablkcipher *tfm);
+
+struct cryptd_skcipher {
+	struct crypto_skcipher base;
+};
+
+struct cryptd_skcipher *cryptd_alloc_skcipher(const char *alg_name,
+					      u32 type, u32 mask);
+struct crypto_skcipher *cryptd_skcipher_child(struct cryptd_skcipher *tfm);
+/* Must be called without moving CPUs. */
+bool cryptd_skcipher_queued(struct cryptd_skcipher *tfm);
+void cryptd_free_skcipher(struct cryptd_skcipher *tfm);
+
+struct cryptd_ahash {
+	struct crypto_ahash base;
+};
+
+static inline struct cryptd_ahash *__cryptd_ahash_cast(
+	struct crypto_ahash *tfm)
+{
+	return (struct cryptd_ahash *)tfm;
+}
+
+/* alg_name should be the name of the algorithm to be wrapped by cryptd */
+struct cryptd_ahash *cryptd_alloc_ahash(const char *alg_name,
+					u32 type, u32 mask);
+struct crypto_shash *cryptd_ahash_child(struct cryptd_ahash *tfm);
+struct shash_desc *cryptd_shash_desc(struct ahash_request *req);
+/* Must be called without moving CPUs. */
+bool cryptd_ahash_queued(struct cryptd_ahash *tfm);
+void cryptd_free_ahash(struct cryptd_ahash *tfm);
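+
+/*
+ * Example (illustrative sketch): wrapping a hash in cryptd so that
+ * processing is queued to the cryptd workqueue. The algorithm name is
+ * illustrative; error handling is abbreviated.
+ *
+ *	struct cryptd_ahash *tfm;
+ *
+ *	tfm = cryptd_alloc_ahash("sha256-generic", 0, 0);
+ *	if (IS_ERR(tfm))
+ *		return PTR_ERR(tfm);
+ *	...
+ *	cryptd_free_ahash(tfm);
+ */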
+
+struct cryptd_aead {
+	struct crypto_aead base;
+};
+
+static inline struct cryptd_aead *__cryptd_aead_cast(
+	struct crypto_aead *tfm)
+{
+	return (struct cryptd_aead *)tfm;
+}
+
+struct cryptd_aead *cryptd_alloc_aead(const char *alg_name,
+					  u32 type, u32 mask);
+
+struct crypto_aead *cryptd_aead_child(struct cryptd_aead *tfm);
+/* Must be called without moving CPUs. */
+bool cryptd_aead_queued(struct cryptd_aead *tfm);
+
+void cryptd_free_aead(struct cryptd_aead *tfm);
+
+#endif
diff --git a/include/crypto/crypto_wq.h b/include/crypto/crypto_wq.h
new file mode 100644
index 0000000..2311474
--- /dev/null
+++ b/include/crypto/crypto_wq.h
@@ -0,0 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef CRYPTO_WQ_H
+#define CRYPTO_WQ_H
+
+#include <linux/workqueue.h>
+
+extern struct workqueue_struct *kcrypto_wq;
+#endif
diff --git a/include/crypto/ctr.h b/include/crypto/ctr.h
new file mode 100644
index 0000000..4180fc0
--- /dev/null
+++ b/include/crypto/ctr.h
@@ -0,0 +1,20 @@
+/*
+ * CTR: Counter mode
+ *
+ * Copyright (c) 2007 Herbert Xu <herbert@gondor.apana.org.au>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ */
+
+#ifndef _CRYPTO_CTR_H
+#define _CRYPTO_CTR_H
+
+#define CTR_RFC3686_NONCE_SIZE 4
+#define CTR_RFC3686_IV_SIZE 8
+#define CTR_RFC3686_BLOCK_SIZE 16
+
+#endif  /* _CRYPTO_CTR_H */
diff --git a/include/crypto/des.h b/include/crypto/des.h
new file mode 100644
index 0000000..d4094d5
--- /dev/null
+++ b/include/crypto/des.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* 
+ * DES & Triple DES EDE Cipher Algorithms.
+ */
+
+#ifndef __CRYPTO_DES_H
+#define __CRYPTO_DES_H
+
+#define DES_KEY_SIZE		8
+#define DES_EXPKEY_WORDS	32
+#define DES_BLOCK_SIZE		8
+
+#define DES3_EDE_KEY_SIZE	(3 * DES_KEY_SIZE)
+#define DES3_EDE_EXPKEY_WORDS	(3 * DES_EXPKEY_WORDS)
+#define DES3_EDE_BLOCK_SIZE	DES_BLOCK_SIZE
+
+extern unsigned long des_ekey(u32 *pe, const u8 *k);
+
+extern int __des3_ede_setkey(u32 *expkey, u32 *flags, const u8 *key,
+			     unsigned int keylen);
+
+#endif /* __CRYPTO_DES_H */
diff --git a/include/crypto/dh.h b/include/crypto/dh.h
new file mode 100644
index 0000000..7e0dad9
--- /dev/null
+++ b/include/crypto/dh.h
@@ -0,0 +1,91 @@
+/*
+ * Diffie-Hellman secret to be used with kpp API along with helper functions
+ *
+ * Copyright (c) 2016, Intel Corporation
+ * Authors: Salvatore Benedetto <salvatore.benedetto@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ */
+#ifndef _CRYPTO_DH_
+#define _CRYPTO_DH_
+
+/**
+ * DOC: DH Helper Functions
+ *
+ * To use DH with the KPP cipher API, the following data structure and
+ * functions should be used.
+ *
+ * To use DH with KPP, the following functions should be used to operate on
+ * a DH private key. They produce the packet private key that can be set
+ * with the KPP API function call crypto_kpp_set_secret().
+ */
+
+/**
+ * struct dh - define a DH private key
+ *
+ * @key:	Private DH key
+ * @p:		Diffie-Hellman parameter P
+ * @q:		Diffie-Hellman parameter Q
+ * @g:		Diffie-Hellman generator G
+ * @key_size:	Size of the private DH key
+ * @p_size:	Size of DH parameter P
+ * @q_size:	Size of DH parameter Q
+ * @g_size:	Size of DH generator G
+ */
+struct dh {
+	void *key;
+	void *p;
+	void *q;
+	void *g;
+	unsigned int key_size;
+	unsigned int p_size;
+	unsigned int q_size;
+	unsigned int g_size;
+};
+
+/**
+ * crypto_dh_key_len() - Obtain the size of the private DH key
+ * @params:	private DH key
+ *
+ * This function returns the packet DH key size. A caller can use that
+ * with the provided DH private key reference to obtain the required
+ * memory size to hold a packet key.
+ *
+ * Return: size of the key in bytes
+ */
+unsigned int crypto_dh_key_len(const struct dh *params);
+
+/**
+ * crypto_dh_encode_key() - encode the private key
+ * @buf:	Buffer allocated by the caller to hold the packet DH
+ *		private key. The buffer should be at least crypto_dh_key_len
+ *		bytes in size.
+ * @len:	Length of the packet private key buffer
+ * @params:	Buffer with the caller-specified private key
+ *
+ * The DH implementations operate on a packet representation of the private
+ * key.
+ *
+ * Return:	-EINVAL if buffer has insufficient size, 0 on success
+ */
+int crypto_dh_encode_key(char *buf, unsigned int len, const struct dh *params);
+
+/**
+ * crypto_dh_decode_key() - decode a private key
+ * @buf:	Buffer holding a packet key that should be decoded
+ * @len:	Length of the packet private key buffer
+ * @params:	Buffer allocated by the caller that is filled with the
+ *		unpacked DH private key.
+ *
+ * The unpacking obtains the private key by pointing the members of @params
+ * to the correct locations in @buf. Thus, both structures refer to the same
+ * memory.
+ *
+ * Return:	-EINVAL if buffer has insufficient size, 0 on success
+ */
+int crypto_dh_decode_key(const char *buf, unsigned int len, struct dh *params);
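+
+/*
+ * Example (illustrative sketch): packing a caller-supplied DH key for
+ * crypto_kpp_set_secret(). The kpp handle and buffers are hypothetical
+ * and error handling is abbreviated.
+ *
+ *	struct dh params = { .key = ..., .key_size = ...,
+ *			     .p = ..., .p_size = ..., .g = ..., .g_size = ... };
+ *	unsigned int len = crypto_dh_key_len(&params);
+ *	char *buf = kmalloc(len, GFP_KERNEL);
+ *
+ *	if (!buf)
+ *		return -ENOMEM;
+ *	err = crypto_dh_encode_key(buf, len, &params);
+ *	if (!err)
+ *		err = crypto_kpp_set_secret(kpp, buf, len);
+ */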
+
+#endif
diff --git a/include/crypto/drbg.h b/include/crypto/drbg.h
new file mode 100644
index 0000000..3fb581b
--- /dev/null
+++ b/include/crypto/drbg.h
@@ -0,0 +1,283 @@
+/*
+ * DRBG based on NIST SP800-90A
+ *
+ * Copyright Stephan Mueller <smueller@chronox.de>, 2014
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, and the entire permission notice in its entirety,
+ *    including the disclaimer of warranties.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote
+ *    products derived from this software without specific prior
+ *    written permission.
+ *
+ * ALTERNATIVELY, this product may be distributed under the terms of
+ * the GNU General Public License, in which case the provisions of the GPL are
+ * required INSTEAD OF the above restrictions.  (This clause is
+ * necessary due to a potential bad interaction between the GPL and
+ * the restrictions contained in a BSD-style copyright.)
+ *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ALL OF
+ * WHICH ARE HEREBY DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
+ * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+ * USE OF THIS SOFTWARE, EVEN IF NOT ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
+ */
+
+#ifndef _DRBG_H
+#define _DRBG_H
+
+
+#include <linux/random.h>
+#include <linux/scatterlist.h>
+#include <crypto/hash.h>
+#include <crypto/skcipher.h>
+#include <linux/module.h>
+#include <linux/crypto.h>
+#include <linux/slab.h>
+#include <crypto/internal/rng.h>
+#include <crypto/rng.h>
+#include <linux/fips.h>
+#include <linux/mutex.h>
+#include <linux/list.h>
+#include <linux/workqueue.h>
+
+/*
+ * Concatenation Helper and string operation helper
+ *
+ * SP800-90A requires the concatenation of different data. To avoid copying
+ * buffers around or allocating additional memory, the following data structure
+ * is used to point to the original memory with its size. In addition, it
+ * is used to build a linked list. The linked list defines the concatenation
+ * of individual buffers. The order of the memory blocks referenced in that
+ * linked list determines the order of concatenation.
+ */
+struct drbg_string {
+	const unsigned char *buf;
+	size_t len;
+	struct list_head list;
+};
+
+static inline void drbg_string_fill(struct drbg_string *string,
+				    const unsigned char *buf, size_t len)
+{
+	string->buf = buf;
+	string->len = len;
+	INIT_LIST_HEAD(&string->list);
+}
+
+struct drbg_state;
+typedef uint32_t drbg_flag_t;
+
+struct drbg_core {
+	drbg_flag_t flags;	/* flags for the cipher */
+	__u8 statelen;		/* maximum state length */
+	__u8 blocklen_bytes;	/* block size of output in bytes */
+	char cra_name[CRYPTO_MAX_ALG_NAME]; /* mapping to kernel crypto API */
+	 /* kernel crypto API backend cipher name */
+	char backend_cra_name[CRYPTO_MAX_ALG_NAME];
+};
+
+struct drbg_state_ops {
+	int (*update)(struct drbg_state *drbg, struct list_head *seed,
+		      int reseed);
+	int (*generate)(struct drbg_state *drbg,
+			unsigned char *buf, unsigned int buflen,
+			struct list_head *addtl);
+	int (*crypto_init)(struct drbg_state *drbg);
+	int (*crypto_fini)(struct drbg_state *drbg);
+
+};
+
+struct drbg_test_data {
+	struct drbg_string *testentropy; /* TEST PARAMETER: test entropy */
+};
+
+struct drbg_state {
+	struct mutex drbg_mutex;	/* lock around DRBG */
+	unsigned char *V;	/* internal state 10.1.1.1 1a) */
+	unsigned char *Vbuf;
+	/* hash: static value 10.1.1.1 1b) hmac / ctr: key */
+	unsigned char *C;
+	unsigned char *Cbuf;
+	/* Number of RNG requests since last reseed -- 10.1.1.1 1c) */
+	size_t reseed_ctr;
+	size_t reseed_threshold;
+	 /* some memory the DRBG can use for its operation */
+	unsigned char *scratchpad;
+	unsigned char *scratchpadbuf;
+	void *priv_data;	/* Cipher handle */
+
+	struct crypto_skcipher *ctr_handle;	/* CTR mode cipher handle */
+	struct skcipher_request *ctr_req;	/* CTR mode request handle */
+	__u8 *outscratchpadbuf;			/* CTR mode output scratchpad */
+	__u8 *outscratchpad;			/* CTR mode aligned outbuf */
+	struct crypto_wait ctr_wait;		/* CTR mode async wait obj */
+	struct scatterlist sg_in, sg_out;	/* CTR mode SGLs */
+
+	bool seeded;		/* DRBG fully seeded? */
+	bool pr;		/* Prediction resistance enabled? */
+	struct work_struct seed_work;	/* asynchronous seeding support */
+	struct crypto_rng *jent;
+	const struct drbg_state_ops *d_ops;
+	const struct drbg_core *core;
+	struct drbg_string test_data;
+	struct random_ready_callback random_ready;
+};
+
+static inline __u8 drbg_statelen(struct drbg_state *drbg)
+{
+	if (drbg && drbg->core)
+		return drbg->core->statelen;
+	return 0;
+}
+
+static inline __u8 drbg_blocklen(struct drbg_state *drbg)
+{
+	if (drbg && drbg->core)
+		return drbg->core->blocklen_bytes;
+	return 0;
+}
+
+static inline __u8 drbg_keylen(struct drbg_state *drbg)
+{
+	if (drbg && drbg->core)
+		return (drbg->core->statelen - drbg->core->blocklen_bytes);
+	return 0;
+}
+
+static inline size_t drbg_max_request_bytes(struct drbg_state *drbg)
+{
+	/* SP800-90A requires the limit 2**19 bits, but we return bytes */
+	return (1 << 16);
+}
+
+static inline size_t drbg_max_addtl(struct drbg_state *drbg)
+{
+	/* SP800-90A requires 2**35 bytes additional info str / pers str */
+#if (__BITS_PER_LONG == 32)
+	/*
+	 * SP800-90A allows smaller maximum numbers to be returned -- we
+	 * return SIZE_MAX - 1 to allow the verification of the enforcement
+	 * of this value in drbg_healthcheck_sanity.
+	 */
+	return (SIZE_MAX - 1);
+#else
+	return (1UL<<35);
+#endif
+}
+
+static inline size_t drbg_max_requests(struct drbg_state *drbg)
+{
+	/* SP800-90A requires 2**48 maximum requests before reseeding */
+#if (__BITS_PER_LONG == 32)
+	return SIZE_MAX;
+#else
+	return (1UL<<48);
+#endif
+}
+
+/*
+ * This is a wrapper to the kernel crypto API function of
+ * crypto_rng_generate() to allow the caller to provide additional data.
+ *
+ * @drng DRBG handle -- see crypto_rng_get_bytes
+ * @outbuf output buffer -- see crypto_rng_get_bytes
+ * @outlen length of output buffer -- see crypto_rng_get_bytes
+ * @addtl additional information string wrapped in a drbg_string
+ *
+ * return
+ *	see crypto_rng_get_bytes
+ */
+static inline int crypto_drbg_get_bytes_addtl(struct crypto_rng *drng,
+			unsigned char *outbuf, unsigned int outlen,
+			struct drbg_string *addtl)
+{
+	return crypto_rng_generate(drng, addtl->buf, addtl->len,
+				   outbuf, outlen);
+}
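+
+/*
+ * Example (illustrative sketch): supplying additional input for one
+ * generate operation. The DRBG instance name and the buffers are
+ * illustrative; error handling is abbreviated.
+ *
+ *	struct crypto_rng *drng;
+ *	struct drbg_string addtl;
+ *
+ *	drng = crypto_alloc_rng("drbg_nopr_ctr_aes256", 0, 0);
+ *	drbg_string_fill(&addtl, addtl_buf, addtl_len);
+ *	ret = crypto_drbg_get_bytes_addtl(drng, outbuf, outlen, &addtl);
+ */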
+
+/*
+ * TEST code
+ *
+ * This is a wrapper to the kernel crypto API function of
+ * crypto_rng_generate() to allow the caller to provide additional data and
+ * allow furnishing of test_data
+ *
+ * @drng DRBG handle -- see crypto_rng_get_bytes
+ * @outbuf output buffer -- see crypto_rng_get_bytes
+ * @outlen length of output buffer -- see crypto_rng_get_bytes
+ * @addtl additional information string wrapped in a drbg_string
+ * @test_data filled test data
+ *
+ * return
+ *	see crypto_rng_get_bytes
+ */
+static inline int crypto_drbg_get_bytes_addtl_test(struct crypto_rng *drng,
+			unsigned char *outbuf, unsigned int outlen,
+			struct drbg_string *addtl,
+			struct drbg_test_data *test_data)
+{
+	crypto_rng_set_entropy(drng, test_data->testentropy->buf,
+			       test_data->testentropy->len);
+	return crypto_rng_generate(drng, addtl->buf, addtl->len,
+				   outbuf, outlen);
+}
+
+/*
+ * TEST code
+ *
+ * This is a wrapper to the kernel crypto API function of
+ * crypto_rng_reset() to allow the caller to provide test_data
+ *
+ * @drng DRBG handle -- see crypto_rng_reset
+ * @pers personalization string wrapped in a drbg_string
+ * @test_data filled test data
+ *
+ * return
+ *	see crypto_rng_reset
+ */
+static inline int crypto_drbg_reset_test(struct crypto_rng *drng,
+					 struct drbg_string *pers,
+					 struct drbg_test_data *test_data)
+{
+	crypto_rng_set_entropy(drng, test_data->testentropy->buf,
+			       test_data->testentropy->len);
+	return crypto_rng_reset(drng, pers->buf, pers->len);
+}
+
+/* DRBG type flags */
+#define DRBG_CTR	((drbg_flag_t)1<<0)
+#define DRBG_HMAC	((drbg_flag_t)1<<1)
+#define DRBG_HASH	((drbg_flag_t)1<<2)
+#define DRBG_TYPE_MASK	(DRBG_CTR | DRBG_HMAC | DRBG_HASH)
+/* DRBG strength flags */
+#define DRBG_STRENGTH128	((drbg_flag_t)1<<3)
+#define DRBG_STRENGTH192	((drbg_flag_t)1<<4)
+#define DRBG_STRENGTH256	((drbg_flag_t)1<<5)
+#define DRBG_STRENGTH_MASK	(DRBG_STRENGTH128 | DRBG_STRENGTH192 | \
+				 DRBG_STRENGTH256)
+
+enum drbg_prefixes {
+	DRBG_PREFIX0 = 0x00,
+	DRBG_PREFIX1,
+	DRBG_PREFIX2,
+	DRBG_PREFIX3
+};
+
+#endif /* _DRBG_H */
diff --git a/include/crypto/ecdh.h b/include/crypto/ecdh.h
new file mode 100644
index 0000000..d696317
--- /dev/null
+++ b/include/crypto/ecdh.h
@@ -0,0 +1,88 @@
+/*
+ * ECDH params to be used with kpp API
+ *
+ * Copyright (c) 2016, Intel Corporation
+ * Authors: Salvatore Benedetto <salvatore.benedetto@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ */
+#ifndef _CRYPTO_ECDH_
+#define _CRYPTO_ECDH_
+
+/**
+ * DOC: ECDH Helper Functions
+ *
+ * To use ECDH with the KPP cipher API, the following data structure and
+ * functions should be used.
+ *
+ * The ECC curves known to the ECDH implementation are specified in this
+ * header file.
+ *
+ * To use ECDH with KPP, the following functions should be used to operate on
+ * an ECDH private key. The packet private key that can be set with
+ * the KPP API function call of crypto_kpp_set_secret.
+ */
+
+/* Curves IDs */
+#define ECC_CURVE_NIST_P192	0x0001
+#define ECC_CURVE_NIST_P256	0x0002
+
+/**
+ * struct ecdh - define an ECDH private key
+ *
+ * @curve_id:	ECC curve the key is based on.
+ * @key:	Private ECDH key
+ * @key_size:	Size of the private ECDH key
+ */
+struct ecdh {
+	unsigned short curve_id;
+	char *key;
+	unsigned short key_size;
+};
+
+/**
+ * crypto_ecdh_key_len() - Obtain the size of the private ECDH key
+ * @params:	private ECDH key
+ *
+ * This function returns the packet ECDH key size. A caller can use that
+ * with the provided ECDH private key reference to obtain the required
+ * memory size to hold a packet key.
+ *
+ * Return: size of the key in bytes
+ */
+unsigned int crypto_ecdh_key_len(const struct ecdh *params);
+
+/**
+ * crypto_ecdh_encode_key() - encode the private key
+ * @buf:	Buffer allocated by the caller to hold the packet ECDH
+ *		private key. The buffer should be at least crypto_ecdh_key_len
+ *		bytes in size.
+ * @len:	Length of the packet private key buffer
+ * @p:		Buffer with the caller-specified private key
+ *
+ * The ECDH implementations operate on a packet representation of the private
+ * key.
+ *
+ * Return:	-EINVAL if buffer has insufficient size, 0 on success
+ */
+int crypto_ecdh_encode_key(char *buf, unsigned int len, const struct ecdh *p);
+
+/**
+ * crypto_ecdh_decode_key() - decode a private key
+ * @buf:	Buffer holding a packet key that should be decoded
+ * @len:	Length of the packet private key buffer
+ * @p:		Buffer allocated by the caller that is filled with the
+ *		unpacked ECDH private key.
+ *
+ * The unpacking obtains the private key by pointing @p to the correct location
+ * in @buf. Thus, both pointers refer to the same memory.
+ *
+ * Return:	-EINVAL if buffer has insufficient size, 0 on success
+ */
+int crypto_ecdh_decode_key(const char *buf, unsigned int len, struct ecdh *p);
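+
+/*
+ * Example (illustrative sketch): packing a NIST P-256 private key.
+ * The 32-byte key buffer and the destination buffer are illustrative;
+ * error handling is abbreviated.
+ *
+ *	struct ecdh p = {
+ *		.curve_id = ECC_CURVE_NIST_P256,
+ *		.key = my_key,
+ *		.key_size = 32,
+ *	};
+ *	unsigned int len = crypto_ecdh_key_len(&p);
+ *
+ *	err = crypto_ecdh_encode_key(buf, len, &p);
+ */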
+
+#endif
diff --git a/include/crypto/engine.h b/include/crypto/engine.h
new file mode 100644
index 0000000..1cbec29
--- /dev/null
+++ b/include/crypto/engine.h
@@ -0,0 +1,116 @@
+/*
+ * Crypto engine API
+ *
+ * Copyright (c) 2016 Baolin Wang <baolin.wang@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ */
+#ifndef _CRYPTO_ENGINE_H
+#define _CRYPTO_ENGINE_H
+
+#include <linux/crypto.h>
+#include <linux/list.h>
+#include <linux/kernel.h>
+#include <linux/kthread.h>
+#include <crypto/algapi.h>
+#include <crypto/aead.h>
+#include <crypto/akcipher.h>
+#include <crypto/hash.h>
+#include <crypto/skcipher.h>
+
+#define ENGINE_NAME_LEN	30
+/*
+ * struct crypto_engine - crypto hardware engine
+ * @name: the engine name
+ * @idling: the engine is entering idle state
+ * @busy: request pump is busy
+ * @running: the engine is currently running
+ * @cur_req_prepared: current request is prepared
+ * @list: link with the global crypto engine list
+ * @queue_lock: spinlock to synchronise access to the request queue
+ * @queue: the crypto queue of the engine
+ * @rt: whether this queue is set to run as a realtime task
+ * @prepare_crypt_hardware: a request will soon arrive from the queue
+ * so the subsystem requests the driver to prepare the hardware
+ * by issuing this call
+ * @unprepare_crypt_hardware: there are currently no more requests on the
+ * queue so the subsystem notifies the driver that it may relax the
+ * hardware by issuing this call
+ * @kworker: kthread worker struct for request pump
+ * @pump_requests: work struct for scheduling work to the request pump
+ * @priv_data: the engine private data
+ * @cur_req: the current request which is on processing
+ */
+struct crypto_engine {
+	char			name[ENGINE_NAME_LEN];
+	bool			idling;
+	bool			busy;
+	bool			running;
+	bool			cur_req_prepared;
+
+	struct list_head	list;
+	spinlock_t		queue_lock;
+	struct crypto_queue	queue;
+	struct device		*dev;
+
+	bool			rt;
+
+	int (*prepare_crypt_hardware)(struct crypto_engine *engine);
+	int (*unprepare_crypt_hardware)(struct crypto_engine *engine);
+
+	struct kthread_worker           *kworker;
+	struct kthread_work             pump_requests;
+
+	void				*priv_data;
+	struct crypto_async_request	*cur_req;
+};
+
+/*
+ * struct crypto_engine_op - crypto hardware engine operations
+ * @prepare_request: do any preparation, if needed, before handling the
+ * current request
+ * @unprepare_request: undo any work done by prepare_request()
+ * @do_one_request: do the crypto operation for the current request
+ */
+struct crypto_engine_op {
+	int (*prepare_request)(struct crypto_engine *engine,
+			       void *areq);
+	int (*unprepare_request)(struct crypto_engine *engine,
+				 void *areq);
+	int (*do_one_request)(struct crypto_engine *engine,
+			      void *areq);
+};
+
+struct crypto_engine_ctx {
+	struct crypto_engine_op op;
+};
+
+int crypto_transfer_ablkcipher_request_to_engine(struct crypto_engine *engine,
+						 struct ablkcipher_request *req);
+int crypto_transfer_aead_request_to_engine(struct crypto_engine *engine,
+					   struct aead_request *req);
+int crypto_transfer_akcipher_request_to_engine(struct crypto_engine *engine,
+					       struct akcipher_request *req);
+int crypto_transfer_hash_request_to_engine(struct crypto_engine *engine,
+					       struct ahash_request *req);
+int crypto_transfer_skcipher_request_to_engine(struct crypto_engine *engine,
+					       struct skcipher_request *req);
+void crypto_finalize_ablkcipher_request(struct crypto_engine *engine,
+					struct ablkcipher_request *req, int err);
+void crypto_finalize_aead_request(struct crypto_engine *engine,
+				  struct aead_request *req, int err);
+void crypto_finalize_akcipher_request(struct crypto_engine *engine,
+				      struct akcipher_request *req, int err);
+void crypto_finalize_hash_request(struct crypto_engine *engine,
+				  struct ahash_request *req, int err);
+void crypto_finalize_skcipher_request(struct crypto_engine *engine,
+				      struct skcipher_request *req, int err);
+int crypto_engine_start(struct crypto_engine *engine);
+int crypto_engine_stop(struct crypto_engine *engine);
+struct crypto_engine *crypto_engine_alloc_init(struct device *dev, bool rt);
+int crypto_engine_exit(struct crypto_engine *engine);
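+
+/*
+ * Example (illustrative sketch): typical bring-up in a driver's probe
+ * routine. @dev is the driver's struct device; error handling is
+ * abbreviated.
+ *
+ *	engine = crypto_engine_alloc_init(dev, true);
+ *	if (!engine)
+ *		return -ENOMEM;
+ *	err = crypto_engine_start(engine);
+ *
+ * and the matching teardown:
+ *
+ *	crypto_engine_stop(engine);
+ *	crypto_engine_exit(engine);
+ */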
+
+#endif /* _CRYPTO_ENGINE_H */
diff --git a/include/crypto/gcm.h b/include/crypto/gcm.h
new file mode 100644
index 0000000..c50e057
--- /dev/null
+++ b/include/crypto/gcm.h
@@ -0,0 +1,8 @@
+#ifndef _CRYPTO_GCM_H
+#define _CRYPTO_GCM_H
+
+#define GCM_AES_IV_SIZE 12
+#define GCM_RFC4106_IV_SIZE 8
+#define GCM_RFC4543_IV_SIZE 8
+
+#endif
diff --git a/include/crypto/gf128mul.h b/include/crypto/gf128mul.h
new file mode 100644
index 0000000..fa0a63d
--- /dev/null
+++ b/include/crypto/gf128mul.h
@@ -0,0 +1,252 @@
+/* gf128mul.h - GF(2^128) multiplication functions
+ *
+ * Copyright (c) 2003, Dr Brian Gladman, Worcester, UK.
+ * Copyright (c) 2006 Rik Snel <rsnel@cube.dyndns.org>
+ *
+ * Based on Dr Brian Gladman's (GPL'd) work published at
+ * http://fp.gladman.plus.com/cryptography_technology/index.htm
+ * See the original copyright notice below.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ */
+/*
+ ---------------------------------------------------------------------------
+ Copyright (c) 2003, Dr Brian Gladman, Worcester, UK.   All rights reserved.
+
+ LICENSE TERMS
+
+ The free distribution and use of this software in both source and binary
+ form is allowed (with or without changes) provided that:
+
+   1. distributions of this source code include the above copyright
+      notice, this list of conditions and the following disclaimer;
+
+   2. distributions in binary form include the above copyright
+      notice, this list of conditions and the following disclaimer
+      in the documentation and/or other associated materials;
+
+   3. the copyright holder's name is not used to endorse products
+      built using this software without specific written permission.
+
+ ALTERNATIVELY, provided that this notice is retained in full, this product
+ may be distributed under the terms of the GNU General Public License (GPL),
+ in which case the provisions of the GPL apply INSTEAD OF those given above.
+
+ DISCLAIMER
+
+ This software is provided 'as is' with no explicit or implied warranties
+ in respect of its properties, including, but not limited to, correctness
+ and/or fitness for purpose.
+ ---------------------------------------------------------------------------
+ Issue Date: 31/01/2006
+
+ An implementation of field multiplication in Galois Field GF(2^128)
+*/
+
+#ifndef _CRYPTO_GF128MUL_H
+#define _CRYPTO_GF128MUL_H
+
+#include <asm/byteorder.h>
+#include <crypto/b128ops.h>
+#include <linux/slab.h>
+
+/* Comment by Rik:
+ *
+ * For some background on GF(2^128) see for example: 
+ * http://csrc.nist.gov/groups/ST/toolkit/BCM/documents/proposedmodes/gcm/gcm-revised-spec.pdf 
+ *
+ * The elements of GF(2^128) := GF(2)[X]/(X^128-X^7-X^2-X^1-1) can
+ * be mapped to computer memory in a variety of ways. Let's examine
+ * three common cases.
+ *
+ * Take a look at the 16 binary octets below in memory order. The msb's
+ * are left and the lsb's are right. char b[16] is an array and b[0] is
+ * the first octet.
+ *
+ * 10000000 00000000 00000000 00000000 .... 00000000 00000000 00000000
+ *   b[0]     b[1]     b[2]     b[3]          b[13]    b[14]    b[15]
+ *
+ * Every bit is a coefficient of some power of X. We can store the bits
+ * in every byte in little-endian order and the bytes themselves also in
+ * little endian order. I will call this lle (little-little-endian).
+ * The above buffer represents the polynomial 1, and X^7+X^2+X^1+1 looks
+ * like 11100001 00000000 .... 00000000 = { 0xE1, 0x00, }.
+ * This format was originally implemented in gf128mul and is used
+ * in GCM (Galois/Counter mode) and in ABL (Arbitrary Block Length).
+ *
+ * Another convention says: store the bits in bigendian order and the
+ * bytes also. This is bbe (big-big-endian). Now the buffer above
+ * represents X^127. X^7+X^2+X^1+1 looks like 00000000 .... 10000111,
+ * b[15] = 0x87 and the rest is 0. LRW uses this convention and bbe
+ * is partly implemented.
+ *
+ * Both of the above formats are easy to implement on big-endian
+ * machines.
+ *
+ * XTS and EME (the latter of which is patent encumbered) use the ble
+ * format (bits are stored in big endian order and the bytes in little
+ * endian). The above buffer represents X^7 in this case and the
+ * primitive polynomial is b[0] = 0x87.
+ *
+ * The common machine word-size is smaller than 128 bits, so to make
+ * an efficient implementation we must split into machine word sizes.
+ * This implementation uses 64-bit words for the moment. Machine
+ * endianness comes into play. The lle format in relation to machine
+ * endianness is discussed below by the original author of gf128mul Dr
+ * Brian Gladman.
+ *
+ * Let's look at the bbe and ble format on a little endian machine.
+ *
+ * bbe on a little endian machine u32 x[4]:
+ *
+ *  MS            x[0]           LS  MS            x[1]		  LS
+ *  ms   ls ms   ls ms   ls ms   ls  ms   ls ms   ls ms   ls ms   ls
+ *  103..96 111.104 119.112 127.120  71...64 79...72 87...80 95...88
+ *
+ *  MS            x[2]           LS  MS            x[3]		  LS
+ *  ms   ls ms   ls ms   ls ms   ls  ms   ls ms   ls ms   ls ms   ls
+ *  39...32 47...40 55...48 63...56  07...00 15...08 23...16 31...24
+ *
+ * ble on a little endian machine
+ *
+ *  MS            x[0]           LS  MS            x[1]		  LS
+ *  ms   ls ms   ls ms   ls ms   ls  ms   ls ms   ls ms   ls ms   ls
+ *  31...24 23...16 15...08 07...00  63...56 55...48 47...40 39...32
+ *
+ *  MS            x[2]           LS  MS            x[3]		  LS
+ *  ms   ls ms   ls ms   ls ms   ls  ms   ls ms   ls ms   ls ms   ls
+ *  95...88 87...80 79...72 71...64  127.120 119.112 111.104 103..96
+ *
+ * Multiplications in GF(2^128) are mostly bit-shifts, so you see why
+ * ble (and lbe also) are easier to implement on a little-endian
+ * machine than on a big-endian machine. The converse holds for bbe
+ * and lle.
+ *
+ * Note: to have good alignment, it seems to me that it is sufficient
+ * to keep elements of GF(2^128) in type u64[2]. On 32-bit wordsize
+ * machines this will automatically be aligned to the word size, and
+ * likewise on a 64-bit machine.
+ */
+/*	Multiply a GF(2^128) field element by x. Field elements are
+    held in arrays of bytes in which field bits 8n..8n + 7 are held in
+    byte[n], with lower indexed bits placed in the more numerically
+    significant bit positions within bytes.
+
+    On little endian machines the bit indexes translate into the bit
+    positions within four 32-bit words in the following way
+
+    MS            x[0]           LS  MS            x[1]		  LS
+    ms   ls ms   ls ms   ls ms   ls  ms   ls ms   ls ms   ls ms   ls
+    24...31 16...23 08...15 00...07  56...63 48...55 40...47 32...39
+
+    MS            x[2]           LS  MS            x[3]		  LS
+    ms   ls ms   ls ms   ls ms   ls  ms   ls ms   ls ms   ls ms   ls
+    88...95 80...87 72...79 64...71  120.127 112.119 104.111 96..103
+
+    On big endian machines the bit indexes translate into the bit
+    positions within four 32-bit words in the following way
+
+    MS            x[0]           LS  MS            x[1]		  LS
+    ms   ls ms   ls ms   ls ms   ls  ms   ls ms   ls ms   ls ms   ls
+    00...07 08...15 16...23 24...31  32...39 40...47 48...55 56...63
+
+    MS            x[2]           LS  MS            x[3]		  LS
+    ms   ls ms   ls ms   ls ms   ls  ms   ls ms   ls ms   ls ms   ls
+    64...71 72...79 80...87 88...95  96..103 104.111 112.119 120.127
+*/
+
+/*	A slow generic version of gf_mul, implemented for lle and bbe
+ * 	It multiplies a and b and puts the result in a */
+void gf128mul_lle(be128 *a, const be128 *b);
+
+void gf128mul_bbe(be128 *a, const be128 *b);
+
+/*
+ * The following functions multiply a field element by x in
+ * the polynomial field representation.  They use 64-bit word operations
+ * to gain speed but compensate for machine endianness and hence work
+ * correctly on both styles of machine.
+ *
+ * They are defined here for performance.
+ */
+
+static inline u64 gf128mul_mask_from_bit(u64 x, int which)
+{
+	/* a constant-time version of 'x & ((u64)1 << which) ? (u64)-1 : 0' */
+	return ((s64)(x << (63 - which)) >> 63);
+}
+
+static inline void gf128mul_x_lle(be128 *r, const be128 *x)
+{
+	u64 a = be64_to_cpu(x->a);
+	u64 b = be64_to_cpu(x->b);
+
+	/* equivalent to gf128mul_table_le[(b << 7) & 0xff] << 48
+	 * (see crypto/gf128mul.c): */
+	u64 _tt = gf128mul_mask_from_bit(b, 0) & ((u64)0xe1 << 56);
+
+	r->b = cpu_to_be64((b >> 1) | (a << 63));
+	r->a = cpu_to_be64((a >> 1) ^ _tt);
+}
+
+static inline void gf128mul_x_bbe(be128 *r, const be128 *x)
+{
+	u64 a = be64_to_cpu(x->a);
+	u64 b = be64_to_cpu(x->b);
+
+	/* equivalent to gf128mul_table_be[a >> 63] (see crypto/gf128mul.c): */
+	u64 _tt = gf128mul_mask_from_bit(a, 63) & 0x87;
+
+	r->a = cpu_to_be64((a << 1) | (b >> 63));
+	r->b = cpu_to_be64((b << 1) ^ _tt);
+}
+
+/* needed by XTS */
+static inline void gf128mul_x_ble(le128 *r, const le128 *x)
+{
+	u64 a = le64_to_cpu(x->a);
+	u64 b = le64_to_cpu(x->b);
+
+	/* equivalent to gf128mul_table_be[b >> 63] (see crypto/gf128mul.c): */
+	u64 _tt = gf128mul_mask_from_bit(a, 63) & 0x87;
+
+	r->a = cpu_to_le64((a << 1) | (b >> 63));
+	r->b = cpu_to_le64((b << 1) ^ _tt);
+}
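+
+/*
+ * Example (illustrative sketch): an XTS implementation advances its
+ * tweak from one block to the next by multiplying it by x in the ble
+ * representation. In-place operation is fine, as both input words are
+ * read before the result is written:
+ *
+ *	le128 t;	current tweak
+ *
+ *	gf128mul_x_ble(&t, &t);
+ */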
+
+/* 4k table optimization */
+
+struct gf128mul_4k {
+	be128 t[256];
+};
+
+struct gf128mul_4k *gf128mul_init_4k_lle(const be128 *g);
+struct gf128mul_4k *gf128mul_init_4k_bbe(const be128 *g);
+void gf128mul_4k_lle(be128 *a, const struct gf128mul_4k *t);
+void gf128mul_4k_bbe(be128 *a, const struct gf128mul_4k *t);
+void gf128mul_x8_ble(le128 *r, const le128 *x);
+static inline void gf128mul_free_4k(struct gf128mul_4k *t)
+{
+	kzfree(t);
+}
+
+
+/* 64k table optimization, implemented for bbe */
+
+struct gf128mul_64k {
+	struct gf128mul_4k *t[16];
+};
+
+/* First initialize with the constant factor with which you
+ * want to multiply and then call gf128mul_64k_bbe with the other
+ * factor in the first argument, and the table in the second.
+ * Afterwards, the result is stored in *a.
+ */
+struct gf128mul_64k *gf128mul_init_64k_bbe(const be128 *g);
+void gf128mul_free_64k(struct gf128mul_64k *t);
+void gf128mul_64k_bbe(be128 *a, const struct gf128mul_64k *t);
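+
+/*
+ * Example (illustrative sketch): repeated multiplication by a fixed
+ * factor h, as a GHASH-style construction might do. Error handling is
+ * abbreviated.
+ *
+ *	struct gf128mul_64k *t = gf128mul_init_64k_bbe(&h);
+ *
+ *	if (!t)
+ *		return -ENOMEM;
+ *	gf128mul_64k_bbe(&x, t);	computes x = x * h
+ *	gf128mul_free_64k(t);
+ */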
+
+#endif /* _CRYPTO_GF128MUL_H */
diff --git a/include/crypto/ghash.h b/include/crypto/ghash.h
new file mode 100644
index 0000000..9136301
--- /dev/null
+++ b/include/crypto/ghash.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Common values for GHASH algorithms
+ */
+
+#ifndef __CRYPTO_GHASH_H__
+#define __CRYPTO_GHASH_H__
+
+#include <linux/types.h>
+#include <crypto/gf128mul.h>
+
+#define GHASH_BLOCK_SIZE	16
+#define GHASH_DIGEST_SIZE	16
+
+struct ghash_ctx {
+	struct gf128mul_4k *gf128;
+};
+
+struct ghash_desc_ctx {
+	u8 buffer[GHASH_BLOCK_SIZE];
+	u32 bytes;
+};
+
+#endif
diff --git a/include/crypto/hash.h b/include/crypto/hash.h
new file mode 100644
index 0000000..76e432c
--- /dev/null
+++ b/include/crypto/hash.h
@@ -0,0 +1,942 @@
+/*
+ * Hash: Hash algorithms under the crypto API
+ * 
+ * Copyright (c) 2008 Herbert Xu <herbert@gondor.apana.org.au>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option) 
+ * any later version.
+ *
+ */
+
+#ifndef _CRYPTO_HASH_H
+#define _CRYPTO_HASH_H
+
+#include <linux/crypto.h>
+#include <linux/string.h>
+
+struct crypto_ahash;
+
+/**
+ * DOC: Message Digest Algorithm Definitions
+ *
+ * These data structures define modular message digest algorithm
+ * implementations, managed via crypto_register_ahash(),
+ * crypto_register_shash(), crypto_unregister_ahash() and
+ * crypto_unregister_shash().
+ */
+
+/**
+ * struct hash_alg_common - define properties of message digest
+ * @digestsize: Size of the result of the transformation. A buffer of this size
+ *	        must be available to the @final and @finup calls, so they can
+ *	        store the resulting hash into it. For various predefined sizes,
+ *	        search include/crypto/ using
+ *	        git grep _DIGEST_SIZE include/crypto.
+ * @statesize: Size of the block for partial state of the transformation. A
+ *	       buffer of this size must be passed to the @export function as it
+ *	       will save the partial state of the transformation into it. On the
+ *	       other side, the @import function will load the state from a
+ *	       buffer of this size as well.
+ * @base: Start of data structure of cipher algorithm. The common data
+ *	  structure of crypto_alg contains information common to all ciphers.
+ *	  The hash_alg_common data structure now adds the hash-specific
+ *	  information.
+ */
+struct hash_alg_common {
+	unsigned int digestsize;
+	unsigned int statesize;
+
+	struct crypto_alg base;
+};
+
+struct ahash_request {
+	struct crypto_async_request base;
+
+	unsigned int nbytes;
+	struct scatterlist *src;
+	u8 *result;
+
+	/* This field may only be used by the ahash API code. */
+	void *priv;
+
+	void *__ctx[] CRYPTO_MINALIGN_ATTR;
+};
+
+#define AHASH_REQUEST_ON_STACK(name, ahash) \
+	char __##name##_desc[sizeof(struct ahash_request) + \
+		crypto_ahash_reqsize(ahash)] CRYPTO_MINALIGN_ATTR; \
+	struct ahash_request *name = (void *)__##name##_desc
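+
+/*
+ * Example (illustrative sketch): a one-shot digest over a scatterlist
+ * using a stack-allocated request. @tfm is an allocated ahash handle;
+ * sg, out and nbytes are caller state. The request helpers used below
+ * are declared later in this header.
+ *
+ *	AHASH_REQUEST_ON_STACK(req, tfm);
+ *
+ *	ahash_request_set_tfm(req, tfm);
+ *	ahash_request_set_callback(req, 0, NULL, NULL);
+ *	ahash_request_set_crypt(req, sg, out, nbytes);
+ *	err = crypto_ahash_digest(req);
+ *	ahash_request_zero(req);
+ */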
+
+/**
+ * struct ahash_alg - asynchronous message digest definition
+ * @init: **[mandatory]** Initialize the transformation context. Intended only to initialize the
+ *	  state of the HASH transformation at the beginning. This shall fill in
+ *	  the internal structures used during the entire duration of the whole
+ *	  transformation. No data processing happens at this point. Driver code
+ *	  implementation must not use req->result.
+ * @update: **[mandatory]** Push a chunk of data into the driver for transformation. This
+ *	   function actually pushes blocks of data from upper layers into the
+ *	   driver, which then passes those to the hardware as seen fit. This
+ *	   function must not finalize the HASH transformation by calculating the
+ *	   final message digest as this only adds more data into the
+ *	   transformation. This function shall not modify the transformation
+ *	   context, as this function may be called in parallel with the same
+ *	   transformation object. Data processing can happen synchronously
+ *	   [SHASH] or asynchronously [AHASH] at this point. Driver must not use
+ *	   req->result.
+ * @final: **[mandatory]** Retrieve result from the driver. This function finalizes the
+ *	   transformation and retrieves the resulting hash from the driver and
+ *	   pushes it back to upper layers. No data processing happens at this
+ *	   point unless hardware requires it to finish the transformation
+ *	   (then the data buffered by the device driver is processed).
+ * @finup: **[optional]** Combination of @update and @final. This function is effectively a
+ *	   combination of @update and @final calls issued in sequence. As some
+ *	   hardware cannot do @update and @final separately, this callback was
+ *	   added to allow such hardware to be used at least by IPsec. Data
+ *	   processing can happen synchronously [SHASH] or asynchronously [AHASH]
+ *	   at this point.
+ * @digest: Combination of @init and @update and @final. This function
+ *	    effectively behaves as the entire chain of operations, @init,
+ *	    @update and @final issued in sequence. Just like @finup, this was
+ *	    added for hardware which cannot do even the @finup, but can only do
+ *	    the whole transformation in one run. Data processing can happen
+ *	    synchronously [SHASH] or asynchronously [AHASH] at this point.
+ * @setkey: Set optional key used by the hashing algorithm. Intended to push
+ *	    optional key used by the hashing algorithm from upper layers into
+ *	    the driver. This function can store the key in the transformation
+ *	    context or can outright program it into the hardware. In the former
+ *	    case, one must be careful to program the key into the hardware at
+ *	    appropriate time and one must be careful that .setkey() can be
+ *	    called multiple times during the existence of the transformation
+ *	    object. Not all hashing algorithms implement this function, as it
+ *	    is only needed for keyed message digests. SHAx/MDx/CRCx do NOT
+ *	    implement this function. HMAC(MDx)/HMAC(SHAx)/CMAC(AES) do implement
+ *	    this function. This function must be called before any other of the
+ *	    @init, @update, @final, @finup, @digest is called. No data
+ *	    processing happens at this point.
+ * @export: Export partial state of the transformation. This function dumps the
+ *	    entire state of the ongoing transformation into a provided block of
+ *	    data so it can be @import 'ed back later on. This is useful in case
+ *	    you want to save the partial result of the transformation after
+ *	    processing a certain amount of data and reload this partial result
+ *	    later on for multiple re-use. No data processing
+ *	    happens at this point. Driver must not use req->result.
+ * @import: Import partial state of the transformation. This function loads the
+ *	    entire state of the ongoing transformation from a provided block of
+ *	    data so the transformation can continue from this point onward. No
+ *	    data processing happens at this point. Driver must not use
+ *	    req->result.
+ * @halg: see struct hash_alg_common
+ */
+struct ahash_alg {
+	int (*init)(struct ahash_request *req);
+	int (*update)(struct ahash_request *req);
+	int (*final)(struct ahash_request *req);
+	int (*finup)(struct ahash_request *req);
+	int (*digest)(struct ahash_request *req);
+	int (*export)(struct ahash_request *req, void *out);
+	int (*import)(struct ahash_request *req, const void *in);
+	int (*setkey)(struct crypto_ahash *tfm, const u8 *key,
+		      unsigned int keylen);
+
+	struct hash_alg_common halg;
+};
+
+struct shash_desc {
+	struct crypto_shash *tfm;
+	u32 flags;
+
+	void *__ctx[] CRYPTO_MINALIGN_ATTR;
+};
+
+#define SHASH_DESC_ON_STACK(shash, ctx)				  \
+	char __##shash##_desc[sizeof(struct shash_desc) +	  \
+		crypto_shash_descsize(ctx)] CRYPTO_MINALIGN_ATTR; \
+	struct shash_desc *shash = (struct shash_desc *)__##shash##_desc
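+
+/*
+ * Example (illustrative sketch): a one-shot synchronous digest with a
+ * stack-allocated descriptor. @tfm is an allocated crypto_shash
+ * handle; crypto_shash_digest() and shash_desc_zero() are declared
+ * later in this header.
+ *
+ *	SHASH_DESC_ON_STACK(desc, tfm);
+ *
+ *	desc->tfm = tfm;
+ *	desc->flags = 0;
+ *	err = crypto_shash_digest(desc, data, len, out);
+ *	shash_desc_zero(desc);
+ */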
+
+/**
+ * struct shash_alg - synchronous message digest definition
+ * @init: see struct ahash_alg
+ * @update: see struct ahash_alg
+ * @final: see struct ahash_alg
+ * @finup: see struct ahash_alg
+ * @digest: see struct ahash_alg
+ * @export: see struct ahash_alg
+ * @import: see struct ahash_alg
+ * @setkey: see struct ahash_alg
+ * @digestsize: see struct ahash_alg
+ * @statesize: see struct ahash_alg
+ * @descsize: Size of the operational state for the message digest. This state
+ * 	      size is the memory size that needs to be allocated for
+ *	      shash_desc.__ctx
+ * @base: internally used
+ */
+struct shash_alg {
+	int (*init)(struct shash_desc *desc);
+	int (*update)(struct shash_desc *desc, const u8 *data,
+		      unsigned int len);
+	int (*final)(struct shash_desc *desc, u8 *out);
+	int (*finup)(struct shash_desc *desc, const u8 *data,
+		     unsigned int len, u8 *out);
+	int (*digest)(struct shash_desc *desc, const u8 *data,
+		      unsigned int len, u8 *out);
+	int (*export)(struct shash_desc *desc, void *out);
+	int (*import)(struct shash_desc *desc, const void *in);
+	int (*setkey)(struct crypto_shash *tfm, const u8 *key,
+		      unsigned int keylen);
+
+	unsigned int descsize;
+
+	/* These fields must match hash_alg_common. */
+	unsigned int digestsize
+		__attribute__ ((aligned(__alignof__(struct hash_alg_common))));
+	unsigned int statesize;
+
+	struct crypto_alg base;
+};
+
+struct crypto_ahash {
+	int (*init)(struct ahash_request *req);
+	int (*update)(struct ahash_request *req);
+	int (*final)(struct ahash_request *req);
+	int (*finup)(struct ahash_request *req);
+	int (*digest)(struct ahash_request *req);
+	int (*export)(struct ahash_request *req, void *out);
+	int (*import)(struct ahash_request *req, const void *in);
+	int (*setkey)(struct crypto_ahash *tfm, const u8 *key,
+		      unsigned int keylen);
+
+	unsigned int reqsize;
+	struct crypto_tfm base;
+};
+
+struct crypto_shash {
+	unsigned int descsize;
+	struct crypto_tfm base;
+};
+
+/**
+ * DOC: Asynchronous Message Digest API
+ *
+ * The asynchronous message digest API is used with the ciphers of type
+ * CRYPTO_ALG_TYPE_AHASH (listed as type "ahash" in /proc/crypto)
+ *
+ * The asynchronous cipher operation discussion provided for the
+ * CRYPTO_ALG_TYPE_ABLKCIPHER API applies here as well.
+ */
+
+static inline struct crypto_ahash *__crypto_ahash_cast(struct crypto_tfm *tfm)
+{
+	return container_of(tfm, struct crypto_ahash, base);
+}
+
+/**
+ * crypto_alloc_ahash() - allocate ahash cipher handle
+ * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
+ *	      ahash cipher
+ * @type: specifies the type of the cipher
+ * @mask: specifies the mask for the cipher
+ *
+ * Allocate a cipher handle for an ahash. The returned struct
+ * crypto_ahash is the cipher handle that is required for any subsequent
+ * API invocation for that ahash.
+ *
+ * Return: allocated cipher handle in case of success; IS_ERR() is true in case
+ *	   of an error, PTR_ERR() returns the error code.
+ */
+struct crypto_ahash *crypto_alloc_ahash(const char *alg_name, u32 type,
+					u32 mask);
+
+static inline struct crypto_tfm *crypto_ahash_tfm(struct crypto_ahash *tfm)
+{
+	return &tfm->base;
+}
+
+/**
+ * crypto_free_ahash() - zeroize and free the ahash handle
+ * @tfm: cipher handle to be freed
+ */
+static inline void crypto_free_ahash(struct crypto_ahash *tfm)
+{
+	crypto_destroy_tfm(tfm, crypto_ahash_tfm(tfm));
+}
+
+/**
+ * crypto_has_ahash() - Search for the availability of an ahash.
+ * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
+ *	      ahash
+ * @type: specifies the type of the ahash
+ * @mask: specifies the mask for the ahash
+ *
+ * Return: true when the ahash is known to the kernel crypto API; false
+ *	   otherwise
+ */
+int crypto_has_ahash(const char *alg_name, u32 type, u32 mask);
+
+static inline const char *crypto_ahash_alg_name(struct crypto_ahash *tfm)
+{
+	return crypto_tfm_alg_name(crypto_ahash_tfm(tfm));
+}
+
+static inline const char *crypto_ahash_driver_name(struct crypto_ahash *tfm)
+{
+	return crypto_tfm_alg_driver_name(crypto_ahash_tfm(tfm));
+}
+
+static inline unsigned int crypto_ahash_alignmask(
+	struct crypto_ahash *tfm)
+{
+	return crypto_tfm_alg_alignmask(crypto_ahash_tfm(tfm));
+}
+
+/**
+ * crypto_ahash_blocksize() - obtain block size for cipher
+ * @tfm: cipher handle
+ *
+ * The block size for the message digest cipher referenced with the cipher
+ * handle is returned.
+ *
+ * Return: block size of cipher
+ */
+static inline unsigned int crypto_ahash_blocksize(struct crypto_ahash *tfm)
+{
+	return crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
+}
+
+static inline struct hash_alg_common *__crypto_hash_alg_common(
+	struct crypto_alg *alg)
+{
+	return container_of(alg, struct hash_alg_common, base);
+}
+
+static inline struct hash_alg_common *crypto_hash_alg_common(
+	struct crypto_ahash *tfm)
+{
+	return __crypto_hash_alg_common(crypto_ahash_tfm(tfm)->__crt_alg);
+}
+
+/**
+ * crypto_ahash_digestsize() - obtain message digest size
+ * @tfm: cipher handle
+ *
+ * The size for the message digest created by the message digest cipher
+ * referenced with the cipher handle is returned.
+ *
+ *
+ * Return: message digest size of cipher
+ */
+static inline unsigned int crypto_ahash_digestsize(struct crypto_ahash *tfm)
+{
+	return crypto_hash_alg_common(tfm)->digestsize;
+}
+
+/**
+ * crypto_ahash_statesize() - obtain size of the ahash state
+ * @tfm: cipher handle
+ *
+ * Return the size of the ahash state. With the crypto_ahash_export()
+ * function, the caller can export the state into a buffer whose size is
+ * defined with this function.
+ *
+ * Return: size of the ahash state
+ */
+static inline unsigned int crypto_ahash_statesize(struct crypto_ahash *tfm)
+{
+	return crypto_hash_alg_common(tfm)->statesize;
+}
+
+static inline u32 crypto_ahash_get_flags(struct crypto_ahash *tfm)
+{
+	return crypto_tfm_get_flags(crypto_ahash_tfm(tfm));
+}
+
+static inline void crypto_ahash_set_flags(struct crypto_ahash *tfm, u32 flags)
+{
+	crypto_tfm_set_flags(crypto_ahash_tfm(tfm), flags);
+}
+
+static inline void crypto_ahash_clear_flags(struct crypto_ahash *tfm, u32 flags)
+{
+	crypto_tfm_clear_flags(crypto_ahash_tfm(tfm), flags);
+}
+
+/**
+ * crypto_ahash_reqtfm() - obtain cipher handle from request
+ * @req: asynchronous request handle that contains the reference to the ahash
+ *	 cipher handle
+ *
+ * Return the ahash cipher handle that is registered with the asynchronous
+ * request handle ahash_request.
+ *
+ * Return: ahash cipher handle
+ */
+static inline struct crypto_ahash *crypto_ahash_reqtfm(
+	struct ahash_request *req)
+{
+	return __crypto_ahash_cast(req->base.tfm);
+}
+
+/**
+ * crypto_ahash_reqsize() - obtain size of the request data structure
+ * @tfm: cipher handle
+ *
+ * Return: size of the request data
+ */
+static inline unsigned int crypto_ahash_reqsize(struct crypto_ahash *tfm)
+{
+	return tfm->reqsize;
+}
+
+static inline void *ahash_request_ctx(struct ahash_request *req)
+{
+	return req->__ctx;
+}
+
+/**
+ * crypto_ahash_setkey - set key for cipher handle
+ * @tfm: cipher handle
+ * @key: buffer holding the key
+ * @keylen: length of the key in bytes
+ *
+ * The caller provided key is set for the ahash cipher. The cipher
+ * handle must point to a keyed hash in order for this function to succeed.
+ *
+ * Return: 0 if the setting of the key was successful; < 0 if an error occurred
+ */
+int crypto_ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
+			unsigned int keylen);
+
+/**
+ * crypto_ahash_finup() - update and finalize message digest
+ * @req: reference to the ahash_request handle that holds all information
+ *	 needed to perform the cipher operation
+ *
+ * This function is a "short-hand" for the function calls of
+ * crypto_ahash_update and crypto_ahash_final. The parameters have the same
+ * meaning as discussed for those separate functions.
+ *
+ * Return: see crypto_ahash_final()
+ */
+int crypto_ahash_finup(struct ahash_request *req);
+
+/**
+ * crypto_ahash_final() - calculate message digest
+ * @req: reference to the ahash_request handle that holds all information
+ *	 needed to perform the cipher operation
+ *
+ * Finalize the message digest operation and create the message digest
+ * based on all data added to the cipher handle. The message digest is placed
+ * into the output buffer registered with the ahash_request handle.
+ *
+ * Return:
+ * 0		if the message digest was successfully calculated;
+ * -EINPROGRESS	if data is fed into hardware (DMA) or queued for later;
+ * -EBUSY	if queue is full and request should be resubmitted later;
+ * other < 0	if an error occurred
+ */
+int crypto_ahash_final(struct ahash_request *req);
+
+/**
+ * crypto_ahash_digest() - calculate message digest for a buffer
+ * @req: reference to the ahash_request handle that holds all information
+ *	 needed to perform the cipher operation
+ *
+ * This function is a "short-hand" for the function calls of crypto_ahash_init,
+ * crypto_ahash_update and crypto_ahash_final. The parameters have the same
+ * meaning as discussed for those three separate functions.
+ *
+ * Return: see crypto_ahash_final()
+ */
+int crypto_ahash_digest(struct ahash_request *req);
+
+/**
+ * crypto_ahash_export() - extract current message digest state
+ * @req: reference to the ahash_request handle whose state is exported
+ * @out: output buffer of sufficient size that can hold the hash state
+ *
+ * This function exports the hash state of the ahash_request handle into the
+ * caller-allocated output buffer out, which must be at least as large as the
+ * value returned by crypto_ahash_statesize().
+ *
+ * Return: 0 if the export was successful; < 0 if an error occurred
+ */
+static inline int crypto_ahash_export(struct ahash_request *req, void *out)
+{
+	return crypto_ahash_reqtfm(req)->export(req, out);
+}
+
+/**
+ * crypto_ahash_import() - import message digest state
+ * @req: reference to ahash_request handle the state is imported into
+ * @in: buffer holding the state
+ *
+ * This function imports the hash state into the ahash_request handle from the
+ * input buffer. That buffer should have been generated with the
+ * crypto_ahash_export function.
+ *
+ * Return: 0 if the import was successful; < 0 if an error occurred
+ */
+static inline int crypto_ahash_import(struct ahash_request *req, const void *in)
+{
+	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+
+	if (crypto_ahash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
+		return -ENOKEY;
+
+	return tfm->import(req, in);
+}
+
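+/*
+ * Example: suspending and later resuming a partial hash with
+ * export/import. A sketch only: "req" is assumed to have been set up,
+ * initialized with crypto_ahash_init() and fed data with
+ * crypto_ahash_update() (both below); "state" must hold at least
+ * crypto_ahash_statesize() bytes.
+ *
+ *	u8 *state;
+ *	int err;
+ *
+ *	state = kmalloc(crypto_ahash_statesize(tfm), GFP_KERNEL);
+ *	if (!state)
+ *		return -ENOMEM;
+ *
+ *	err = crypto_ahash_export(req, state);
+ *	...
+ *	err = crypto_ahash_import(req, state);
+ */
+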
+/**
+ * crypto_ahash_init() - (re)initialize message digest handle
+ * @req: ahash_request handle that already is initialized with all necessary
+ *	 data using the ahash_request_* API functions
+ *
+ * The call (re-)initializes the message digest referenced by the ahash_request
+ * handle. Any potentially existing state created by previous operations is
+ * discarded.
+ *
+ * Return: see crypto_ahash_final()
+ */
+static inline int crypto_ahash_init(struct ahash_request *req)
+{
+	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+
+	if (crypto_ahash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
+		return -ENOKEY;
+
+	return tfm->init(req);
+}
+
+/**
+ * crypto_ahash_update() - add data to message digest for processing
+ * @req: ahash_request handle that was previously initialized with the
+ *	 crypto_ahash_init call.
+ *
+ * Updates the message digest state of the &ahash_request handle. The input data
+ * is pointed to by the scatter/gather list registered in the &ahash_request
+ * handle.
+ *
+ * Return: see crypto_ahash_final()
+ */
+static inline int crypto_ahash_update(struct ahash_request *req)
+{
+	return crypto_ahash_reqtfm(req)->update(req);
+}
+
+/**
+ * DOC: Asynchronous Hash Request Handle
+ *
+ * The &ahash_request data structure contains all pointers to data
+ * required for the asynchronous cipher operation. This includes the cipher
+ * handle (which can be used by multiple &ahash_request instances), pointer
+ * to plaintext and the message digest output buffer, asynchronous callback
+ * function, etc. It acts as a handle to the ahash_request_* API calls in a
+ * similar way as the ahash handle does to the crypto_ahash_* API calls.
+ */
+
+/**
+ * ahash_request_set_tfm() - update cipher handle reference in request
+ * @req: request handle to be modified
+ * @tfm: cipher handle that shall be added to the request handle
+ *
+ * Allow the caller to replace the existing ahash handle in the request
+ * data structure with a different one.
+ */
+static inline void ahash_request_set_tfm(struct ahash_request *req,
+					 struct crypto_ahash *tfm)
+{
+	req->base.tfm = crypto_ahash_tfm(tfm);
+}
+
+/**
+ * ahash_request_alloc() - allocate request data structure
+ * @tfm: cipher handle to be registered with the request
+ * @gfp: memory allocation flag that is handed to kmalloc by the API call.
+ *
+ * Allocate the request data structure that must be used with the ahash
+ * message digest API calls. During the allocation, the provided ahash handle
+ * is registered in the request data structure.
+ *
+ * Return: allocated request handle in case of success, or NULL if out of memory
+ */
+static inline struct ahash_request *ahash_request_alloc(
+	struct crypto_ahash *tfm, gfp_t gfp)
+{
+	struct ahash_request *req;
+
+	req = kmalloc(sizeof(struct ahash_request) +
+		      crypto_ahash_reqsize(tfm), gfp);
+
+	if (likely(req))
+		ahash_request_set_tfm(req, tfm);
+
+	return req;
+}
+
+/**
+ * ahash_request_free() - zeroize and free the request data structure
+ * @req: request data structure cipher handle to be freed
+ */
+static inline void ahash_request_free(struct ahash_request *req)
+{
+	kzfree(req);
+}
+
+static inline void ahash_request_zero(struct ahash_request *req)
+{
+	memzero_explicit(req, sizeof(*req) +
+			      crypto_ahash_reqsize(crypto_ahash_reqtfm(req)));
+}
+
+static inline struct ahash_request *ahash_request_cast(
+	struct crypto_async_request *req)
+{
+	return container_of(req, struct ahash_request, base);
+}
+
+/**
+ * ahash_request_set_callback() - set asynchronous callback function
+ * @req: request handle
+ * @flags: specify zero or an ORing of the flags
+ *	   CRYPTO_TFM_REQ_MAY_BACKLOG the request queue may back log and
+ *	   increase the wait queue beyond the initial maximum size;
+ *	   CRYPTO_TFM_REQ_MAY_SLEEP the request processing may sleep
+ * @compl: callback function pointer to be registered with the request handle
+ * @data: opaque pointer that the kernel crypto API does not touch but hands
+ *	  to the callback function for its own use. Since the callback is
+ *	  invoked asynchronously to the related functionality, this is the way
+ *	  for the caller to pass the callback a reference to any data
+ *	  structures it needs to operate on. The callback can retrieve the
+ *	  pointer via the "data" field of the &crypto_async_request structure
+ *	  it is given.
+ *
+ * This function allows setting the callback function that is triggered once
+ * the cipher operation completes.
+ *
+ * The callback function is registered with the &ahash_request handle and
+ * must comply with the following template::
+ *
+ *	void callback_function(struct crypto_async_request *req, int error)
+ */
+static inline void ahash_request_set_callback(struct ahash_request *req,
+					      u32 flags,
+					      crypto_completion_t compl,
+					      void *data)
+{
+	req->base.complete = compl;
+	req->base.data = data;
+	req->base.flags = flags;
+}
+
+/**
+ * ahash_request_set_crypt() - set data buffers
+ * @req: ahash_request handle to be updated
+ * @src: source scatter/gather list
+ * @result: buffer that is filled with the message digest -- the caller must
+ *	    ensure that the buffer has sufficient space by, for example, calling
+ *	    crypto_ahash_digestsize()
+ * @nbytes: number of bytes to process from the source scatter/gather list
+ *
+ * By using this call, the caller references the source scatter/gather list.
+ * The source scatter/gather list points to the data the message digest is to
+ * be calculated for.
+ */
+static inline void ahash_request_set_crypt(struct ahash_request *req,
+					   struct scatterlist *src, u8 *result,
+					   unsigned int nbytes)
+{
+	req->src = src;
+	req->nbytes = nbytes;
+	req->result = result;
+}
+
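+/*
+ * Example: one-shot SHA-256 digest of a linear buffer via the ahash API.
+ * An illustrative sketch, not part of this header: it assumes "data" and
+ * "len" describe memory valid for sg_init_one(), and uses the crypto_wait
+ * helpers (crypto_req_done()/crypto_wait_req()) to turn the -EINPROGRESS
+ * and -EBUSY completions into a synchronous wait.
+ *
+ *	struct crypto_ahash *tfm;
+ *	struct ahash_request *req;
+ *	struct scatterlist sg;
+ *	DECLARE_CRYPTO_WAIT(wait);
+ *	u8 digest[SHA256_DIGEST_SIZE];
+ *	int err;
+ *
+ *	tfm = crypto_alloc_ahash("sha256", 0, 0);
+ *	if (IS_ERR(tfm))
+ *		return PTR_ERR(tfm);
+ *
+ *	req = ahash_request_alloc(tfm, GFP_KERNEL);
+ *	if (!req) {
+ *		err = -ENOMEM;
+ *		goto out_free_tfm;
+ *	}
+ *
+ *	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
+ *					CRYPTO_TFM_REQ_MAY_SLEEP,
+ *				   crypto_req_done, &wait);
+ *	sg_init_one(&sg, data, len);
+ *	ahash_request_set_crypt(req, &sg, digest, len);
+ *
+ *	err = crypto_wait_req(crypto_ahash_digest(req), &wait);
+ *
+ *	ahash_request_free(req);
+ * out_free_tfm:
+ *	crypto_free_ahash(tfm);
+ *	return err;
+ */
+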
+/**
+ * DOC: Synchronous Message Digest API
+ *
+ * The synchronous message digest API is used with the ciphers of type
+ * CRYPTO_ALG_TYPE_SHASH (listed as type "shash" in /proc/crypto)
+ *
+ * The message digest API is able to maintain state information for the
+ * caller.
+ *
+ * The synchronous message digest API can store user-related context in its
+ * shash_desc request data structure.
+ */
+
+/**
+ * crypto_alloc_shash() - allocate message digest handle
+ * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
+ *	      message digest cipher
+ * @type: specifies the type of the cipher
+ * @mask: specifies the mask for the cipher
+ *
+ * Allocate a cipher handle for a message digest. The returned &struct
+ * crypto_shash is the cipher handle that is required for any subsequent
+ * API invocation for that message digest.
+ *
+ * Return: allocated cipher handle in case of success; IS_ERR() is true in case
+ *	   of an error, PTR_ERR() returns the error code.
+ */
+struct crypto_shash *crypto_alloc_shash(const char *alg_name, u32 type,
+					u32 mask);
+
+static inline struct crypto_tfm *crypto_shash_tfm(struct crypto_shash *tfm)
+{
+	return &tfm->base;
+}
+
+/**
+ * crypto_free_shash() - zeroize and free the message digest handle
+ * @tfm: cipher handle to be freed
+ */
+static inline void crypto_free_shash(struct crypto_shash *tfm)
+{
+	crypto_destroy_tfm(tfm, crypto_shash_tfm(tfm));
+}
+
+static inline const char *crypto_shash_alg_name(struct crypto_shash *tfm)
+{
+	return crypto_tfm_alg_name(crypto_shash_tfm(tfm));
+}
+
+static inline const char *crypto_shash_driver_name(struct crypto_shash *tfm)
+{
+	return crypto_tfm_alg_driver_name(crypto_shash_tfm(tfm));
+}
+
+static inline unsigned int crypto_shash_alignmask(
+	struct crypto_shash *tfm)
+{
+	return crypto_tfm_alg_alignmask(crypto_shash_tfm(tfm));
+}
+
+/**
+ * crypto_shash_blocksize() - obtain block size for cipher
+ * @tfm: cipher handle
+ *
+ * The block size for the message digest cipher referenced with the cipher
+ * handle is returned.
+ *
+ * Return: block size of cipher
+ */
+static inline unsigned int crypto_shash_blocksize(struct crypto_shash *tfm)
+{
+	return crypto_tfm_alg_blocksize(crypto_shash_tfm(tfm));
+}
+
+static inline struct shash_alg *__crypto_shash_alg(struct crypto_alg *alg)
+{
+	return container_of(alg, struct shash_alg, base);
+}
+
+static inline struct shash_alg *crypto_shash_alg(struct crypto_shash *tfm)
+{
+	return __crypto_shash_alg(crypto_shash_tfm(tfm)->__crt_alg);
+}
+
+/**
+ * crypto_shash_digestsize() - obtain message digest size
+ * @tfm: cipher handle
+ *
+ * The size for the message digest created by the message digest cipher
+ * referenced with the cipher handle is returned.
+ *
+ * Return: digest size of cipher
+ */
+static inline unsigned int crypto_shash_digestsize(struct crypto_shash *tfm)
+{
+	return crypto_shash_alg(tfm)->digestsize;
+}
+
+static inline unsigned int crypto_shash_statesize(struct crypto_shash *tfm)
+{
+	return crypto_shash_alg(tfm)->statesize;
+}
+
+static inline u32 crypto_shash_get_flags(struct crypto_shash *tfm)
+{
+	return crypto_tfm_get_flags(crypto_shash_tfm(tfm));
+}
+
+static inline void crypto_shash_set_flags(struct crypto_shash *tfm, u32 flags)
+{
+	crypto_tfm_set_flags(crypto_shash_tfm(tfm), flags);
+}
+
+static inline void crypto_shash_clear_flags(struct crypto_shash *tfm, u32 flags)
+{
+	crypto_tfm_clear_flags(crypto_shash_tfm(tfm), flags);
+}
+
+/**
+ * crypto_shash_descsize() - obtain the operational state size
+ * @tfm: cipher handle
+ *
+ * The size of the operational state the cipher needs during operation is
+ * returned for the hash referenced with the cipher handle. This size is
+ * required so that the caller can allocate sufficient memory for the
+ * operational state.
+ *
+ * The operational state is defined with struct shash_desc where the size of
+ * that data structure is to be calculated as
+ * sizeof(struct shash_desc) + crypto_shash_descsize(alg)
+ *
+ * Return: size of the operational state
+ */
+static inline unsigned int crypto_shash_descsize(struct crypto_shash *tfm)
+{
+	return tfm->descsize;
+}
+
+static inline void *shash_desc_ctx(struct shash_desc *desc)
+{
+	return desc->__ctx;
+}
+
+/**
+ * crypto_shash_setkey() - set key for message digest
+ * @tfm: cipher handle
+ * @key: buffer holding the key
+ * @keylen: length of the key in bytes
+ *
+ * The caller provided key is set for the keyed message digest cipher. The
+ * cipher handle must point to a keyed message digest cipher in order for this
+ * function to succeed.
+ *
+ * Return: 0 if the setting of the key was successful; < 0 if an error occurred
+ */
+int crypto_shash_setkey(struct crypto_shash *tfm, const u8 *key,
+			unsigned int keylen);
+
+/**
+ * crypto_shash_digest() - calculate message digest for buffer
+ * @desc: see crypto_shash_final()
+ * @data: see crypto_shash_update()
+ * @len: see crypto_shash_update()
+ * @out: see crypto_shash_final()
+ *
+ * This function is a "short-hand" for the function calls of crypto_shash_init,
+ * crypto_shash_update and crypto_shash_final. The parameters have the same
+ * meaning as discussed for those three separate functions.
+ *
+ * Return: 0 if the message digest creation was successful; < 0 if an error
+ *	   occurred
+ */
+int crypto_shash_digest(struct shash_desc *desc, const u8 *data,
+			unsigned int len, u8 *out);
+
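+/*
+ * Example: synchronous one-shot digest. An illustrative sketch, not part
+ * of this header: the descriptor is sized as sizeof(*desc) +
+ * crypto_shash_descsize(tfm), as described above for the operational
+ * state. (SHASH_DESC_ON_STACK(), where available, avoids the heap
+ * allocation for small descriptors.)
+ *
+ *	struct crypto_shash *tfm;
+ *	struct shash_desc *desc;
+ *	u8 digest[SHA256_DIGEST_SIZE];
+ *	int err;
+ *
+ *	tfm = crypto_alloc_shash("sha256", 0, 0);
+ *	if (IS_ERR(tfm))
+ *		return PTR_ERR(tfm);
+ *
+ *	desc = kmalloc(sizeof(*desc) + crypto_shash_descsize(tfm),
+ *		       GFP_KERNEL);
+ *	if (!desc) {
+ *		err = -ENOMEM;
+ *		goto out_free_tfm;
+ *	}
+ *	desc->tfm = tfm;
+ *	desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
+ *
+ *	err = crypto_shash_digest(desc, data, len, digest);
+ *
+ *	kzfree(desc);
+ * out_free_tfm:
+ *	crypto_free_shash(tfm);
+ *	return err;
+ */
+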
+/**
+ * crypto_shash_export() - extract operational state for message digest
+ * @desc: reference to the operational state handle whose state is exported
+ * @out: output buffer of sufficient size that can hold the hash state
+ *
+ * This function exports the hash state of the operational state handle into
+ * the caller-allocated output buffer out, which must be at least as large as
+ * the value returned by crypto_shash_statesize().
+ *
+ * Return: 0 if the export creation was successful; < 0 if an error occurred
+ */
+static inline int crypto_shash_export(struct shash_desc *desc, void *out)
+{
+	return crypto_shash_alg(desc->tfm)->export(desc, out);
+}
+
+/**
+ * crypto_shash_import() - import operational state
+ * @desc: reference to the operational state handle the state is imported into
+ * @in: buffer holding the state
+ *
+ * This function imports the hash state into the operational state handle from
+ * the input buffer. That buffer should have been generated with the
+ * crypto_shash_export function.
+ *
+ * Return: 0 if the import was successful; < 0 if an error occurred
+ */
+static inline int crypto_shash_import(struct shash_desc *desc, const void *in)
+{
+	struct crypto_shash *tfm = desc->tfm;
+
+	if (crypto_shash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
+		return -ENOKEY;
+
+	return crypto_shash_alg(tfm)->import(desc, in);
+}
+
+/**
+ * crypto_shash_init() - (re)initialize message digest
+ * @desc: operational state handle that is already filled
+ *
+ * The call (re-)initializes the message digest referenced by the
+ * operational state handle. Any potentially existing state created by
+ * previous operations is discarded.
+ *
+ * Return: 0 if the message digest initialization was successful; < 0 if an
+ *	   error occurred
+ */
+static inline int crypto_shash_init(struct shash_desc *desc)
+{
+	struct crypto_shash *tfm = desc->tfm;
+
+	if (crypto_shash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
+		return -ENOKEY;
+
+	return crypto_shash_alg(tfm)->init(desc);
+}
+
+/**
+ * crypto_shash_update() - add data to message digest for processing
+ * @desc: operational state handle that is already initialized
+ * @data: input data to be added to the message digest
+ * @len: length of the input data
+ *
+ * Updates the message digest state of the operational state handle.
+ *
+ * Return: 0 if the message digest update was successful; < 0 if an error
+ *	   occurred
+ */
+int crypto_shash_update(struct shash_desc *desc, const u8 *data,
+			unsigned int len);
+
+/**
+ * crypto_shash_final() - calculate message digest
+ * @desc: operational state handle that is already filled with data
+ * @out: output buffer filled with the message digest
+ *
+ * Finalize the message digest operation and create the message digest
+ * based on all data added to the cipher handle. The message digest is placed
+ * into the output buffer. The caller must ensure that the output buffer is
+ * large enough by using crypto_shash_digestsize.
+ *
+ * Return: 0 if the message digest creation was successful; < 0 if an error
+ *	   occurred
+ */
+int crypto_shash_final(struct shash_desc *desc, u8 *out);
+
+/**
+ * crypto_shash_finup() - calculate message digest of buffer
+ * @desc: see crypto_shash_final()
+ * @data: see crypto_shash_update()
+ * @len: see crypto_shash_update()
+ * @out: see crypto_shash_final()
+ *
+ * This function is a "short-hand" for the function calls of
+ * crypto_shash_update and crypto_shash_final. The parameters have the same
+ * meaning as discussed for those separate functions.
+ *
+ * Return: 0 if the message digest creation was successful; < 0 if an error
+ *	   occurred
+ */
+int crypto_shash_finup(struct shash_desc *desc, const u8 *data,
+		       unsigned int len, u8 *out);
+
+static inline void shash_desc_zero(struct shash_desc *desc)
+{
+	memzero_explicit(desc,
+			 sizeof(*desc) + crypto_shash_descsize(desc->tfm));
+}
+
+#endif	/* _CRYPTO_HASH_H */
diff --git a/include/crypto/hash_info.h b/include/crypto/hash_info.h
new file mode 100644
index 0000000..56f217d
--- /dev/null
+++ b/include/crypto/hash_info.h
@@ -0,0 +1,43 @@
+/*
+ * Hash Info: Hash algorithms information
+ *
+ * Copyright (c) 2013 Dmitry Kasatkin <d.kasatkin@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ */
+
+#ifndef _CRYPTO_HASH_INFO_H
+#define _CRYPTO_HASH_INFO_H
+
+#include <crypto/sha.h>
+#include <crypto/md5.h>
+
+#include <uapi/linux/hash_info.h>
+
+/* not defined in include/crypto/ */
+#define RMD128_DIGEST_SIZE      16
+#define RMD160_DIGEST_SIZE	20
+#define RMD256_DIGEST_SIZE      32
+#define RMD320_DIGEST_SIZE      40
+
+/* not defined in include/crypto/ */
+#define WP512_DIGEST_SIZE	64
+#define WP384_DIGEST_SIZE	48
+#define WP256_DIGEST_SIZE	32
+
+/* not defined in include/crypto/ */
+#define TGR128_DIGEST_SIZE 16
+#define TGR160_DIGEST_SIZE 20
+#define TGR192_DIGEST_SIZE 24
+
+/* not defined in include/crypto/ */
+#define SM3256_DIGEST_SIZE 32
+
+extern const char *const hash_algo_name[HASH_ALGO__LAST];
+extern const int hash_digest_size[HASH_ALGO__LAST];
+
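+/*
+ * Example: mapping a HASH_ALGO_* identifier (from uapi/linux/hash_info.h)
+ * to its name and digest size. Illustrative only.
+ *
+ *	const char *name = hash_algo_name[HASH_ALGO_SHA256];
+ *	int dsize = hash_digest_size[HASH_ALGO_SHA256];
+ */
+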
+#endif /* _CRYPTO_HASH_INFO_H */
diff --git a/include/crypto/hmac.h b/include/crypto/hmac.h
new file mode 100644
index 0000000..6677413
--- /dev/null
+++ b/include/crypto/hmac.h
@@ -0,0 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _CRYPTO_HMAC_H
+#define _CRYPTO_HMAC_H
+
+#define HMAC_IPAD_VALUE 0x36
+#define HMAC_OPAD_VALUE 0x5c
+
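+/*
+ * Example: how these constants are used to derive the inner and outer
+ * pads from a key that has already been padded (or hashed) to the block
+ * size bs, as in the generic hmac template. Sketch only.
+ *
+ *	for (i = 0; i < bs; i++) {
+ *		ipad[i] = key[i] ^ HMAC_IPAD_VALUE;
+ *		opad[i] = key[i] ^ HMAC_OPAD_VALUE;
+ *	}
+ */
+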
+#endif /* _CRYPTO_HMAC_H */
diff --git a/include/crypto/if_alg.h b/include/crypto/if_alg.h
new file mode 100644
index 0000000..482461d
--- /dev/null
+++ b/include/crypto/if_alg.h
@@ -0,0 +1,256 @@
+/*
+ * if_alg: User-space algorithm interface
+ *
+ * Copyright (c) 2010 Herbert Xu <herbert@gondor.apana.org.au>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ */
+
+#ifndef _CRYPTO_IF_ALG_H
+#define _CRYPTO_IF_ALG_H
+
+#include <linux/compiler.h>
+#include <linux/completion.h>
+#include <linux/if_alg.h>
+#include <linux/scatterlist.h>
+#include <linux/types.h>
+#include <linux/atomic.h>
+#include <net/sock.h>
+
+#include <crypto/aead.h>
+#include <crypto/skcipher.h>
+
+#define ALG_MAX_PAGES			16
+
+struct crypto_async_request;
+
+struct alg_sock {
+	/* struct sock must be the first member of struct alg_sock */
+	struct sock sk;
+
+	struct sock *parent;
+
+	unsigned int refcnt;
+	unsigned int nokey_refcnt;
+
+	const struct af_alg_type *type;
+	void *private;
+};
+
+struct af_alg_control {
+	struct af_alg_iv *iv;
+	int op;
+	unsigned int aead_assoclen;
+};
+
+struct af_alg_type {
+	void *(*bind)(const char *name, u32 type, u32 mask);
+	void (*release)(void *private);
+	int (*setkey)(void *private, const u8 *key, unsigned int keylen);
+	int (*accept)(void *private, struct sock *sk);
+	int (*accept_nokey)(void *private, struct sock *sk);
+	int (*setauthsize)(void *private, unsigned int authsize);
+
+	struct proto_ops *ops;
+	struct proto_ops *ops_nokey;
+	struct module *owner;
+	char name[14];
+};
+
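+/*
+ * Example: how an algorithm type plugs into AF_ALG, modelled on the
+ * existing algif_* modules. A hedged sketch; the foo_* callbacks and
+ * algif_foo_ops are placeholders, not a complete implementation.
+ *
+ *	static const struct af_alg_type algif_type_foo = {
+ *		.bind		=	foo_bind,
+ *		.release	=	foo_release,
+ *		.setkey		=	foo_setkey,
+ *		.accept		=	foo_accept,
+ *		.ops		=	&algif_foo_ops,
+ *		.name		=	"foo",
+ *		.owner		=	THIS_MODULE
+ *	};
+ *
+ *	static int __init algif_foo_init(void)
+ *	{
+ *		return af_alg_register_type(&algif_type_foo);
+ *	}
+ */
+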
+struct af_alg_sgl {
+	struct scatterlist sg[ALG_MAX_PAGES + 1];
+	struct page *pages[ALG_MAX_PAGES];
+	unsigned int npages;
+};
+
+/* TX SGL entry */
+struct af_alg_tsgl {
+	struct list_head list;
+	unsigned int cur;		/* Last processed SG entry */
+	struct scatterlist sg[0];	/* Array of SGs forming the SGL */
+};
+
+#define MAX_SGL_ENTS ((4096 - sizeof(struct af_alg_tsgl)) / \
+		      sizeof(struct scatterlist) - 1)
+
+/* RX SGL entry */
+struct af_alg_rsgl {
+	struct af_alg_sgl sgl;
+	struct list_head list;
+	size_t sg_num_bytes;		/* Bytes of data in that SGL */
+};
+
+/**
+ * struct af_alg_async_req - definition of crypto request
+ * @iocb:		IOCB for AIO operations
+ * @sk:			Socket the request is associated with
+ * @first_rsgl:		First RX SG
+ * @last_rsgl:		Pointer to last RX SG
+ * @rsgl_list:		Track RX SGs
+ * @tsgl:		Private, per request TX SGL of buffers to process
+ * @tsgl_entries:	Number of entries in priv. TX SGL
+ * @outlen:		Number of output bytes generated by crypto op
+ * @areqlen:		Length of this data structure
+ * @cra_u:		Cipher request
+ */
+struct af_alg_async_req {
+	struct kiocb *iocb;
+	struct sock *sk;
+
+	struct af_alg_rsgl first_rsgl;
+	struct af_alg_rsgl *last_rsgl;
+	struct list_head rsgl_list;
+
+	struct scatterlist *tsgl;
+	unsigned int tsgl_entries;
+
+	unsigned int outlen;
+	unsigned int areqlen;
+
+	union {
+		struct aead_request aead_req;
+		struct skcipher_request skcipher_req;
+	} cra_u;
+
+	/* req ctx trails this struct */
+};
+
+/**
+ * struct af_alg_ctx - definition of the crypto context
+ *
+ * The crypto context tracks the input data during the lifetime of an AF_ALG
+ * socket.
+ *
+ * @tsgl_list:		Link to TX SGL
+ * @iv:			IV for cipher operation
+ * @aead_assoclen:	Length of AAD for AEAD cipher operations
+ * @wait:		Used to wait for completion of synchronous operations
+ * @used:		TX bytes sent to kernel. This variable is used to
+ *			ensure that user space cannot cause the kernel
+ *			to allocate too much memory in sendmsg operation.
+ * @rcvused:		Total RX bytes to be filled by kernel. This variable
+ *			is used to ensure user space cannot cause the kernel
+ *			to allocate too much memory in a recvmsg operation.
+ * @more:		More data to be expected from user space?
+ * @merge:		Shall new data from user space be merged into existing
+ *			SG?
+ * @enc:		Cryptographic operation to be performed when
+ *			recvmsg is invoked.
+ * @len:		Length of memory allocated for this data structure.
+ */
+struct af_alg_ctx {
+	struct list_head tsgl_list;
+
+	void *iv;
+	size_t aead_assoclen;
+
+	struct crypto_wait wait;
+
+	size_t used;
+	atomic_t rcvused;
+
+	bool more;
+	bool merge;
+	bool enc;
+
+	unsigned int len;
+};
+
+int af_alg_register_type(const struct af_alg_type *type);
+int af_alg_unregister_type(const struct af_alg_type *type);
+
+int af_alg_release(struct socket *sock);
+void af_alg_release_parent(struct sock *sk);
+int af_alg_accept(struct sock *sk, struct socket *newsock, bool kern);
+
+int af_alg_make_sg(struct af_alg_sgl *sgl, struct iov_iter *iter, int len);
+void af_alg_free_sg(struct af_alg_sgl *sgl);
+void af_alg_link_sg(struct af_alg_sgl *sgl_prev, struct af_alg_sgl *sgl_new);
+
+int af_alg_cmsg_send(struct msghdr *msg, struct af_alg_control *con);
+
+static inline struct alg_sock *alg_sk(struct sock *sk)
+{
+	return (struct alg_sock *)sk;
+}
+
+/**
+ * Size of available buffer for sending data from user space to kernel.
+ *
+ * @sk socket of connection to user space
+ * @return number of bytes still available
+ */
+static inline int af_alg_sndbuf(struct sock *sk)
+{
+	struct alg_sock *ask = alg_sk(sk);
+	struct af_alg_ctx *ctx = ask->private;
+
+	return max_t(int, max_t(int, sk->sk_sndbuf & PAGE_MASK, PAGE_SIZE) -
+			  ctx->used, 0);
+}
+
+/**
+ * Can the send buffer still be written to?
+ *
+ * @sk socket of connection to user space
+ * @return true => writable, false => not writable
+ */
+static inline bool af_alg_writable(struct sock *sk)
+{
+	return PAGE_SIZE <= af_alg_sndbuf(sk);
+}
+
+/**
+ * Size of available buffer used by kernel for the RX user space operation.
+ *
+ * @sk socket of connection to user space
+ * @return number of bytes still available
+ */
+static inline int af_alg_rcvbuf(struct sock *sk)
+{
+	struct alg_sock *ask = alg_sk(sk);
+	struct af_alg_ctx *ctx = ask->private;
+
+	return max_t(int, max_t(int, sk->sk_rcvbuf & PAGE_MASK, PAGE_SIZE) -
+		     atomic_read(&ctx->rcvused), 0);
+}
+
+/**
+ * Can the RX buffer still be written to?
+ *
+ * @sk socket of connection to user space
+ * @return true => writable, false => not writable
+ */
+static inline bool af_alg_readable(struct sock *sk)
+{
+	return PAGE_SIZE <= af_alg_rcvbuf(sk);
+}
+
+int af_alg_alloc_tsgl(struct sock *sk);
+unsigned int af_alg_count_tsgl(struct sock *sk, size_t bytes, size_t offset);
+void af_alg_pull_tsgl(struct sock *sk, size_t used, struct scatterlist *dst,
+		      size_t dst_offset);
+void af_alg_free_areq_sgls(struct af_alg_async_req *areq);
+int af_alg_wait_for_wmem(struct sock *sk, unsigned int flags);
+void af_alg_wmem_wakeup(struct sock *sk);
+int af_alg_wait_for_data(struct sock *sk, unsigned flags);
+void af_alg_data_wakeup(struct sock *sk);
+int af_alg_sendmsg(struct socket *sock, struct msghdr *msg, size_t size,
+		   unsigned int ivsize);
+ssize_t af_alg_sendpage(struct socket *sock, struct page *page,
+			int offset, size_t size, int flags);
+void af_alg_free_resources(struct af_alg_async_req *areq);
+void af_alg_async_cb(struct crypto_async_request *_req, int err);
+__poll_t af_alg_poll(struct file *file, struct socket *sock,
+			 poll_table *wait);
+struct af_alg_async_req *af_alg_alloc_areq(struct sock *sk,
+					   unsigned int areqlen);
+int af_alg_get_rsgl(struct sock *sk, struct msghdr *msg, int flags,
+		    struct af_alg_async_req *areq, size_t maxsize,
+		    size_t *outlen);
+
+#endif	/* _CRYPTO_IF_ALG_H */
diff --git a/include/crypto/internal/acompress.h b/include/crypto/internal/acompress.h
new file mode 100644
index 0000000..51052f6
--- /dev/null
+++ b/include/crypto/internal/acompress.h
@@ -0,0 +1,84 @@
+/*
+ * Asynchronous Compression operations
+ *
+ * Copyright (c) 2016, Intel Corporation
+ * Authors: Weigang Li <weigang.li@intel.com>
+ *          Giovanni Cabiddu <giovanni.cabiddu@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ */
+#ifndef _CRYPTO_ACOMP_INT_H
+#define _CRYPTO_ACOMP_INT_H
+#include <crypto/acompress.h>
+
+/*
+ * Transform internal helpers.
+ */
+static inline void *acomp_request_ctx(struct acomp_req *req)
+{
+	return req->__ctx;
+}
+
+static inline void *acomp_tfm_ctx(struct crypto_acomp *tfm)
+{
+	return tfm->base.__crt_ctx;
+}
+
+static inline void acomp_request_complete(struct acomp_req *req,
+					  int err)
+{
+	req->base.complete(&req->base, err);
+}
+
+static inline const char *acomp_alg_name(struct crypto_acomp *tfm)
+{
+	return crypto_acomp_tfm(tfm)->__crt_alg->cra_name;
+}
+
+static inline struct acomp_req *__acomp_request_alloc(struct crypto_acomp *tfm)
+{
+	struct acomp_req *req;
+
+	req = kzalloc(sizeof(*req) + crypto_acomp_reqsize(tfm), GFP_KERNEL);
+	if (likely(req))
+		acomp_request_set_tfm(req, tfm);
+	return req;
+}
+
+static inline void __acomp_request_free(struct acomp_req *req)
+{
+	kzfree(req);
+}
+
+/**
+ * crypto_register_acomp() -- Register asynchronous compression algorithm
+ *
+ * Function registers an implementation of an asynchronous
+ * compression algorithm
+ *
+ * @alg:	algorithm definition
+ *
+ * Return:	zero on success; error code in case of error
+ */
+int crypto_register_acomp(struct acomp_alg *alg);
+
+/**
+ * crypto_unregister_acomp() -- Unregister asynchronous compression algorithm
+ *
+ * Function unregisters an implementation of an asynchronous
+ * compression algorithm
+ *
+ * @alg:	algorithm definition
+ *
+ * Return:	zero on success; error code in case of error
+ */
+int crypto_unregister_acomp(struct acomp_alg *alg);
+
+int crypto_register_acomps(struct acomp_alg *algs, int count);
+void crypto_unregister_acomps(struct acomp_alg *algs, int count);
+
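+/*
+ * Example: skeleton registration of an acomp implementation. A hedged
+ * sketch; foo_compress()/foo_decompress() are placeholder driver entry
+ * points that operate on the request's src/dst scatterlists.
+ *
+ *	static struct acomp_alg foo_acomp = {
+ *		.compress	= foo_compress,
+ *		.decompress	= foo_decompress,
+ *		.base		= {
+ *			.cra_name	 = "deflate",
+ *			.cra_driver_name = "deflate-foo",
+ *			.cra_priority	 = 300,
+ *			.cra_module	 = THIS_MODULE,
+ *		},
+ *	};
+ *
+ *	err = crypto_register_acomp(&foo_acomp);
+ */
+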
+#endif
diff --git a/include/crypto/internal/aead.h b/include/crypto/internal/aead.h
new file mode 100644
index 0000000..6ad8e31
--- /dev/null
+++ b/include/crypto/internal/aead.h
@@ -0,0 +1,191 @@
+/*
+ * AEAD: Authenticated Encryption with Associated Data
+ * 
+ * Copyright (c) 2007-2015 Herbert Xu <herbert@gondor.apana.org.au>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option) 
+ * any later version.
+ *
+ */
+
+#ifndef _CRYPTO_INTERNAL_AEAD_H
+#define _CRYPTO_INTERNAL_AEAD_H
+
+#include <crypto/aead.h>
+#include <crypto/algapi.h>
+#include <linux/stddef.h>
+#include <linux/types.h>
+
+struct rtattr;
+
+struct aead_instance {
+	void (*free)(struct aead_instance *inst);
+	union {
+		struct {
+			char head[offsetof(struct aead_alg, base)];
+			struct crypto_instance base;
+		} s;
+		struct aead_alg alg;
+	};
+};
+
+struct crypto_aead_spawn {
+	struct crypto_spawn base;
+};
+
+struct aead_queue {
+	struct crypto_queue base;
+};
+
+static inline void *crypto_aead_ctx(struct crypto_aead *tfm)
+{
+	return crypto_tfm_ctx(&tfm->base);
+}
+
+static inline struct crypto_instance *aead_crypto_instance(
+	struct aead_instance *inst)
+{
+	return container_of(&inst->alg.base, struct crypto_instance, alg);
+}
+
+static inline struct aead_instance *aead_instance(struct crypto_instance *inst)
+{
+	return container_of(&inst->alg, struct aead_instance, alg.base);
+}
+
+static inline struct aead_instance *aead_alg_instance(struct crypto_aead *aead)
+{
+	return aead_instance(crypto_tfm_alg_instance(&aead->base));
+}
+
+static inline void *aead_instance_ctx(struct aead_instance *inst)
+{
+	return crypto_instance_ctx(aead_crypto_instance(inst));
+}
+
+static inline void *aead_request_ctx(struct aead_request *req)
+{
+	return req->__ctx;
+}
+
+static inline void aead_request_complete(struct aead_request *req, int err)
+{
+	req->base.complete(&req->base, err);
+}
+
+static inline u32 aead_request_flags(struct aead_request *req)
+{
+	return req->base.flags;
+}
+
+static inline struct aead_request *aead_request_cast(
+	struct crypto_async_request *req)
+{
+	return container_of(req, struct aead_request, base);
+}
+
+static inline void crypto_set_aead_spawn(
+	struct crypto_aead_spawn *spawn, struct crypto_instance *inst)
+{
+	crypto_set_spawn(&spawn->base, inst);
+}
+
+int crypto_grab_aead(struct crypto_aead_spawn *spawn, const char *name,
+		     u32 type, u32 mask);
+
+static inline void crypto_drop_aead(struct crypto_aead_spawn *spawn)
+{
+	crypto_drop_spawn(&spawn->base);
+}
+
+static inline struct aead_alg *crypto_spawn_aead_alg(
+	struct crypto_aead_spawn *spawn)
+{
+	return container_of(spawn->base.alg, struct aead_alg, base);
+}
+
+static inline struct crypto_aead *crypto_spawn_aead(
+	struct crypto_aead_spawn *spawn)
+{
+	return crypto_spawn_tfm2(&spawn->base);
+}
+
+static inline void crypto_aead_set_reqsize(struct crypto_aead *aead,
+					   unsigned int reqsize)
+{
+	aead->reqsize = reqsize;
+}
+
+static inline unsigned int crypto_aead_alg_maxauthsize(struct aead_alg *alg)
+{
+	return alg->maxauthsize;
+}
+
+static inline unsigned int crypto_aead_maxauthsize(struct crypto_aead *aead)
+{
+	return crypto_aead_alg_maxauthsize(crypto_aead_alg(aead));
+}
+
+static inline void aead_init_queue(struct aead_queue *queue,
+				   unsigned int max_qlen)
+{
+	crypto_init_queue(&queue->base, max_qlen);
+}
+
+static inline int aead_enqueue_request(struct aead_queue *queue,
+				       struct aead_request *request)
+{
+	return crypto_enqueue_request(&queue->base, &request->base);
+}
+
+static inline struct aead_request *aead_dequeue_request(
+	struct aead_queue *queue)
+{
+	struct crypto_async_request *req;
+
+	req = crypto_dequeue_request(&queue->base);
+
+	return req ? container_of(req, struct aead_request, base) : NULL;
+}
+
+static inline struct aead_request *aead_get_backlog(struct aead_queue *queue)
+{
+	struct crypto_async_request *req;
+
+	req = crypto_get_backlog(&queue->base);
+
+	return req ? container_of(req, struct aead_request, base) : NULL;
+}
+
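+/*
+ * Example: typical consumption of an aead_queue by a driver. A sketch
+ * only, given a driver-owned "struct aead_queue *queue"; locking around
+ * the queue and hw_process(), the actual hardware submission, are
+ * driver specific and omitted.
+ *
+ *	struct aead_request *backlog = aead_get_backlog(queue);
+ *	struct aead_request *req = aead_dequeue_request(queue);
+ *
+ *	if (backlog)
+ *		aead_request_complete(backlog, -EINPROGRESS);
+ *	if (req)
+ *		aead_request_complete(req, hw_process(req));
+ */
+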
+static inline unsigned int crypto_aead_alg_chunksize(struct aead_alg *alg)
+{
+	return alg->chunksize;
+}
+
+/**
+ * crypto_aead_chunksize() - obtain chunk size
+ * @tfm: cipher handle
+ *
+ * The block size is set to one for ciphers such as CCM.  However,
+ * you still need to provide incremental updates in multiples of
+ * the underlying block size as the IV does not have sub-block
+ * granularity.  This is known in this API as the chunk size.
+ *
+ * Return: chunk size in bytes
+ */
+static inline unsigned int crypto_aead_chunksize(struct crypto_aead *tfm)
+{
+	return crypto_aead_alg_chunksize(crypto_aead_alg(tfm));
+}
+
+int crypto_register_aead(struct aead_alg *alg);
+void crypto_unregister_aead(struct aead_alg *alg);
+int crypto_register_aeads(struct aead_alg *algs, int count);
+void crypto_unregister_aeads(struct aead_alg *algs, int count);
+int aead_register_instance(struct crypto_template *tmpl,
+			   struct aead_instance *inst);
+
+#endif	/* _CRYPTO_INTERNAL_AEAD_H */
+
diff --git a/include/crypto/internal/akcipher.h b/include/crypto/internal/akcipher.h
new file mode 100644
index 0000000..805686b
--- /dev/null
+++ b/include/crypto/internal/akcipher.h
@@ -0,0 +1,144 @@
+/*
+ * Public Key Encryption
+ *
+ * Copyright (c) 2015, Intel Corporation
+ * Authors: Tadeusz Struk <tadeusz.struk@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ */
+#ifndef _CRYPTO_AKCIPHER_INT_H
+#define _CRYPTO_AKCIPHER_INT_H
+#include <crypto/akcipher.h>
+#include <crypto/algapi.h>
+
+struct akcipher_instance {
+	void (*free)(struct akcipher_instance *inst);
+	union {
+		struct {
+			char head[offsetof(struct akcipher_alg, base)];
+			struct crypto_instance base;
+		} s;
+		struct akcipher_alg alg;
+	};
+};
+
+struct crypto_akcipher_spawn {
+	struct crypto_spawn base;
+};
+
+/*
+ * Transform internal helpers.
+ */
+static inline void *akcipher_request_ctx(struct akcipher_request *req)
+{
+	return req->__ctx;
+}
+
+static inline void akcipher_set_reqsize(struct crypto_akcipher *akcipher,
+					unsigned int reqsize)
+{
+	crypto_akcipher_alg(akcipher)->reqsize = reqsize;
+}
+
+static inline void *akcipher_tfm_ctx(struct crypto_akcipher *tfm)
+{
+	return tfm->base.__crt_ctx;
+}
+
+static inline void akcipher_request_complete(struct akcipher_request *req,
+					     int err)
+{
+	req->base.complete(&req->base, err);
+}
+
+static inline const char *akcipher_alg_name(struct crypto_akcipher *tfm)
+{
+	return crypto_akcipher_tfm(tfm)->__crt_alg->cra_name;
+}
+
+static inline struct crypto_instance *akcipher_crypto_instance(
+		struct akcipher_instance *inst)
+{
+	return container_of(&inst->alg.base, struct crypto_instance, alg);
+}
+
+static inline struct akcipher_instance *akcipher_instance(
+		struct crypto_instance *inst)
+{
+	return container_of(&inst->alg, struct akcipher_instance, alg.base);
+}
+
+static inline struct akcipher_instance *akcipher_alg_instance(
+		struct crypto_akcipher *akcipher)
+{
+	return akcipher_instance(crypto_tfm_alg_instance(&akcipher->base));
+}
+
+static inline void *akcipher_instance_ctx(struct akcipher_instance *inst)
+{
+	return crypto_instance_ctx(akcipher_crypto_instance(inst));
+}
+
+static inline void crypto_set_akcipher_spawn(
+		struct crypto_akcipher_spawn *spawn,
+		struct crypto_instance *inst)
+{
+	crypto_set_spawn(&spawn->base, inst);
+}
+
+int crypto_grab_akcipher(struct crypto_akcipher_spawn *spawn, const char *name,
+		u32 type, u32 mask);
+
+static inline struct crypto_akcipher *crypto_spawn_akcipher(
+		struct crypto_akcipher_spawn *spawn)
+{
+	return crypto_spawn_tfm2(&spawn->base);
+}
+
+static inline void crypto_drop_akcipher(struct crypto_akcipher_spawn *spawn)
+{
+	crypto_drop_spawn(&spawn->base);
+}
+
+static inline struct akcipher_alg *crypto_spawn_akcipher_alg(
+		struct crypto_akcipher_spawn *spawn)
+{
+	return container_of(spawn->base.alg, struct akcipher_alg, base);
+}
+
+/**
+ * crypto_register_akcipher() -- Register public key algorithm
+ *
+ * Function registers an implementation of a public key algorithm
+ *
+ * @alg:	algorithm definition
+ *
+ * Return: zero on success; error code in case of error
+ */
+int crypto_register_akcipher(struct akcipher_alg *alg);
+
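+/*
+ * Example: skeleton akcipher_alg for an RSA implementation. A hedged
+ * sketch; the foo_rsa_* callbacks are placeholders.
+ *
+ *	static struct akcipher_alg foo_rsa = {
+ *		.encrypt	= foo_rsa_enc,
+ *		.decrypt	= foo_rsa_dec,
+ *		.set_pub_key	= foo_rsa_set_pub_key,
+ *		.set_priv_key	= foo_rsa_set_priv_key,
+ *		.max_size	= foo_rsa_max_size,
+ *		.base		= {
+ *			.cra_name	 = "rsa",
+ *			.cra_driver_name = "rsa-foo",
+ *			.cra_module	 = THIS_MODULE,
+ *		},
+ *	};
+ *
+ *	err = crypto_register_akcipher(&foo_rsa);
+ */
+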
+/**
+ * crypto_unregister_akcipher() -- Unregister public key algorithm
+ *
+ * Function unregisters an implementation of a public key algorithm
+ *
+ * @alg:	algorithm definition
+ */
+void crypto_unregister_akcipher(struct akcipher_alg *alg);
+
+/**
+ * akcipher_register_instance() -- Register public key template instance
+ *
+ * Function registers an implementation of an asymmetric key algorithm
+ * created from a template
+ *
+ * @tmpl:	the template from which the algorithm was created
+ * @inst:	the template instance
+ */
+int akcipher_register_instance(struct crypto_template *tmpl,
+		struct akcipher_instance *inst);
+#endif
diff --git a/include/crypto/internal/geniv.h b/include/crypto/internal/geniv.h
new file mode 100644
index 0000000..2bcfb93
--- /dev/null
+++ b/include/crypto/internal/geniv.h
@@ -0,0 +1,33 @@
+/*
+ * geniv: IV generation
+ *
+ * Copyright (c) 2015 Herbert Xu <herbert@gondor.apana.org.au>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ */
+
+#ifndef _CRYPTO_INTERNAL_GENIV_H
+#define _CRYPTO_INTERNAL_GENIV_H
+
+#include <crypto/internal/aead.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+
+struct aead_geniv_ctx {
+	spinlock_t lock;
+	struct crypto_aead *child;
+	struct crypto_skcipher *sknull;
+	u8 salt[] __attribute__ ((aligned(__alignof__(u32))));
+};
+
+struct aead_instance *aead_geniv_alloc(struct crypto_template *tmpl,
+				       struct rtattr **tb, u32 type, u32 mask);
+void aead_geniv_free(struct aead_instance *inst);
+int aead_init_geniv(struct crypto_aead *tfm);
+void aead_exit_geniv(struct crypto_aead *tfm);
+
+#endif	/* _CRYPTO_INTERNAL_GENIV_H */
diff --git a/include/crypto/internal/hash.h b/include/crypto/internal/hash.h
new file mode 100644
index 0000000..a0b0ad9
--- /dev/null
+++ b/include/crypto/internal/hash.h
@@ -0,0 +1,257 @@
+/*
+ * Hash algorithms.
+ * 
+ * Copyright (c) 2008 Herbert Xu <herbert@gondor.apana.org.au>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option) 
+ * any later version.
+ *
+ */
+
+#ifndef _CRYPTO_INTERNAL_HASH_H
+#define _CRYPTO_INTERNAL_HASH_H
+
+#include <crypto/algapi.h>
+#include <crypto/hash.h>
+
+struct ahash_request;
+struct scatterlist;
+
+struct crypto_hash_walk {
+	char *data;
+
+	unsigned int offset;
+	unsigned int alignmask;
+
+	struct page *pg;
+	unsigned int entrylen;
+
+	unsigned int total;
+	struct scatterlist *sg;
+
+	unsigned int flags;
+};
+
+struct ahash_instance {
+	struct ahash_alg alg;
+};
+
+struct shash_instance {
+	struct shash_alg alg;
+};
+
+struct crypto_ahash_spawn {
+	struct crypto_spawn base;
+};
+
+struct crypto_shash_spawn {
+	struct crypto_spawn base;
+};
+
+extern const struct crypto_type crypto_ahash_type;
+
+int crypto_hash_walk_done(struct crypto_hash_walk *walk, int err);
+int crypto_hash_walk_first(struct ahash_request *req,
+			   struct crypto_hash_walk *walk);
+int crypto_ahash_walk_first(struct ahash_request *req,
+			   struct crypto_hash_walk *walk);
+
+static inline int crypto_ahash_walk_done(struct crypto_hash_walk *walk,
+					 int err)
+{
+	return crypto_hash_walk_done(walk, err);
+}
+
+static inline int crypto_hash_walk_last(struct crypto_hash_walk *walk)
+{
+	return !(walk->entrylen | walk->total);
+}
+
+static inline int crypto_ahash_walk_last(struct crypto_hash_walk *walk)
+{
+	return crypto_hash_walk_last(walk);
+}
+
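+/*
+ * Example: canonical use of the hash walk in an ahash ->update()
+ * implementation. Sketch only; foo_process() stands in for the driver's
+ * actual block processing.
+ *
+ *	static int foo_update(struct ahash_request *req)
+ *	{
+ *		struct crypto_hash_walk walk;
+ *		int nbytes;
+ *
+ *		for (nbytes = crypto_hash_walk_first(req, &walk);
+ *		     nbytes > 0;
+ *		     nbytes = crypto_hash_walk_done(&walk, 0))
+ *			foo_process(req, walk.data, nbytes);
+ *
+ *		return nbytes;
+ *	}
+ */
+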
+int crypto_register_ahash(struct ahash_alg *alg);
+int crypto_unregister_ahash(struct ahash_alg *alg);
+int crypto_register_ahashes(struct ahash_alg *algs, int count);
+void crypto_unregister_ahashes(struct ahash_alg *algs, int count);
+int ahash_register_instance(struct crypto_template *tmpl,
+			    struct ahash_instance *inst);
+void ahash_free_instance(struct crypto_instance *inst);
+
+int shash_no_setkey(struct crypto_shash *tfm, const u8 *key,
+		    unsigned int keylen);
+
+static inline bool crypto_shash_alg_has_setkey(struct shash_alg *alg)
+{
+	return alg->setkey != shash_no_setkey;
+}
+
+bool crypto_hash_alg_has_setkey(struct hash_alg_common *halg);
+
+int crypto_init_ahash_spawn(struct crypto_ahash_spawn *spawn,
+			    struct hash_alg_common *alg,
+			    struct crypto_instance *inst);
+
+static inline void crypto_drop_ahash(struct crypto_ahash_spawn *spawn)
+{
+	crypto_drop_spawn(&spawn->base);
+}
+
+struct hash_alg_common *ahash_attr_alg(struct rtattr *rta, u32 type, u32 mask);
+
+int crypto_register_shash(struct shash_alg *alg);
+int crypto_unregister_shash(struct shash_alg *alg);
+int crypto_register_shashes(struct shash_alg *algs, int count);
+int crypto_unregister_shashes(struct shash_alg *algs, int count);
+int shash_register_instance(struct crypto_template *tmpl,
+			    struct shash_instance *inst);
+void shash_free_instance(struct crypto_instance *inst);
+
+int crypto_init_shash_spawn(struct crypto_shash_spawn *spawn,
+			    struct shash_alg *alg,
+			    struct crypto_instance *inst);
+
+static inline void crypto_drop_shash(struct crypto_shash_spawn *spawn)
+{
+	crypto_drop_spawn(&spawn->base);
+}
+
+struct shash_alg *shash_attr_alg(struct rtattr *rta, u32 type, u32 mask);
+
+int shash_ahash_update(struct ahash_request *req, struct shash_desc *desc);
+int shash_ahash_finup(struct ahash_request *req, struct shash_desc *desc);
+int shash_ahash_digest(struct ahash_request *req, struct shash_desc *desc);
+
+int crypto_init_shash_ops_async(struct crypto_tfm *tfm);
+
+static inline void *crypto_ahash_ctx(struct crypto_ahash *tfm)
+{
+	return crypto_tfm_ctx(crypto_ahash_tfm(tfm));
+}
+
+static inline struct ahash_alg *__crypto_ahash_alg(struct crypto_alg *alg)
+{
+	return container_of(__crypto_hash_alg_common(alg), struct ahash_alg,
+			    halg);
+}
+
+static inline void crypto_ahash_set_reqsize(struct crypto_ahash *tfm,
+					    unsigned int reqsize)
+{
+	tfm->reqsize = reqsize;
+}
+
+static inline struct crypto_instance *ahash_crypto_instance(
+	struct ahash_instance *inst)
+{
+	return container_of(&inst->alg.halg.base, struct crypto_instance, alg);
+}
+
+static inline struct ahash_instance *ahash_instance(
+	struct crypto_instance *inst)
+{
+	return container_of(&inst->alg, struct ahash_instance, alg.halg.base);
+}
+
+static inline void *ahash_instance_ctx(struct ahash_instance *inst)
+{
+	return crypto_instance_ctx(ahash_crypto_instance(inst));
+}
+
+static inline unsigned int ahash_instance_headroom(void)
+{
+	return sizeof(struct ahash_alg) - sizeof(struct crypto_alg);
+}
+
+static inline struct ahash_instance *ahash_alloc_instance(
+	const char *name, struct crypto_alg *alg)
+{
+	return crypto_alloc_instance2(name, alg, ahash_instance_headroom());
+}
+
+static inline void ahash_request_complete(struct ahash_request *req, int err)
+{
+	req->base.complete(&req->base, err);
+}
+
+static inline u32 ahash_request_flags(struct ahash_request *req)
+{
+	return req->base.flags;
+}
+
+static inline struct crypto_ahash *crypto_spawn_ahash(
+	struct crypto_ahash_spawn *spawn)
+{
+	return crypto_spawn_tfm2(&spawn->base);
+}
+
+static inline int ahash_enqueue_request(struct crypto_queue *queue,
+					     struct ahash_request *request)
+{
+	return crypto_enqueue_request(queue, &request->base);
+}
+
+static inline struct ahash_request *ahash_dequeue_request(
+	struct crypto_queue *queue)
+{
+	return ahash_request_cast(crypto_dequeue_request(queue));
+}
+
+static inline int ahash_tfm_in_queue(struct crypto_queue *queue,
+					  struct crypto_ahash *tfm)
+{
+	return crypto_tfm_in_queue(queue, crypto_ahash_tfm(tfm));
+}
+
+static inline void *crypto_shash_ctx(struct crypto_shash *tfm)
+{
+	return crypto_tfm_ctx(&tfm->base);
+}
+
+static inline struct crypto_instance *shash_crypto_instance(
+	struct shash_instance *inst)
+{
+	return container_of(&inst->alg.base, struct crypto_instance, alg);
+}
+
+static inline struct shash_instance *shash_instance(
+	struct crypto_instance *inst)
+{
+	return container_of(__crypto_shash_alg(&inst->alg),
+			    struct shash_instance, alg);
+}
+
+static inline void *shash_instance_ctx(struct shash_instance *inst)
+{
+	return crypto_instance_ctx(shash_crypto_instance(inst));
+}
+
+static inline struct shash_instance *shash_alloc_instance(
+	const char *name, struct crypto_alg *alg)
+{
+	return crypto_alloc_instance2(name, alg,
+				      sizeof(struct shash_alg) - sizeof(*alg));
+}
+
+static inline struct crypto_shash *crypto_spawn_shash(
+	struct crypto_shash_spawn *spawn)
+{
+	return crypto_spawn_tfm2(&spawn->base);
+}
+
+static inline void *crypto_shash_ctx_aligned(struct crypto_shash *tfm)
+{
+	return crypto_tfm_ctx_aligned(&tfm->base);
+}
+
+static inline struct crypto_shash *__crypto_shash_cast(struct crypto_tfm *tfm)
+{
+	return container_of(tfm, struct crypto_shash, base);
+}
+
+#endif	/* _CRYPTO_INTERNAL_HASH_H */
+
diff --git a/include/crypto/internal/kpp.h b/include/crypto/internal/kpp.h
new file mode 100644
index 0000000..ad3acf3
--- /dev/null
+++ b/include/crypto/internal/kpp.h
@@ -0,0 +1,64 @@
+/*
+ * Key-agreement Protocol Primitives (KPP)
+ *
+ * Copyright (c) 2016, Intel Corporation
+ * Authors: Salvatore Benedetto <salvatore.benedetto@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ */
+#ifndef _CRYPTO_KPP_INT_H
+#define _CRYPTO_KPP_INT_H
+#include <crypto/kpp.h>
+#include <crypto/algapi.h>
+
+/*
+ * Transform internal helpers.
+ */
+static inline void *kpp_request_ctx(struct kpp_request *req)
+{
+	return req->__ctx;
+}
+
+static inline void *kpp_tfm_ctx(struct crypto_kpp *tfm)
+{
+	return tfm->base.__crt_ctx;
+}
+
+static inline void kpp_request_complete(struct kpp_request *req, int err)
+{
+	req->base.complete(&req->base, err);
+}
+
+static inline const char *kpp_alg_name(struct crypto_kpp *tfm)
+{
+	return crypto_kpp_tfm(tfm)->__crt_alg->cra_name;
+}
+
+/**
+ * crypto_register_kpp() -- Register key-agreement protocol primitive algorithm
+ *
+ * Function registers an implementation of a key-agreement protocol primitive
+ * algorithm
+ *
+ * @alg:	algorithm definition
+ *
+ * Return: zero on success; error code in case of error
+ */
+int crypto_register_kpp(struct kpp_alg *alg);
+
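+/*
+ * Example: skeleton kpp_alg for a Diffie-Hellman implementation. A
+ * hedged sketch; the foo_* callbacks are placeholders.
+ *
+ *	static struct kpp_alg foo_dh = {
+ *		.set_secret		= foo_set_secret,
+ *		.generate_public_key	= foo_generate_public_key,
+ *		.compute_shared_secret	= foo_compute_shared_secret,
+ *		.max_size		= foo_max_size,
+ *		.base			= {
+ *			.cra_name	 = "dh",
+ *			.cra_driver_name = "dh-foo",
+ *			.cra_module	 = THIS_MODULE,
+ *		},
+ *	};
+ *
+ *	err = crypto_register_kpp(&foo_dh);
+ */
+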
+/**
+ * crypto_unregister_kpp() -- Unregister key-agreement protocol primitive
+ * algorithm
+ *
+ * Function unregisters an implementation of a key-agreement protocol primitive
+ * algorithm
+ *
+ * @alg:	algorithm definition
+ */
+void crypto_unregister_kpp(struct kpp_alg *alg);
+
+#endif
diff --git a/include/crypto/internal/rng.h b/include/crypto/internal/rng.h
new file mode 100644
index 0000000..a52ef34
--- /dev/null
+++ b/include/crypto/internal/rng.h
@@ -0,0 +1,45 @@
+/*
+ * RNG: Random Number Generator algorithms under the crypto API
+ *
+ * Copyright (c) 2008 Neil Horman <nhorman@tuxdriver.com>
+ * Copyright (c) 2015 Herbert Xu <herbert@gondor.apana.org.au>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ */
+
+#ifndef _CRYPTO_INTERNAL_RNG_H
+#define _CRYPTO_INTERNAL_RNG_H
+
+#include <crypto/algapi.h>
+#include <crypto/rng.h>
+
+int crypto_register_rng(struct rng_alg *alg);
+void crypto_unregister_rng(struct rng_alg *alg);
+int crypto_register_rngs(struct rng_alg *algs, int count);
+void crypto_unregister_rngs(struct rng_alg *algs, int count);
+
+#if defined(CONFIG_CRYPTO_RNG) || defined(CONFIG_CRYPTO_RNG_MODULE)
+int crypto_del_default_rng(void);
+#else
+static inline int crypto_del_default_rng(void)
+{
+	return 0;
+}
+#endif
+
+static inline void *crypto_rng_ctx(struct crypto_rng *tfm)
+{
+	return crypto_tfm_ctx(&tfm->base);
+}
+
+static inline void crypto_rng_set_entropy(struct crypto_rng *tfm,
+					  const u8 *data, unsigned int len)
+{
+	crypto_rng_alg(tfm)->set_ent(tfm, data, len);
+}
+
+#endif
diff --git a/include/crypto/internal/rsa.h b/include/crypto/internal/rsa.h
new file mode 100644
index 0000000..9e8f159
--- /dev/null
+++ b/include/crypto/internal/rsa.h
@@ -0,0 +1,62 @@
+/*
+ * RSA internal helpers
+ *
+ * Copyright (c) 2015, Intel Corporation
+ * Authors: Tadeusz Struk <tadeusz.struk@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ */
+#ifndef _RSA_HELPER_
+#define _RSA_HELPER_
+#include <linux/types.h>
+
+/**
+ * rsa_key - RSA key structure
+ * @n           : RSA modulus raw byte stream
+ * @e           : RSA public exponent raw byte stream
+ * @d           : RSA private exponent raw byte stream
+ * @p           : RSA prime factor p of n raw byte stream
+ * @q           : RSA prime factor q of n raw byte stream
+ * @dp          : RSA exponent d mod (p - 1) raw byte stream
+ * @dq          : RSA exponent d mod (q - 1) raw byte stream
+ * @qinv        : RSA CRT coefficient q^(-1) mod p raw byte stream
+ * @n_sz        : length in bytes of RSA modulus n
+ * @e_sz        : length in bytes of RSA public exponent
+ * @d_sz        : length in bytes of RSA private exponent
+ * @p_sz        : length in bytes of p field
+ * @q_sz        : length in bytes of q field
+ * @dp_sz       : length in bytes of dp field
+ * @dq_sz       : length in bytes of dq field
+ * @qinv_sz     : length in bytes of qinv field
+ */
+struct rsa_key {
+	const u8 *n;
+	const u8 *e;
+	const u8 *d;
+	const u8 *p;
+	const u8 *q;
+	const u8 *dp;
+	const u8 *dq;
+	const u8 *qinv;
+	size_t n_sz;
+	size_t e_sz;
+	size_t d_sz;
+	size_t p_sz;
+	size_t q_sz;
+	size_t dp_sz;
+	size_t dq_sz;
+	size_t qinv_sz;
+};
+
+int rsa_parse_pub_key(struct rsa_key *rsa_key, const void *key,
+		      unsigned int key_len);
+
+int rsa_parse_priv_key(struct rsa_key *rsa_key, const void *key,
+		       unsigned int key_len);
+
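+/*
+ * Example: typical use of the parser in an RSA driver's ->set_pub_key()
+ * callback. Sketch only; foo_load_n()/foo_load_e() are placeholders for
+ * loading the raw big-endian values into the device or an MPI form.
+ *
+ *	static int foo_set_pub_key(struct crypto_akcipher *tfm,
+ *				   const void *key, unsigned int keylen)
+ *	{
+ *		struct rsa_key raw_key = {};
+ *		int ret;
+ *
+ *		ret = rsa_parse_pub_key(&raw_key, key, keylen);
+ *		if (ret)
+ *			return ret;
+ *
+ *		foo_load_n(tfm, raw_key.n, raw_key.n_sz);
+ *		foo_load_e(tfm, raw_key.e, raw_key.e_sz);
+ *		return 0;
+ *	}
+ */
+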
+extern struct crypto_template rsa_pkcs1pad_tmpl;
+#endif
diff --git a/include/crypto/internal/scompress.h b/include/crypto/internal/scompress.h
new file mode 100644
index 0000000..0f6ddac
--- /dev/null
+++ b/include/crypto/internal/scompress.h
@@ -0,0 +1,128 @@
+/*
+ * Synchronous Compression operations
+ *
+ * Copyright 2015 LG Electronics Inc.
+ * Copyright (c) 2016, Intel Corporation
+ * Author: Giovanni Cabiddu <giovanni.cabiddu@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ */
+#ifndef _CRYPTO_SCOMP_INT_H
+#define _CRYPTO_SCOMP_INT_H
+#include <linux/crypto.h>
+
+#define SCOMP_SCRATCH_SIZE	131072
+
+struct crypto_scomp {
+	struct crypto_tfm base;
+};
+
+/**
+ * struct scomp_alg - synchronous compression algorithm
+ *
+ * @alloc_ctx:	Function allocates algorithm specific context
+ * @free_ctx:	Function frees context allocated with alloc_ctx
+ * @compress:	Function performs a compress operation
+ * @decompress:	Function performs a de-compress operation
+ * @base:	Common crypto API algorithm data structure
+ */
+struct scomp_alg {
+	void *(*alloc_ctx)(struct crypto_scomp *tfm);
+	void (*free_ctx)(struct crypto_scomp *tfm, void *ctx);
+	int (*compress)(struct crypto_scomp *tfm, const u8 *src,
+			unsigned int slen, u8 *dst, unsigned int *dlen,
+			void *ctx);
+	int (*decompress)(struct crypto_scomp *tfm, const u8 *src,
+			  unsigned int slen, u8 *dst, unsigned int *dlen,
+			  void *ctx);
+	struct crypto_alg base;
+};
+
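+/*
+ * Example: skeleton scomp_alg for a synchronous compressor. A hedged
+ * sketch; the foo_* callbacks are placeholders that work on the linear
+ * src/dst buffers, with any per-request workspace carried in ctx.
+ *
+ *	static struct scomp_alg foo_scomp = {
+ *		.alloc_ctx	= foo_alloc_ctx,
+ *		.free_ctx	= foo_free_ctx,
+ *		.compress	= foo_compress,
+ *		.decompress	= foo_decompress,
+ *		.base		= {
+ *			.cra_name	 = "lzo",
+ *			.cra_driver_name = "lzo-foo",
+ *			.cra_module	 = THIS_MODULE,
+ *		},
+ *	};
+ */
+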
+static inline struct scomp_alg *__crypto_scomp_alg(struct crypto_alg *alg)
+{
+	return container_of(alg, struct scomp_alg, base);
+}
+
+static inline struct crypto_scomp *__crypto_scomp_tfm(struct crypto_tfm *tfm)
+{
+	return container_of(tfm, struct crypto_scomp, base);
+}
+
+static inline struct crypto_tfm *crypto_scomp_tfm(struct crypto_scomp *tfm)
+{
+	return &tfm->base;
+}
+
+static inline void crypto_free_scomp(struct crypto_scomp *tfm)
+{
+	crypto_destroy_tfm(tfm, crypto_scomp_tfm(tfm));
+}
+
+static inline struct scomp_alg *crypto_scomp_alg(struct crypto_scomp *tfm)
+{
+	return __crypto_scomp_alg(crypto_scomp_tfm(tfm)->__crt_alg);
+}
+
+static inline void *crypto_scomp_alloc_ctx(struct crypto_scomp *tfm)
+{
+	return crypto_scomp_alg(tfm)->alloc_ctx(tfm);
+}
+
+static inline void crypto_scomp_free_ctx(struct crypto_scomp *tfm,
+					 void *ctx)
+{
+	return crypto_scomp_alg(tfm)->free_ctx(tfm, ctx);
+}
+
+static inline int crypto_scomp_compress(struct crypto_scomp *tfm,
+					const u8 *src, unsigned int slen,
+					u8 *dst, unsigned int *dlen, void *ctx)
+{
+	return crypto_scomp_alg(tfm)->compress(tfm, src, slen, dst, dlen, ctx);
+}
+
+static inline int crypto_scomp_decompress(struct crypto_scomp *tfm,
+					  const u8 *src, unsigned int slen,
+					  u8 *dst, unsigned int *dlen,
+					  void *ctx)
+{
+	return crypto_scomp_alg(tfm)->decompress(tfm, src, slen, dst, dlen,
+						 ctx);
+}
+
+int crypto_init_scomp_ops_async(struct crypto_tfm *tfm);
+struct acomp_req *crypto_acomp_scomp_alloc_ctx(struct acomp_req *req);
+void crypto_acomp_scomp_free_ctx(struct acomp_req *req);
+
+/**
+ * crypto_register_scomp() -- Register synchronous compression algorithm
+ *
+ * Function registers an implementation of a synchronous
+ * compression algorithm
+ *
+ * @alg:	algorithm definition
+ *
+ * Return: zero on success; error code in case of error
+ */
+int crypto_register_scomp(struct scomp_alg *alg);
+
+/**
+ * crypto_unregister_scomp() -- Unregister synchronous compression algorithm
+ *
+ * Function unregisters an implementation of a synchronous
+ * compression algorithm
+ *
+ * @alg:	algorithm definition
+ *
+ * Return: zero on success; error code in case of error
+ */
+int crypto_unregister_scomp(struct scomp_alg *alg);
+
+int crypto_register_scomps(struct scomp_alg *algs, int count);
+void crypto_unregister_scomps(struct scomp_alg *algs, int count);
+
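+/*
+ * A registration sketch for an scomp backend; all my_* identifiers are
+ * placeholders for a real driver's callbacks and names:
+ *
+ *	static struct scomp_alg my_scomp = {
+ *		.alloc_ctx	= my_alloc_ctx,
+ *		.free_ctx	= my_free_ctx,
+ *		.compress	= my_compress,
+ *		.decompress	= my_decompress,
+ *		.base		= {
+ *			.cra_name	 = "my_algo",
+ *			.cra_driver_name = "my_algo-scomp",
+ *			.cra_module	 = THIS_MODULE,
+ *		},
+ *	};
+ *
+ *	err = crypto_register_scomp(&my_scomp);
+ */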
+#endif
diff --git a/include/crypto/internal/simd.h b/include/crypto/internal/simd.h
new file mode 100644
index 0000000..f183445
--- /dev/null
+++ b/include/crypto/internal/simd.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Shared crypto simd helpers
+ */
+
+#ifndef _CRYPTO_INTERNAL_SIMD_H
+#define _CRYPTO_INTERNAL_SIMD_H
+
+struct simd_skcipher_alg;
+struct skcipher_alg;
+
+struct simd_skcipher_alg *simd_skcipher_create_compat(const char *algname,
+						      const char *drvname,
+						      const char *basename);
+struct simd_skcipher_alg *simd_skcipher_create(const char *algname,
+					       const char *basename);
+void simd_skcipher_free(struct simd_skcipher_alg *alg);
+
+int simd_register_skciphers_compat(struct skcipher_alg *algs, int count,
+				   struct simd_skcipher_alg **simd_algs);
+
+void simd_unregister_skciphers(struct skcipher_alg *algs, int count,
+			       struct simd_skcipher_alg **simd_algs);
+
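+/*
+ * A usage sketch: a module that owns an array of internal skcipher
+ * algorithms (my_algs, a placeholder) can expose async SIMD wrappers for
+ * all of them in one call and tear them down symmetrically:
+ *
+ *	static struct simd_skcipher_alg *my_simd_algs[ARRAY_SIZE(my_algs)];
+ *
+ *	err = simd_register_skciphers_compat(my_algs, ARRAY_SIZE(my_algs),
+ *					     my_simd_algs);
+ *	...
+ *	simd_unregister_skciphers(my_algs, ARRAY_SIZE(my_algs), my_simd_algs);
+ */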
+#endif /* _CRYPTO_INTERNAL_SIMD_H */
diff --git a/include/crypto/internal/skcipher.h b/include/crypto/internal/skcipher.h
new file mode 100644
index 0000000..e42f706
--- /dev/null
+++ b/include/crypto/internal/skcipher.h
@@ -0,0 +1,211 @@
+/*
+ * Symmetric key ciphers.
+ * 
+ * Copyright (c) 2007 Herbert Xu <herbert@gondor.apana.org.au>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option) 
+ * any later version.
+ *
+ */
+
+#ifndef _CRYPTO_INTERNAL_SKCIPHER_H
+#define _CRYPTO_INTERNAL_SKCIPHER_H
+
+#include <crypto/algapi.h>
+#include <crypto/skcipher.h>
+#include <linux/list.h>
+#include <linux/types.h>
+
+struct aead_request;
+struct rtattr;
+
+struct skcipher_instance {
+	void (*free)(struct skcipher_instance *inst);
+	union {
+		struct {
+			char head[offsetof(struct skcipher_alg, base)];
+			struct crypto_instance base;
+		} s;
+		struct skcipher_alg alg;
+	};
+};
+
+struct crypto_skcipher_spawn {
+	struct crypto_spawn base;
+};
+
+struct skcipher_walk {
+	union {
+		struct {
+			struct page *page;
+			unsigned long offset;
+		} phys;
+
+		struct {
+			u8 *page;
+			void *addr;
+		} virt;
+	} src, dst;
+
+	struct scatter_walk in;
+	unsigned int nbytes;
+
+	struct scatter_walk out;
+	unsigned int total;
+
+	struct list_head buffers;
+
+	u8 *page;
+	u8 *buffer;
+	u8 *oiv;
+	void *iv;
+
+	unsigned int ivsize;
+
+	int flags;
+	unsigned int blocksize;
+	unsigned int stride;
+	unsigned int alignmask;
+};
+
+extern const struct crypto_type crypto_givcipher_type;
+
+static inline struct crypto_instance *skcipher_crypto_instance(
+	struct skcipher_instance *inst)
+{
+	return &inst->s.base;
+}
+
+static inline struct skcipher_instance *skcipher_alg_instance(
+	struct crypto_skcipher *skcipher)
+{
+	return container_of(crypto_skcipher_alg(skcipher),
+			    struct skcipher_instance, alg);
+}
+
+static inline void *skcipher_instance_ctx(struct skcipher_instance *inst)
+{
+	return crypto_instance_ctx(skcipher_crypto_instance(inst));
+}
+
+static inline void skcipher_request_complete(struct skcipher_request *req, int err)
+{
+	req->base.complete(&req->base, err);
+}
+
+static inline void crypto_set_skcipher_spawn(
+	struct crypto_skcipher_spawn *spawn, struct crypto_instance *inst)
+{
+	crypto_set_spawn(&spawn->base, inst);
+}
+
+int crypto_grab_skcipher(struct crypto_skcipher_spawn *spawn, const char *name,
+			 u32 type, u32 mask);
+
+static inline void crypto_drop_skcipher(struct crypto_skcipher_spawn *spawn)
+{
+	crypto_drop_spawn(&spawn->base);
+}
+
+static inline struct skcipher_alg *crypto_skcipher_spawn_alg(
+	struct crypto_skcipher_spawn *spawn)
+{
+	return container_of(spawn->base.alg, struct skcipher_alg, base);
+}
+
+static inline struct skcipher_alg *crypto_spawn_skcipher_alg(
+	struct crypto_skcipher_spawn *spawn)
+{
+	return crypto_skcipher_spawn_alg(spawn);
+}
+
+static inline struct crypto_skcipher *crypto_spawn_skcipher(
+	struct crypto_skcipher_spawn *spawn)
+{
+	return crypto_spawn_tfm2(&spawn->base);
+}
+
+static inline void crypto_skcipher_set_reqsize(
+	struct crypto_skcipher *skcipher, unsigned int reqsize)
+{
+	skcipher->reqsize = reqsize;
+}
+
+int crypto_register_skcipher(struct skcipher_alg *alg);
+void crypto_unregister_skcipher(struct skcipher_alg *alg);
+int crypto_register_skciphers(struct skcipher_alg *algs, int count);
+void crypto_unregister_skciphers(struct skcipher_alg *algs, int count);
+int skcipher_register_instance(struct crypto_template *tmpl,
+			       struct skcipher_instance *inst);
+
+int skcipher_walk_done(struct skcipher_walk *walk, int err);
+int skcipher_walk_virt(struct skcipher_walk *walk,
+		       struct skcipher_request *req,
+		       bool atomic);
+void skcipher_walk_atomise(struct skcipher_walk *walk);
+int skcipher_walk_async(struct skcipher_walk *walk,
+			struct skcipher_request *req);
+int skcipher_walk_aead(struct skcipher_walk *walk, struct aead_request *req,
+		       bool atomic);
+int skcipher_walk_aead_encrypt(struct skcipher_walk *walk,
+			       struct aead_request *req, bool atomic);
+int skcipher_walk_aead_decrypt(struct skcipher_walk *walk,
+			       struct aead_request *req, bool atomic);
+void skcipher_walk_complete(struct skcipher_walk *walk, int err);
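+
+/*
+ * A sketch of the common walk loop inside an skcipher encrypt/decrypt
+ * handler: map the request, process the whole blocks of each mapped
+ * chunk, and hand the unprocessed remainder back to skcipher_walk_done().
+ * Here ctx, MY_BLOCK_SIZE and my_crypt_blocks() stand in for the cipher's
+ * own context, block size and block routine:
+ *
+ *	struct skcipher_walk walk;
+ *	int err;
+ *
+ *	err = skcipher_walk_virt(&walk, req, false);
+ *	while (walk.nbytes) {
+ *		unsigned int n = walk.nbytes - (walk.nbytes % MY_BLOCK_SIZE);
+ *
+ *		my_crypt_blocks(ctx, walk.dst.virt.addr, walk.src.virt.addr,
+ *				n, walk.iv);
+ *		err = skcipher_walk_done(&walk, walk.nbytes - n);
+ *	}
+ *	return err;
+ */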
+
+static inline void ablkcipher_request_complete(struct ablkcipher_request *req,
+					       int err)
+{
+	req->base.complete(&req->base, err);
+}
+
+static inline u32 ablkcipher_request_flags(struct ablkcipher_request *req)
+{
+	return req->base.flags;
+}
+
+static inline void *crypto_skcipher_ctx(struct crypto_skcipher *tfm)
+{
+	return crypto_tfm_ctx(&tfm->base);
+}
+
+static inline void *skcipher_request_ctx(struct skcipher_request *req)
+{
+	return req->__ctx;
+}
+
+static inline u32 skcipher_request_flags(struct skcipher_request *req)
+{
+	return req->base.flags;
+}
+
+static inline unsigned int crypto_skcipher_alg_min_keysize(
+	struct skcipher_alg *alg)
+{
+	if ((alg->base.cra_flags & CRYPTO_ALG_TYPE_MASK) ==
+	    CRYPTO_ALG_TYPE_BLKCIPHER)
+		return alg->base.cra_blkcipher.min_keysize;
+
+	if (alg->base.cra_ablkcipher.encrypt)
+		return alg->base.cra_ablkcipher.min_keysize;
+
+	return alg->min_keysize;
+}
+
+static inline unsigned int crypto_skcipher_alg_max_keysize(
+	struct skcipher_alg *alg)
+{
+	if ((alg->base.cra_flags & CRYPTO_ALG_TYPE_MASK) ==
+	    CRYPTO_ALG_TYPE_BLKCIPHER)
+		return alg->base.cra_blkcipher.max_keysize;
+
+	if (alg->base.cra_ablkcipher.encrypt)
+		return alg->base.cra_ablkcipher.max_keysize;
+
+	return alg->max_keysize;
+}
+
+#endif	/* _CRYPTO_INTERNAL_SKCIPHER_H */
+
diff --git a/include/crypto/kpp.h b/include/crypto/kpp.h
new file mode 100644
index 0000000..1bde0a6
--- /dev/null
+++ b/include/crypto/kpp.h
@@ -0,0 +1,350 @@
+/*
+ * Key-agreement Protocol Primitives (KPP)
+ *
+ * Copyright (c) 2016, Intel Corporation
+ * Authors: Salvatore Benedetto <salvatore.benedetto@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ */
+
+#ifndef _CRYPTO_KPP_
+#define _CRYPTO_KPP_
+#include <linux/crypto.h>
+
+/**
+ * struct kpp_request
+ *
+ * @base:	Common attributes for async crypto requests
+ * @src:	Source data
+ * @dst:	Destination data
+ * @src_len:	Size of the input buffer
+ * @dst_len:	Size of the output buffer. It needs to be at least
+ *		as big as the expected result, depending on the operation.
+ *		After the operation it will be updated with the actual size
+ *		of the result. In case of error, where the dst sgl size was
+ *		insufficient, it will be updated to the size required for
+ *		the operation.
+ * @__ctx:	Start of private context data
+ */
+struct kpp_request {
+	struct crypto_async_request base;
+	struct scatterlist *src;
+	struct scatterlist *dst;
+	unsigned int src_len;
+	unsigned int dst_len;
+	void *__ctx[] CRYPTO_MINALIGN_ATTR;
+};
+
+/**
+ * struct crypto_kpp - user-instantiated object which encapsulates
+ * algorithms and core processing logic
+ *
+ * @base:	Common crypto API algorithm data structure
+ */
+struct crypto_kpp {
+	struct crypto_tfm base;
+};
+
+/**
+ * struct kpp_alg - generic key-agreement protocol primitives
+ *
+ * @set_secret:		Function invokes the protocol specific function to
+ *			store the secret private key along with parameters.
+ *			The implementation knows how to decode the buffer.
+ * @generate_public_key: Function generates the public key to be sent to the
+ *			counterpart. In case of error, where the output is not
+ *			big enough, req->dst_len will be updated to the size
+ *			required.
+ * @compute_shared_secret: Function computes the shared secret as defined by
+ *			the algorithm. The result is given back to the user.
+ *			In case of error, where the output is not big enough,
+ *			req->dst_len will be updated to the size required.
+ * @max_size:		Function returns the size of the output buffer.
+ * @init:		Initialize the object. This is called only once at
+ *			instantiation time. If the cryptographic hardware has
+ *			special requirements, a software fallback should be
+ *			put in place here.
+ * @exit:		Undo everything @init did.
+ *
+ * @reqsize:		Request context size required by algorithm
+ *			implementation
+ * @base:		Common crypto API algorithm data structure
+ */
+struct kpp_alg {
+	int (*set_secret)(struct crypto_kpp *tfm, const void *buffer,
+			  unsigned int len);
+	int (*generate_public_key)(struct kpp_request *req);
+	int (*compute_shared_secret)(struct kpp_request *req);
+
+	unsigned int (*max_size)(struct crypto_kpp *tfm);
+
+	int (*init)(struct crypto_kpp *tfm);
+	void (*exit)(struct crypto_kpp *tfm);
+
+	unsigned int reqsize;
+	struct crypto_alg base;
+};
+
+/**
+ * DOC: Generic Key-agreement Protocol Primitives API
+ *
+ * The KPP API is used with the algorithm type
+ * CRYPTO_ALG_TYPE_KPP (listed as type "kpp" in /proc/crypto)
+ */
+
+/**
+ * crypto_alloc_kpp() - allocate KPP tfm handle
+ * @alg_name: is the name of the kpp algorithm (e.g. "dh", "ecdh")
+ * @type: specifies the type of the algorithm
+ * @mask: specifies the mask for the algorithm
+ *
+ * Allocate a handle for a kpp algorithm. The returned struct crypto_kpp
+ * is required for any subsequent API invocation.
+ *
+ * Return: allocated handle in case of success; IS_ERR() is true in case of
+ *	   an error, PTR_ERR() returns the error code.
+ */
+struct crypto_kpp *crypto_alloc_kpp(const char *alg_name, u32 type, u32 mask);
+
+static inline struct crypto_tfm *crypto_kpp_tfm(struct crypto_kpp *tfm)
+{
+	return &tfm->base;
+}
+
+static inline struct kpp_alg *__crypto_kpp_alg(struct crypto_alg *alg)
+{
+	return container_of(alg, struct kpp_alg, base);
+}
+
+static inline struct crypto_kpp *__crypto_kpp_tfm(struct crypto_tfm *tfm)
+{
+	return container_of(tfm, struct crypto_kpp, base);
+}
+
+static inline struct kpp_alg *crypto_kpp_alg(struct crypto_kpp *tfm)
+{
+	return __crypto_kpp_alg(crypto_kpp_tfm(tfm)->__crt_alg);
+}
+
+static inline unsigned int crypto_kpp_reqsize(struct crypto_kpp *tfm)
+{
+	return crypto_kpp_alg(tfm)->reqsize;
+}
+
+static inline void kpp_request_set_tfm(struct kpp_request *req,
+				       struct crypto_kpp *tfm)
+{
+	req->base.tfm = crypto_kpp_tfm(tfm);
+}
+
+static inline struct crypto_kpp *crypto_kpp_reqtfm(struct kpp_request *req)
+{
+	return __crypto_kpp_tfm(req->base.tfm);
+}
+
+static inline u32 crypto_kpp_get_flags(struct crypto_kpp *tfm)
+{
+	return crypto_tfm_get_flags(crypto_kpp_tfm(tfm));
+}
+
+static inline void crypto_kpp_set_flags(struct crypto_kpp *tfm, u32 flags)
+{
+	crypto_tfm_set_flags(crypto_kpp_tfm(tfm), flags);
+}
+
+/**
+ * crypto_free_kpp() - free KPP tfm handle
+ *
+ * @tfm: KPP tfm handle allocated with crypto_alloc_kpp()
+ */
+static inline void crypto_free_kpp(struct crypto_kpp *tfm)
+{
+	crypto_destroy_tfm(tfm, crypto_kpp_tfm(tfm));
+}
+
+/**
+ * kpp_request_alloc() - allocates kpp request
+ *
+ * @tfm:	KPP tfm handle allocated with crypto_alloc_kpp()
+ * @gfp:	allocation flags
+ *
+ * Return: allocated handle in case of success or NULL in case of an error.
+ */
+static inline struct kpp_request *kpp_request_alloc(struct crypto_kpp *tfm,
+						    gfp_t gfp)
+{
+	struct kpp_request *req;
+
+	req = kmalloc(sizeof(*req) + crypto_kpp_reqsize(tfm), gfp);
+	if (likely(req))
+		kpp_request_set_tfm(req, tfm);
+
+	return req;
+}
+
+/**
+ * kpp_request_free() - zeroize and free kpp request
+ *
+ * @req:	request to free
+ */
+static inline void kpp_request_free(struct kpp_request *req)
+{
+	kzfree(req);
+}
+
+/**
+ * kpp_request_set_callback() - Sets an asynchronous callback.
+ *
+ * Callback will be called when an asynchronous operation on a given
+ * request is finished.
+ *
+ * @req:	request that the callback will be set for
+ * @flgs:	flags specifying, for instance, whether the operation may backlog
+ * @cmpl:	callback which will be called
+ * @data:	private data used by the caller
+ */
+static inline void kpp_request_set_callback(struct kpp_request *req,
+					    u32 flgs,
+					    crypto_completion_t cmpl,
+					    void *data)
+{
+	req->base.complete = cmpl;
+	req->base.data = data;
+	req->base.flags = flgs;
+}
+
+/**
+ * kpp_request_set_input() - Sets input buffer
+ *
+ * Sets parameters required by generate_public_key
+ *
+ * @req:	kpp request
+ * @input:	ptr to input scatter list
+ * @input_len:	size of the input scatter list
+ */
+static inline void kpp_request_set_input(struct kpp_request *req,
+					 struct scatterlist *input,
+					 unsigned int input_len)
+{
+	req->src = input;
+	req->src_len = input_len;
+}
+
+/**
+ * kpp_request_set_output() - Sets output buffer
+ *
+ * Sets parameters required by the kpp operation
+ *
+ * @req:	kpp request
+ * @output:	ptr to output scatter list
+ * @output_len:	size of the output scatter list
+ */
+static inline void kpp_request_set_output(struct kpp_request *req,
+					  struct scatterlist *output,
+					  unsigned int output_len)
+{
+	req->dst = output;
+	req->dst_len = output_len;
+}
+
+enum {
+	CRYPTO_KPP_SECRET_TYPE_UNKNOWN,
+	CRYPTO_KPP_SECRET_TYPE_DH,
+	CRYPTO_KPP_SECRET_TYPE_ECDH,
+};
+
+/**
+ * struct kpp_secret - small header for packing secret buffer
+ *
+ * @type:	type of the secret. Each kpp type defines its own
+ * @len:	length of the secret, including this header, which
+ *		follows the struct
+ */
+struct kpp_secret {
+	unsigned short type;
+	unsigned short len;
+};
+
+/**
+ * crypto_kpp_set_secret() - Invoke kpp operation
+ *
+ * Function invokes the specific kpp operation for a given alg.
+ *
+ * @tfm:	tfm handle
+ * @buffer:	Buffer holding the packet representation of the private
+ *		key. The structure of the packet key depends on the particular
+ *		KPP implementation. Packing and unpacking helpers are provided
+ *		for ECDH and DH (see the respective header files for those
+ *		implementations).
+ * @len:	Length of the packet private key buffer.
+ *
+ * Return: zero on success; error code in case of error
+ */
+static inline int crypto_kpp_set_secret(struct crypto_kpp *tfm,
+					const void *buffer, unsigned int len)
+{
+	struct kpp_alg *alg = crypto_kpp_alg(tfm);
+
+	return alg->set_secret(tfm, buffer, len);
+}
+
+/**
+ * crypto_kpp_generate_public_key() - Invoke kpp operation
+ *
+ * Function invokes the specific kpp operation for generating the public part
+ * for a given kpp algorithm.
+ *
+ * To generate a private key, the caller should use a random number generator.
+ * The output of the requested length serves as the private key.
+ *
+ * @req:	kpp key request
+ *
+ * Return: zero on success; error code in case of error
+ */
+static inline int crypto_kpp_generate_public_key(struct kpp_request *req)
+{
+	struct crypto_kpp *tfm = crypto_kpp_reqtfm(req);
+	struct kpp_alg *alg = crypto_kpp_alg(tfm);
+
+	return alg->generate_public_key(req);
+}
+
+/**
+ * crypto_kpp_compute_shared_secret() - Invoke kpp operation
+ *
+ * Function invokes the specific kpp operation for computing the shared secret
+ * for a given kpp algorithm.
+ *
+ * @req:	kpp key request
+ *
+ * Return: zero on success; error code in case of error
+ */
+static inline int crypto_kpp_compute_shared_secret(struct kpp_request *req)
+{
+	struct crypto_kpp *tfm = crypto_kpp_reqtfm(req);
+	struct kpp_alg *alg = crypto_kpp_alg(tfm);
+
+	return alg->compute_shared_secret(req);
+}
+
+/**
+ * crypto_kpp_maxsize() - Get len for output buffer
+ *
+ * Function returns the output buffer size required for a given key.
+ * Function assumes that the key is already set in the transformation. If this
+ * function is called without a prior setkey, or after a failed setkey, it
+ * will result in a NULL pointer dereference.
+ *
+ * @tfm:	KPP tfm handle allocated with crypto_alloc_kpp()
+ */
+static inline unsigned int crypto_kpp_maxsize(struct crypto_kpp *tfm)
+{
+	struct kpp_alg *alg = crypto_kpp_alg(tfm);
+
+	return alg->max_size(tfm);
+}
+
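+/*
+ * An end-to-end sketch for one side of a key agreement, assuming a "dh"
+ * implementation is available and packed_key was prepared with the DH
+ * packing helpers; scatterlist setup and error handling are abbreviated:
+ *
+ *	struct crypto_kpp *tfm = crypto_alloc_kpp("dh", 0, 0);
+ *	struct kpp_request *req = kpp_request_alloc(tfm, GFP_KERNEL);
+ *	int err;
+ *
+ *	err = crypto_kpp_set_secret(tfm, packed_key, packed_key_len);
+ *
+ *	kpp_request_set_input(req, NULL, 0);
+ *	kpp_request_set_output(req, &pub_sg, pub_len);
+ *	err = crypto_kpp_generate_public_key(req);
+ *
+ *	kpp_request_set_input(req, &peer_sg, peer_len);
+ *	kpp_request_set_output(req, &secret_sg, secret_len);
+ *	err = crypto_kpp_compute_shared_secret(req);
+ *
+ *	kpp_request_free(req);
+ *	crypto_free_kpp(tfm);
+ */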
+#endif
diff --git a/include/crypto/mcryptd.h b/include/crypto/mcryptd.h
new file mode 100644
index 0000000..b67404f
--- /dev/null
+++ b/include/crypto/mcryptd.h
@@ -0,0 +1,114 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Software async multibuffer crypto daemon headers
+ *
+ *    Author:
+ *             Tim Chen <tim.c.chen@linux.intel.com>
+ *
+ *    Copyright (c) 2014, Intel Corporation.
+ */
+
+#ifndef _CRYPTO_MCRYPT_H
+#define _CRYPTO_MCRYPT_H
+
+#include <linux/crypto.h>
+#include <linux/kernel.h>
+#include <crypto/hash.h>
+
+struct mcryptd_ahash {
+	struct crypto_ahash base;
+};
+
+static inline struct mcryptd_ahash *__mcryptd_ahash_cast(
+	struct crypto_ahash *tfm)
+{
+	return (struct mcryptd_ahash *)tfm;
+}
+
+struct mcryptd_cpu_queue {
+	struct crypto_queue queue;
+	spinlock_t q_lock;
+	struct work_struct work;
+};
+
+struct mcryptd_queue {
+	struct mcryptd_cpu_queue __percpu *cpu_queue;
+};
+
+struct mcryptd_instance_ctx {
+	struct crypto_spawn spawn;
+	struct mcryptd_queue *queue;
+};
+
+struct mcryptd_hash_ctx {
+	struct crypto_ahash *child;
+	struct mcryptd_alg_state *alg_state;
+};
+
+struct mcryptd_tag {
+	/* seq number of request */
+	unsigned seq_num;
+	/* arrival time of request */
+	unsigned long arrival;
+	unsigned long expire;
+	int	cpu;
+};
+
+struct mcryptd_hash_request_ctx {
+	struct list_head waiter;
+	crypto_completion_t complete;
+	struct mcryptd_tag tag;
+	struct crypto_hash_walk walk;
+	u8 *out;
+	int flag;
+	struct ahash_request areq;
+};
+
+struct mcryptd_ahash *mcryptd_alloc_ahash(const char *alg_name,
+					u32 type, u32 mask);
+struct crypto_ahash *mcryptd_ahash_child(struct mcryptd_ahash *tfm);
+struct ahash_request *mcryptd_ahash_desc(struct ahash_request *req);
+void mcryptd_free_ahash(struct mcryptd_ahash *tfm);
+void mcryptd_flusher(struct work_struct *work);
+
+enum mcryptd_req_type {
+	MCRYPTD_NONE,
+	MCRYPTD_UPDATE,
+	MCRYPTD_FINUP,
+	MCRYPTD_DIGEST,
+	MCRYPTD_FINAL
+};
+
+struct mcryptd_alg_cstate {
+	unsigned long next_flush;
+	unsigned next_seq_num;
+	bool	flusher_engaged;
+	struct  delayed_work flush;
+	int	cpu;
+	struct  mcryptd_alg_state *alg_state;
+	void	*mgr;
+	spinlock_t work_lock;
+	struct list_head work_list;
+	struct list_head flush_list;
+};
+
+struct mcryptd_alg_state {
+	struct mcryptd_alg_cstate __percpu *alg_cstate;
+	unsigned long (*flusher)(struct mcryptd_alg_cstate *cstate);
+};
+
+/* return delay in jiffies from current time */
+static inline unsigned long get_delay(unsigned long t)
+{
+	long delay;
+
+	delay = (long) t - (long) jiffies;
+	if (delay <= 0)
+		return 0;
+	else
+		return (unsigned long) delay;
+}
+
+void mcryptd_arm_flusher(struct mcryptd_alg_cstate *cstate, unsigned long delay);
+
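+/*
+ * A usage sketch: the outer multibuffer hash is reached through its
+ * mcryptd wrapper; the algorithm name below is a placeholder for a real
+ * multibuffer implementation:
+ *
+ *	struct mcryptd_ahash *mtfm;
+ *
+ *	mtfm = mcryptd_alloc_ahash("__my-hash-mb", CRYPTO_ALG_INTERNAL,
+ *				   CRYPTO_ALG_INTERNAL);
+ *	...
+ *	mcryptd_free_ahash(mtfm);
+ */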
+#endif
diff --git a/include/crypto/md5.h b/include/crypto/md5.h
new file mode 100644
index 0000000..cf9e9de
--- /dev/null
+++ b/include/crypto/md5.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _CRYPTO_MD5_H
+#define _CRYPTO_MD5_H
+
+#include <linux/types.h>
+
+#define MD5_DIGEST_SIZE		16
+#define MD5_HMAC_BLOCK_SIZE	64
+#define MD5_BLOCK_WORDS		16
+#define MD5_HASH_WORDS		4
+
+#define MD5_H0	0x67452301UL
+#define MD5_H1	0xefcdab89UL
+#define MD5_H2	0x98badcfeUL
+#define MD5_H3	0x10325476UL
+
+extern const u8 md5_zero_message_hash[MD5_DIGEST_SIZE];
+
+struct md5_state {
+	u32 hash[MD5_HASH_WORDS];
+	u32 block[MD5_BLOCK_WORDS];
+	u64 byte_count;
+};
+
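+/*
+ * This header only carries the MD5 constants and state layout; digests
+ * are computed through the generic shash API. A minimal sketch (data and
+ * len supplied by the caller; error handling abbreviated):
+ *
+ *	struct crypto_shash *tfm = crypto_alloc_shash("md5", 0, 0);
+ *	SHASH_DESC_ON_STACK(desc, tfm);
+ *	u8 out[MD5_DIGEST_SIZE];
+ *	int err;
+ *
+ *	desc->tfm = tfm;
+ *	desc->flags = 0;
+ *	err = crypto_shash_digest(desc, data, len, out);
+ *	crypto_free_shash(tfm);
+ */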
+#endif
diff --git a/include/crypto/morus1280_glue.h b/include/crypto/morus1280_glue.h
new file mode 100644
index 0000000..b26dd70
--- /dev/null
+++ b/include/crypto/morus1280_glue.h
@@ -0,0 +1,137 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * The MORUS-1280 Authenticated-Encryption Algorithm
+ *   Common glue skeleton -- header file
+ *
+ * Copyright (c) 2016-2018 Ondrej Mosnacek <omosnacek@gmail.com>
+ * Copyright (C) 2017-2018 Red Hat, Inc. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ */
+
+#ifndef _CRYPTO_MORUS1280_GLUE_H
+#define _CRYPTO_MORUS1280_GLUE_H
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <crypto/algapi.h>
+#include <crypto/aead.h>
+#include <crypto/morus_common.h>
+
+#define MORUS1280_WORD_SIZE 8
+#define MORUS1280_BLOCK_SIZE (MORUS_BLOCK_WORDS * MORUS1280_WORD_SIZE)
+
+struct morus1280_block {
+	u8 bytes[MORUS1280_BLOCK_SIZE];
+};
+
+struct morus1280_glue_ops {
+	void (*init)(void *state, const void *key, const void *iv);
+	void (*ad)(void *state, const void *data, unsigned int length);
+	void (*enc)(void *state, const void *src, void *dst, unsigned int length);
+	void (*dec)(void *state, const void *src, void *dst, unsigned int length);
+	void (*enc_tail)(void *state, const void *src, void *dst, unsigned int length);
+	void (*dec_tail)(void *state, const void *src, void *dst, unsigned int length);
+	void (*final)(void *state, void *tag_xor, u64 assoclen, u64 cryptlen);
+};
+
+struct morus1280_ctx {
+	const struct morus1280_glue_ops *ops;
+	struct morus1280_block key;
+};
+
+void crypto_morus1280_glue_init_ops(struct crypto_aead *aead,
+				    const struct morus1280_glue_ops *ops);
+int crypto_morus1280_glue_setkey(struct crypto_aead *aead, const u8 *key,
+				 unsigned int keylen);
+int crypto_morus1280_glue_setauthsize(struct crypto_aead *tfm,
+				      unsigned int authsize);
+int crypto_morus1280_glue_encrypt(struct aead_request *req);
+int crypto_morus1280_glue_decrypt(struct aead_request *req);
+
+int cryptd_morus1280_glue_setkey(struct crypto_aead *aead, const u8 *key,
+				 unsigned int keylen);
+int cryptd_morus1280_glue_setauthsize(struct crypto_aead *aead,
+				      unsigned int authsize);
+int cryptd_morus1280_glue_encrypt(struct aead_request *req);
+int cryptd_morus1280_glue_decrypt(struct aead_request *req);
+int cryptd_morus1280_glue_init_tfm(struct crypto_aead *aead);
+void cryptd_morus1280_glue_exit_tfm(struct crypto_aead *aead);
+
+#define MORUS1280_DECLARE_ALGS(id, driver_name, priority) \
+	static const struct morus1280_glue_ops crypto_morus1280_##id##_ops = {\
+		.init = crypto_morus1280_##id##_init, \
+		.ad = crypto_morus1280_##id##_ad, \
+		.enc = crypto_morus1280_##id##_enc, \
+		.enc_tail = crypto_morus1280_##id##_enc_tail, \
+		.dec = crypto_morus1280_##id##_dec, \
+		.dec_tail = crypto_morus1280_##id##_dec_tail, \
+		.final = crypto_morus1280_##id##_final, \
+	}; \
+	\
+	static int crypto_morus1280_##id##_init_tfm(struct crypto_aead *tfm) \
+	{ \
+		crypto_morus1280_glue_init_ops(tfm, &crypto_morus1280_##id##_ops); \
+		return 0; \
+	} \
+	\
+	static void crypto_morus1280_##id##_exit_tfm(struct crypto_aead *tfm) \
+	{ \
+	} \
+	\
+	struct aead_alg crypto_morus1280_##id##_algs[] = {\
+		{ \
+			.setkey = crypto_morus1280_glue_setkey, \
+			.setauthsize = crypto_morus1280_glue_setauthsize, \
+			.encrypt = crypto_morus1280_glue_encrypt, \
+			.decrypt = crypto_morus1280_glue_decrypt, \
+			.init = crypto_morus1280_##id##_init_tfm, \
+			.exit = crypto_morus1280_##id##_exit_tfm, \
+			\
+			.ivsize = MORUS_NONCE_SIZE, \
+			.maxauthsize = MORUS_MAX_AUTH_SIZE, \
+			.chunksize = MORUS1280_BLOCK_SIZE, \
+			\
+			.base = { \
+				.cra_flags = CRYPTO_ALG_INTERNAL, \
+				.cra_blocksize = 1, \
+				.cra_ctxsize = sizeof(struct morus1280_ctx), \
+				.cra_alignmask = 0, \
+				\
+				.cra_name = "__morus1280", \
+				.cra_driver_name = "__"driver_name, \
+				\
+				.cra_module = THIS_MODULE, \
+			} \
+		}, { \
+			.setkey = cryptd_morus1280_glue_setkey, \
+			.setauthsize = cryptd_morus1280_glue_setauthsize, \
+			.encrypt = cryptd_morus1280_glue_encrypt, \
+			.decrypt = cryptd_morus1280_glue_decrypt, \
+			.init = cryptd_morus1280_glue_init_tfm, \
+			.exit = cryptd_morus1280_glue_exit_tfm, \
+			\
+			.ivsize = MORUS_NONCE_SIZE, \
+			.maxauthsize = MORUS_MAX_AUTH_SIZE, \
+			.chunksize = MORUS1280_BLOCK_SIZE, \
+			\
+			.base = { \
+				.cra_flags = CRYPTO_ALG_ASYNC, \
+				.cra_blocksize = 1, \
+				.cra_ctxsize = sizeof(struct crypto_aead *), \
+				.cra_alignmask = 0, \
+				\
+				.cra_priority = priority, \
+				\
+				.cra_name = "morus1280", \
+				.cra_driver_name = driver_name, \
+				\
+				.cra_module = THIS_MODULE, \
+			} \
+		} \
+	}
+
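+/*
+ * MORUS1280_DECLARE_ALGS emits the internal/async algorithm pair for one
+ * SIMD backend. A sketch of a glue module using it (identifier, driver
+ * name and priority are illustrative):
+ *
+ *	MORUS1280_DECLARE_ALGS(sse2, "morus1280-sse2", 350);
+ *
+ * The module's init routine then registers crypto_morus1280_sse2_algs[]
+ * with crypto_register_aeads().
+ */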
+#endif /* _CRYPTO_MORUS1280_GLUE_H */
diff --git a/include/crypto/morus640_glue.h b/include/crypto/morus640_glue.h
new file mode 100644
index 0000000..90c8db0
--- /dev/null
+++ b/include/crypto/morus640_glue.h
@@ -0,0 +1,137 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * The MORUS-640 Authenticated-Encryption Algorithm
+ *   Common glue skeleton -- header file
+ *
+ * Copyright (c) 2016-2018 Ondrej Mosnacek <omosnacek@gmail.com>
+ * Copyright (C) 2017-2018 Red Hat, Inc. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ */
+
+#ifndef _CRYPTO_MORUS640_GLUE_H
+#define _CRYPTO_MORUS640_GLUE_H
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <crypto/algapi.h>
+#include <crypto/aead.h>
+#include <crypto/morus_common.h>
+
+#define MORUS640_WORD_SIZE 4
+#define MORUS640_BLOCK_SIZE (MORUS_BLOCK_WORDS * MORUS640_WORD_SIZE)
+
+struct morus640_block {
+	u8 bytes[MORUS640_BLOCK_SIZE];
+};
+
+struct morus640_glue_ops {
+	void (*init)(void *state, const void *key, const void *iv);
+	void (*ad)(void *state, const void *data, unsigned int length);
+	void (*enc)(void *state, const void *src, void *dst, unsigned int length);
+	void (*dec)(void *state, const void *src, void *dst, unsigned int length);
+	void (*enc_tail)(void *state, const void *src, void *dst, unsigned int length);
+	void (*dec_tail)(void *state, const void *src, void *dst, unsigned int length);
+	void (*final)(void *state, void *tag_xor, u64 assoclen, u64 cryptlen);
+};
+
+struct morus640_ctx {
+	const struct morus640_glue_ops *ops;
+	struct morus640_block key;
+};
+
+void crypto_morus640_glue_init_ops(struct crypto_aead *aead,
+				   const struct morus640_glue_ops *ops);
+int crypto_morus640_glue_setkey(struct crypto_aead *aead, const u8 *key,
+				unsigned int keylen);
+int crypto_morus640_glue_setauthsize(struct crypto_aead *tfm,
+				     unsigned int authsize);
+int crypto_morus640_glue_encrypt(struct aead_request *req);
+int crypto_morus640_glue_decrypt(struct aead_request *req);
+
+int cryptd_morus640_glue_setkey(struct crypto_aead *aead, const u8 *key,
+				unsigned int keylen);
+int cryptd_morus640_glue_setauthsize(struct crypto_aead *aead,
+				     unsigned int authsize);
+int cryptd_morus640_glue_encrypt(struct aead_request *req);
+int cryptd_morus640_glue_decrypt(struct aead_request *req);
+int cryptd_morus640_glue_init_tfm(struct crypto_aead *aead);
+void cryptd_morus640_glue_exit_tfm(struct crypto_aead *aead);
+
+#define MORUS640_DECLARE_ALGS(id, driver_name, priority) \
+	static const struct morus640_glue_ops crypto_morus640_##id##_ops = {\
+		.init = crypto_morus640_##id##_init, \
+		.ad = crypto_morus640_##id##_ad, \
+		.enc = crypto_morus640_##id##_enc, \
+		.enc_tail = crypto_morus640_##id##_enc_tail, \
+		.dec = crypto_morus640_##id##_dec, \
+		.dec_tail = crypto_morus640_##id##_dec_tail, \
+		.final = crypto_morus640_##id##_final, \
+	}; \
+	\
+	static int crypto_morus640_##id##_init_tfm(struct crypto_aead *tfm) \
+	{ \
+		crypto_morus640_glue_init_ops(tfm, &crypto_morus640_##id##_ops); \
+		return 0; \
+	} \
+	\
+	static void crypto_morus640_##id##_exit_tfm(struct crypto_aead *tfm) \
+	{ \
+	} \
+	\
+	struct aead_alg crypto_morus640_##id##_algs[] = {\
+		{ \
+			.setkey = crypto_morus640_glue_setkey, \
+			.setauthsize = crypto_morus640_glue_setauthsize, \
+			.encrypt = crypto_morus640_glue_encrypt, \
+			.decrypt = crypto_morus640_glue_decrypt, \
+			.init = crypto_morus640_##id##_init_tfm, \
+			.exit = crypto_morus640_##id##_exit_tfm, \
+			\
+			.ivsize = MORUS_NONCE_SIZE, \
+			.maxauthsize = MORUS_MAX_AUTH_SIZE, \
+			.chunksize = MORUS640_BLOCK_SIZE, \
+			\
+			.base = { \
+				.cra_flags = CRYPTO_ALG_INTERNAL, \
+				.cra_blocksize = 1, \
+				.cra_ctxsize = sizeof(struct morus640_ctx), \
+				.cra_alignmask = 0, \
+				\
+				.cra_name = "__morus640", \
+				.cra_driver_name = "__"driver_name, \
+				\
+				.cra_module = THIS_MODULE, \
+			} \
+		}, { \
+			.setkey = cryptd_morus640_glue_setkey, \
+			.setauthsize = cryptd_morus640_glue_setauthsize, \
+			.encrypt = cryptd_morus640_glue_encrypt, \
+			.decrypt = cryptd_morus640_glue_decrypt, \
+			.init = cryptd_morus640_glue_init_tfm, \
+			.exit = cryptd_morus640_glue_exit_tfm, \
+			\
+			.ivsize = MORUS_NONCE_SIZE, \
+			.maxauthsize = MORUS_MAX_AUTH_SIZE, \
+			.chunksize = MORUS640_BLOCK_SIZE, \
+			\
+			.base = { \
+				.cra_flags = CRYPTO_ALG_ASYNC, \
+				.cra_blocksize = 1, \
+				.cra_ctxsize = sizeof(struct crypto_aead *), \
+				.cra_alignmask = 0, \
+				\
+				.cra_priority = priority, \
+				\
+				.cra_name = "morus640", \
+				.cra_driver_name = driver_name, \
+				\
+				.cra_module = THIS_MODULE, \
+			} \
+		} \
+	}
+
+#endif /* _CRYPTO_MORUS640_GLUE_H */
diff --git a/include/crypto/morus_common.h b/include/crypto/morus_common.h
new file mode 100644
index 0000000..39f28c7
--- /dev/null
+++ b/include/crypto/morus_common.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * The MORUS Authenticated-Encryption Algorithm
+ *   Common definitions
+ *
+ * Copyright (c) 2016-2018 Ondrej Mosnacek <omosnacek@gmail.com>
+ * Copyright (C) 2017-2018 Red Hat, Inc. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ */
+
+#ifndef _CRYPTO_MORUS_COMMON_H
+#define _CRYPTO_MORUS_COMMON_H
+
+#define MORUS_BLOCK_WORDS 4
+#define MORUS_STATE_BLOCKS 5
+#define MORUS_NONCE_SIZE 16
+#define MORUS_MAX_AUTH_SIZE 16
+
+#endif /* _CRYPTO_MORUS_COMMON_H */
diff --git a/include/crypto/null.h b/include/crypto/null.h
new file mode 100644
index 0000000..15aeef6
--- /dev/null
+++ b/include/crypto/null.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Values for NULL algorithms */
+
+#ifndef _CRYPTO_NULL_H
+#define _CRYPTO_NULL_H
+
+#define NULL_KEY_SIZE		0
+#define NULL_BLOCK_SIZE		1
+#define NULL_DIGEST_SIZE	0
+#define NULL_IV_SIZE		0
+
+struct crypto_skcipher *crypto_get_default_null_skcipher(void);
+void crypto_put_default_null_skcipher(void);
+
+#endif
diff --git a/include/crypto/padlock.h b/include/crypto/padlock.h
new file mode 100644
index 0000000..d2cfa2e
--- /dev/null
+++ b/include/crypto/padlock.h
@@ -0,0 +1,29 @@
+/*
+ * Driver for VIA PadLock
+ *
+ * Copyright (c) 2004 Michal Ludvig <michal@logix.cz>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option) 
+ * any later version.
+ *
+ */
+
+#ifndef _CRYPTO_PADLOCK_H
+#define _CRYPTO_PADLOCK_H
+
+#define PADLOCK_ALIGNMENT 16
+
+#define PFX	KBUILD_MODNAME ": "
+
+#define PADLOCK_CRA_PRIORITY	300
+#define PADLOCK_COMPOSITE_PRIORITY 400
+
+#ifdef CONFIG_64BIT
+#define STACK_ALIGN 16
+#else
+#define STACK_ALIGN 4
+#endif
+
+#endif	/* _CRYPTO_PADLOCK_H */
diff --git a/include/crypto/pcrypt.h b/include/crypto/pcrypt.h
new file mode 100644
index 0000000..d7d8bd8
--- /dev/null
+++ b/include/crypto/pcrypt.h
@@ -0,0 +1,51 @@
+/*
+ * pcrypt - Parallel crypto engine.
+ *
+ * Copyright (C) 2009 secunet Security Networks AG
+ * Copyright (C) 2009 Steffen Klassert <steffen.klassert@secunet.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#ifndef _CRYPTO_PCRYPT_H
+#define _CRYPTO_PCRYPT_H
+
+#include <linux/crypto.h>
+#include <linux/kernel.h>
+#include <linux/padata.h>
+
+struct pcrypt_request {
+	struct padata_priv	padata;
+	void			*data;
+	void			*__ctx[] CRYPTO_MINALIGN_ATTR;
+};
+
+static inline void *pcrypt_request_ctx(struct pcrypt_request *req)
+{
+	return req->__ctx;
+}
+
+static inline
+struct padata_priv *pcrypt_request_padata(struct pcrypt_request *req)
+{
+	return &req->padata;
+}
+
+static inline
+struct pcrypt_request *pcrypt_padata_request(struct padata_priv *padata)
+{
+	return container_of(padata, struct pcrypt_request, padata);
+}
+
+#endif
diff --git a/include/crypto/pkcs7.h b/include/crypto/pkcs7.h
new file mode 100644
index 0000000..583f199
--- /dev/null
+++ b/include/crypto/pkcs7.h
@@ -0,0 +1,47 @@
+/* PKCS#7 crypto data parser
+ *
+ * Copyright (C) 2012 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+
+#ifndef _CRYPTO_PKCS7_H
+#define _CRYPTO_PKCS7_H
+
+#include <linux/verification.h>
+#include <crypto/public_key.h>
+
+struct key;
+struct pkcs7_message;
+
+/*
+ * pkcs7_parser.c
+ */
+extern struct pkcs7_message *pkcs7_parse_message(const void *data,
+						 size_t datalen);
+extern void pkcs7_free_message(struct pkcs7_message *pkcs7);
+
+extern int pkcs7_get_content_data(const struct pkcs7_message *pkcs7,
+				  const void **_data, size_t *_datalen,
+				  size_t *_headerlen);
+
+/*
+ * pkcs7_trust.c
+ */
+extern int pkcs7_validate_trust(struct pkcs7_message *pkcs7,
+				struct key *trust_keyring);
+
+/*
+ * pkcs7_verify.c
+ */
+extern int pkcs7_verify(struct pkcs7_message *pkcs7,
+			enum key_being_used_for usage);
+
+extern int pkcs7_supply_detached_data(struct pkcs7_message *pkcs7,
+				      const void *data, size_t datalen);
+
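+/*
+ * A minimal verification sketch: parse a blob, verify the contained
+ * signature chain for the intended usage (one of the enum
+ * key_being_used_for values from <linux/verification.h>), then release
+ * the message:
+ *
+ *	struct pkcs7_message *pkcs7;
+ *	int err;
+ *
+ *	pkcs7 = pkcs7_parse_message(data, datalen);
+ *	if (IS_ERR(pkcs7))
+ *		return PTR_ERR(pkcs7);
+ *	err = pkcs7_verify(pkcs7, VERIFYING_UNSPECIFIED_SIGNATURE);
+ *	pkcs7_free_message(pkcs7);
+ */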
+#endif /* _CRYPTO_PKCS7_H */
diff --git a/include/crypto/poly1305.h b/include/crypto/poly1305.h
new file mode 100644
index 0000000..f718a19
--- /dev/null
+++ b/include/crypto/poly1305.h
@@ -0,0 +1,40 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Common values for the Poly1305 algorithm
+ */
+
+#ifndef _CRYPTO_POLY1305_H
+#define _CRYPTO_POLY1305_H
+
+#include <linux/types.h>
+#include <linux/crypto.h>
+
+#define POLY1305_BLOCK_SIZE	16
+#define POLY1305_KEY_SIZE	32
+#define POLY1305_DIGEST_SIZE	16
+
+struct poly1305_desc_ctx {
+	/* key */
+	u32 r[5];
+	/* finalize key */
+	u32 s[4];
+	/* accumulator */
+	u32 h[5];
+	/* partial buffer */
+	u8 buf[POLY1305_BLOCK_SIZE];
+	/* bytes used in partial buffer */
+	unsigned int buflen;
+	/* r key has been set */
+	bool rset;
+	/* s key has been set */
+	bool sset;
+};
+
+int crypto_poly1305_init(struct shash_desc *desc);
+unsigned int crypto_poly1305_setdesckey(struct poly1305_desc_ctx *dctx,
+					const u8 *src, unsigned int srclen);
+int crypto_poly1305_update(struct shash_desc *desc,
+			   const u8 *src, unsigned int srclen);
+int crypto_poly1305_final(struct shash_desc *desc, u8 *dst);
+
+#endif
diff --git a/include/crypto/public_key.h b/include/crypto/public_key.h
new file mode 100644
index 0000000..e0b681a
--- /dev/null
+++ b/include/crypto/public_key.h
@@ -0,0 +1,74 @@
+/* Asymmetric public-key algorithm definitions
+ *
+ * See Documentation/crypto/asymmetric-keys.txt
+ *
+ * Copyright (C) 2012 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+
+#ifndef _LINUX_PUBLIC_KEY_H
+#define _LINUX_PUBLIC_KEY_H
+
+/*
+ * Cryptographic data for the public-key subtype of the asymmetric key type.
+ *
+ * Note that this may include the private part of the key as well as the
+ * public part.
+ */
+struct public_key {
+	void *key;
+	u32 keylen;
+	const char *id_type;
+	const char *pkey_algo;
+};
+
+extern void public_key_free(struct public_key *key);
+
+/*
+ * Public key cryptography signature data
+ */
+struct public_key_signature {
+	struct asymmetric_key_id *auth_ids[2];
+	u8 *s;			/* Signature */
+	u32 s_size;		/* Number of bytes in signature */
+	u8 *digest;
+	u8 digest_size;		/* Number of bytes in digest */
+	const char *pkey_algo;
+	const char *hash_algo;
+};
+
+extern void public_key_signature_free(struct public_key_signature *sig);
+
+extern struct asymmetric_key_subtype public_key_subtype;
+
+struct key;
+struct key_type;
+union key_payload;
+
+extern int restrict_link_by_signature(struct key *dest_keyring,
+				      const struct key_type *type,
+				      const union key_payload *payload,
+				      struct key *trust_keyring);
+
+extern int restrict_link_by_key_or_keyring(struct key *dest_keyring,
+					   const struct key_type *type,
+					   const union key_payload *payload,
+					   struct key *trusted);
+
+extern int restrict_link_by_key_or_keyring_chain(struct key *trust_keyring,
+						 const struct key_type *type,
+						 const union key_payload *payload,
+						 struct key *trusted);
+
+extern int verify_signature(const struct key *key,
+			    const struct public_key_signature *sig);
+
+int public_key_verify_signature(const struct public_key *pkey,
+				const struct public_key_signature *sig);
+
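+/*
+ * A sketch of direct verification: the caller fills in the key and the
+ * signature descriptor (digest already computed; pkey, sig_data, digest
+ * and their lengths are assumed to come from the caller):
+ *
+ *	struct public_key_signature sig = {
+ *		.s		= sig_data,
+ *		.s_size		= sig_len,
+ *		.digest		= digest,
+ *		.digest_size	= digest_len,
+ *		.pkey_algo	= "rsa",
+ *		.hash_algo	= "sha256",
+ *	};
+ *
+ *	err = public_key_verify_signature(&pkey, &sig);
+ */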
+#endif /* _LINUX_PUBLIC_KEY_H */
diff --git a/include/crypto/rng.h b/include/crypto/rng.h
new file mode 100644
index 0000000..b95ede3
--- /dev/null
+++ b/include/crypto/rng.h
@@ -0,0 +1,200 @@
+/*
+ * RNG: Random Number Generator  algorithms under the crypto API
+ *
+ * Copyright (c) 2008 Neil Horman <nhorman@tuxdriver.com>
+ * Copyright (c) 2015 Herbert Xu <herbert@gondor.apana.org.au>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ */
+
+#ifndef _CRYPTO_RNG_H
+#define _CRYPTO_RNG_H
+
+#include <linux/crypto.h>
+
+struct crypto_rng;
+
+/**
+ * struct rng_alg - random number generator definition
+ *
+ * @generate:	The function defined by this variable obtains a
+ *		random number. The random number generator transform
+ *		must generate the random number out of the context
+ *		provided with this call, plus any additional data
+ *		if provided to the call.
+ * @seed:	Seed or reseed the random number generator.  With the
+ *		invocation of this function call, the random number
+ *		generator shall become ready for generation.  If the
+ *		random number generator requires a seed for setting
+ *		up a new state, the seed must be provided by the
+ *		consumer while invoking this function. The required
+ *		size of the seed is defined with @seedsize.
+ * @set_ent:	Set entropy that would otherwise be obtained from the
+ *		entropy source.  Internal use only.
+ * @seedsize:	The seed size required for random number generator
+ *		initialization is defined with this variable. Some
+ *		random number generators do not require a seed,
+ *		as the seeding is implemented internally without
+ *		the need of support by the consumer. In this case,
+ *		the seed size is set to zero.
+ * @base:	Common crypto API algorithm data structure.
+ */
+struct rng_alg {
+	int (*generate)(struct crypto_rng *tfm,
+			const u8 *src, unsigned int slen,
+			u8 *dst, unsigned int dlen);
+	int (*seed)(struct crypto_rng *tfm, const u8 *seed, unsigned int slen);
+	void (*set_ent)(struct crypto_rng *tfm, const u8 *data,
+			unsigned int len);
+
+	unsigned int seedsize;
+
+	struct crypto_alg base;
+};
+
+struct crypto_rng {
+	struct crypto_tfm base;
+};
+
+extern struct crypto_rng *crypto_default_rng;
+
+int crypto_get_default_rng(void);
+void crypto_put_default_rng(void);
+
+/**
+ * DOC: Random number generator API
+ *
+ * The random number generator API is used with the ciphers of type
+ * CRYPTO_ALG_TYPE_RNG (listed as type "rng" in /proc/crypto)
+ */
+
+/**
+ * crypto_alloc_rng() -- allocate RNG handle
+ * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
+ *	      random number generator
+ * @type: specifies the type of the cipher
+ * @mask: specifies the mask for the cipher
+ *
+ * Allocate a cipher handle for a random number generator. The returned struct
+ * crypto_rng is the cipher handle that is required for any subsequent
+ * API invocation for that random number generator.
+ *
+ * For all random number generators, this call creates a new private copy of
+ * the random number generator that does not share a state with other
+ * instances. The only exception is the "krng" random number generator which
+ * is a kernel crypto API use case for the get_random_bytes() function of the
+ * /dev/random driver.
+ *
+ * Return: allocated cipher handle in case of success; IS_ERR() is true in case
+ *	   of an error, PTR_ERR() returns the error code.
+ */
+struct crypto_rng *crypto_alloc_rng(const char *alg_name, u32 type, u32 mask);
+
+static inline struct crypto_tfm *crypto_rng_tfm(struct crypto_rng *tfm)
+{
+	return &tfm->base;
+}
+
+/**
+ * crypto_rng_alg - obtain algorithm definition of RNG
+ * @tfm: cipher handle
+ *
+ * Return the algorithm definition (struct rng_alg) backing the initialized
+ * random number generator.
+ *
+ * Return: pointer to the rng_alg structure
+ */
+static inline struct rng_alg *crypto_rng_alg(struct crypto_rng *tfm)
+{
+	return container_of(crypto_rng_tfm(tfm)->__crt_alg,
+			    struct rng_alg, base);
+}
+
+/**
+ * crypto_free_rng() - zeroize and free RNG handle
+ * @tfm: cipher handle to be freed
+ */
+static inline void crypto_free_rng(struct crypto_rng *tfm)
+{
+	crypto_destroy_tfm(tfm, crypto_rng_tfm(tfm));
+}
+
+/**
+ * crypto_rng_generate() - get random number
+ * @tfm: cipher handle
+ * @src: Input buffer holding additional data, may be NULL
+ * @slen: Length of additional data
+ * @dst: output buffer holding the random numbers
+ * @dlen: length of the output buffer
+ *
+ * This function fills the caller-allocated buffer with random
+ * numbers using the random number generator referenced by the
+ * cipher handle.
+ *
+ * Return: 0 if the function was successful; < 0 if an error occurred
+ */
+static inline int crypto_rng_generate(struct crypto_rng *tfm,
+				      const u8 *src, unsigned int slen,
+				      u8 *dst, unsigned int dlen)
+{
+	return crypto_rng_alg(tfm)->generate(tfm, src, slen, dst, dlen);
+}
+
+/**
+ * crypto_rng_get_bytes() - get random number
+ * @tfm: cipher handle
+ * @rdata: output buffer holding the random numbers
+ * @dlen: length of the output buffer
+ *
+ * This function fills the caller-allocated buffer with random numbers using the
+ * random number generator referenced by the cipher handle.
+ *
+ * Return: 0 if the function was successful; < 0 if an error occurred
+ */
+static inline int crypto_rng_get_bytes(struct crypto_rng *tfm,
+				       u8 *rdata, unsigned int dlen)
+{
+	return crypto_rng_generate(tfm, NULL, 0, rdata, dlen);
+}
+
+/**
+ * crypto_rng_reset() - re-initialize the RNG
+ * @tfm: cipher handle
+ * @seed: seed input data
+ * @slen: length of the seed input data
+ *
+ * The reset function completely re-initializes the random number generator
+ * referenced by the cipher handle by clearing the current state. The new state
+ * is initialized with the caller-provided seed or automatically, depending
+ * on the random number generator type (the ANSI X9.31 RNG requires a
+ * caller-provided seed; the SP800-90A DRBGs perform an automatic seeding).
+ * The seed is provided as a parameter to this function call. The provided
+ * seed should have the length of the seed size defined for the random number
+ * generator, as returned by crypto_rng_seedsize().
+ *
+ * Return: 0 if the seeding was successful; < 0 if an error occurred
+ */
+int crypto_rng_reset(struct crypto_rng *tfm, const u8 *seed,
+		     unsigned int slen);
+
+/**
+ * crypto_rng_seedsize() - obtain seed size of RNG
+ * @tfm: cipher handle
+ *
+ * The function returns the seed size for the random number generator
+ * referenced by the cipher handle. This value may be zero if the random
+ * number generator does not implement or require a reseeding. For example,
+ * the SP800-90A DRBGs implement an automated reseeding after reaching a
+ * pre-defined threshold.
+ *
+ * Return: seed size for the random number generator
+ */
+static inline int crypto_rng_seedsize(struct crypto_rng *tfm)
+{
+	return crypto_rng_alg(tfm)->seedsize;
+}
+
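+/*
+ * An end-to-end sketch: allocate the default "stdrng", seed it, and pull
+ * random bytes (seed is assumed to be caller-supplied entropy of
+ * crypto_rng_seedsize() bytes; error handling abbreviated):
+ *
+ *	struct crypto_rng *rng = crypto_alloc_rng("stdrng", 0, 0);
+ *	u8 buf[16];
+ *	int err;
+ *
+ *	err = crypto_rng_reset(rng, seed, crypto_rng_seedsize(rng));
+ *	err = crypto_rng_get_bytes(rng, buf, sizeof(buf));
+ *	crypto_free_rng(rng);
+ */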
+#endif
diff --git a/include/crypto/scatterwalk.h b/include/crypto/scatterwalk.h
new file mode 100644
index 0000000..a66c127
--- /dev/null
+++ b/include/crypto/scatterwalk.h
@@ -0,0 +1,120 @@
+/*
+ * Cryptographic scatter and gather helpers.
+ *
+ * Copyright (c) 2002 James Morris <jmorris@intercode.com.au>
+ * Copyright (c) 2002 Adam J. Richter <adam@yggdrasil.com>
+ * Copyright (c) 2004 Jean-Luc Cooke <jlcooke@certainkey.com>
+ * Copyright (c) 2007 Herbert Xu <herbert@gondor.apana.org.au>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ */
+
+#ifndef _CRYPTO_SCATTERWALK_H
+#define _CRYPTO_SCATTERWALK_H
+
+#include <crypto/algapi.h>
+#include <linux/highmem.h>
+#include <linux/kernel.h>
+#include <linux/scatterlist.h>
+
+static inline void scatterwalk_crypto_chain(struct scatterlist *head,
+					    struct scatterlist *sg, int num)
+{
+	if (sg)
+		sg_chain(head, num, sg);
+	else
+		sg_mark_end(head);
+}
+
+static inline unsigned int scatterwalk_pagelen(struct scatter_walk *walk)
+{
+	unsigned int len = walk->sg->offset + walk->sg->length - walk->offset;
+	unsigned int len_this_page = offset_in_page(~walk->offset) + 1;
+	return len_this_page > len ? len : len_this_page;
+}
+
+static inline unsigned int scatterwalk_clamp(struct scatter_walk *walk,
+					     unsigned int nbytes)
+{
+	unsigned int len_this_page = scatterwalk_pagelen(walk);
+	return nbytes > len_this_page ? len_this_page : nbytes;
+}
+
+static inline void scatterwalk_advance(struct scatter_walk *walk,
+				       unsigned int nbytes)
+{
+	walk->offset += nbytes;
+}
+
+static inline unsigned int scatterwalk_aligned(struct scatter_walk *walk,
+					       unsigned int alignmask)
+{
+	return !(walk->offset & alignmask);
+}
+
+static inline struct page *scatterwalk_page(struct scatter_walk *walk)
+{
+	return sg_page(walk->sg) + (walk->offset >> PAGE_SHIFT);
+}
+
+static inline void scatterwalk_unmap(void *vaddr)
+{
+	kunmap_atomic(vaddr);
+}
+
+static inline void scatterwalk_start(struct scatter_walk *walk,
+				     struct scatterlist *sg)
+{
+	walk->sg = sg;
+	walk->offset = sg->offset;
+}
+
+static inline void *scatterwalk_map(struct scatter_walk *walk)
+{
+	return kmap_atomic(scatterwalk_page(walk)) +
+	       offset_in_page(walk->offset);
+}
+
+static inline void scatterwalk_pagedone(struct scatter_walk *walk, int out,
+					unsigned int more)
+{
+	if (out) {
+		struct page *page;
+
+		page = sg_page(walk->sg) + ((walk->offset - 1) >> PAGE_SHIFT);
+		/* Test ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE first as
+		 * PageSlab cannot be optimised away per se due to
+		 * use of volatile pointer.
+		 */
+		if (ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE && !PageSlab(page))
+			flush_dcache_page(page);
+	}
+
+	if (more && walk->offset >= walk->sg->offset + walk->sg->length)
+		scatterwalk_start(walk, sg_next(walk->sg));
+}
+
+static inline void scatterwalk_done(struct scatter_walk *walk, int out,
+				    int more)
+{
+	if (!more || walk->offset >= walk->sg->offset + walk->sg->length ||
+	    !(walk->offset & (PAGE_SIZE - 1)))
+		scatterwalk_pagedone(walk, out, more);
+}
+
+void scatterwalk_copychunks(void *buf, struct scatter_walk *walk,
+			    size_t nbytes, int out);
+void *scatterwalk_map(struct scatter_walk *walk);
+
+void scatterwalk_map_and_copy(void *buf, struct scatterlist *sg,
+			      unsigned int start, unsigned int nbytes, int out);
+
+struct scatterlist *scatterwalk_ffwd(struct scatterlist dst[2],
+				     struct scatterlist *src,
+				     unsigned int len);
+
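+/*
+ * The most common consumer pattern is scatterwalk_map_and_copy(), which
+ * moves a small region between a linear buffer and a scatterlist. A
+ * sketch of reading an AEAD authentication tag (authsize bytes at the
+ * end of the ciphertext) on decryption:
+ *
+ *	scatterwalk_map_and_copy(tag, req->src,
+ *				 req->assoclen + req->cryptlen - authsize,
+ *				 authsize, 0);
+ *
+ * The final argument selects the direction: 0 copies from the
+ * scatterlist into the buffer, 1 copies the buffer into the scatterlist.
+ */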
+#endif  /* _CRYPTO_SCATTERWALK_H */
diff --git a/include/crypto/serpent.h b/include/crypto/serpent.h
new file mode 100644
index 0000000..7dd780c
--- /dev/null
+++ b/include/crypto/serpent.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Common values for serpent algorithms
+ */
+
+#ifndef _CRYPTO_SERPENT_H
+#define _CRYPTO_SERPENT_H
+
+#include <linux/types.h>
+#include <linux/crypto.h>
+
+#define SERPENT_MIN_KEY_SIZE		  0
+#define SERPENT_MAX_KEY_SIZE		 32
+#define SERPENT_EXPKEY_WORDS		132
+#define SERPENT_BLOCK_SIZE		 16
+
+struct serpent_ctx {
+	u32 expkey[SERPENT_EXPKEY_WORDS];
+};
+
+int __serpent_setkey(struct serpent_ctx *ctx, const u8 *key,
+		     unsigned int keylen);
+int serpent_setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen);
+
+void __serpent_encrypt(struct serpent_ctx *ctx, u8 *dst, const u8 *src);
+void __serpent_decrypt(struct serpent_ctx *ctx, u8 *dst, const u8 *src);
+
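+/*
+ * A minimal single-block sketch using the exported primitives directly,
+ * bypassing the crypto API plumbing (key, keylen and in are assumed to
+ * be caller-supplied and within the limits above):
+ *
+ *	struct serpent_ctx ctx;
+ *	u8 out[SERPENT_BLOCK_SIZE];
+ *	int err;
+ *
+ *	err = __serpent_setkey(&ctx, key, keylen);
+ *	__serpent_encrypt(&ctx, out, in);
+ */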
+#endif
diff --git a/include/crypto/sha.h b/include/crypto/sha.h
new file mode 100644
index 0000000..8a46202
--- /dev/null
+++ b/include/crypto/sha.h
@@ -0,0 +1,115 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Common values for SHA algorithms
+ */
+
+#ifndef _CRYPTO_SHA_H
+#define _CRYPTO_SHA_H
+
+#include <linux/types.h>
+
+#define SHA1_DIGEST_SIZE        20
+#define SHA1_BLOCK_SIZE         64
+
+#define SHA224_DIGEST_SIZE	28
+#define SHA224_BLOCK_SIZE	64
+
+#define SHA256_DIGEST_SIZE      32
+#define SHA256_BLOCK_SIZE       64
+
+#define SHA384_DIGEST_SIZE      48
+#define SHA384_BLOCK_SIZE       128
+
+#define SHA512_DIGEST_SIZE      64
+#define SHA512_BLOCK_SIZE       128
+
+#define SHA1_H0		0x67452301UL
+#define SHA1_H1		0xefcdab89UL
+#define SHA1_H2		0x98badcfeUL
+#define SHA1_H3		0x10325476UL
+#define SHA1_H4		0xc3d2e1f0UL
+
+#define SHA224_H0	0xc1059ed8UL
+#define SHA224_H1	0x367cd507UL
+#define SHA224_H2	0x3070dd17UL
+#define SHA224_H3	0xf70e5939UL
+#define SHA224_H4	0xffc00b31UL
+#define SHA224_H5	0x68581511UL
+#define SHA224_H6	0x64f98fa7UL
+#define SHA224_H7	0xbefa4fa4UL
+
+#define SHA256_H0	0x6a09e667UL
+#define SHA256_H1	0xbb67ae85UL
+#define SHA256_H2	0x3c6ef372UL
+#define SHA256_H3	0xa54ff53aUL
+#define SHA256_H4	0x510e527fUL
+#define SHA256_H5	0x9b05688cUL
+#define SHA256_H6	0x1f83d9abUL
+#define SHA256_H7	0x5be0cd19UL
+
+#define SHA384_H0	0xcbbb9d5dc1059ed8ULL
+#define SHA384_H1	0x629a292a367cd507ULL
+#define SHA384_H2	0x9159015a3070dd17ULL
+#define SHA384_H3	0x152fecd8f70e5939ULL
+#define SHA384_H4	0x67332667ffc00b31ULL
+#define SHA384_H5	0x8eb44a8768581511ULL
+#define SHA384_H6	0xdb0c2e0d64f98fa7ULL
+#define SHA384_H7	0x47b5481dbefa4fa4ULL
+
+#define SHA512_H0	0x6a09e667f3bcc908ULL
+#define SHA512_H1	0xbb67ae8584caa73bULL
+#define SHA512_H2	0x3c6ef372fe94f82bULL
+#define SHA512_H3	0xa54ff53a5f1d36f1ULL
+#define SHA512_H4	0x510e527fade682d1ULL
+#define SHA512_H5	0x9b05688c2b3e6c1fULL
+#define SHA512_H6	0x1f83d9abfb41bd6bULL
+#define SHA512_H7	0x5be0cd19137e2179ULL
+
+extern const u8 sha1_zero_message_hash[SHA1_DIGEST_SIZE];
+
+extern const u8 sha224_zero_message_hash[SHA224_DIGEST_SIZE];
+
+extern const u8 sha256_zero_message_hash[SHA256_DIGEST_SIZE];
+
+extern const u8 sha384_zero_message_hash[SHA384_DIGEST_SIZE];
+
+extern const u8 sha512_zero_message_hash[SHA512_DIGEST_SIZE];
+
+struct sha1_state {
+	u32 state[SHA1_DIGEST_SIZE / 4];
+	u64 count;
+	u8 buffer[SHA1_BLOCK_SIZE];
+};
+
+struct sha256_state {
+	u32 state[SHA256_DIGEST_SIZE / 4];
+	u64 count;
+	u8 buf[SHA256_BLOCK_SIZE];
+};
+
+struct sha512_state {
+	u64 state[SHA512_DIGEST_SIZE / 8];
+	u64 count[2];
+	u8 buf[SHA512_BLOCK_SIZE];
+};
+
+struct shash_desc;
+
+extern int crypto_sha1_update(struct shash_desc *desc, const u8 *data,
+			      unsigned int len);
+
+extern int crypto_sha1_finup(struct shash_desc *desc, const u8 *data,
+			     unsigned int len, u8 *hash);
+
+extern int crypto_sha256_update(struct shash_desc *desc, const u8 *data,
+			      unsigned int len);
+
+extern int crypto_sha256_finup(struct shash_desc *desc, const u8 *data,
+			       unsigned int len, u8 *hash);
+
+extern int crypto_sha512_update(struct shash_desc *desc, const u8 *data,
+			      unsigned int len);
+
+extern int crypto_sha512_finup(struct shash_desc *desc, const u8 *data,
+			       unsigned int len, u8 *hash);
+#endif
diff --git a/include/crypto/sha1_base.h b/include/crypto/sha1_base.h
new file mode 100644
index 0000000..d0df431
--- /dev/null
+++ b/include/crypto/sha1_base.h
@@ -0,0 +1,106 @@
+/*
+ * sha1_base.h - core logic for SHA-1 implementations
+ *
+ * Copyright (C) 2015 Linaro Ltd <ard.biesheuvel@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <crypto/internal/hash.h>
+#include <crypto/sha.h>
+#include <linux/crypto.h>
+#include <linux/module.h>
+
+#include <asm/unaligned.h>
+
+typedef void (sha1_block_fn)(struct sha1_state *sst, u8 const *src, int blocks);
+
+static inline int sha1_base_init(struct shash_desc *desc)
+{
+	struct sha1_state *sctx = shash_desc_ctx(desc);
+
+	sctx->state[0] = SHA1_H0;
+	sctx->state[1] = SHA1_H1;
+	sctx->state[2] = SHA1_H2;
+	sctx->state[3] = SHA1_H3;
+	sctx->state[4] = SHA1_H4;
+	sctx->count = 0;
+
+	return 0;
+}
+
+static inline int sha1_base_do_update(struct shash_desc *desc,
+				      const u8 *data,
+				      unsigned int len,
+				      sha1_block_fn *block_fn)
+{
+	struct sha1_state *sctx = shash_desc_ctx(desc);
+	unsigned int partial = sctx->count % SHA1_BLOCK_SIZE;
+
+	sctx->count += len;
+
+	if (unlikely((partial + len) >= SHA1_BLOCK_SIZE)) {
+		int blocks;
+
+		if (partial) {
+			int p = SHA1_BLOCK_SIZE - partial;
+
+			memcpy(sctx->buffer + partial, data, p);
+			data += p;
+			len -= p;
+
+			block_fn(sctx, sctx->buffer, 1);
+		}
+
+		blocks = len / SHA1_BLOCK_SIZE;
+		len %= SHA1_BLOCK_SIZE;
+
+		if (blocks) {
+			block_fn(sctx, data, blocks);
+			data += blocks * SHA1_BLOCK_SIZE;
+		}
+		partial = 0;
+	}
+	if (len)
+		memcpy(sctx->buffer + partial, data, len);
+
+	return 0;
+}
+
+static inline int sha1_base_do_finalize(struct shash_desc *desc,
+					sha1_block_fn *block_fn)
+{
+	const int bit_offset = SHA1_BLOCK_SIZE - sizeof(__be64);
+	struct sha1_state *sctx = shash_desc_ctx(desc);
+	__be64 *bits = (__be64 *)(sctx->buffer + bit_offset);
+	unsigned int partial = sctx->count % SHA1_BLOCK_SIZE;
+
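+	/*
+	 * Append the 0x80 end-of-message marker. If the 64-bit bit count
+	 * no longer fits into this block, pad it out and process it, then
+	 * emit the count in a final block of pure padding.
+	 */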
+	sctx->buffer[partial++] = 0x80;
+	if (partial > bit_offset) {
+		memset(sctx->buffer + partial, 0x0, SHA1_BLOCK_SIZE - partial);
+		partial = 0;
+
+		block_fn(sctx, sctx->buffer, 1);
+	}
+
+	memset(sctx->buffer + partial, 0x0, bit_offset - partial);
+	*bits = cpu_to_be64(sctx->count << 3);
+	block_fn(sctx, sctx->buffer, 1);
+
+	return 0;
+}
+
+static inline int sha1_base_finish(struct shash_desc *desc, u8 *out)
+{
+	struct sha1_state *sctx = shash_desc_ctx(desc);
+	__be32 *digest = (__be32 *)out;
+	int i;
+
+	for (i = 0; i < SHA1_DIGEST_SIZE / sizeof(__be32); i++)
+		put_unaligned_be32(sctx->state[i], digest++);
+
+	*sctx = (struct sha1_state){};
+	return 0;
+}
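
The helpers above deliberately leave only the block transform to the architecture code. A sketch of the usual glue built on top of them, where my_sha1_block stands in for the arch-specific routine:

/* illustrative sketch of arch glue on top of the sha1_base helpers */
static void my_sha1_block(struct sha1_state *sst, u8 const *src, int blocks)
{
	/* arch-specific: consume 'blocks' 64-byte blocks into sst->state */
}

static int my_sha1_update(struct shash_desc *desc, const u8 *data,
			  unsigned int len)
{
	return sha1_base_do_update(desc, data, len, my_sha1_block);
}

static int my_sha1_final(struct shash_desc *desc, u8 *out)
{
	sha1_base_do_finalize(desc, my_sha1_block);
	return sha1_base_finish(desc, out);
}

static int my_sha1_finup(struct shash_desc *desc, const u8 *data,
			 unsigned int len, u8 *out)
{
	sha1_base_do_update(desc, data, len, my_sha1_block);
	return my_sha1_final(desc, out);
}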
diff --git a/include/crypto/sha256_base.h b/include/crypto/sha256_base.h
new file mode 100644
index 0000000..d1f2195
--- /dev/null
+++ b/include/crypto/sha256_base.h
@@ -0,0 +1,128 @@
+/*
+ * sha256_base.h - core logic for SHA-256 implementations
+ *
+ * Copyright (C) 2015 Linaro Ltd <ard.biesheuvel@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <crypto/internal/hash.h>
+#include <crypto/sha.h>
+#include <linux/crypto.h>
+#include <linux/module.h>
+
+#include <asm/unaligned.h>
+
+typedef void (sha256_block_fn)(struct sha256_state *sst, u8 const *src,
+			       int blocks);
+
+static inline int sha224_base_init(struct shash_desc *desc)
+{
+	struct sha256_state *sctx = shash_desc_ctx(desc);
+
+	sctx->state[0] = SHA224_H0;
+	sctx->state[1] = SHA224_H1;
+	sctx->state[2] = SHA224_H2;
+	sctx->state[3] = SHA224_H3;
+	sctx->state[4] = SHA224_H4;
+	sctx->state[5] = SHA224_H5;
+	sctx->state[6] = SHA224_H6;
+	sctx->state[7] = SHA224_H7;
+	sctx->count = 0;
+
+	return 0;
+}
+
+static inline int sha256_base_init(struct shash_desc *desc)
+{
+	struct sha256_state *sctx = shash_desc_ctx(desc);
+
+	sctx->state[0] = SHA256_H0;
+	sctx->state[1] = SHA256_H1;
+	sctx->state[2] = SHA256_H2;
+	sctx->state[3] = SHA256_H3;
+	sctx->state[4] = SHA256_H4;
+	sctx->state[5] = SHA256_H5;
+	sctx->state[6] = SHA256_H6;
+	sctx->state[7] = SHA256_H7;
+	sctx->count = 0;
+
+	return 0;
+}
+
+static inline int sha256_base_do_update(struct shash_desc *desc,
+					const u8 *data,
+					unsigned int len,
+					sha256_block_fn *block_fn)
+{
+	struct sha256_state *sctx = shash_desc_ctx(desc);
+	unsigned int partial = sctx->count % SHA256_BLOCK_SIZE;
+
+	sctx->count += len;
+
+	if (unlikely((partial + len) >= SHA256_BLOCK_SIZE)) {
+		int blocks;
+
+		if (partial) {
+			int p = SHA256_BLOCK_SIZE - partial;
+
+			memcpy(sctx->buf + partial, data, p);
+			data += p;
+			len -= p;
+
+			block_fn(sctx, sctx->buf, 1);
+		}
+
+		blocks = len / SHA256_BLOCK_SIZE;
+		len %= SHA256_BLOCK_SIZE;
+
+		if (blocks) {
+			block_fn(sctx, data, blocks);
+			data += blocks * SHA256_BLOCK_SIZE;
+		}
+		partial = 0;
+	}
+	if (len)
+		memcpy(sctx->buf + partial, data, len);
+
+	return 0;
+}
+
+static inline int sha256_base_do_finalize(struct shash_desc *desc,
+					  sha256_block_fn *block_fn)
+{
+	const int bit_offset = SHA256_BLOCK_SIZE - sizeof(__be64);
+	struct sha256_state *sctx = shash_desc_ctx(desc);
+	__be64 *bits = (__be64 *)(sctx->buf + bit_offset);
+	unsigned int partial = sctx->count % SHA256_BLOCK_SIZE;
+
+	sctx->buf[partial++] = 0x80;
+	if (partial > bit_offset) {
+		memset(sctx->buf + partial, 0x0, SHA256_BLOCK_SIZE - partial);
+		partial = 0;
+
+		block_fn(sctx, sctx->buf, 1);
+	}
+
+	memset(sctx->buf + partial, 0x0, bit_offset - partial);
+	*bits = cpu_to_be64(sctx->count << 3);
+	block_fn(sctx, sctx->buf, 1);
+
+	return 0;
+}
+
+static inline int sha256_base_finish(struct shash_desc *desc, u8 *out)
+{
+	unsigned int digest_size = crypto_shash_digestsize(desc->tfm);
+	struct sha256_state *sctx = shash_desc_ctx(desc);
+	__be32 *digest = (__be32 *)out;
+	int i;
+
+	for (i = 0; digest_size > 0; i++, digest_size -= sizeof(__be32))
+		put_unaligned_be32(sctx->state[i], digest++);
+
+	*sctx = (struct sha256_state){};
+	return 0;
+}
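
Because sha256_base_finish() derives the number of output words from the digest size of the tfm, one set of update/final wrappers can back both SHA-224 and SHA-256. A registration sketch under that assumption, with illustrative block function and driver names:

/* illustrative sketch: one driver exposing both SHA-2 variants */
static void my_sha256_block(struct sha256_state *sst, u8 const *src,
			    int blocks)
{
	/* arch-specific: consume 'blocks' 64-byte blocks into sst->state */
}

static int my_sha256_update(struct shash_desc *desc, const u8 *data,
			    unsigned int len)
{
	return sha256_base_do_update(desc, data, len, my_sha256_block);
}

static int my_sha256_final(struct shash_desc *desc, u8 *out)
{
	sha256_base_do_finalize(desc, my_sha256_block);
	return sha256_base_finish(desc, out);
}

static struct shash_alg my_sha2_algs[] = { {
	.digestsize	= SHA256_DIGEST_SIZE,
	.init		= sha256_base_init,
	.update		= my_sha256_update,
	.final		= my_sha256_final,
	.descsize	= sizeof(struct sha256_state),
	.base		= {
		.cra_name	 = "sha256",
		.cra_driver_name = "sha256-example",
		.cra_priority	 = 100,
		.cra_blocksize	 = SHA256_BLOCK_SIZE,
		.cra_module	 = THIS_MODULE,
	},
}, {
	.digestsize	= SHA224_DIGEST_SIZE,
	.init		= sha224_base_init,	/* only the IV differs */
	.update		= my_sha256_update,
	.final		= my_sha256_final,
	.descsize	= sizeof(struct sha256_state),
	.base		= {
		.cra_name	 = "sha224",
		.cra_driver_name = "sha224-example",
		.cra_priority	 = 100,
		.cra_blocksize	 = SHA224_BLOCK_SIZE,
		.cra_module	 = THIS_MODULE,
	},
} };

Both entries would be registered in one call with crypto_register_shashes(my_sha2_algs, ARRAY_SIZE(my_sha2_algs)) from <crypto/internal/hash.h>.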
diff --git a/include/crypto/sha3.h b/include/crypto/sha3.h
new file mode 100644
index 0000000..080f60c
--- /dev/null
+++ b/include/crypto/sha3.h
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Common values for SHA-3 algorithms
+ */
+#ifndef __CRYPTO_SHA3_H__
+#define __CRYPTO_SHA3_H__
+
+#define SHA3_224_DIGEST_SIZE	(224 / 8)
+#define SHA3_224_BLOCK_SIZE	(200 - 2 * SHA3_224_DIGEST_SIZE)
+
+#define SHA3_256_DIGEST_SIZE	(256 / 8)
+#define SHA3_256_BLOCK_SIZE	(200 - 2 * SHA3_256_DIGEST_SIZE)
+
+#define SHA3_384_DIGEST_SIZE	(384 / 8)
+#define SHA3_384_BLOCK_SIZE	(200 - 2 * SHA3_384_DIGEST_SIZE)
+
+#define SHA3_512_DIGEST_SIZE	(512 / 8)
+#define SHA3_512_BLOCK_SIZE	(200 - 2 * SHA3_512_DIGEST_SIZE)
+
+struct sha3_state {
+	u64		st[25];
+	unsigned int	rsiz;
+	unsigned int	rsizw;
+
+	unsigned int	partial;
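+	/* SHA3-224 has the largest rate, so buf fits any variant's block */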
+	u8		buf[SHA3_224_BLOCK_SIZE];
+};
+
+int crypto_sha3_init(struct shash_desc *desc);
+int crypto_sha3_update(struct shash_desc *desc, const u8 *data,
+		       unsigned int len);
+int crypto_sha3_final(struct shash_desc *desc, u8 *out);
+
+#endif
diff --git a/include/crypto/sha512_base.h b/include/crypto/sha512_base.h
new file mode 100644
index 0000000..6c5341e
--- /dev/null
+++ b/include/crypto/sha512_base.h
@@ -0,0 +1,131 @@
+/*
+ * sha512_base.h - core logic for SHA-512 implementations
+ *
+ * Copyright (C) 2015 Linaro Ltd <ard.biesheuvel@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <crypto/internal/hash.h>
+#include <crypto/sha.h>
+#include <linux/crypto.h>
+#include <linux/module.h>
+
+#include <asm/unaligned.h>
+
+typedef void (sha512_block_fn)(struct sha512_state *sst, u8 const *src,
+			       int blocks);
+
+static inline int sha384_base_init(struct shash_desc *desc)
+{
+	struct sha512_state *sctx = shash_desc_ctx(desc);
+
+	sctx->state[0] = SHA384_H0;
+	sctx->state[1] = SHA384_H1;
+	sctx->state[2] = SHA384_H2;
+	sctx->state[3] = SHA384_H3;
+	sctx->state[4] = SHA384_H4;
+	sctx->state[5] = SHA384_H5;
+	sctx->state[6] = SHA384_H6;
+	sctx->state[7] = SHA384_H7;
+	sctx->count[0] = sctx->count[1] = 0;
+
+	return 0;
+}
+
+static inline int sha512_base_init(struct shash_desc *desc)
+{
+	struct sha512_state *sctx = shash_desc_ctx(desc);
+
+	sctx->state[0] = SHA512_H0;
+	sctx->state[1] = SHA512_H1;
+	sctx->state[2] = SHA512_H2;
+	sctx->state[3] = SHA512_H3;
+	sctx->state[4] = SHA512_H4;
+	sctx->state[5] = SHA512_H5;
+	sctx->state[6] = SHA512_H6;
+	sctx->state[7] = SHA512_H7;
+	sctx->count[0] = sctx->count[1] = 0;
+
+	return 0;
+}
+
+static inline int sha512_base_do_update(struct shash_desc *desc,
+					const u8 *data,
+					unsigned int len,
+					sha512_block_fn *block_fn)
+{
+	struct sha512_state *sctx = shash_desc_ctx(desc);
+	unsigned int partial = sctx->count[0] % SHA512_BLOCK_SIZE;
+
+	sctx->count[0] += len;
+	if (sctx->count[0] < len)
+		sctx->count[1]++;
+
+	if (unlikely((partial + len) >= SHA512_BLOCK_SIZE)) {
+		int blocks;
+
+		if (partial) {
+			int p = SHA512_BLOCK_SIZE - partial;
+
+			memcpy(sctx->buf + partial, data, p);
+			data += p;
+			len -= p;
+
+			block_fn(sctx, sctx->buf, 1);
+		}
+
+		blocks = len / SHA512_BLOCK_SIZE;
+		len %= SHA512_BLOCK_SIZE;
+
+		if (blocks) {
+			block_fn(sctx, data, blocks);
+			data += blocks * SHA512_BLOCK_SIZE;
+		}
+		partial = 0;
+	}
+	if (len)
+		memcpy(sctx->buf + partial, data, len);
+
+	return 0;
+}
+
+static inline int sha512_base_do_finalize(struct shash_desc *desc,
+					  sha512_block_fn *block_fn)
+{
+	const int bit_offset = SHA512_BLOCK_SIZE - sizeof(__be64[2]);
+	struct sha512_state *sctx = shash_desc_ctx(desc);
+	__be64 *bits = (__be64 *)(sctx->buf + bit_offset);
+	unsigned int partial = sctx->count[0] % SHA512_BLOCK_SIZE;
+
+	sctx->buf[partial++] = 0x80;
+	if (partial > bit_offset) {
+		memset(sctx->buf + partial, 0x0, SHA512_BLOCK_SIZE - partial);
+		partial = 0;
+
+		block_fn(sctx, sctx->buf, 1);
+	}
+
+	memset(sctx->buf + partial, 0x0, bit_offset - partial);
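+	/* 128-bit big-endian bit count: ((count[1] << 64) | count[0]) * 8 */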
+	bits[0] = cpu_to_be64(sctx->count[1] << 3 | sctx->count[0] >> 61);
+	bits[1] = cpu_to_be64(sctx->count[0] << 3);
+	block_fn(sctx, sctx->buf, 1);
+
+	return 0;
+}
+
+static inline int sha512_base_finish(struct shash_desc *desc, u8 *out)
+{
+	unsigned int digest_size = crypto_shash_digestsize(desc->tfm);
+	struct sha512_state *sctx = shash_desc_ctx(desc);
+	__be64 *digest = (__be64 *)out;
+	int i;
+
+	for (i = 0; digest_size > 0; i++, digest_size -= sizeof(__be64))
+		put_unaligned_be64(sctx->state[i], digest++);
+
+	*sctx = (struct sha512_state){};
+	return 0;
+}
diff --git a/include/crypto/skcipher.h b/include/crypto/skcipher.h
new file mode 100644
index 0000000..2f327f0
--- /dev/null
+++ b/include/crypto/skcipher.h
@@ -0,0 +1,613 @@
+/*
+ * Symmetric key ciphers.
+ *
+ * Copyright (c) 2007-2015 Herbert Xu <herbert@gondor.apana.org.au>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ */
+
+#ifndef _CRYPTO_SKCIPHER_H
+#define _CRYPTO_SKCIPHER_H
+
+#include <linux/crypto.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+
+/**
+ *	struct skcipher_request - Symmetric key cipher request
+ *	@cryptlen: Number of bytes to encrypt or decrypt
+ *	@iv: Initialisation Vector
+ *	@src: Source SG list
+ *	@dst: Destination SG list
+ *	@base: Underlying async request
+ *	@__ctx: Start of private context data
+ */
+struct skcipher_request {
+	unsigned int cryptlen;
+
+	u8 *iv;
+
+	struct scatterlist *src;
+	struct scatterlist *dst;
+
+	struct crypto_async_request base;
+
+	void *__ctx[] CRYPTO_MINALIGN_ATTR;
+};
+
+/**
+ *	struct skcipher_givcrypt_request - Crypto request with IV generation
+ *	@seq: Sequence number for IV generation
+ *	@giv: Space for generated IV
+ *	@creq: The crypto request itself
+ */
+struct skcipher_givcrypt_request {
+	u64 seq;
+	u8 *giv;
+
+	struct ablkcipher_request creq;
+};
+
+struct crypto_skcipher {
+	int (*setkey)(struct crypto_skcipher *tfm, const u8 *key,
+	              unsigned int keylen);
+	int (*encrypt)(struct skcipher_request *req);
+	int (*decrypt)(struct skcipher_request *req);
+
+	unsigned int ivsize;
+	unsigned int reqsize;
+	unsigned int keysize;
+
+	struct crypto_tfm base;
+};
+
+/**
+ * struct skcipher_alg - symmetric key cipher definition
+ * @min_keysize: Minimum key size supported by the transformation. This is the
+ *		 smallest key length supported by this transformation algorithm.
+ *		 This must be set to one of the pre-defined values as this is
+ *		 not hardware specific. Possible values for this field can be
+ *		 found via git grep "_MIN_KEY_SIZE" include/crypto/
+ * @max_keysize: Maximum key size supported by the transformation. This is the
+ *		 largest key length supported by this transformation algorithm.
+ *		 This must be set to one of the pre-defined values as this is
+ *		 not hardware specific. Possible values for this field can be
+ *		 found via git grep "_MAX_KEY_SIZE" include/crypto/
+ * @setkey: Set key for the transformation. This function is used to either
+ *	    program a supplied key into the hardware or store the key in the
+ *	    transformation context for programming it later. Note that this
+ *	    function does modify the transformation context. This function can
+ *	    be called multiple times during the existence of the transformation
+ *	    object, so one must make sure the key is properly reprogrammed into
+ *	    the hardware. This function is also responsible for checking the key
+ *	    length for validity. In case a software fallback was put in place in
+ *	    the @cra_init call, this function might need to use the fallback if
+ *	    the algorithm doesn't support all of the key sizes.
+ * @encrypt: Encrypt a scatterlist of blocks. This function is used to encrypt
+ *	     the supplied scatterlist containing the blocks of data. The crypto
+ *	     API consumer is responsible for aligning the entries of the
+ *	     scatterlist properly and making sure the chunks are correctly
+ *	     sized. In case a software fallback was put in place in the
+ *	     @cra_init call, this function might need to use the fallback if
+ *	     the algorithm doesn't support all of the key sizes. In case the
+ *	     key was stored in transformation context, the key might need to be
+ *	     re-programmed into the hardware in this function. This function
+ *	     shall not modify the transformation context, as this function may
+ *	     be called in parallel with the same transformation object.
+ * @decrypt: Decrypt a scatterlist of blocks. This is the reverse counterpart
+ *	     to @encrypt and the conditions are exactly the same.
+ * @init: Initialize the cryptographic transformation object. This function
+ *	  is used to initialize the cryptographic transformation object.
+ *	  This function is called only once at the instantiation time, right
+ *	  after the transformation context was allocated. In case the
+ *	  cryptographic hardware has some special requirements which need to
+ *	  be handled by software, this function shall check for the precise
+ *	  requirement of the transformation and put any software fallbacks
+ *	  in place.
+ * @exit: Deinitialize the cryptographic transformation object. This is a
+ *	  counterpart to @init, used to remove various changes set in
+ *	  @init.
+ * @ivsize: IV size applicable for transformation. The consumer must provide an
+ *	    IV of exactly that size to perform the encrypt or decrypt operation.
+ * @chunksize: Equal to the block size, except for stream ciphers such as
+ *	       CTR, whose block size is one; there it is set to the block
+ *	       size of the underlying cipher.
+ * @walksize: Equal to the chunk size except in cases where the algorithm is
+ * 	      considerably more efficient if it can operate on multiple chunks
+ * 	      in parallel. Should be a multiple of chunksize.
+ * @base: Definition of a generic crypto algorithm.
+ *
+ * All fields except @ivsize are mandatory and must be filled.
+ */
+struct skcipher_alg {
+	int (*setkey)(struct crypto_skcipher *tfm, const u8 *key,
+	              unsigned int keylen);
+	int (*encrypt)(struct skcipher_request *req);
+	int (*decrypt)(struct skcipher_request *req);
+	int (*init)(struct crypto_skcipher *tfm);
+	void (*exit)(struct crypto_skcipher *tfm);
+
+	unsigned int min_keysize;
+	unsigned int max_keysize;
+	unsigned int ivsize;
+	unsigned int chunksize;
+	unsigned int walksize;
+
+	struct crypto_alg base;
+};
+
+#define SKCIPHER_REQUEST_ON_STACK(name, tfm) \
+	char __##name##_desc[sizeof(struct skcipher_request) + \
+		crypto_skcipher_reqsize(tfm)] CRYPTO_MINALIGN_ATTR; \
+	struct skcipher_request *name = (void *)__##name##_desc
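
A brief sketch of how the on-stack request is typically used for purely synchronous ciphers, assuming tfm was allocated with crypto_alloc_skcipher(alg, 0, CRYPTO_ALG_ASYNC) so the implementation never defers, and that sg, len and iv were prepared by the caller (all names illustrative; the request helpers used here are declared later in this header):

{
	SKCIPHER_REQUEST_ON_STACK(req, tfm);
	int err;

	skcipher_request_set_tfm(req, tfm);
	/* no callback: the synchronous implementation completes inline */
	skcipher_request_set_callback(req, 0, NULL, NULL);
	skcipher_request_set_crypt(req, &sg, &sg, len, iv);

	err = crypto_skcipher_encrypt(req);
	skcipher_request_zero(req);	/* wipe request state from the stack */
}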
+
+/**
+ * DOC: Symmetric Key Cipher API
+ *
+ * Symmetric key cipher API is used with the ciphers of type
+ * CRYPTO_ALG_TYPE_SKCIPHER (listed as type "skcipher" in /proc/crypto).
+ *
+ * Asynchronous cipher operations imply that the function invocation for a
+ * cipher request returns immediately before the completion of the operation.
+ * The cipher request is processed asynchronously, for instance by offload
+ * hardware or a separate kernel thread, and may therefore be load-balanced
+ * across the CPUs. To allow the kernel crypto API to inform the caller about
+ * the completion of a cipher request, the caller must provide a callback
+ * function. That function is invoked with the cipher handle when the request
+ * completes.
+ *
+ * To support the asynchronous operation, more information than just the
+ * cipher handle must be supplied to the kernel crypto API. That additional
+ * information is given by filling in the skcipher_request data structure.
+ *
+ * For the symmetric key cipher API, the state is maintained with the tfm
+ * cipher handle. A single tfm can be used across multiple calls and in
+ * parallel. For asynchronous block cipher calls, context data supplied and
+ * only used by the caller can be referenced in the request data structure in
+ * addition to the IV used for the cipher request. The maintenance of such
+ * state information would be important for a crypto driver implementer to
+ * have, because when calling the callback function upon completion of the
+ * cipher operation, that callback function may need some information about
+ * which operation just finished if it invoked multiple requests in parallel.
+ * This state information is unused by the kernel crypto API.
+ */
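
A minimal end-to-end sketch of the flow described above (allocate a tfm, set a key, build a request, wait for completion), assuming a sleepable context and an in-place buffer whose length is a multiple of the AES block size; the function name is illustrative, and the crypto_wait helpers come from <linux/crypto.h>:

#include <crypto/skcipher.h>
#include <linux/scatterlist.h>

/* illustrative sketch: one in-place CBC-AES encryption */
static int cbc_aes_encrypt_example(const u8 *key, unsigned int keylen,
				   u8 *buf, unsigned int len, u8 *iv)
{
	struct crypto_skcipher *tfm;
	struct skcipher_request *req;
	struct scatterlist sg;
	DECLARE_CRYPTO_WAIT(wait);
	int err;

	tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_skcipher_setkey(tfm, key, keylen);
	if (err)
		goto out_free_tfm;

	req = skcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		err = -ENOMEM;
		goto out_free_tfm;
	}

	sg_init_one(&sg, buf, len);
	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
					   CRYPTO_TFM_REQ_MAY_SLEEP,
				      crypto_req_done, &wait);
	skcipher_request_set_crypt(req, &sg, &sg, len, iv);

	/* waits for completion even if the request is queued (-EINPROGRESS) */
	err = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);

	skcipher_request_free(req);
out_free_tfm:
	crypto_free_skcipher(tfm);
	return err;
}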
+
+static inline struct crypto_skcipher *__crypto_skcipher_cast(
+	struct crypto_tfm *tfm)
+{
+	return container_of(tfm, struct crypto_skcipher, base);
+}
+
+/**
+ * crypto_alloc_skcipher() - allocate symmetric key cipher handle
+ * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
+ *	      skcipher cipher
+ * @type: specifies the type of the cipher
+ * @mask: specifies the mask for the cipher
+ *
+ * Allocate a cipher handle for an skcipher. The returned struct
+ * crypto_skcipher is the cipher handle that is required for any subsequent
+ * API invocation for that skcipher.
+ *
+ * Return: allocated cipher handle in case of success; IS_ERR() is true in case
+ *	   of an error, PTR_ERR() returns the error code.
+ */
+struct crypto_skcipher *crypto_alloc_skcipher(const char *alg_name,
+					      u32 type, u32 mask);
+
+static inline struct crypto_tfm *crypto_skcipher_tfm(
+	struct crypto_skcipher *tfm)
+{
+	return &tfm->base;
+}
+
+/**
+ * crypto_free_skcipher() - zeroize and free cipher handle
+ * @tfm: cipher handle to be freed
+ */
+static inline void crypto_free_skcipher(struct crypto_skcipher *tfm)
+{
+	crypto_destroy_tfm(tfm, crypto_skcipher_tfm(tfm));
+}
+
+/**
+ * crypto_has_skcipher() - Search for the availability of an skcipher.
+ * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
+ *	      skcipher
+ * @type: specifies the type of the cipher
+ * @mask: specifies the mask for the cipher
+ *
+ * Return: true when the skcipher is known to the kernel crypto API; false
+ *	   otherwise
+ */
+static inline int crypto_has_skcipher(const char *alg_name, u32 type,
+					u32 mask)
+{
+	return crypto_has_alg(alg_name, crypto_skcipher_type(type),
+			      crypto_skcipher_mask(mask));
+}
+
+/**
+ * crypto_has_skcipher2() - Search for the availability of an skcipher.
+ * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
+ *	      skcipher
+ * @type: specifies the type of the skcipher
+ * @mask: specifies the mask for the skcipher
+ *
+ * Return: true when the skcipher is known to the kernel crypto API; false
+ *	   otherwise
+ */
+int crypto_has_skcipher2(const char *alg_name, u32 type, u32 mask);
+
+static inline const char *crypto_skcipher_driver_name(
+	struct crypto_skcipher *tfm)
+{
+	return crypto_tfm_alg_driver_name(crypto_skcipher_tfm(tfm));
+}
+
+static inline struct skcipher_alg *crypto_skcipher_alg(
+	struct crypto_skcipher *tfm)
+{
+	return container_of(crypto_skcipher_tfm(tfm)->__crt_alg,
+			    struct skcipher_alg, base);
+}
+
+static inline unsigned int crypto_skcipher_alg_ivsize(struct skcipher_alg *alg)
+{
+	if ((alg->base.cra_flags & CRYPTO_ALG_TYPE_MASK) ==
+	    CRYPTO_ALG_TYPE_BLKCIPHER)
+		return alg->base.cra_blkcipher.ivsize;
+
+	if (alg->base.cra_ablkcipher.encrypt)
+		return alg->base.cra_ablkcipher.ivsize;
+
+	return alg->ivsize;
+}
+
+/**
+ * crypto_skcipher_ivsize() - obtain IV size
+ * @tfm: cipher handle
+ *
+ * The size of the IV for the skcipher referenced by the cipher handle is
+ * returned. This IV size may be zero if the cipher does not need an IV.
+ *
+ * Return: IV size in bytes
+ */
+static inline unsigned int crypto_skcipher_ivsize(struct crypto_skcipher *tfm)
+{
+	return tfm->ivsize;
+}
+
+static inline unsigned int crypto_skcipher_alg_chunksize(
+	struct skcipher_alg *alg)
+{
+	if ((alg->base.cra_flags & CRYPTO_ALG_TYPE_MASK) ==
+	    CRYPTO_ALG_TYPE_BLKCIPHER)
+		return alg->base.cra_blocksize;
+
+	if (alg->base.cra_ablkcipher.encrypt)
+		return alg->base.cra_blocksize;
+
+	return alg->chunksize;
+}
+
+static inline unsigned int crypto_skcipher_alg_walksize(
+	struct skcipher_alg *alg)
+{
+	if ((alg->base.cra_flags & CRYPTO_ALG_TYPE_MASK) ==
+	    CRYPTO_ALG_TYPE_BLKCIPHER)
+		return alg->base.cra_blocksize;
+
+	if (alg->base.cra_ablkcipher.encrypt)
+		return alg->base.cra_blocksize;
+
+	return alg->walksize;
+}
+
+/**
+ * crypto_skcipher_chunksize() - obtain chunk size
+ * @tfm: cipher handle
+ *
+ * The block size is set to one for ciphers such as CTR.  However,
+ * you still need to provide incremental updates in multiples of
+ * the underlying block size as the IV does not have sub-block
+ * granularity.  This is known in this API as the chunk size.
+ *
+ * Return: chunk size in bytes
+ */
+static inline unsigned int crypto_skcipher_chunksize(
+	struct crypto_skcipher *tfm)
+{
+	return crypto_skcipher_alg_chunksize(crypto_skcipher_alg(tfm));
+}
+
+/**
+ * crypto_skcipher_walksize() - obtain walk size
+ * @tfm: cipher handle
+ *
+ * In some cases, algorithms can only perform optimally when operating on
+ * multiple blocks in parallel. This is reflected by the walksize, which
+ * must be a multiple of the chunksize (or equal if the concern does not
+ * apply).
+ *
+ * Return: walk size in bytes
+ */
+static inline unsigned int crypto_skcipher_walksize(
+	struct crypto_skcipher *tfm)
+{
+	return crypto_skcipher_alg_walksize(crypto_skcipher_alg(tfm));
+}
+
+/**
+ * crypto_skcipher_blocksize() - obtain block size of cipher
+ * @tfm: cipher handle
+ *
+ * The block size for the skcipher referenced with the cipher handle is
+ * returned. The caller may use that information to allocate appropriate
+ * memory for the data returned by the encryption or decryption operation
+ *
+ * Return: block size of cipher
+ */
+static inline unsigned int crypto_skcipher_blocksize(
+	struct crypto_skcipher *tfm)
+{
+	return crypto_tfm_alg_blocksize(crypto_skcipher_tfm(tfm));
+}
+
+static inline unsigned int crypto_skcipher_alignmask(
+	struct crypto_skcipher *tfm)
+{
+	return crypto_tfm_alg_alignmask(crypto_skcipher_tfm(tfm));
+}
+
+static inline u32 crypto_skcipher_get_flags(struct crypto_skcipher *tfm)
+{
+	return crypto_tfm_get_flags(crypto_skcipher_tfm(tfm));
+}
+
+static inline void crypto_skcipher_set_flags(struct crypto_skcipher *tfm,
+					       u32 flags)
+{
+	crypto_tfm_set_flags(crypto_skcipher_tfm(tfm), flags);
+}
+
+static inline void crypto_skcipher_clear_flags(struct crypto_skcipher *tfm,
+						 u32 flags)
+{
+	crypto_tfm_clear_flags(crypto_skcipher_tfm(tfm), flags);
+}
+
+/**
+ * crypto_skcipher_setkey() - set key for cipher
+ * @tfm: cipher handle
+ * @key: buffer holding the key
+ * @keylen: length of the key in bytes
+ *
+ * The caller provided key is set for the skcipher referenced by the cipher
+ * handle.
+ *
+ * Note, the key length determines the cipher variant. Many block ciphers
+ * select a variant based on the key size, such as AES-128 vs. AES-192 vs.
+ * AES-256. When providing a 16 byte key for an AES cipher handle, AES-128
+ * is performed.
+ *
+ * Return: 0 if the setting of the key was successful; < 0 if an error occurred
+ */
+static inline int crypto_skcipher_setkey(struct crypto_skcipher *tfm,
+					 const u8 *key, unsigned int keylen)
+{
+	return tfm->setkey(tfm, key, keylen);
+}
+
+static inline unsigned int crypto_skcipher_default_keysize(
+	struct crypto_skcipher *tfm)
+{
+	return tfm->keysize;
+}
+
+/**
+ * crypto_skcipher_reqtfm() - obtain cipher handle from request
+ * @req: skcipher_request out of which the cipher handle is to be obtained
+ *
+ * Return the crypto_skcipher handle registered with the skcipher_request
+ * data structure.
+ *
+ * Return: crypto_skcipher handle
+ */
+static inline struct crypto_skcipher *crypto_skcipher_reqtfm(
+	struct skcipher_request *req)
+{
+	return __crypto_skcipher_cast(req->base.tfm);
+}
+
+/**
+ * crypto_skcipher_encrypt() - encrypt plaintext
+ * @req: reference to the skcipher_request handle that holds all information
+ *	 needed to perform the cipher operation
+ *
+ * Encrypt plaintext data using the skcipher_request handle. That data
+ * structure and how it is filled with data is discussed with the
+ * skcipher_request_* functions.
+ *
+ * Return: 0 if the cipher operation was successful; < 0 if an error occurred
+ */
+static inline int crypto_skcipher_encrypt(struct skcipher_request *req)
+{
+	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+
+	if (crypto_skcipher_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
+		return -ENOKEY;
+
+	return tfm->encrypt(req);
+}
+
+/**
+ * crypto_skcipher_decrypt() - decrypt ciphertext
+ * @req: reference to the skcipher_request handle that holds all information
+ *	 needed to perform the cipher operation
+ *
+ * Decrypt ciphertext data using the skcipher_request handle. That data
+ * structure and how it is filled with data is discussed with the
+ * skcipher_request_* functions.
+ *
+ * Return: 0 if the cipher operation was successful; < 0 if an error occurred
+ */
+static inline int crypto_skcipher_decrypt(struct skcipher_request *req)
+{
+	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+
+	if (crypto_skcipher_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
+		return -ENOKEY;
+
+	return tfm->decrypt(req);
+}
+
+/**
+ * DOC: Symmetric Key Cipher Request Handle
+ *
+ * The skcipher_request data structure contains all pointers to data
+ * required for the symmetric key cipher operation. This includes the cipher
+ * handle (which can be used by multiple skcipher_request instances), pointer
+ * to plaintext and ciphertext, asynchronous callback function, etc. It acts
+ * as a handle to the skcipher_request_* API calls in a similar way as
+ * skcipher handle to the crypto_skcipher_* API calls.
+ */
+
+/**
+ * crypto_skcipher_reqsize() - obtain size of the request data structure
+ * @tfm: cipher handle
+ *
+ * Return: number of bytes
+ */
+static inline unsigned int crypto_skcipher_reqsize(struct crypto_skcipher *tfm)
+{
+	return tfm->reqsize;
+}
+
+/**
+ * skcipher_request_set_tfm() - update cipher handle reference in request
+ * @req: request handle to be modified
+ * @tfm: cipher handle that shall be added to the request handle
+ *
+ * Allow the caller to replace the existing skcipher handle in the request
+ * data structure with a different one.
+ */
+static inline void skcipher_request_set_tfm(struct skcipher_request *req,
+					    struct crypto_skcipher *tfm)
+{
+	req->base.tfm = crypto_skcipher_tfm(tfm);
+}
+
+static inline struct skcipher_request *skcipher_request_cast(
+	struct crypto_async_request *req)
+{
+	return container_of(req, struct skcipher_request, base);
+}
+
+/**
+ * skcipher_request_alloc() - allocate request data structure
+ * @tfm: cipher handle to be registered with the request
+ * @gfp: memory allocation flag that is handed to kmalloc by the API call.
+ *
+ * Allocate the request data structure that must be used with the skcipher
+ * encrypt and decrypt API calls. During the allocation, the provided skcipher
+ * handle is registered in the request data structure.
+ *
+ * Return: allocated request handle in case of success, or NULL if out of memory
+ */
+static inline struct skcipher_request *skcipher_request_alloc(
+	struct crypto_skcipher *tfm, gfp_t gfp)
+{
+	struct skcipher_request *req;
+
+	req = kmalloc(sizeof(struct skcipher_request) +
+		      crypto_skcipher_reqsize(tfm), gfp);
+
+	if (likely(req))
+		skcipher_request_set_tfm(req, tfm);
+
+	return req;
+}
+
+/**
+ * skcipher_request_free() - zeroize and free request data structure
+ * @req: request data structure cipher handle to be freed
+ */
+static inline void skcipher_request_free(struct skcipher_request *req)
+{
+	kzfree(req);
+}
+
+static inline void skcipher_request_zero(struct skcipher_request *req)
+{
+	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+
+	memzero_explicit(req, sizeof(*req) + crypto_skcipher_reqsize(tfm));
+}
+
+/**
+ * skcipher_request_set_callback() - set asynchronous callback function
+ * @req: request handle
+ * @flags: specify zero or an ORing of the flags
+ *	   CRYPTO_TFM_REQ_MAY_BACKLOG the request queue may back log and
+ *	   increase the wait queue beyond the initial maximum size;
+ *	   CRYPTO_TFM_REQ_MAY_SLEEP the request processing may sleep
+ * @compl: callback function pointer to be registered with the request handle
+ * @data: The data pointer refers to memory that is not used by the kernel
+ *	  crypto API, but provided to the callback function for it to use. Here,
+ *	  the caller can provide a reference to memory the callback function can
+ *	  operate on. As the callback function is invoked asynchronously to the
+ *	  related functionality, it may need to access data structures of the
+ *	  related functionality which can be referenced using this pointer. The
+ *	  callback function can access the memory via the "data" field in the
+ *	  crypto_async_request data structure provided to the callback function.
+ *
+ * This function allows setting the callback function that is triggered once the
+ * cipher operation completes.
+ *
+ * The callback function is registered with the skcipher_request handle and
+ * must comply with the following template::
+ *
+ *	void callback_function(struct crypto_async_request *req, int error)
+ */
+static inline void skcipher_request_set_callback(struct skcipher_request *req,
+						 u32 flags,
+						 crypto_completion_t compl,
+						 void *data)
+{
+	req->base.complete = compl;
+	req->base.data = data;
+	req->base.flags = flags;
+}
+
+/**
+ * skcipher_request_set_crypt() - set data buffers
+ * @req: request handle
+ * @src: source scatter / gather list
+ * @dst: destination scatter / gather list
+ * @cryptlen: number of bytes to process from @src
+ * @iv: IV for the cipher operation which must comply with the IV size defined
+ *      by crypto_skcipher_ivsize
+ *
+ * This function allows setting of the source data and destination data
+ * scatter / gather lists.
+ *
+ * For encryption, the source is treated as the plaintext and the
+ * destination is the ciphertext. For a decryption operation, the use is
+ * reversed - the source is the ciphertext and the destination is the plaintext.
+ */
+static inline void skcipher_request_set_crypt(
+	struct skcipher_request *req,
+	struct scatterlist *src, struct scatterlist *dst,
+	unsigned int cryptlen, void *iv)
+{
+	req->src = src;
+	req->dst = dst;
+	req->cryptlen = cryptlen;
+	req->iv = iv;
+}
+
+#endif	/* _CRYPTO_SKCIPHER_H */
diff --git a/include/crypto/sm3.h b/include/crypto/sm3.h
new file mode 100644
index 0000000..1438942
--- /dev/null
+++ b/include/crypto/sm3.h
@@ -0,0 +1,40 @@
+/*
+ * Common values for SM3 algorithm
+ */
+
+#ifndef _CRYPTO_SM3_H
+#define _CRYPTO_SM3_H
+
+#include <linux/types.h>
+
+#define SM3_DIGEST_SIZE	32
+#define SM3_BLOCK_SIZE	64
+
+#define SM3_T1		0x79CC4519
+#define SM3_T2		0x7A879D8A
+
+#define SM3_IVA		0x7380166f
+#define SM3_IVB		0x4914b2b9
+#define SM3_IVC		0x172442d7
+#define SM3_IVD		0xda8a0600
+#define SM3_IVE		0xa96f30bc
+#define SM3_IVF		0x163138aa
+#define SM3_IVG		0xe38dee4d
+#define SM3_IVH		0xb0fb0e4e
+
+extern const u8 sm3_zero_message_hash[SM3_DIGEST_SIZE];
+
+struct sm3_state {
+	u32 state[SM3_DIGEST_SIZE / 4];
+	u64 count;
+	u8 buffer[SM3_BLOCK_SIZE];
+};
+
+struct shash_desc;
+
+extern int crypto_sm3_update(struct shash_desc *desc, const u8 *data,
+			      unsigned int len);
+
+extern int crypto_sm3_finup(struct shash_desc *desc, const u8 *data,
+			     unsigned int len, u8 *hash);
+#endif
diff --git a/include/crypto/sm3_base.h b/include/crypto/sm3_base.h
new file mode 100644
index 0000000..256948e
--- /dev/null
+++ b/include/crypto/sm3_base.h
@@ -0,0 +1,117 @@
+/*
+ * sm3_base.h - core logic for SM3 implementations
+ *
+ * Copyright (C) 2017 ARM Limited or its affiliates.
+ * Written by Gilad Ben-Yossef <gilad@benyossef.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <crypto/internal/hash.h>
+#include <crypto/sm3.h>
+#include <linux/crypto.h>
+#include <linux/module.h>
+#include <asm/unaligned.h>
+
+typedef void (sm3_block_fn)(struct sm3_state *sst, u8 const *src, int blocks);
+
+static inline int sm3_base_init(struct shash_desc *desc)
+{
+	struct sm3_state *sctx = shash_desc_ctx(desc);
+
+	sctx->state[0] = SM3_IVA;
+	sctx->state[1] = SM3_IVB;
+	sctx->state[2] = SM3_IVC;
+	sctx->state[3] = SM3_IVD;
+	sctx->state[4] = SM3_IVE;
+	sctx->state[5] = SM3_IVF;
+	sctx->state[6] = SM3_IVG;
+	sctx->state[7] = SM3_IVH;
+	sctx->count = 0;
+
+	return 0;
+}
+
+static inline int sm3_base_do_update(struct shash_desc *desc,
+				      const u8 *data,
+				      unsigned int len,
+				      sm3_block_fn *block_fn)
+{
+	struct sm3_state *sctx = shash_desc_ctx(desc);
+	unsigned int partial = sctx->count % SM3_BLOCK_SIZE;
+
+	sctx->count += len;
+
+	if (unlikely((partial + len) >= SM3_BLOCK_SIZE)) {
+		int blocks;
+
+		if (partial) {
+			int p = SM3_BLOCK_SIZE - partial;
+
+			memcpy(sctx->buffer + partial, data, p);
+			data += p;
+			len -= p;
+
+			block_fn(sctx, sctx->buffer, 1);
+		}
+
+		blocks = len / SM3_BLOCK_SIZE;
+		len %= SM3_BLOCK_SIZE;
+
+		if (blocks) {
+			block_fn(sctx, data, blocks);
+			data += blocks * SM3_BLOCK_SIZE;
+		}
+		partial = 0;
+	}
+	if (len)
+		memcpy(sctx->buffer + partial, data, len);
+
+	return 0;
+}
+
+static inline int sm3_base_do_finalize(struct shash_desc *desc,
+					sm3_block_fn *block_fn)
+{
+	const int bit_offset = SM3_BLOCK_SIZE - sizeof(__be64);
+	struct sm3_state *sctx = shash_desc_ctx(desc);
+	__be64 *bits = (__be64 *)(sctx->buffer + bit_offset);
+	unsigned int partial = sctx->count % SM3_BLOCK_SIZE;
+
+	sctx->buffer[partial++] = 0x80;
+	if (partial > bit_offset) {
+		memset(sctx->buffer + partial, 0x0, SM3_BLOCK_SIZE - partial);
+		partial = 0;
+
+		block_fn(sctx, sctx->buffer, 1);
+	}
+
+	memset(sctx->buffer + partial, 0x0, bit_offset - partial);
+	*bits = cpu_to_be64(sctx->count << 3);
+	block_fn(sctx, sctx->buffer, 1);
+
+	return 0;
+}
+
+static inline int sm3_base_finish(struct shash_desc *desc, u8 *out)
+{
+	struct sm3_state *sctx = shash_desc_ctx(desc);
+	__be32 *digest = (__be32 *)out;
+	int i;
+
+	for (i = 0; i < SM3_DIGEST_SIZE / sizeof(__be32); i++)
+		put_unaligned_be32(sctx->state[i], digest++);
+
+	*sctx = (struct sm3_state){};
+	return 0;
+}
diff --git a/include/crypto/sm4.h b/include/crypto/sm4.h
new file mode 100644
index 0000000..7afd730
--- /dev/null
+++ b/include/crypto/sm4.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/*
+ * Common values for the SM4 algorithm
+ * Copyright (C) 2018 ARM Limited or its affiliates.
+ */
+
+#ifndef _CRYPTO_SM4_H
+#define _CRYPTO_SM4_H
+
+#include <linux/types.h>
+#include <linux/crypto.h>
+
+#define SM4_KEY_SIZE	16
+#define SM4_BLOCK_SIZE	16
+#define SM4_RKEY_WORDS	32
+
+struct crypto_sm4_ctx {
+	u32 rkey_enc[SM4_RKEY_WORDS];
+	u32 rkey_dec[SM4_RKEY_WORDS];
+};
+
+int crypto_sm4_set_key(struct crypto_tfm *tfm, const u8 *in_key,
+		       unsigned int key_len);
+int crypto_sm4_expand_key(struct crypto_sm4_ctx *ctx, const u8 *in_key,
+			  unsigned int key_len);
+
+void crypto_sm4_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in);
+void crypto_sm4_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in);
+
+#endif
diff --git a/include/crypto/twofish.h b/include/crypto/twofish.h
new file mode 100644
index 0000000..2e2c096
--- /dev/null
+++ b/include/crypto/twofish.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _CRYPTO_TWOFISH_H
+#define _CRYPTO_TWOFISH_H
+
+#include <linux/types.h>
+
+#define TF_MIN_KEY_SIZE 16
+#define TF_MAX_KEY_SIZE 32
+#define TF_BLOCK_SIZE 16
+
+struct crypto_tfm;
+
+/* Structure for an expanded Twofish key.  s contains the key-dependent
+ * S-boxes composed with the MDS matrix; w contains the eight "whitening"
+ * subkeys, K[0] through K[7].	k holds the remaining, "round" subkeys.  Note
+ * that k[i] corresponds to what the Twofish paper calls K[i+8]. */
+struct twofish_ctx {
+	u32 s[4][256], w[8], k[32];
+};
+
+int __twofish_setkey(struct twofish_ctx *ctx, const u8 *key,
+		     unsigned int key_len, u32 *flags);
+int twofish_setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int key_len);
+
+#endif
diff --git a/include/crypto/xts.h b/include/crypto/xts.h
new file mode 100644
index 0000000..34d94c9
--- /dev/null
+++ b/include/crypto/xts.h
@@ -0,0 +1,60 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _CRYPTO_XTS_H
+#define _CRYPTO_XTS_H
+
+#include <crypto/b128ops.h>
+#include <crypto/internal/skcipher.h>
+#include <linux/fips.h>
+
+#define XTS_BLOCK_SIZE 16
+
+#define XTS_TWEAK_CAST(x) ((void (*)(void *, u8*, const u8*))(x))
+
+static inline int xts_check_key(struct crypto_tfm *tfm,
+				const u8 *key, unsigned int keylen)
+{
+	u32 *flags = &tfm->crt_flags;
+
+	/*
+	 * key consists of keys of equal size concatenated, therefore
+	 * the length must be even.
+	 */
+	if (keylen % 2) {
+		*flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
+		return -EINVAL;
+	}
+
+	/* ensure that the AES and tweak key are not identical */
+	if (fips_enabled &&
+	    !crypto_memneq(key, key + (keylen / 2), keylen / 2)) {
+		*flags |= CRYPTO_TFM_RES_WEAK_KEY;
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static inline int xts_verify_key(struct crypto_skcipher *tfm,
+				 const u8 *key, unsigned int keylen)
+{
+	/*
+	 * key consists of keys of equal size concatenated, therefore
+	 * the length must be even.
+	 */
+	if (keylen % 2) {
+		crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+		return -EINVAL;
+	}
+
+	/* ensure that the AES and tweak key are not identical */
+	if ((fips_enabled || crypto_skcipher_get_flags(tfm) &
+			     CRYPTO_TFM_REQ_WEAK_KEY) &&
+	    !crypto_memneq(key, key + (keylen / 2), keylen / 2)) {
+		crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_WEAK_KEY);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+#endif  /* _CRYPTO_XTS_H */
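
xts_verify_key() is intended as the first step of an XTS setkey path, before the two concatenated half-keys are expanded. A sketch, where struct example_xts_ctx and the use of the generic AES key expansion are illustrative assumptions:

#include <crypto/aes.h>

/* illustrative context: one expanded key per half of the XTS key */
struct example_xts_ctx {
	struct crypto_aes_ctx data_key;		/* first half of the key */
	struct crypto_aes_ctx tweak_key;	/* second half of the key */
};

static int example_xts_setkey(struct crypto_skcipher *tfm, const u8 *key,
			      unsigned int keylen)
{
	struct example_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
	int err;

	/* rejects odd lengths and, in FIPS mode, identical halves */
	err = xts_verify_key(tfm, key, keylen);
	if (err)
		return err;

	keylen /= 2;
	err = crypto_aes_expand_key(&ctx->data_key, key, keylen);
	if (err)
		return err;

	return crypto_aes_expand_key(&ctx->tweak_key, key + keylen, keylen);
}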