Update Linux to v5.4.2

Change-Id: Idf6911045d9d382da2cfe01b1edff026404ac8fd
diff --git a/drivers/dma-buf/Kconfig b/drivers/dma-buf/Kconfig
index ed3b785..a23b675 100644
--- a/drivers/dma-buf/Kconfig
+++ b/drivers/dma-buf/Kconfig
@@ -1,9 +1,9 @@
+# SPDX-License-Identifier: GPL-2.0-only
 menu "DMABUF options"
 
 config SYNC_FILE
 	bool "Explicit Synchronization Framework"
 	default n
-	select ANON_INODES
 	select DMA_SHARED_BUFFER
 	---help---
 	  The Sync File Framework adds explicit synchronization via
@@ -15,7 +15,7 @@
 	  associated with a buffer. When a job is submitted to the GPU a fence
 	  is attached to the buffer and is transferred via userspace, using Sync
 	  Files fds, to the DRM driver for example. More details at
-	  Documentation/sync_file.txt.
+	  Documentation/driver-api/sync_file.rst.
 
 config SW_SYNC
 	bool "Sync File Validation Framework"
@@ -30,4 +30,18 @@
 	  WARNING: improper use of this can result in deadlocking kernel
 	  drivers from userspace. Intended for test and debug only.
 
+config UDMABUF
+	bool "userspace dmabuf misc driver"
+	default n
+	depends on DMA_SHARED_BUFFER
+	depends on MEMFD_CREATE || COMPILE_TEST
+	help
+	  A driver to let userspace turn memfd regions into dma-bufs.
+	  Qemu can use this to create host dmabufs for guest framebuffers.
+
+config DMABUF_SELFTESTS
+	tristate "Selftests for the dma-buf interfaces"
+	default n
+	depends on DMA_SHARED_BUFFER
+
 endmenu
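
For reference, the new UDMABUF option exposes a /dev/udmabuf misc device that turns memfd regions into dma-bufs, as the help text above describes. The snippet below is only a rough userspace illustration, not part of this patch: the helper name memfd_to_dmabuf is made up for the example, and it assumes the matching <linux/udmabuf.h> uapi header, a page-aligned size, and that the driver insists on F_SEAL_SHRINK being set on the memfd.

#define _GNU_SOURCE
#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>
#include <linux/udmabuf.h>

/* Returns a dma-buf fd wrapping the whole memfd, or -1 on error. */
static int memfd_to_dmabuf(size_t size)
{
	struct udmabuf_create create;
	int memfd, devfd, buffd;

	memfd = memfd_create("guest-fb", MFD_ALLOW_SEALING);
	if (memfd < 0)
		return -1;
	if (ftruncate(memfd, size) < 0 ||
	    fcntl(memfd, F_ADD_SEALS, F_SEAL_SHRINK) < 0)
		goto err_memfd;

	devfd = open("/dev/udmabuf", O_RDWR);
	if (devfd < 0)
		goto err_memfd;

	memset(&create, 0, sizeof(create));
	create.memfd  = memfd;
	create.flags  = UDMABUF_FLAGS_CLOEXEC;
	create.offset = 0;
	create.size   = size;	/* offset and size must be page aligned */
	buffd = ioctl(devfd, UDMABUF_CREATE, &create);

	close(devfd);
	close(memfd);
	return buffd;	/* new dma-buf fd, or -1 */

err_memfd:
	close(memfd);
	return -1;
}
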
diff --git a/drivers/dma-buf/Makefile b/drivers/dma-buf/Makefile
index c33bf88..03479da 100644
--- a/drivers/dma-buf/Makefile
+++ b/drivers/dma-buf/Makefile
@@ -1,3 +1,12 @@
-obj-y := dma-buf.o dma-fence.o dma-fence-array.o reservation.o seqno-fence.o
+# SPDX-License-Identifier: GPL-2.0-only
+obj-y := dma-buf.o dma-fence.o dma-fence-array.o dma-fence-chain.o \
+	 dma-resv.o seqno-fence.o
 obj-$(CONFIG_SYNC_FILE)		+= sync_file.o
 obj-$(CONFIG_SW_SYNC)		+= sw_sync.o sync_debug.o
+obj-$(CONFIG_UDMABUF)		+= udmabuf.o
+
+dmabuf_selftests-y := \
+	selftest.o \
+	st-dma-fence.o
+
+obj-$(CONFIG_DMABUF_SELFTESTS)	+= dmabuf_selftests.o
diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c
index 1388447..433d91d 100644
--- a/drivers/dma-buf/dma-buf.c
+++ b/drivers/dma-buf/dma-buf.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * Framework for buffer objects that can be shared across devices/subsystems.
  *
@@ -8,18 +9,6 @@
  * Arnd Bergmann <arnd@arndb.de>, Rob Clark <rob@ti.com> and
  * Daniel Vetter <daniel@ffwll.ch> for their support in creation and
  * refining of this idea.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program.  If not, see <http://www.gnu.org/licenses/>.
  */
 
 #include <linux/fs.h>
@@ -32,10 +21,13 @@
 #include <linux/module.h>
 #include <linux/seq_file.h>
 #include <linux/poll.h>
-#include <linux/reservation.h>
+#include <linux/dma-resv.h>
 #include <linux/mm.h>
+#include <linux/mount.h>
+#include <linux/pseudo_fs.h>
 
 #include <uapi/linux/dma-buf.h>
+#include <uapi/linux/magic.h>
 
 static inline int is_dma_buf_file(struct file *);
 
@@ -46,6 +38,45 @@
 
 static struct dma_buf_list db_list;
 
+static char *dmabuffs_dname(struct dentry *dentry, char *buffer, int buflen)
+{
+	struct dma_buf *dmabuf;
+	char name[DMA_BUF_NAME_LEN];
+	size_t ret = 0;
+
+	dmabuf = dentry->d_fsdata;
+	mutex_lock(&dmabuf->lock);
+	if (dmabuf->name)
+		ret = strlcpy(name, dmabuf->name, DMA_BUF_NAME_LEN);
+	mutex_unlock(&dmabuf->lock);
+
+	return dynamic_dname(dentry, buffer, buflen, "/%s:%s",
+			     dentry->d_name.name, ret > 0 ? name : "");
+}
+
+static const struct dentry_operations dma_buf_dentry_ops = {
+	.d_dname = dmabuffs_dname,
+};
+
+static struct vfsmount *dma_buf_mnt;
+
+static int dma_buf_fs_init_context(struct fs_context *fc)
+{
+	struct pseudo_fs_context *ctx;
+
+	ctx = init_pseudo(fc, DMA_BUF_MAGIC);
+	if (!ctx)
+		return -ENOMEM;
+	ctx->dops = &dma_buf_dentry_ops;
+	return 0;
+}
+
+static struct file_system_type dma_buf_fs_type = {
+	.name = "dmabuf",
+	.init_fs_context = dma_buf_fs_init_context,
+	.kill_sb = kill_anon_super,
+};
+
 static int dma_buf_release(struct inode *inode, struct file *file)
 {
 	struct dma_buf *dmabuf;
@@ -73,8 +104,8 @@
 	list_del(&dmabuf->list_node);
 	mutex_unlock(&db_list.lock);
 
-	if (dmabuf->resv == (struct reservation_object *)&dmabuf[1])
-		reservation_object_fini(dmabuf->resv);
+	if (dmabuf->resv == (struct dma_resv *)&dmabuf[1])
+		dma_resv_fini(dmabuf->resv);
 
 	module_put(dmabuf->owner);
 	kfree(dmabuf);
@@ -90,6 +121,10 @@
 
 	dmabuf = file->private_data;
 
+	/* check if buffer supports mmap */
+	if (!dmabuf->ops->mmap)
+		return -EINVAL;
+
 	/* check for overflowing the buffer's size */
 	if (vma->vm_pgoff + vma_pages(vma) >
 	    dmabuf->size >> PAGE_SHIFT)
@@ -130,7 +165,7 @@
  * To support cross-device and cross-driver synchronization of buffer access
  * implicit fences (represented internally in the kernel with &struct fence) can
  * be attached to a &dma_buf. The glue for that and a few related things are
- * provided in the &reservation_object structure.
+ * provided in the &dma_resv structure.
  *
  * Userspace can query the state of these implicitly tracked fences using poll()
  * and related system calls:
@@ -160,8 +195,8 @@
 static __poll_t dma_buf_poll(struct file *file, poll_table *poll)
 {
 	struct dma_buf *dmabuf;
-	struct reservation_object *resv;
-	struct reservation_object_list *fobj;
+	struct dma_resv *resv;
+	struct dma_resv_list *fobj;
 	struct dma_fence *fence_excl;
 	__poll_t events;
 	unsigned shared_count, seq;
@@ -276,6 +311,43 @@
 	return events;
 }
 
+/**
+ * dma_buf_set_name - Set a name to a specific dma_buf to track the usage.
+ * The name of the dma-buf buffer can only be set when the dma-buf is not
+ * attached to any devices. It could theoretically support changing the
+ * name of the dma-buf if the same piece of memory is used for multiple
+ * purposes between different devices.
+ *
+ * @dmabuf: [in]    dmabuf buffer that will be renamed.
+ * @buf:   [in]     A piece of userspace memory that contains the name of
+ *                  the dma-buf.
+ *
+ * Returns 0 on success. If the dma-buf buffer is already attached to
+ * devices, return -EBUSY.
+ *
+ */
+static long dma_buf_set_name(struct dma_buf *dmabuf, const char __user *buf)
+{
+	char *name = strndup_user(buf, DMA_BUF_NAME_LEN);
+	long ret = 0;
+
+	if (IS_ERR(name))
+		return PTR_ERR(name);
+
+	mutex_lock(&dmabuf->lock);
+	if (!list_empty(&dmabuf->attachments)) {
+		ret = -EBUSY;
+		kfree(name);
+		goto out_unlock;
+	}
+	kfree(dmabuf->name);
+	dmabuf->name = name;
+
+out_unlock:
+	mutex_unlock(&dmabuf->lock);
+	return ret;
+}
+
 static long dma_buf_ioctl(struct file *file,
 			  unsigned int cmd, unsigned long arg)
 {
@@ -314,11 +386,29 @@
 			ret = dma_buf_begin_cpu_access(dmabuf, direction);
 
 		return ret;
+
+	case DMA_BUF_SET_NAME:
+		return dma_buf_set_name(dmabuf, (const char __user *)arg);
+
 	default:
 		return -ENOTTY;
 	}
 }
 
+static void dma_buf_show_fdinfo(struct seq_file *m, struct file *file)
+{
+	struct dma_buf *dmabuf = file->private_data;
+
+	seq_printf(m, "size:\t%zu\n", dmabuf->size);
+	/* Don't count the temporary reference taken inside procfs seq_show */
+	seq_printf(m, "count:\t%ld\n", file_count(dmabuf->file) - 1);
+	seq_printf(m, "exp_name:\t%s\n", dmabuf->exp_name);
+	mutex_lock(&dmabuf->lock);
+	if (dmabuf->name)
+		seq_printf(m, "name:\t%s\n", dmabuf->name);
+	mutex_unlock(&dmabuf->lock);
+}
+
 static const struct file_operations dma_buf_fops = {
 	.release	= dma_buf_release,
 	.mmap		= dma_buf_mmap_internal,
@@ -328,6 +418,7 @@
 #ifdef CONFIG_COMPAT
 	.compat_ioctl	= dma_buf_ioctl,
 #endif
+	.show_fdinfo	= dma_buf_show_fdinfo,
 };
 
 /*
@@ -338,6 +429,32 @@
 	return file->f_op == &dma_buf_fops;
 }
 
+static struct file *dma_buf_getfile(struct dma_buf *dmabuf, int flags)
+{
+	struct file *file;
+	struct inode *inode = alloc_anon_inode(dma_buf_mnt->mnt_sb);
+
+	if (IS_ERR(inode))
+		return ERR_CAST(inode);
+
+	inode->i_size = dmabuf->size;
+	inode_set_bytes(inode, dmabuf->size);
+
+	file = alloc_file_pseudo(inode, dma_buf_mnt, "dmabuf",
+				 flags, &dma_buf_fops);
+	if (IS_ERR(file))
+		goto err_alloc_file;
+	file->f_flags = flags & (O_ACCMODE | O_NONBLOCK);
+	file->private_data = dmabuf;
+	file->f_path.dentry->d_fsdata = dmabuf;
+
+	return file;
+
+err_alloc_file:
+	iput(inode);
+	return file;
+}
+
 /**
  * DOC: dma buf device access
  *
@@ -389,13 +506,13 @@
 struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info)
 {
 	struct dma_buf *dmabuf;
-	struct reservation_object *resv = exp_info->resv;
+	struct dma_resv *resv = exp_info->resv;
 	struct file *file;
 	size_t alloc_size = sizeof(struct dma_buf);
 	int ret;
 
 	if (!exp_info->resv)
-		alloc_size += sizeof(struct reservation_object);
+		alloc_size += sizeof(struct dma_resv);
 	else
 		/* prevent &dma_buf[1] == dma_buf->resv */
 		alloc_size += 1;
@@ -404,9 +521,7 @@
 			  || !exp_info->ops
 			  || !exp_info->ops->map_dma_buf
 			  || !exp_info->ops->unmap_dma_buf
-			  || !exp_info->ops->release
-			  || !exp_info->ops->map
-			  || !exp_info->ops->mmap)) {
+			  || !exp_info->ops->release)) {
 		return ERR_PTR(-EINVAL);
 	}
 
@@ -429,13 +544,12 @@
 	dmabuf->cb_excl.active = dmabuf->cb_shared.active = 0;
 
 	if (!resv) {
-		resv = (struct reservation_object *)&dmabuf[1];
-		reservation_object_init(resv);
+		resv = (struct dma_resv *)&dmabuf[1];
+		dma_resv_init(resv);
 	}
 	dmabuf->resv = resv;
 
-	file = anon_inode_getfile("dmabuf", &dma_buf_fops, dmabuf,
-					exp_info->flags);
+	file = dma_buf_getfile(dmabuf, exp_info->flags);
 	if (IS_ERR(file)) {
 		ret = PTR_ERR(file);
 		goto err_dmabuf;
@@ -574,6 +688,7 @@
 	list_add(&attach->node, &dmabuf->attachments);
 
 	mutex_unlock(&dmabuf->lock);
+
 	return attach;
 
 err_attach:
@@ -596,6 +711,9 @@
 	if (WARN_ON(!dmabuf || !attach))
 		return;
 
+	if (attach->sgt)
+		dmabuf->ops->unmap_dma_buf(attach, attach->sgt, attach->dir);
+
 	mutex_lock(&dmabuf->lock);
 	list_del(&attach->node);
 	if (dmabuf->ops->detach)
@@ -631,10 +749,27 @@
 	if (WARN_ON(!attach || !attach->dmabuf))
 		return ERR_PTR(-EINVAL);
 
+	if (attach->sgt) {
+		/*
+		 * Two mappings with different directions for the same
+		 * attachment are not allowed.
+		 */
+		if (attach->dir != direction &&
+		    attach->dir != DMA_BIDIRECTIONAL)
+			return ERR_PTR(-EBUSY);
+
+		return attach->sgt;
+	}
+
 	sg_table = attach->dmabuf->ops->map_dma_buf(attach, direction);
 	if (!sg_table)
 		sg_table = ERR_PTR(-ENOMEM);
 
+	if (!IS_ERR(sg_table) && attach->dmabuf->ops->cache_sgt_mapping) {
+		attach->sgt = sg_table;
+		attach->dir = direction;
+	}
+
 	return sg_table;
 }
 EXPORT_SYMBOL_GPL(dma_buf_map_attachment);
@@ -658,8 +793,10 @@
 	if (WARN_ON(!attach || !attach->dmabuf || !sg_table))
 		return;
 
-	attach->dmabuf->ops->unmap_dma_buf(attach, sg_table,
-						direction);
+	if (attach->sgt == sg_table)
+		return;
+
+	attach->dmabuf->ops->unmap_dma_buf(attach, sg_table, direction);
 }
 EXPORT_SYMBOL_GPL(dma_buf_unmap_attachment);
 
@@ -772,11 +909,11 @@
 {
 	bool write = (direction == DMA_BIDIRECTIONAL ||
 		      direction == DMA_TO_DEVICE);
-	struct reservation_object *resv = dmabuf->resv;
+	struct dma_resv *resv = dmabuf->resv;
 	long ret;
 
 	/* Wait on any implicit rendering fences */
-	ret = reservation_object_wait_timeout_rcu(resv, write, true,
+	ret = dma_resv_wait_timeout_rcu(resv, write, true,
 						  MAX_SCHEDULE_TIMEOUT);
 	if (ret < 0)
 		return ret;
@@ -907,6 +1044,10 @@
 	if (WARN_ON(!dmabuf || !vma))
 		return -EINVAL;
 
+	/* check if buffer supports mmap */
+	if (!dmabuf->ops->mmap)
+		return -EINVAL;
+
 	/* check for offset overflow */
 	if (pgoff + vma_pages(vma) < pgoff)
 		return -EOVERFLOW;
@@ -1013,8 +1154,8 @@
 	int ret;
 	struct dma_buf *buf_obj;
 	struct dma_buf_attachment *attach_obj;
-	struct reservation_object *robj;
-	struct reservation_object_list *fobj;
+	struct dma_resv *robj;
+	struct dma_resv_list *fobj;
 	struct dma_fence *fence;
 	unsigned seq;
 	int count = 0, attach_count, shared_count, i;
@@ -1026,8 +1167,8 @@
 		return ret;
 
 	seq_puts(s, "\nDma-buf Objects:\n");
-	seq_printf(s, "%-8s\t%-8s\t%-8s\t%-8s\texp_name\n",
-		   "size", "flags", "mode", "count");
+	seq_printf(s, "%-8s\t%-8s\t%-8s\t%-8s\texp_name\t%-8s\n",
+		   "size", "flags", "mode", "count", "ino");
 
 	list_for_each_entry(buf_obj, &db_list.head, list_node) {
 		ret = mutex_lock_interruptible(&buf_obj->lock);
@@ -1038,11 +1179,13 @@
 			continue;
 		}
 
-		seq_printf(s, "%08zu\t%08x\t%08x\t%08ld\t%s\n",
+		seq_printf(s, "%08zu\t%08x\t%08x\t%08ld\t%s\t%08lu\t%s\n",
 				buf_obj->size,
 				buf_obj->file->f_flags, buf_obj->file->f_mode,
 				file_count(buf_obj->file),
-				buf_obj->exp_name);
+				buf_obj->exp_name,
+				file_inode(buf_obj->file)->i_ino,
+				buf_obj->name ?: "");
 
 		robj = buf_obj->resv;
 		while (true) {
@@ -1069,6 +1212,7 @@
 				   fence->ops->get_driver_name(fence),
 				   fence->ops->get_timeline_name(fence),
 				   dma_fence_is_signaled(fence) ? "" : "un");
+			dma_fence_put(fence);
 		}
 		rcu_read_unlock();
 
@@ -1094,17 +1238,7 @@
 	return 0;
 }
 
-static int dma_buf_debug_open(struct inode *inode, struct file *file)
-{
-	return single_open(file, dma_buf_debug_show, NULL);
-}
-
-static const struct file_operations dma_buf_debug_fops = {
-	.open           = dma_buf_debug_open,
-	.read           = seq_read,
-	.llseek         = seq_lseek,
-	.release        = single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(dma_buf_debug);
 
 static struct dentry *dma_buf_debugfs_dir;
 
@@ -1147,6 +1281,10 @@
 
 static int __init dma_buf_init(void)
 {
+	dma_buf_mnt = kern_mount(&dma_buf_fs_type);
+	if (IS_ERR(dma_buf_mnt))
+		return PTR_ERR(dma_buf_mnt);
+
 	mutex_init(&db_list.lock);
 	INIT_LIST_HEAD(&db_list.head);
 	dma_buf_init_debugfs();
@@ -1157,5 +1295,6 @@
 static void __exit dma_buf_deinit(void)
 {
 	dma_buf_uninit_debugfs();
+	kern_unmount(dma_buf_mnt);
 }
 __exitcall(dma_buf_deinit);
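
The DMA_BUF_SET_NAME ioctl handled above is meant to be issued by the buffer's owner before any device attaches, so that the name shows up in the debugfs bufinfo dump and in the per-fd fdinfo output added here. A minimal userspace sketch follows; it assumes the updated <linux/dma-buf.h> uapi header that defines DMA_BUF_SET_NAME, and the wrapper name dmabuf_set_name is invented for the example.

#include <sys/ioctl.h>
#include <linux/dma-buf.h>

/* Label an exported dma-buf fd.  Fails with errno == EBUSY once the
 * buffer already has device attachments.
 */
static int dmabuf_set_name(int dmabuf_fd, const char *name)
{
	return ioctl(dmabuf_fd, DMA_BUF_SET_NAME, name);
}
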
diff --git a/drivers/dma-buf/dma-fence-array.c b/drivers/dma-buf/dma-fence-array.c
index a8c2544..d3fbd95 100644
--- a/drivers/dma-buf/dma-fence-array.c
+++ b/drivers/dma-buf/dma-fence-array.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * dma-fence-array: aggregate fences to be waited together
  *
@@ -6,21 +7,14 @@
  * Authors:
  *	Gustavo Padovan <gustavo@padovan.org>
  *	Christian König <christian.koenig@amd.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
  */
 
 #include <linux/export.h>
 #include <linux/slab.h>
 #include <linux/dma-fence-array.h>
 
+#define PENDING_ERROR 1
+
 static const char *dma_fence_array_get_driver_name(struct dma_fence *fence)
 {
 	return "dma_fence_array";
@@ -31,10 +25,29 @@
 	return "unbound";
 }
 
+static void dma_fence_array_set_pending_error(struct dma_fence_array *array,
+					      int error)
+{
+	/*
+	 * Propagate the first error reported by any of our fences, but only
+	 * before we ourselves are signaled.
+	 */
+	if (error)
+		cmpxchg(&array->base.error, PENDING_ERROR, error);
+}
+
+static void dma_fence_array_clear_pending_error(struct dma_fence_array *array)
+{
+	/* Clear the error flag if not actually set. */
+	cmpxchg(&array->base.error, PENDING_ERROR, 0);
+}
+
 static void irq_dma_fence_array_work(struct irq_work *wrk)
 {
 	struct dma_fence_array *array = container_of(wrk, typeof(*array), work);
 
+	dma_fence_array_clear_pending_error(array);
+
 	dma_fence_signal(&array->base);
 	dma_fence_put(&array->base);
 }
@@ -46,6 +59,8 @@
 		container_of(cb, struct dma_fence_array_cb, cb);
 	struct dma_fence_array *array = array_cb->array;
 
+	dma_fence_array_set_pending_error(array, f->error);
+
 	if (atomic_dec_and_test(&array->num_pending))
 		irq_work_queue(&array->work);
 	else
@@ -71,9 +86,14 @@
 		dma_fence_get(&array->base);
 		if (dma_fence_add_callback(array->fences[i], &cb[i].cb,
 					   dma_fence_array_cb_func)) {
+			int error = array->fences[i]->error;
+
+			dma_fence_array_set_pending_error(array, error);
 			dma_fence_put(&array->base);
-			if (atomic_dec_and_test(&array->num_pending))
+			if (atomic_dec_and_test(&array->num_pending)) {
+				dma_fence_array_clear_pending_error(array);
 				return false;
+			}
 		}
 	}
 
@@ -150,6 +170,8 @@
 	atomic_set(&array->num_pending, signal_on_any ? 1 : num_fences);
 	array->fences = fences;
 
+	array->base.error = PENDING_ERROR;
+
 	return array;
 }
 EXPORT_SYMBOL(dma_fence_array_create);
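
The PENDING_ERROR plumbing above means that the first error recorded on any member fence before the aggregate signals becomes the array's own error, and is cleared back to zero if every member completes cleanly. The kernel-side sketch below is only an illustration of what a caller can now rely on; build_failing_array is a made-up name and the exact signaling order is an assumption of the example.

#include <linux/dma-fence.h>
#include <linux/dma-fence-array.h>

/* Build an array over @fences (the array takes ownership of the @fences
 * storage) and fail one member; once every member has signaled, the
 * aggregate reports that error through dma_fence_get_status().
 */
static struct dma_fence *build_failing_array(struct dma_fence **fences,
					     int num_fences)
{
	struct dma_fence_array *array;

	array = dma_fence_array_create(num_fences, fences,
				       dma_fence_context_alloc(1), 1, false);
	if (!array)
		return NULL;

	/* Arm the member callbacks so completion is tracked. */
	dma_fence_enable_sw_signaling(&array->base);

	dma_fence_set_error(fences[0], -EIO);	/* must precede signaling */
	dma_fence_signal(fences[0]);
	/* ...signal the rest; dma_fence_get_status(&array->base) -> -EIO */

	return &array->base;
}
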
diff --git a/drivers/dma-buf/dma-fence-chain.c b/drivers/dma-buf/dma-fence-chain.c
new file mode 100644
index 0000000..44a7416
--- /dev/null
+++ b/drivers/dma-buf/dma-fence-chain.c
@@ -0,0 +1,256 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * fence-chain: chain fences together in a timeline
+ *
+ * Copyright (C) 2018 Advanced Micro Devices, Inc.
+ * Authors:
+ *	Christian König <christian.koenig@amd.com>
+ */
+
+#include <linux/dma-fence-chain.h>
+
+static bool dma_fence_chain_enable_signaling(struct dma_fence *fence);
+
+/**
+ * dma_fence_chain_get_prev - use RCU to get a reference to the previous fence
+ * @chain: chain node to get the previous node from
+ *
+ * Use dma_fence_get_rcu_safe to get a reference to the previous fence of the
+ * chain node.
+ */
+static struct dma_fence *dma_fence_chain_get_prev(struct dma_fence_chain *chain)
+{
+	struct dma_fence *prev;
+
+	rcu_read_lock();
+	prev = dma_fence_get_rcu_safe(&chain->prev);
+	rcu_read_unlock();
+	return prev;
+}
+
+/**
+ * dma_fence_chain_walk - chain walking function
+ * @fence: current chain node
+ *
+ * Walk the chain to the next node. Returns the next fence or NULL if we are at
+ * the end of the chain. Garbage collects chain nodes which are already
+ * signaled.
+ */
+struct dma_fence *dma_fence_chain_walk(struct dma_fence *fence)
+{
+	struct dma_fence_chain *chain, *prev_chain;
+	struct dma_fence *prev, *replacement, *tmp;
+
+	chain = to_dma_fence_chain(fence);
+	if (!chain) {
+		dma_fence_put(fence);
+		return NULL;
+	}
+
+	while ((prev = dma_fence_chain_get_prev(chain))) {
+
+		prev_chain = to_dma_fence_chain(prev);
+		if (prev_chain) {
+			if (!dma_fence_is_signaled(prev_chain->fence))
+				break;
+
+			replacement = dma_fence_chain_get_prev(prev_chain);
+		} else {
+			if (!dma_fence_is_signaled(prev))
+				break;
+
+			replacement = NULL;
+		}
+
+		tmp = cmpxchg((void **)&chain->prev, (void *)prev, (void *)replacement);
+		if (tmp == prev)
+			dma_fence_put(tmp);
+		else
+			dma_fence_put(replacement);
+		dma_fence_put(prev);
+	}
+
+	dma_fence_put(fence);
+	return prev;
+}
+EXPORT_SYMBOL(dma_fence_chain_walk);
+
+/**
+ * dma_fence_chain_find_seqno - find fence chain node by seqno
+ * @pfence: pointer to the chain node where to start
+ * @seqno: the sequence number to search for
+ *
+ * Advance the fence pointer to the chain node which will signal this sequence
+ * number. If no sequence number is provided then this is a no-op.
+ *
+ * Returns -EINVAL if the fence is not a chain node or the sequence number has
+ * not yet advanced far enough.
+ */
+int dma_fence_chain_find_seqno(struct dma_fence **pfence, uint64_t seqno)
+{
+	struct dma_fence_chain *chain;
+
+	if (!seqno)
+		return 0;
+
+	chain = to_dma_fence_chain(*pfence);
+	if (!chain || chain->base.seqno < seqno)
+		return -EINVAL;
+
+	dma_fence_chain_for_each(*pfence, &chain->base) {
+		if ((*pfence)->context != chain->base.context ||
+		    to_dma_fence_chain(*pfence)->prev_seqno < seqno)
+			break;
+	}
+	dma_fence_put(&chain->base);
+
+	return 0;
+}
+EXPORT_SYMBOL(dma_fence_chain_find_seqno);
+
+static const char *dma_fence_chain_get_driver_name(struct dma_fence *fence)
+{
+        return "dma_fence_chain";
+}
+
+static const char *dma_fence_chain_get_timeline_name(struct dma_fence *fence)
+{
+        return "unbound";
+}
+
+static void dma_fence_chain_irq_work(struct irq_work *work)
+{
+	struct dma_fence_chain *chain;
+
+	chain = container_of(work, typeof(*chain), work);
+
+	/* Try to rearm the callback */
+	if (!dma_fence_chain_enable_signaling(&chain->base))
+		/* Ok, we are done. No more unsignaled fences left */
+		dma_fence_signal(&chain->base);
+	dma_fence_put(&chain->base);
+}
+
+static void dma_fence_chain_cb(struct dma_fence *f, struct dma_fence_cb *cb)
+{
+	struct dma_fence_chain *chain;
+
+	chain = container_of(cb, typeof(*chain), cb);
+	irq_work_queue(&chain->work);
+	dma_fence_put(f);
+}
+
+static bool dma_fence_chain_enable_signaling(struct dma_fence *fence)
+{
+	struct dma_fence_chain *head = to_dma_fence_chain(fence);
+
+	dma_fence_get(&head->base);
+	dma_fence_chain_for_each(fence, &head->base) {
+		struct dma_fence_chain *chain = to_dma_fence_chain(fence);
+		struct dma_fence *f = chain ? chain->fence : fence;
+
+		dma_fence_get(f);
+		if (!dma_fence_add_callback(f, &head->cb, dma_fence_chain_cb)) {
+			dma_fence_put(fence);
+			return true;
+		}
+		dma_fence_put(f);
+	}
+	dma_fence_put(&head->base);
+	return false;
+}
+
+static bool dma_fence_chain_signaled(struct dma_fence *fence)
+{
+	dma_fence_chain_for_each(fence, fence) {
+		struct dma_fence_chain *chain = to_dma_fence_chain(fence);
+		struct dma_fence *f = chain ? chain->fence : fence;
+
+		if (!dma_fence_is_signaled(f)) {
+			dma_fence_put(fence);
+			return false;
+		}
+	}
+
+	return true;
+}
+
+static void dma_fence_chain_release(struct dma_fence *fence)
+{
+	struct dma_fence_chain *chain = to_dma_fence_chain(fence);
+	struct dma_fence *prev;
+
+	/* Manually unlink the chain as much as possible to avoid recursion
+	 * and potential stack overflow.
+	 */
+	while ((prev = rcu_dereference_protected(chain->prev, true))) {
+		struct dma_fence_chain *prev_chain;
+
+		if (kref_read(&prev->refcount) > 1)
+		       break;
+
+		prev_chain = to_dma_fence_chain(prev);
+		if (!prev_chain)
+			break;
+
+		/* No need for atomic operations since we hold the last
+		 * reference to prev_chain.
+		 */
+		chain->prev = prev_chain->prev;
+		RCU_INIT_POINTER(prev_chain->prev, NULL);
+		dma_fence_put(prev);
+	}
+	dma_fence_put(prev);
+
+	dma_fence_put(chain->fence);
+	dma_fence_free(fence);
+}
+
+const struct dma_fence_ops dma_fence_chain_ops = {
+	.use_64bit_seqno = true,
+	.get_driver_name = dma_fence_chain_get_driver_name,
+	.get_timeline_name = dma_fence_chain_get_timeline_name,
+	.enable_signaling = dma_fence_chain_enable_signaling,
+	.signaled = dma_fence_chain_signaled,
+	.release = dma_fence_chain_release,
+};
+EXPORT_SYMBOL(dma_fence_chain_ops);
+
+/**
+ * dma_fence_chain_init - initialize a fence chain
+ * @chain: the chain node to initialize
+ * @prev: the previous fence
+ * @fence: the current fence
+ *
+ * Initialize a new chain node and either start a new chain or add the node to
+ * the existing chain of the previous fence.
+ */
+void dma_fence_chain_init(struct dma_fence_chain *chain,
+			  struct dma_fence *prev,
+			  struct dma_fence *fence,
+			  uint64_t seqno)
+{
+	struct dma_fence_chain *prev_chain = to_dma_fence_chain(prev);
+	uint64_t context;
+
+	spin_lock_init(&chain->lock);
+	rcu_assign_pointer(chain->prev, prev);
+	chain->fence = fence;
+	chain->prev_seqno = 0;
+	init_irq_work(&chain->work, dma_fence_chain_irq_work);
+
+	/* Try to reuse the context of the previous chain node. */
+	if (prev_chain && __dma_fence_is_later(seqno, prev->seqno, prev->ops)) {
+		context = prev->context;
+		chain->prev_seqno = prev->seqno;
+	} else {
+		context = dma_fence_context_alloc(1);
+		/* Make sure that we always have a valid sequence number. */
+		if (prev_chain)
+			seqno = max(prev->seqno, seqno);
+	}
+
+	dma_fence_init(&chain->base, &dma_fence_chain_ops,
+		       &chain->lock, context, seqno);
+}
+EXPORT_SYMBOL(dma_fence_chain_init);
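
dma_fence_chain is new here; a driver (drm_syncobj is the in-tree user in this kernel) strings fences into a timeline with dma_fence_chain_init() and later resolves individual points with dma_fence_chain_find_seqno(). The sketch below is illustrative only: timeline_append/timeline_point are invented names, and the kzalloc-based allocation is an assumption of the example, though the chain node does take over the references to @prev and @fence on init.

#include <linux/slab.h>
#include <linux/dma-fence-chain.h>

/* Append @fence to a timeline whose newest node is @prev; the new chain
 * node takes over the references to both @prev and @fence.
 */
static struct dma_fence *timeline_append(struct dma_fence *prev,
					 struct dma_fence *fence, u64 seqno)
{
	struct dma_fence_chain *chain = kzalloc(sizeof(*chain), GFP_KERNEL);

	if (!chain)
		return NULL;

	dma_fence_chain_init(chain, prev, fence, seqno);
	return &chain->base;
}

/* Resolve the node that will signal @seqno, or NULL if the timeline has
 * not advanced that far yet.  The caller receives its own reference.
 */
static struct dma_fence *timeline_point(struct dma_fence *head, u64 seqno)
{
	struct dma_fence *fence = dma_fence_get(head);

	if (dma_fence_chain_find_seqno(&fence, seqno) < 0) {
		dma_fence_put(fence);
		return NULL;
	}
	return fence;
}
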
diff --git a/drivers/dma-buf/dma-fence.c b/drivers/dma-buf/dma-fence.c
index 1551ca7..2c136ae 100644
--- a/drivers/dma-buf/dma-fence.c
+++ b/drivers/dma-buf/dma-fence.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * Fence mechanism for dma-buf and to allow for asynchronous dma access
  *
@@ -7,15 +8,6 @@
  * Authors:
  * Rob Clark <robdclark@gmail.com>
  * Maarten Lankhorst <maarten.lankhorst@canonical.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
  */
 
 #include <linux/slab.h>
@@ -29,6 +21,10 @@
 
 EXPORT_TRACEPOINT_SYMBOL(dma_fence_emit);
 EXPORT_TRACEPOINT_SYMBOL(dma_fence_enable_signal);
+EXPORT_TRACEPOINT_SYMBOL(dma_fence_signaled);
+
+static DEFINE_SPINLOCK(dma_fence_stub_lock);
+static struct dma_fence dma_fence_stub;
 
 /*
  * fence context counter: each execution context should have its own
@@ -36,7 +32,7 @@
  * context or not. One device can have multiple separate contexts,
  * and they're used if some engine can run independently of another.
  */
-static atomic64_t dma_fence_context_counter = ATOMIC64_INIT(0);
+static atomic64_t dma_fence_context_counter = ATOMIC64_INIT(1);
 
 /**
  * DOC: DMA fences overview
@@ -64,10 +60,41 @@
  *
  * - Then there's also implicit fencing, where the synchronization points are
  *   implicitly passed around as part of shared &dma_buf instances. Such
- *   implicit fences are stored in &struct reservation_object through the
+ *   implicit fences are stored in &struct dma_resv through the
  *   &dma_buf.resv pointer.
  */
 
+static const char *dma_fence_stub_get_name(struct dma_fence *fence)
+{
+        return "stub";
+}
+
+static const struct dma_fence_ops dma_fence_stub_ops = {
+	.get_driver_name = dma_fence_stub_get_name,
+	.get_timeline_name = dma_fence_stub_get_name,
+};
+
+/**
+ * dma_fence_get_stub - return a signaled fence
+ *
+ * Return a stub fence which is already signaled.
+ */
+struct dma_fence *dma_fence_get_stub(void)
+{
+	spin_lock(&dma_fence_stub_lock);
+	if (!dma_fence_stub.ops) {
+		dma_fence_init(&dma_fence_stub,
+			       &dma_fence_stub_ops,
+			       &dma_fence_stub_lock,
+			       0, 0);
+		dma_fence_signal_locked(&dma_fence_stub);
+	}
+	spin_unlock(&dma_fence_stub_lock);
+
+	return dma_fence_get(&dma_fence_stub);
+}
+EXPORT_SYMBOL(dma_fence_get_stub);
+
 /**
  * dma_fence_context_alloc - allocate an array of fence contexts
  * @num: amount of contexts to allocate
@@ -102,31 +129,27 @@
 int dma_fence_signal_locked(struct dma_fence *fence)
 {
 	struct dma_fence_cb *cur, *tmp;
-	int ret = 0;
+	struct list_head cb_list;
 
 	lockdep_assert_held(fence->lock);
 
-	if (WARN_ON(!fence))
+	if (unlikely(test_and_set_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
+				      &fence->flags)))
 		return -EINVAL;
 
-	if (test_and_set_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
-		ret = -EINVAL;
+	/* Stash the cb_list before replacing it with the timestamp */
+	list_replace(&fence->cb_list, &cb_list);
 
-		/*
-		 * we might have raced with the unlocked dma_fence_signal,
-		 * still run through all callbacks
-		 */
-	} else {
-		fence->timestamp = ktime_get();
-		set_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &fence->flags);
-		trace_dma_fence_signaled(fence);
-	}
+	fence->timestamp = ktime_get();
+	set_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &fence->flags);
+	trace_dma_fence_signaled(fence);
 
-	list_for_each_entry_safe(cur, tmp, &fence->cb_list, node) {
-		list_del_init(&cur->node);
+	list_for_each_entry_safe(cur, tmp, &cb_list, node) {
+		INIT_LIST_HEAD(&cur->node);
 		cur->func(fence, cur);
 	}
-	return ret;
+
+	return 0;
 }
 EXPORT_SYMBOL(dma_fence_signal_locked);
 
@@ -146,28 +169,16 @@
 int dma_fence_signal(struct dma_fence *fence)
 {
 	unsigned long flags;
+	int ret;
 
 	if (!fence)
 		return -EINVAL;
 
-	if (test_and_set_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
-		return -EINVAL;
+	spin_lock_irqsave(fence->lock, flags);
+	ret = dma_fence_signal_locked(fence);
+	spin_unlock_irqrestore(fence->lock, flags);
 
-	fence->timestamp = ktime_get();
-	set_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &fence->flags);
-	trace_dma_fence_signaled(fence);
-
-	if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &fence->flags)) {
-		struct dma_fence_cb *cur, *tmp;
-
-		spin_lock_irqsave(fence->lock, flags);
-		list_for_each_entry_safe(cur, tmp, &fence->cb_list, node) {
-			list_del_init(&cur->node);
-			cur->func(fence, cur);
-		}
-		spin_unlock_irqrestore(fence->lock, flags);
-	}
-	return 0;
+	return ret;
 }
 EXPORT_SYMBOL(dma_fence_signal);
 
@@ -221,8 +232,26 @@
 
 	trace_dma_fence_destroy(fence);
 
-	/* Failed to signal before release, could be a refcounting issue */
-	WARN_ON(!list_empty(&fence->cb_list));
+	if (WARN(!list_empty(&fence->cb_list) &&
+		 !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags),
+		 "Fence %s:%s:%llx:%llx released with pending signals!\n",
+		 fence->ops->get_driver_name(fence),
+		 fence->ops->get_timeline_name(fence),
+		 fence->context, fence->seqno)) {
+		unsigned long flags;
+
+		/*
+		 * Failed to signal before release, likely a refcounting issue.
+		 *
+		 * This should never happen, but if it does make sure that we
+		 * don't leave chains dangling. We set the error flag first
+		 * so that the callbacks know this signal is due to an error.
+		 */
+		spin_lock_irqsave(fence->lock, flags);
+		fence->error = -EDEADLK;
+		dma_fence_signal_locked(fence);
+		spin_unlock_irqrestore(fence->lock, flags);
+	}
 
 	if (fence->ops->release)
 		fence->ops->release(fence);
@@ -615,7 +644,7 @@
  */
 void
 dma_fence_init(struct dma_fence *fence, const struct dma_fence_ops *ops,
-	       spinlock_t *lock, u64 context, unsigned seqno)
+	       spinlock_t *lock, u64 context, u64 seqno)
 {
 	BUG_ON(!lock);
 	BUG_ON(!ops || !ops->get_driver_name || !ops->get_timeline_name);
diff --git a/drivers/dma-buf/reservation.c b/drivers/dma-buf/dma-resv.c
similarity index 62%
rename from drivers/dma-buf/reservation.c
rename to drivers/dma-buf/dma-resv.c
index 6c95f61..7090025 100644
--- a/drivers/dma-buf/reservation.c
+++ b/drivers/dma-buf/dma-resv.c
@@ -32,7 +32,7 @@
  * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
  */
 
-#include <linux/reservation.h>
+#include <linux/dma-resv.h>
 #include <linux/export.h>
 
 /**
@@ -56,117 +56,117 @@
 EXPORT_SYMBOL(reservation_seqcount_string);
 
 /**
- * reservation_object_reserve_shared - Reserve space to add a shared
- * fence to a reservation_object.
- * @obj: reservation object
+ * dma_resv_list_alloc - allocate fence list
+ * @shared_max: number of fences we need space for
  *
- * Should be called before reservation_object_add_shared_fence().  Must
+ * Allocate a new dma_resv_list and make sure to correctly initialize
+ * shared_max.
+ */
+static struct dma_resv_list *dma_resv_list_alloc(unsigned int shared_max)
+{
+	struct dma_resv_list *list;
+
+	list = kmalloc(offsetof(typeof(*list), shared[shared_max]), GFP_KERNEL);
+	if (!list)
+		return NULL;
+
+	list->shared_max = (ksize(list) - offsetof(typeof(*list), shared)) /
+		sizeof(*list->shared);
+
+	return list;
+}
+
+/**
+ * dma_resv_list_free - free fence list
+ * @list: list to free
+ *
+ * Free a dma_resv_list and make sure to drop all references.
+ */
+static void dma_resv_list_free(struct dma_resv_list *list)
+{
+	unsigned int i;
+
+	if (!list)
+		return;
+
+	for (i = 0; i < list->shared_count; ++i)
+		dma_fence_put(rcu_dereference_protected(list->shared[i], true));
+
+	kfree_rcu(list, rcu);
+}
+
+/**
+ * dma_resv_init - initialize a reservation object
+ * @obj: the reservation object
+ */
+void dma_resv_init(struct dma_resv *obj)
+{
+	ww_mutex_init(&obj->lock, &reservation_ww_class);
+
+	__seqcount_init(&obj->seq, reservation_seqcount_string,
+			&reservation_seqcount_class);
+	RCU_INIT_POINTER(obj->fence, NULL);
+	RCU_INIT_POINTER(obj->fence_excl, NULL);
+}
+EXPORT_SYMBOL(dma_resv_init);
+
+/**
+ * dma_resv_fini - destroys a reservation object
+ * @obj: the reservation object
+ */
+void dma_resv_fini(struct dma_resv *obj)
+{
+	struct dma_resv_list *fobj;
+	struct dma_fence *excl;
+
+	/*
+	 * This object should be dead and all references must have
+	 * been released to it, so no need to be protected with rcu.
+	 */
+	excl = rcu_dereference_protected(obj->fence_excl, 1);
+	if (excl)
+		dma_fence_put(excl);
+
+	fobj = rcu_dereference_protected(obj->fence, 1);
+	dma_resv_list_free(fobj);
+	ww_mutex_destroy(&obj->lock);
+}
+EXPORT_SYMBOL(dma_resv_fini);
+
+/**
+ * dma_resv_reserve_shared - Reserve space to add shared fences to
+ * a dma_resv.
+ * @obj: reservation object
+ * @num_fences: number of fences we want to add
+ *
+ * Should be called before dma_resv_add_shared_fence().  Must
  * be called with obj->lock held.
  *
  * RETURNS
  * Zero for success, or -errno
  */
-int reservation_object_reserve_shared(struct reservation_object *obj)
+int dma_resv_reserve_shared(struct dma_resv *obj, unsigned int num_fences)
 {
-	struct reservation_object_list *fobj, *old;
-	u32 max;
+	struct dma_resv_list *old, *new;
+	unsigned int i, j, k, max;
 
-	old = reservation_object_get_list(obj);
+	dma_resv_assert_held(obj);
+
+	old = dma_resv_get_list(obj);
 
 	if (old && old->shared_max) {
-		if (old->shared_count < old->shared_max) {
-			/* perform an in-place update */
-			kfree(obj->staged);
-			obj->staged = NULL;
+		if ((old->shared_count + num_fences) <= old->shared_max)
 			return 0;
-		} else
-			max = old->shared_max * 2;
-	} else
-		max = 4;
-
-	/*
-	 * resize obj->staged or allocate if it doesn't exist,
-	 * noop if already correct size
-	 */
-	fobj = krealloc(obj->staged, offsetof(typeof(*fobj), shared[max]),
-			GFP_KERNEL);
-	if (!fobj)
-		return -ENOMEM;
-
-	obj->staged = fobj;
-	fobj->shared_max = max;
-	return 0;
-}
-EXPORT_SYMBOL(reservation_object_reserve_shared);
-
-static void
-reservation_object_add_shared_inplace(struct reservation_object *obj,
-				      struct reservation_object_list *fobj,
-				      struct dma_fence *fence)
-{
-	struct dma_fence *signaled = NULL;
-	u32 i, signaled_idx;
-
-	dma_fence_get(fence);
-
-	preempt_disable();
-	write_seqcount_begin(&obj->seq);
-
-	for (i = 0; i < fobj->shared_count; ++i) {
-		struct dma_fence *old_fence;
-
-		old_fence = rcu_dereference_protected(fobj->shared[i],
-						reservation_object_held(obj));
-
-		if (old_fence->context == fence->context) {
-			/* memory barrier is added by write_seqcount_begin */
-			RCU_INIT_POINTER(fobj->shared[i], fence);
-			write_seqcount_end(&obj->seq);
-			preempt_enable();
-
-			dma_fence_put(old_fence);
-			return;
-		}
-
-		if (!signaled && dma_fence_is_signaled(old_fence)) {
-			signaled = old_fence;
-			signaled_idx = i;
-		}
-	}
-
-	/*
-	 * memory barrier is added by write_seqcount_begin,
-	 * fobj->shared_count is protected by this lock too
-	 */
-	if (signaled) {
-		RCU_INIT_POINTER(fobj->shared[signaled_idx], fence);
+		else
+			max = max(old->shared_count + num_fences,
+				  old->shared_max * 2);
 	} else {
-		BUG_ON(fobj->shared_count >= fobj->shared_max);
-		RCU_INIT_POINTER(fobj->shared[fobj->shared_count], fence);
-		fobj->shared_count++;
+		max = 4;
 	}
 
-	write_seqcount_end(&obj->seq);
-	preempt_enable();
-
-	dma_fence_put(signaled);
-}
-
-static void
-reservation_object_add_shared_replace(struct reservation_object *obj,
-				      struct reservation_object_list *old,
-				      struct reservation_object_list *fobj,
-				      struct dma_fence *fence)
-{
-	unsigned i, j, k;
-
-	dma_fence_get(fence);
-
-	if (!old) {
-		RCU_INIT_POINTER(fobj->shared[0], fence);
-		fobj->shared_count = 1;
-		goto done;
-	}
+	new = dma_resv_list_alloc(max);
+	if (!new)
+		return -ENOMEM;
 
 	/*
 	 * no need to bump fence refcounts, rcu_read access
@@ -174,85 +174,109 @@
 	 * references from the old struct are carried over to
 	 * the new.
 	 */
-	for (i = 0, j = 0, k = fobj->shared_max; i < old->shared_count; ++i) {
-		struct dma_fence *check;
+	for (i = 0, j = 0, k = max; i < (old ? old->shared_count : 0); ++i) {
+		struct dma_fence *fence;
 
-		check = rcu_dereference_protected(old->shared[i],
-						reservation_object_held(obj));
-
-		if (check->context == fence->context ||
-		    dma_fence_is_signaled(check))
-			RCU_INIT_POINTER(fobj->shared[--k], check);
+		fence = rcu_dereference_protected(old->shared[i],
+						  dma_resv_held(obj));
+		if (dma_fence_is_signaled(fence))
+			RCU_INIT_POINTER(new->shared[--k], fence);
 		else
-			RCU_INIT_POINTER(fobj->shared[j++], check);
+			RCU_INIT_POINTER(new->shared[j++], fence);
 	}
-	fobj->shared_count = j;
-	RCU_INIT_POINTER(fobj->shared[fobj->shared_count], fence);
-	fobj->shared_count++;
+	new->shared_count = j;
 
-done:
-	preempt_disable();
-	write_seqcount_begin(&obj->seq);
 	/*
-	 * RCU_INIT_POINTER can be used here,
-	 * seqcount provides the necessary barriers
+	 * We are not changing the effective set of fences here so can
+	 * merely update the pointer to the new array; both existing
+	 * readers and new readers will see exactly the same set of
+	 * active (unsignaled) shared fences. Individual fences and the
+	 * old array are protected by RCU and so will not vanish under
+	 * the gaze of the rcu_read_lock() readers.
 	 */
-	RCU_INIT_POINTER(obj->fence, fobj);
-	write_seqcount_end(&obj->seq);
-	preempt_enable();
+	rcu_assign_pointer(obj->fence, new);
 
 	if (!old)
-		return;
+		return 0;
 
 	/* Drop the references to the signaled fences */
-	for (i = k; i < fobj->shared_max; ++i) {
-		struct dma_fence *f;
+	for (i = k; i < max; ++i) {
+		struct dma_fence *fence;
 
-		f = rcu_dereference_protected(fobj->shared[i],
-					      reservation_object_held(obj));
-		dma_fence_put(f);
+		fence = rcu_dereference_protected(new->shared[i],
+						  dma_resv_held(obj));
+		dma_fence_put(fence);
 	}
 	kfree_rcu(old, rcu);
+
+	return 0;
 }
+EXPORT_SYMBOL(dma_resv_reserve_shared);
 
 /**
- * reservation_object_add_shared_fence - Add a fence to a shared slot
+ * dma_resv_add_shared_fence - Add a fence to a shared slot
  * @obj: the reservation object
  * @fence: the shared fence to add
  *
  * Add a fence to a shared slot, obj->lock must be held, and
- * reservation_object_reserve_shared() has been called.
+ * dma_resv_reserve_shared() has been called.
  */
-void reservation_object_add_shared_fence(struct reservation_object *obj,
-					 struct dma_fence *fence)
+void dma_resv_add_shared_fence(struct dma_resv *obj, struct dma_fence *fence)
 {
-	struct reservation_object_list *old, *fobj = obj->staged;
+	struct dma_resv_list *fobj;
+	struct dma_fence *old;
+	unsigned int i, count;
 
-	old = reservation_object_get_list(obj);
-	obj->staged = NULL;
+	dma_fence_get(fence);
 
-	if (!fobj)
-		reservation_object_add_shared_inplace(obj, old, fence);
-	else
-		reservation_object_add_shared_replace(obj, old, fobj, fence);
+	dma_resv_assert_held(obj);
+
+	fobj = dma_resv_get_list(obj);
+	count = fobj->shared_count;
+
+	preempt_disable();
+	write_seqcount_begin(&obj->seq);
+
+	for (i = 0; i < count; ++i) {
+
+		old = rcu_dereference_protected(fobj->shared[i],
+						dma_resv_held(obj));
+		if (old->context == fence->context ||
+		    dma_fence_is_signaled(old))
+			goto replace;
+	}
+
+	BUG_ON(fobj->shared_count >= fobj->shared_max);
+	old = NULL;
+	count++;
+
+replace:
+	RCU_INIT_POINTER(fobj->shared[i], fence);
+	/* pointer update must be visible before we extend the shared_count */
+	smp_store_mb(fobj->shared_count, count);
+
+	write_seqcount_end(&obj->seq);
+	preempt_enable();
+	dma_fence_put(old);
 }
-EXPORT_SYMBOL(reservation_object_add_shared_fence);
+EXPORT_SYMBOL(dma_resv_add_shared_fence);
 
 /**
- * reservation_object_add_excl_fence - Add an exclusive fence.
+ * dma_resv_add_excl_fence - Add an exclusive fence.
  * @obj: the reservation object
  * @fence: the shared fence to add
  *
  * Add a fence to the exclusive slot.  The obj->lock must be held.
  */
-void reservation_object_add_excl_fence(struct reservation_object *obj,
-				       struct dma_fence *fence)
+void dma_resv_add_excl_fence(struct dma_resv *obj, struct dma_fence *fence)
 {
-	struct dma_fence *old_fence = reservation_object_get_excl(obj);
-	struct reservation_object_list *old;
+	struct dma_fence *old_fence = dma_resv_get_excl(obj);
+	struct dma_resv_list *old;
 	u32 i = 0;
 
-	old = reservation_object_get_list(obj);
+	dma_resv_assert_held(obj);
+
+	old = dma_resv_get_list(obj);
 	if (old)
 		i = old->shared_count;
 
@@ -271,27 +295,27 @@
 	/* inplace update, no shared fences */
 	while (i--)
 		dma_fence_put(rcu_dereference_protected(old->shared[i],
-						reservation_object_held(obj)));
+						dma_resv_held(obj)));
 
 	dma_fence_put(old_fence);
 }
-EXPORT_SYMBOL(reservation_object_add_excl_fence);
+EXPORT_SYMBOL(dma_resv_add_excl_fence);
 
 /**
-* reservation_object_copy_fences - Copy all fences from src to dst.
+* dma_resv_copy_fences - Copy all fences from src to dst.
 * @dst: the destination reservation object
 * @src: the source reservation object
 *
 * Copy all fences from src to dst. dst-lock must be held.
 */
-int reservation_object_copy_fences(struct reservation_object *dst,
-				   struct reservation_object *src)
+int dma_resv_copy_fences(struct dma_resv *dst, struct dma_resv *src)
 {
-	struct reservation_object_list *src_list, *dst_list;
+	struct dma_resv_list *src_list, *dst_list;
 	struct dma_fence *old, *new;
-	size_t size;
 	unsigned i;
 
+	dma_resv_assert_held(dst);
+
 	rcu_read_lock();
 	src_list = rcu_dereference(src->fence);
 
@@ -299,10 +323,9 @@
 	if (src_list) {
 		unsigned shared_count = src_list->shared_count;
 
-		size = offsetof(typeof(*src_list), shared[shared_count]);
 		rcu_read_unlock();
 
-		dst_list = kmalloc(size, GFP_KERNEL);
+		dst_list = dma_resv_list_alloc(shared_count);
 		if (!dst_list)
 			return -ENOMEM;
 
@@ -314,7 +337,6 @@
 		}
 
 		dst_list->shared_count = 0;
-		dst_list->shared_max = shared_count;
 		for (i = 0; i < src_list->shared_count; ++i) {
 			struct dma_fence *fence;
 
@@ -324,7 +346,7 @@
 				continue;
 
 			if (!dma_fence_get_rcu(fence)) {
-				kfree(dst_list);
+				dma_resv_list_free(dst_list);
 				src_list = rcu_dereference(src->fence);
 				goto retry;
 			}
@@ -343,11 +365,8 @@
 	new = dma_fence_get_rcu_safe(&src->fence_excl);
 	rcu_read_unlock();
 
-	kfree(dst->staged);
-	dst->staged = NULL;
-
-	src_list = reservation_object_get_list(dst);
-	old = reservation_object_get_excl(dst);
+	src_list = dma_resv_get_list(dst);
+	old = dma_resv_get_excl(dst);
 
 	preempt_disable();
 	write_seqcount_begin(&dst->seq);
@@ -357,16 +376,15 @@
 	write_seqcount_end(&dst->seq);
 	preempt_enable();
 
-	if (src_list)
-		kfree_rcu(src_list, rcu);
+	dma_resv_list_free(src_list);
 	dma_fence_put(old);
 
 	return 0;
 }
-EXPORT_SYMBOL(reservation_object_copy_fences);
+EXPORT_SYMBOL(dma_resv_copy_fences);
 
 /**
- * reservation_object_get_fences_rcu - Get an object's shared and exclusive
+ * dma_resv_get_fences_rcu - Get an object's shared and exclusive
  * fences without update side lock held
  * @obj: the reservation object
  * @pfence_excl: the returned exclusive fence (or NULL)
@@ -378,10 +396,10 @@
  * exclusive fence is not specified the fence is put into the array of the
  * shared fences as well. Returns either zero or -ENOMEM.
  */
-int reservation_object_get_fences_rcu(struct reservation_object *obj,
-				      struct dma_fence **pfence_excl,
-				      unsigned *pshared_count,
-				      struct dma_fence ***pshared)
+int dma_resv_get_fences_rcu(struct dma_resv *obj,
+			    struct dma_fence **pfence_excl,
+			    unsigned *pshared_count,
+			    struct dma_fence ***pshared)
 {
 	struct dma_fence **shared = NULL;
 	struct dma_fence *fence_excl;
@@ -389,7 +407,7 @@
 	int ret = 1;
 
 	do {
-		struct reservation_object_list *fobj;
+		struct dma_resv_list *fobj;
 		unsigned int i, seq;
 		size_t sz = 0;
 
@@ -416,6 +434,10 @@
 					   GFP_NOWAIT | __GFP_NOWARN);
 			if (!nshared) {
 				rcu_read_unlock();
+
+				dma_fence_put(fence_excl);
+				fence_excl = NULL;
+
 				nshared = krealloc(shared, sz, GFP_KERNEL);
 				if (nshared) {
 					shared = nshared;
@@ -432,13 +454,6 @@
 				if (!dma_fence_get_rcu(shared[i]))
 					break;
 			}
-
-			if (!pfence_excl && fence_excl) {
-				shared[i] = fence_excl;
-				fence_excl = NULL;
-				++i;
-				++shared_count;
-			}
 		}
 
 		if (i != shared_count || read_seqcount_retry(&obj->seq, seq)) {
@@ -453,6 +468,11 @@
 		rcu_read_unlock();
 	} while (ret);
 
+	if (pfence_excl)
+		*pfence_excl = fence_excl;
+	else if (fence_excl)
+		shared[shared_count++] = fence_excl;
+
 	if (!shared_count) {
 		kfree(shared);
 		shared = NULL;
@@ -460,15 +480,12 @@
 
 	*pshared_count = shared_count;
 	*pshared = shared;
-	if (pfence_excl)
-		*pfence_excl = fence_excl;
-
 	return ret;
 }
-EXPORT_SYMBOL_GPL(reservation_object_get_fences_rcu);
+EXPORT_SYMBOL_GPL(dma_resv_get_fences_rcu);
 
 /**
- * reservation_object_wait_timeout_rcu - Wait on reservation's objects
+ * dma_resv_wait_timeout_rcu - Wait on reservation's objects
  * shared and/or exclusive fences.
  * @obj: the reservation object
  * @wait_all: if true, wait on all fences, else wait on just exclusive fence
@@ -479,9 +496,9 @@
  * Returns -ERESTARTSYS if interrupted, 0 if the wait timed out, or
 * greater than zero on success.
  */
-long reservation_object_wait_timeout_rcu(struct reservation_object *obj,
-					 bool wait_all, bool intr,
-					 unsigned long timeout)
+long dma_resv_wait_timeout_rcu(struct dma_resv *obj,
+			       bool wait_all, bool intr,
+			       unsigned long timeout)
 {
 	struct dma_fence *fence;
 	unsigned seq, shared_count;
@@ -509,8 +526,7 @@
 	}
 
 	if (wait_all) {
-		struct reservation_object_list *fobj =
-						rcu_dereference(obj->fence);
+		struct dma_resv_list *fobj = rcu_dereference(obj->fence);
 
 		if (fobj)
 			shared_count = fobj->shared_count;
@@ -553,11 +569,10 @@
 	rcu_read_unlock();
 	goto retry;
 }
-EXPORT_SYMBOL_GPL(reservation_object_wait_timeout_rcu);
+EXPORT_SYMBOL_GPL(dma_resv_wait_timeout_rcu);
 
 
-static inline int
-reservation_object_test_signaled_single(struct dma_fence *passed_fence)
+static inline int dma_resv_test_signaled_single(struct dma_fence *passed_fence)
 {
 	struct dma_fence *fence, *lfence = passed_fence;
 	int ret = 1;
@@ -574,7 +589,7 @@
 }
 
 /**
- * reservation_object_test_signaled_rcu - Test if a reservation object's
+ * dma_resv_test_signaled_rcu - Test if a reservation object's
  * fences have been signaled.
  * @obj: the reservation object
  * @test_all: if true, test all fences, otherwise only test the exclusive
@@ -583,8 +598,7 @@
  * RETURNS
  * true if all fences signaled, else false
  */
-bool reservation_object_test_signaled_rcu(struct reservation_object *obj,
-					  bool test_all)
+bool dma_resv_test_signaled_rcu(struct dma_resv *obj, bool test_all)
 {
 	unsigned seq, shared_count;
 	int ret;
@@ -598,8 +612,7 @@
 	if (test_all) {
 		unsigned i;
 
-		struct reservation_object_list *fobj =
-						rcu_dereference(obj->fence);
+		struct dma_resv_list *fobj = rcu_dereference(obj->fence);
 
 		if (fobj)
 			shared_count = fobj->shared_count;
@@ -607,7 +620,7 @@
 		for (i = 0; i < shared_count; ++i) {
 			struct dma_fence *fence = rcu_dereference(fobj->shared[i]);
 
-			ret = reservation_object_test_signaled_single(fence);
+			ret = dma_resv_test_signaled_single(fence);
 			if (ret < 0)
 				goto retry;
 			else if (!ret)
@@ -622,8 +635,7 @@
 		struct dma_fence *fence_excl = rcu_dereference(obj->fence_excl);
 
 		if (fence_excl) {
-			ret = reservation_object_test_signaled_single(
-								fence_excl);
+			ret = dma_resv_test_signaled_single(fence_excl);
 			if (ret < 0)
 				goto retry;
 
@@ -635,4 +647,4 @@
 	rcu_read_unlock();
 	return ret;
 }
-EXPORT_SYMBOL_GPL(reservation_object_test_signaled_rcu);
+EXPORT_SYMBOL_GPL(dma_resv_test_signaled_rcu);
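
With the rename from reservation_object to dma_resv, the driver-side pattern stays the same but shared-fence slots are now reserved explicitly for the number of fences about to be added, so the add path itself can no longer fail. A rough sketch of the calling convention follows; publish_fence is an invented helper and locking with a NULL ww_acquire_ctx is an assumption of the example.

#include <linux/dma-resv.h>

static int publish_fence(struct dma_resv *resv, struct dma_fence *fence,
			 bool exclusive)
{
	int ret;

	ret = dma_resv_lock(resv, NULL);	/* ww_mutex, no acquire ctx */
	if (ret)
		return ret;

	if (exclusive) {
		dma_resv_add_excl_fence(resv, fence);
	} else {
		ret = dma_resv_reserve_shared(resv, 1);
		if (!ret)
			dma_resv_add_shared_fence(resv, fence);
	}

	dma_resv_unlock(resv);
	return ret;
}
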
diff --git a/drivers/dma-buf/selftest.c b/drivers/dma-buf/selftest.c
new file mode 100644
index 0000000..c60b694
--- /dev/null
+++ b/drivers/dma-buf/selftest.c
@@ -0,0 +1,167 @@
+/* SPDX-License-Identifier: MIT */
+
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include <linux/compiler.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/sched/signal.h>
+#include <linux/slab.h>
+
+#include "selftest.h"
+
+enum {
+#define selftest(n, func) __idx_##n,
+#include "selftests.h"
+#undef selftest
+};
+
+#define selftest(n, f) [__idx_##n] = { .name = #n, .func = f },
+static struct selftest {
+	bool enabled;
+	const char *name;
+	int (*func)(void);
+} selftests[] = {
+#include "selftests.h"
+};
+#undef selftest
+
+/* Embed the line number into the parameter name so that we can order tests */
+#define param(n) __PASTE(igt__, __PASTE(__PASTE(__LINE__, __), n))
+#define selftest_0(n, func, id) \
+module_param_named(id, selftests[__idx_##n].enabled, bool, 0400);
+#define selftest(n, func) selftest_0(n, func, param(n))
+#include "selftests.h"
+#undef selftest
+
+int __sanitycheck__(void)
+{
+	pr_debug("Hello World!\n");
+	return 0;
+}
+
+static char *__st_filter;
+
+static bool apply_subtest_filter(const char *caller, const char *name)
+{
+	char *filter, *sep, *tok;
+	bool result = true;
+
+	filter = kstrdup(__st_filter, GFP_KERNEL);
+	for (sep = filter; (tok = strsep(&sep, ","));) {
+		bool allow = true;
+		char *sl;
+
+		if (*tok == '!') {
+			allow = false;
+			tok++;
+		}
+
+		if (*tok == '\0')
+			continue;
+
+		sl = strchr(tok, '/');
+		if (sl) {
+			*sl++ = '\0';
+			if (strcmp(tok, caller)) {
+				if (allow)
+					result = false;
+				continue;
+			}
+			tok = sl;
+		}
+
+		if (strcmp(tok, name)) {
+			if (allow)
+				result = false;
+			continue;
+		}
+
+		result = allow;
+		break;
+	}
+	kfree(filter);
+
+	return result;
+}
+
+int
+__subtests(const char *caller, const struct subtest *st, int count, void *data)
+{
+	int err;
+
+	for (; count--; st++) {
+		cond_resched();
+		if (signal_pending(current))
+			return -EINTR;
+
+		if (!apply_subtest_filter(caller, st->name))
+			continue;
+
+		pr_info("dma-buf: Running %s/%s\n", caller, st->name);
+
+		err = st->func(data);
+		if (err && err != -EINTR) {
+			pr_err("dma-buf/%s: %s failed with error %d\n",
+			       caller, st->name, err);
+			return err;
+		}
+	}
+
+	return 0;
+}
+
+static void set_default_test_all(struct selftest *st, unsigned long count)
+{
+	unsigned long i;
+
+	for (i = 0; i < count; i++)
+		if (st[i].enabled)
+			return;
+
+	for (i = 0; i < count; i++)
+		st[i].enabled = true;
+}
+
+static int run_selftests(struct selftest *st, unsigned long count)
+{
+	int err = 0;
+
+	set_default_test_all(st, count);
+
+	/* Tests are listed in natural order in selftests.h */
+	for (; count--; st++) {
+		if (!st->enabled)
+			continue;
+
+		pr_info("dma-buf: Running %s\n", st->name);
+		err = st->func();
+		if (err)
+			break;
+	}
+
+	if (WARN(err > 0 || err == -ENOTTY,
+		 "%s returned %d, conflicting with selftest's magic values!\n",
+		 st->name, err))
+		err = -1;
+
+	return err;
+}
+
+static int __init st_init(void)
+{
+	return run_selftests(selftests, ARRAY_SIZE(selftests));
+}
+
+static void __exit st_exit(void)
+{
+}
+
+module_param_named(st_filter, __st_filter, charp, 0400);
+module_init(st_init);
+module_exit(st_exit);
+
+MODULE_DESCRIPTION("Self-test harness for dma-buf");
+MODULE_LICENSE("GPL and additional rights");
diff --git a/drivers/dma-buf/selftest.h b/drivers/dma-buf/selftest.h
new file mode 100644
index 0000000..45793af
--- /dev/null
+++ b/drivers/dma-buf/selftest.h
@@ -0,0 +1,30 @@
+// SPDX-License-Identifier: MIT
+
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __SELFTEST_H__
+#define __SELFTEST_H__
+
+#include <linux/compiler.h>
+
+#define selftest(name, func) int func(void);
+#include "selftests.h"
+#undef selftest
+
+struct subtest {
+	int (*func)(void *data);
+	const char *name;
+};
+
+int __subtests(const char *caller,
+	       const struct subtest *st,
+	       int count,
+	       void *data);
+#define subtests(T, data) \
+	__subtests(__func__, T, ARRAY_SIZE(T), data)
+
+#define SUBTEST(x) { x, #x }
+
+#endif /* __SELFTEST_H__ */
diff --git a/drivers/dma-buf/selftests.h b/drivers/dma-buf/selftests.h
new file mode 100644
index 0000000..5320386
--- /dev/null
+++ b/drivers/dma-buf/selftests.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: MIT */
+/* List each unit test as selftest(name, function)
+ *
+ * The name is used as both an enum and expanded as subtest__name to create
+ * a module parameter. It must be unique and legal for a C identifier.
+ *
+ * The function should be of type int function(void). It may be conditionally
+ * compiled using #if IS_ENABLED(DRM_I915_SELFTEST).
+ *
+ * Tests are executed in order by igt/dmabuf_selftest
+ */
+selftest(sanitycheck, __sanitycheck__) /* keep first (igt selfcheck) */
+selftest(dma_fence, dma_fence)
diff --git a/drivers/dma-buf/seqno-fence.c b/drivers/dma-buf/seqno-fence.c
index f47112a..bfe14e9 100644
--- a/drivers/dma-buf/seqno-fence.c
+++ b/drivers/dma-buf/seqno-fence.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * seqno-fence, using a dma-buf to synchronize fencing
  *
@@ -6,15 +7,6 @@
  * Authors:
  *   Rob Clark <robdclark@gmail.com>
  *   Maarten Lankhorst <maarten.lankhorst@canonical.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
  */
 
 #include <linux/slab.h>
diff --git a/drivers/dma-buf/st-dma-fence.c b/drivers/dma-buf/st-dma-fence.c
new file mode 100644
index 0000000..e593064
--- /dev/null
+++ b/drivers/dma-buf/st-dma-fence.c
@@ -0,0 +1,574 @@
+/* SPDX-License-Identifier: MIT */
+
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include <linux/delay.h>
+#include <linux/dma-fence.h>
+#include <linux/kernel.h>
+#include <linux/kthread.h>
+#include <linux/sched/signal.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+
+#include "selftest.h"
+
+static struct kmem_cache *slab_fences;
+
+static struct mock_fence {
+	struct dma_fence base;
+	struct spinlock lock;
+} *to_mock_fence(struct dma_fence *f) {
+	return container_of(f, struct mock_fence, base);
+}
+
+static const char *mock_name(struct dma_fence *f)
+{
+	return "mock";
+}
+
+static void mock_fence_release(struct dma_fence *f)
+{
+	kmem_cache_free(slab_fences, to_mock_fence(f));
+}
+
+struct wait_cb {
+	struct dma_fence_cb cb;
+	struct task_struct *task;
+};
+
+static void mock_wakeup(struct dma_fence *f, struct dma_fence_cb *cb)
+{
+	wake_up_process(container_of(cb, struct wait_cb, cb)->task);
+}
+
+static long mock_wait(struct dma_fence *f, bool intr, long timeout)
+{
+	const int state = intr ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE;
+	struct wait_cb cb = { .task = current };
+
+	if (dma_fence_add_callback(f, &cb.cb, mock_wakeup))
+		return timeout;
+
+	while (timeout) {
+		set_current_state(state);
+
+		if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &f->flags))
+			break;
+
+		if (signal_pending_state(state, current))
+			break;
+
+		timeout = schedule_timeout(timeout);
+	}
+	__set_current_state(TASK_RUNNING);
+
+	if (!dma_fence_remove_callback(f, &cb.cb))
+		return timeout;
+
+	if (signal_pending_state(state, current))
+		return -ERESTARTSYS;
+
+	return -ETIME;
+}
+
+static const struct dma_fence_ops mock_ops = {
+	.get_driver_name = mock_name,
+	.get_timeline_name = mock_name,
+	.wait = mock_wait,
+	.release = mock_fence_release,
+};
+
+static struct dma_fence *mock_fence(void)
+{
+	struct mock_fence *f;
+
+	f = kmem_cache_alloc(slab_fences, GFP_KERNEL);
+	if (!f)
+		return NULL;
+
+	spin_lock_init(&f->lock);
+	dma_fence_init(&f->base, &mock_ops, &f->lock, 0, 0);
+
+	return &f->base;
+}
+
+static int sanitycheck(void *arg)
+{
+	struct dma_fence *f;
+
+	f = mock_fence();
+	if (!f)
+		return -ENOMEM;
+
+	dma_fence_signal(f);
+	dma_fence_put(f);
+
+	return 0;
+}
+
+static int test_signaling(void *arg)
+{
+	struct dma_fence *f;
+	int err = -EINVAL;
+
+	f = mock_fence();
+	if (!f)
+		return -ENOMEM;
+
+	if (dma_fence_is_signaled(f)) {
+		pr_err("Fence unexpectedly signaled on creation\n");
+		goto err_free;
+	}
+
+	if (dma_fence_signal(f)) {
+		pr_err("Fence reported being already signaled\n");
+		goto err_free;
+	}
+
+	if (!dma_fence_is_signaled(f)) {
+		pr_err("Fence not reporting signaled\n");
+		goto err_free;
+	}
+
+	if (!dma_fence_signal(f)) {
+		pr_err("Fence reported not being already signaled\n");
+		goto err_free;
+	}
+
+	err = 0;
+err_free:
+	dma_fence_put(f);
+	return err;
+}
+
+struct simple_cb {
+	struct dma_fence_cb cb;
+	bool seen;
+};
+
+static void simple_callback(struct dma_fence *f, struct dma_fence_cb *cb)
+{
+	smp_store_mb(container_of(cb, struct simple_cb, cb)->seen, true);
+}
+
+static int test_add_callback(void *arg)
+{
+	struct simple_cb cb = {};
+	struct dma_fence *f;
+	int err = -EINVAL;
+
+	f = mock_fence();
+	if (!f)
+		return -ENOMEM;
+
+	if (dma_fence_add_callback(f, &cb.cb, simple_callback)) {
+		pr_err("Failed to add callback, fence already signaled!\n");
+		goto err_free;
+	}
+
+	dma_fence_signal(f);
+	if (!cb.seen) {
+		pr_err("Callback failed!\n");
+		goto err_free;
+	}
+
+	err = 0;
+err_free:
+	dma_fence_put(f);
+	return err;
+}
+
+static int test_late_add_callback(void *arg)
+{
+	struct simple_cb cb = {};
+	struct dma_fence *f;
+	int err = -EINVAL;
+
+	f = mock_fence();
+	if (!f)
+		return -ENOMEM;
+
+	dma_fence_signal(f);
+
+	if (!dma_fence_add_callback(f, &cb.cb, simple_callback)) {
+		pr_err("Added callback, but fence was already signaled!\n");
+		goto err_free;
+	}
+
+	dma_fence_signal(f);
+	if (cb.seen) {
+		pr_err("Callback called after failed attachment!\n");
+		goto err_free;
+	}
+
+	err = 0;
+err_free:
+	dma_fence_put(f);
+	return err;
+}
+
+static int test_rm_callback(void *arg)
+{
+	struct simple_cb cb = {};
+	struct dma_fence *f;
+	int err = -EINVAL;
+
+	f = mock_fence();
+	if (!f)
+		return -ENOMEM;
+
+	if (dma_fence_add_callback(f, &cb.cb, simple_callback)) {
+		pr_err("Failed to add callback, fence already signaled!\n");
+		goto err_free;
+	}
+
+	if (!dma_fence_remove_callback(f, &cb.cb)) {
+		pr_err("Failed to remove callback!\n");
+		goto err_free;
+	}
+
+	dma_fence_signal(f);
+	if (cb.seen) {
+		pr_err("Callback still signaled after removal!\n");
+		goto err_free;
+	}
+
+	err = 0;
+err_free:
+	dma_fence_put(f);
+	return err;
+}
+
+static int test_late_rm_callback(void *arg)
+{
+	struct simple_cb cb = {};
+	struct dma_fence *f;
+	int err = -EINVAL;
+
+	f = mock_fence();
+	if (!f)
+		return -ENOMEM;
+
+	if (dma_fence_add_callback(f, &cb.cb, simple_callback)) {
+		pr_err("Failed to add callback, fence already signaled!\n");
+		goto err_free;
+	}
+
+	dma_fence_signal(f);
+	if (!cb.seen) {
+		pr_err("Callback failed!\n");
+		goto err_free;
+	}
+
+	if (dma_fence_remove_callback(f, &cb.cb)) {
+		pr_err("Callback removal succeeded after being executed!\n");
+		goto err_free;
+	}
+
+	err = 0;
+err_free:
+	dma_fence_put(f);
+	return err;
+}
+
+static int test_status(void *arg)
+{
+	struct dma_fence *f;
+	int err = -EINVAL;
+
+	f = mock_fence();
+	if (!f)
+		return -ENOMEM;
+
+	if (dma_fence_get_status(f)) {
+		pr_err("Fence unexpectedly has signaled status on creation\n");
+		goto err_free;
+	}
+
+	dma_fence_signal(f);
+	if (!dma_fence_get_status(f)) {
+		pr_err("Fence not reporting signaled status\n");
+		goto err_free;
+	}
+
+	err = 0;
+err_free:
+	dma_fence_put(f);
+	return err;
+}
+
+static int test_error(void *arg)
+{
+	struct dma_fence *f;
+	int err = -EINVAL;
+
+	f = mock_fence();
+	if (!f)
+		return -ENOMEM;
+
+	dma_fence_set_error(f, -EIO);
+
+	if (dma_fence_get_status(f)) {
+		pr_err("Fence unexpectedly has error status before signal\n");
+		goto err_free;
+	}
+
+	dma_fence_signal(f);
+	if (dma_fence_get_status(f) != -EIO) {
+		pr_err("Fence not reporting error status, got %d\n",
+		       dma_fence_get_status(f));
+		goto err_free;
+	}
+
+	err = 0;
+err_free:
+	dma_fence_put(f);
+	return err;
+}
+
+static int test_wait(void *arg)
+{
+	struct dma_fence *f;
+	int err = -EINVAL;
+
+	f = mock_fence();
+	if (!f)
+		return -ENOMEM;
+
+	if (dma_fence_wait_timeout(f, false, 0) != -ETIME) {
+		pr_err("Wait reported complete before being signaled\n");
+		goto err_free;
+	}
+
+	dma_fence_signal(f);
+
+	if (dma_fence_wait_timeout(f, false, 0) != 0) {
+		pr_err("Wait reported incomplete after being signaled\n");
+		goto err_free;
+	}
+
+	err = 0;
+err_free:
+	dma_fence_signal(f);
+	dma_fence_put(f);
+	return err;
+}
+
+struct wait_timer {
+	struct timer_list timer;
+	struct dma_fence *f;
+};
+
+static void wait_timer(struct timer_list *timer)
+{
+	struct wait_timer *wt = from_timer(wt, timer, timer);
+
+	dma_fence_signal(wt->f);
+}
+
+static int test_wait_timeout(void *arg)
+{
+	struct wait_timer wt;
+	int err = -EINVAL;
+
+	timer_setup_on_stack(&wt.timer, wait_timer, 0);
+
+	wt.f = mock_fence();
+	if (!wt.f)
+		return -ENOMEM;
+
+	if (dma_fence_wait_timeout(wt.f, false, 1) != -ETIME) {
+		pr_err("Wait reported complete before being signaled\n");
+		goto err_free;
+	}
+
+	mod_timer(&wt.timer, jiffies + 1);
+
+	if (dma_fence_wait_timeout(wt.f, false, 2) == -ETIME) {
+		if (timer_pending(&wt.timer)) {
+			pr_notice("Timer did not fire within the jiffie!\n");
+			err = 0; /* not our fault! */
+		} else {
+			pr_err("Wait reported incomplete after timeout\n");
+		}
+		goto err_free;
+	}
+
+	err = 0;
+err_free:
+	del_timer_sync(&wt.timer);
+	destroy_timer_on_stack(&wt.timer);
+	dma_fence_signal(wt.f);
+	dma_fence_put(wt.f);
+	return err;
+}
+
+static int test_stub(void *arg)
+{
+	struct dma_fence *f[64];
+	int err = -EINVAL;
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(f); i++) {
+		f[i] = dma_fence_get_stub();
+		if (!dma_fence_is_signaled(f[i])) {
+			pr_err("Obtained unsignaled stub fence!\n");
+			goto err;
+		}
+	}
+
+	err = 0;
+err:
+	while (i--)
+		dma_fence_put(f[i]);
+	return err;
+}
+
+/* Now off to the races! */
+
+struct race_thread {
+	struct dma_fence __rcu **fences;
+	struct task_struct *task;
+	bool before;
+	int id;
+};
+
+static void __wait_for_callbacks(struct dma_fence *f)
+{
+	spin_lock_irq(f->lock);
+	spin_unlock_irq(f->lock);
+}
+
+static int thread_signal_callback(void *arg)
+{
+	const struct race_thread *t = arg;
+	unsigned long pass = 0;
+	unsigned long miss = 0;
+	int err = 0;
+
+	while (!err && !kthread_should_stop()) {
+		struct dma_fence *f1, *f2;
+		struct simple_cb cb;
+
+		f1 = mock_fence();
+		if (!f1) {
+			err = -ENOMEM;
+			break;
+		}
+
+		rcu_assign_pointer(t->fences[t->id], f1);
+		smp_wmb();
+
+		rcu_read_lock();
+		do {
+			f2 = dma_fence_get_rcu_safe(&t->fences[!t->id]);
+		} while (!f2 && !kthread_should_stop());
+		rcu_read_unlock();
+
+		if (t->before)
+			dma_fence_signal(f1);
+
+		smp_store_mb(cb.seen, false);
+		if (!f2 || dma_fence_add_callback(f2, &cb.cb, simple_callback))
+			miss++, cb.seen = true;
+
+		if (!t->before)
+			dma_fence_signal(f1);
+
+		if (!cb.seen) {
+			dma_fence_wait(f2, false);
+			__wait_for_callbacks(f2);
+		}
+
+		if (!READ_ONCE(cb.seen)) {
+			pr_err("Callback not seen on thread %d, pass %lu (%lu misses), signaling %s add_callback; fence signaled? %s\n",
+			       t->id, pass, miss,
+			       t->before ? "before" : "after",
+			       dma_fence_is_signaled(f2) ? "yes" : "no");
+			err = -EINVAL;
+		}
+
+		dma_fence_put(f2);
+
+		rcu_assign_pointer(t->fences[t->id], NULL);
+		smp_wmb();
+
+		dma_fence_put(f1);
+
+		pass++;
+	}
+
+	pr_info("%s[%d] completed %lu passes, %lu misses\n",
+		__func__, t->id, pass, miss);
+	return err;
+}
+
+static int race_signal_callback(void *arg)
+{
+	struct dma_fence __rcu *f[2] = {};
+	int ret = 0;
+	int pass;
+
+	for (pass = 0; !ret && pass <= 1; pass++) {
+		struct race_thread t[2];
+		int i;
+
+		for (i = 0; i < ARRAY_SIZE(t); i++) {
+			t[i].fences = f;
+			t[i].id = i;
+			t[i].before = pass;
+			t[i].task = kthread_run(thread_signal_callback, &t[i],
+						"dma-fence:%d", i);
+			get_task_struct(t[i].task);
+		}
+
+		msleep(50);
+
+		for (i = 0; i < ARRAY_SIZE(t); i++) {
+			int err;
+
+			err = kthread_stop(t[i].task);
+			if (err && !ret)
+				ret = err;
+
+			put_task_struct(t[i].task);
+		}
+	}
+
+	return ret;
+}
+
+int dma_fence(void)
+{
+	static const struct subtest tests[] = {
+		SUBTEST(sanitycheck),
+		SUBTEST(test_signaling),
+		SUBTEST(test_add_callback),
+		SUBTEST(test_late_add_callback),
+		SUBTEST(test_rm_callback),
+		SUBTEST(test_late_rm_callback),
+		SUBTEST(test_status),
+		SUBTEST(test_error),
+		SUBTEST(test_wait),
+		SUBTEST(test_wait_timeout),
+		SUBTEST(test_stub),
+		SUBTEST(race_signal_callback),
+	};
+	int ret;
+
+	pr_info("sizeof(dma_fence)=%zu\n", sizeof(struct dma_fence));
+
+	slab_fences = KMEM_CACHE(mock_fence,
+				 SLAB_TYPESAFE_BY_RCU |
+				 SLAB_HWCACHE_ALIGN);
+	if (!slab_fences)
+		return -ENOMEM;
+
+	ret = subtests(tests, NULL);
+
+	kmem_cache_destroy(slab_fences);
+
+	return ret;
+}
diff --git a/drivers/dma-buf/sw_sync.c b/drivers/dma-buf/sw_sync.c
index 53c1d6d..6713cfb 100644
--- a/drivers/dma-buf/sw_sync.c
+++ b/drivers/dma-buf/sw_sync.c
@@ -1,17 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * Sync File validation framework
  *
  * Copyright (C) 2012 Google, Inc.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
  */
 
 #include <linux/file.h>
@@ -141,17 +132,14 @@
 {
 	struct sync_pt *pt = dma_fence_to_sync_pt(fence);
 	struct sync_timeline *parent = dma_fence_parent(fence);
+	unsigned long flags;
 
+	spin_lock_irqsave(fence->lock, flags);
 	if (!list_empty(&pt->link)) {
-		unsigned long flags;
-
-		spin_lock_irqsave(fence->lock, flags);
-		if (!list_empty(&pt->link)) {
-			list_del(&pt->link);
-			rb_erase(&pt->node, &parent->pt_tree);
-		}
-		spin_unlock_irqrestore(fence->lock, flags);
+		list_del(&pt->link);
+		rb_erase(&pt->node, &parent->pt_tree);
 	}
+	spin_unlock_irqrestore(fence->lock, flags);
 
 	sync_timeline_put(parent);
 	dma_fence_free(fence);
@@ -161,7 +149,7 @@
 {
 	struct sync_timeline *parent = dma_fence_parent(fence);
 
-	return !__dma_fence_is_later(fence->seqno, parent->value);
+	return !__dma_fence_is_later(fence->seqno, parent->value, fence->ops);
 }
 
 static bool timeline_fence_enable_signaling(struct dma_fence *fence)
@@ -172,7 +160,7 @@
 static void timeline_fence_value_str(struct dma_fence *fence,
 				    char *str, int size)
 {
-	snprintf(str, size, "%d", fence->seqno);
+	snprintf(str, size, "%lld", fence->seqno);
 }
 
 static void timeline_fence_timeline_value_str(struct dma_fence *fence,
@@ -274,7 +262,8 @@
 				p = &parent->rb_left;
 			} else {
 				if (dma_fence_get_rcu(&other->base)) {
-					dma_fence_put(&pt->base);
+					sync_timeline_put(obj);
+					kfree(pt);
 					pt = other;
 					goto unlock;
 				}
diff --git a/drivers/dma-buf/sync_debug.c b/drivers/dma-buf/sync_debug.c
index c4c8ecb..101394f 100644
--- a/drivers/dma-buf/sync_debug.c
+++ b/drivers/dma-buf/sync_debug.c
@@ -1,17 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * Sync File validation framework and debug information
  *
  * Copyright (C) 2012 Google, Inc.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
  */
 
 #include <linux/debugfs.h>
@@ -147,7 +138,7 @@
 	}
 }
 
-static int sync_debugfs_show(struct seq_file *s, void *unused)
+static int sync_info_debugfs_show(struct seq_file *s, void *unused)
 {
 	struct list_head *pos;
 
@@ -178,17 +169,7 @@
 	return 0;
 }
 
-static int sync_info_debugfs_open(struct inode *inode, struct file *file)
-{
-	return single_open(file, sync_debugfs_show, inode->i_private);
-}
-
-static const struct file_operations sync_info_debugfs_fops = {
-	.open           = sync_info_debugfs_open,
-	.read           = seq_read,
-	.llseek         = seq_lseek,
-	.release        = single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(sync_info_debugfs);
 
 static __init int sync_debugfs_init(void)
 {
@@ -207,29 +188,3 @@
 	return 0;
 }
 late_initcall(sync_debugfs_init);
-
-#define DUMP_CHUNK 256
-static char sync_dump_buf[64 * 1024];
-void sync_dump(void)
-{
-	struct seq_file s = {
-		.buf = sync_dump_buf,
-		.size = sizeof(sync_dump_buf) - 1,
-	};
-	int i;
-
-	sync_debugfs_show(&s, NULL);
-
-	for (i = 0; i < s.count; i += DUMP_CHUNK) {
-		if ((s.count - i) > DUMP_CHUNK) {
-			char c = s.buf[i + DUMP_CHUNK];
-
-			s.buf[i + DUMP_CHUNK] = 0;
-			pr_cont("%s", s.buf + i);
-			s.buf[i + DUMP_CHUNK] = c;
-		} else {
-			s.buf[s.count] = 0;
-			pr_cont("%s", s.buf + i);
-		}
-	}
-}
diff --git a/drivers/dma-buf/sync_debug.h b/drivers/dma-buf/sync_debug.h
index 05e33f9..6176e52 100644
--- a/drivers/dma-buf/sync_debug.h
+++ b/drivers/dma-buf/sync_debug.h
@@ -68,6 +68,5 @@
 void sync_timeline_debug_remove(struct sync_timeline *obj);
 void sync_file_debug_add(struct sync_file *fence);
 void sync_file_debug_remove(struct sync_file *fence);
-void sync_dump(void);
 
 #endif /* _LINUX_SYNC_H */
diff --git a/drivers/dma-buf/sync_file.c b/drivers/dma-buf/sync_file.c
index 35dd064..25c5c07 100644
--- a/drivers/dma-buf/sync_file.c
+++ b/drivers/dma-buf/sync_file.c
@@ -1,17 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * drivers/dma-buf/sync_file.c
  *
  * Copyright (C) 2012 Google, Inc.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
  */
 
 #include <linux/export.h>
@@ -144,7 +135,7 @@
 	} else {
 		struct dma_fence *fence = sync_file->fence;
 
-		snprintf(buf, len, "%s-%s%llu-%d",
+		snprintf(buf, len, "%s-%s%llu-%lld",
 			 fence->ops->get_driver_name(fence),
 			 fence->ops->get_timeline_name(fence),
 			 fence->context,
@@ -258,7 +249,8 @@
 
 			i_b++;
 		} else {
-			if (pt_a->seqno - pt_b->seqno <= INT_MAX)
+			if (__dma_fence_is_later(pt_a->seqno, pt_b->seqno,
+						 pt_a->ops))
 				add_fence(fences, &i, pt_a);
 			else
 				add_fence(fences, &i, pt_b);
@@ -427,7 +419,7 @@
 	 * info->num_fences.
 	 */
 	if (!info.num_fences) {
-		info.status = dma_fence_is_signaled(sync_file->fence);
+		info.status = dma_fence_get_status(sync_file->fence);
 		goto no_fences;
 	} else {
 		info.status = 1;
diff --git a/drivers/dma-buf/udmabuf.c b/drivers/dma-buf/udmabuf.c
new file mode 100644
index 0000000..9635897
--- /dev/null
+++ b/drivers/dma-buf/udmabuf.c
@@ -0,0 +1,295 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/cred.h>
+#include <linux/device.h>
+#include <linux/dma-buf.h>
+#include <linux/highmem.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/memfd.h>
+#include <linux/miscdevice.h>
+#include <linux/module.h>
+#include <linux/shmem_fs.h>
+#include <linux/slab.h>
+#include <linux/udmabuf.h>
+
+static const u32    list_limit = 1024;  /* udmabuf_create_list->count limit */
+static const size_t size_limit_mb = 64; /* total dmabuf size, in megabytes  */
+
+struct udmabuf {
+	pgoff_t pagecount;
+	struct page **pages;
+};
+
+static vm_fault_t udmabuf_vm_fault(struct vm_fault *vmf)
+{
+	struct vm_area_struct *vma = vmf->vma;
+	struct udmabuf *ubuf = vma->vm_private_data;
+
+	vmf->page = ubuf->pages[vmf->pgoff];
+	get_page(vmf->page);
+	return 0;
+}
+
+static const struct vm_operations_struct udmabuf_vm_ops = {
+	.fault = udmabuf_vm_fault,
+};
+
+static int mmap_udmabuf(struct dma_buf *buf, struct vm_area_struct *vma)
+{
+	struct udmabuf *ubuf = buf->priv;
+
+	if ((vma->vm_flags & (VM_SHARED | VM_MAYSHARE)) == 0)
+		return -EINVAL;
+
+	vma->vm_ops = &udmabuf_vm_ops;
+	vma->vm_private_data = ubuf;
+	return 0;
+}
+
+static struct sg_table *map_udmabuf(struct dma_buf_attachment *at,
+				    enum dma_data_direction direction)
+{
+	struct udmabuf *ubuf = at->dmabuf->priv;
+	struct sg_table *sg;
+	int ret;
+
+	sg = kzalloc(sizeof(*sg), GFP_KERNEL);
+	if (!sg)
+		return ERR_PTR(-ENOMEM);
+	ret = sg_alloc_table_from_pages(sg, ubuf->pages, ubuf->pagecount,
+					0, ubuf->pagecount << PAGE_SHIFT,
+					GFP_KERNEL);
+	if (ret < 0)
+		goto err;
+	if (!dma_map_sg(at->dev, sg->sgl, sg->nents, direction)) {
+		ret = -EINVAL;
+		goto err;
+	}
+	return sg;
+
+err:
+	sg_free_table(sg);
+	kfree(sg);
+	return ERR_PTR(ret);
+}
+
+static void unmap_udmabuf(struct dma_buf_attachment *at,
+			  struct sg_table *sg,
+			  enum dma_data_direction direction)
+{
+	dma_unmap_sg(at->dev, sg->sgl, sg->nents, direction);
+	sg_free_table(sg);
+	kfree(sg);
+}
+
+static void release_udmabuf(struct dma_buf *buf)
+{
+	struct udmabuf *ubuf = buf->priv;
+	pgoff_t pg;
+
+	for (pg = 0; pg < ubuf->pagecount; pg++)
+		put_page(ubuf->pages[pg]);
+	kfree(ubuf->pages);
+	kfree(ubuf);
+}
+
+static void *kmap_udmabuf(struct dma_buf *buf, unsigned long page_num)
+{
+	struct udmabuf *ubuf = buf->priv;
+	struct page *page = ubuf->pages[page_num];
+
+	return kmap(page);
+}
+
+static void kunmap_udmabuf(struct dma_buf *buf, unsigned long page_num,
+			   void *vaddr)
+{
+	kunmap(vaddr);
+}
+
+static const struct dma_buf_ops udmabuf_ops = {
+	.map_dma_buf	  = map_udmabuf,
+	.unmap_dma_buf	  = unmap_udmabuf,
+	.release	  = release_udmabuf,
+	.map		  = kmap_udmabuf,
+	.unmap		  = kunmap_udmabuf,
+	.mmap		  = mmap_udmabuf,
+};
+
+#define SEALS_WANTED (F_SEAL_SHRINK)
+#define SEALS_DENIED (F_SEAL_WRITE)
+
+static long udmabuf_create(const struct udmabuf_create_list *head,
+			   const struct udmabuf_create_item *list)
+{
+	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
+	struct file *memfd = NULL;
+	struct udmabuf *ubuf;
+	struct dma_buf *buf;
+	pgoff_t pgoff, pgcnt, pgidx, pgbuf = 0, pglimit;
+	struct page *page;
+	int seals, ret = -EINVAL;
+	u32 i, flags;
+
+	ubuf = kzalloc(sizeof(*ubuf), GFP_KERNEL);
+	if (!ubuf)
+		return -ENOMEM;
+
+	pglimit = (size_limit_mb * 1024 * 1024) >> PAGE_SHIFT;
+	for (i = 0; i < head->count; i++) {
+		if (!IS_ALIGNED(list[i].offset, PAGE_SIZE))
+			goto err;
+		if (!IS_ALIGNED(list[i].size, PAGE_SIZE))
+			goto err;
+		ubuf->pagecount += list[i].size >> PAGE_SHIFT;
+		if (ubuf->pagecount > pglimit)
+			goto err;
+	}
+	ubuf->pages = kmalloc_array(ubuf->pagecount, sizeof(*ubuf->pages),
+				    GFP_KERNEL);
+	if (!ubuf->pages) {
+		ret = -ENOMEM;
+		goto err;
+	}
+
+	pgbuf = 0;
+	for (i = 0; i < head->count; i++) {
+		ret = -EBADFD;
+		memfd = fget(list[i].memfd);
+		if (!memfd)
+			goto err;
+		if (!shmem_mapping(file_inode(memfd)->i_mapping))
+			goto err;
+		seals = memfd_fcntl(memfd, F_GET_SEALS, 0);
+		if (seals == -EINVAL)
+			goto err;
+		ret = -EINVAL;
+		if ((seals & SEALS_WANTED) != SEALS_WANTED ||
+		    (seals & SEALS_DENIED) != 0)
+			goto err;
+		pgoff = list[i].offset >> PAGE_SHIFT;
+		pgcnt = list[i].size   >> PAGE_SHIFT;
+		for (pgidx = 0; pgidx < pgcnt; pgidx++) {
+			page = shmem_read_mapping_page(
+				file_inode(memfd)->i_mapping, pgoff + pgidx);
+			if (IS_ERR(page)) {
+				ret = PTR_ERR(page);
+				goto err;
+			}
+			ubuf->pages[pgbuf++] = page;
+		}
+		fput(memfd);
+		memfd = NULL;
+	}
+
+	exp_info.ops  = &udmabuf_ops;
+	exp_info.size = ubuf->pagecount << PAGE_SHIFT;
+	exp_info.priv = ubuf;
+	exp_info.flags = O_RDWR;
+
+	buf = dma_buf_export(&exp_info);
+	if (IS_ERR(buf)) {
+		ret = PTR_ERR(buf);
+		goto err;
+	}
+
+	flags = 0;
+	if (head->flags & UDMABUF_FLAGS_CLOEXEC)
+		flags |= O_CLOEXEC;
+	return dma_buf_fd(buf, flags);
+
+err:
+	while (pgbuf > 0)
+		put_page(ubuf->pages[--pgbuf]);
+	if (memfd)
+		fput(memfd);
+	kfree(ubuf->pages);
+	kfree(ubuf);
+	return ret;
+}
+
+static long udmabuf_ioctl_create(struct file *filp, unsigned long arg)
+{
+	struct udmabuf_create create;
+	struct udmabuf_create_list head;
+	struct udmabuf_create_item list;
+
+	if (copy_from_user(&create, (void __user *)arg,
+			   sizeof(create)))
+		return -EFAULT;
+
+	head.flags  = create.flags;
+	head.count  = 1;
+	list.memfd  = create.memfd;
+	list.offset = create.offset;
+	list.size   = create.size;
+
+	return udmabuf_create(&head, &list);
+}
+
+static long udmabuf_ioctl_create_list(struct file *filp, unsigned long arg)
+{
+	struct udmabuf_create_list head;
+	struct udmabuf_create_item *list;
+	int ret = -EINVAL;
+	u32 lsize;
+
+	if (copy_from_user(&head, (void __user *)arg, sizeof(head)))
+		return -EFAULT;
+	if (head.count > list_limit)
+		return -EINVAL;
+	lsize = sizeof(struct udmabuf_create_item) * head.count;
+	list = memdup_user((void __user *)(arg + sizeof(head)), lsize);
+	if (IS_ERR(list))
+		return PTR_ERR(list);
+
+	ret = udmabuf_create(&head, list);
+	kfree(list);
+	return ret;
+}
+
+static long udmabuf_ioctl(struct file *filp, unsigned int ioctl,
+			  unsigned long arg)
+{
+	long ret;
+
+	switch (ioctl) {
+	case UDMABUF_CREATE:
+		ret = udmabuf_ioctl_create(filp, arg);
+		break;
+	case UDMABUF_CREATE_LIST:
+		ret = udmabuf_ioctl_create_list(filp, arg);
+		break;
+	default:
+		ret = -ENOTTY;
+		break;
+	}
+	return ret;
+}
+
+static const struct file_operations udmabuf_fops = {
+	.owner		= THIS_MODULE,
+	.unlocked_ioctl = udmabuf_ioctl,
+};
+
+static struct miscdevice udmabuf_misc = {
+	.minor          = MISC_DYNAMIC_MINOR,
+	.name           = "udmabuf",
+	.fops           = &udmabuf_fops,
+};
+
+static int __init udmabuf_dev_init(void)
+{
+	return misc_register(&udmabuf_misc);
+}
+
+static void __exit udmabuf_dev_exit(void)
+{
+	misc_deregister(&udmabuf_misc);
+}
+
+module_init(udmabuf_dev_init)
+module_exit(udmabuf_dev_exit)
+
+MODULE_AUTHOR("Gerd Hoffmann <kraxel@redhat.com>");
+MODULE_LICENSE("GPL v2");