v4.19.13 snapshot.
diff --git a/drivers/dma-buf/Kconfig b/drivers/dma-buf/Kconfig
new file mode 100644
index 0000000..ed3b785
--- /dev/null
+++ b/drivers/dma-buf/Kconfig
@@ -0,0 +1,33 @@
+menu "DMABUF options"
+
+config SYNC_FILE
+	bool "Explicit Synchronization Framework"
+	default n
+	select ANON_INODES
+	select DMA_SHARED_BUFFER
+	---help---
+	  The Sync File Framework adds explicit synchronization via
+	  userspace. It enables sending and receiving 'struct dma_fence'
+	  objects to/from userspace via Sync File fds, so that drivers can
+	  synchronize with each other through userspace components. It has
+	  been ported from Android.
+
+	  The first and main user of this is graphics, in which a fence is
+	  associated with a buffer. When a job is submitted to the GPU, a fence
+	  is attached to the buffer and is transferred via userspace, using Sync
+	  File fds, to the DRM driver, for example. More details at
+	  Documentation/sync_file.txt.
+
+config SW_SYNC
+	bool "Sync File Validation Framework"
+	default n
+	depends on SYNC_FILE
+	depends on DEBUG_FS
+	---help---
+	  A sync object driver that uses a 32-bit counter to coordinate
+	  synchronization.  Useful when there is no hardware primitive backing
+	  the synchronization.
+
+	  WARNING: improper use of this can result in deadlocking kernel
+	  drivers from userspace. Intended for test and debug only.
+
+endmenu
diff --git a/drivers/dma-buf/Makefile b/drivers/dma-buf/Makefile
new file mode 100644
index 0000000..c33bf88
--- /dev/null
+++ b/drivers/dma-buf/Makefile
@@ -0,0 +1,3 @@
+obj-y := dma-buf.o dma-fence.o dma-fence-array.o reservation.o seqno-fence.o
+obj-$(CONFIG_SYNC_FILE)		+= sync_file.o
+obj-$(CONFIG_SW_SYNC)		+= sw_sync.o sync_debug.o
diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c
new file mode 100644
index 0000000..1388447
--- /dev/null
+++ b/drivers/dma-buf/dma-buf.c
@@ -0,0 +1,1161 @@
+/*
+ * Framework for buffer objects that can be shared across devices/subsystems.
+ *
+ * Copyright(C) 2011 Linaro Limited. All rights reserved.
+ * Author: Sumit Semwal <sumit.semwal@ti.com>
+ *
+ * Many thanks to linaro-mm-sig list, and specially
+ * Arnd Bergmann <arnd@arndb.de>, Rob Clark <rob@ti.com> and
+ * Daniel Vetter <daniel@ffwll.ch> for their support in creation and
+ * refining of this idea.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/fs.h>
+#include <linux/slab.h>
+#include <linux/dma-buf.h>
+#include <linux/dma-fence.h>
+#include <linux/anon_inodes.h>
+#include <linux/export.h>
+#include <linux/debugfs.h>
+#include <linux/module.h>
+#include <linux/seq_file.h>
+#include <linux/poll.h>
+#include <linux/reservation.h>
+#include <linux/mm.h>
+
+#include <uapi/linux/dma-buf.h>
+
+static inline int is_dma_buf_file(struct file *);
+
+struct dma_buf_list {
+	struct list_head head;
+	struct mutex lock;
+};
+
+static struct dma_buf_list db_list;
+
+static int dma_buf_release(struct inode *inode, struct file *file)
+{
+	struct dma_buf *dmabuf;
+
+	if (!is_dma_buf_file(file))
+		return -EINVAL;
+
+	dmabuf = file->private_data;
+
+	BUG_ON(dmabuf->vmapping_counter);
+
+	/*
+	 * Any fences that a dma-buf poll can wait on should be signaled
+	 * before releasing dma-buf. This is the responsibility of each
+	 * driver that uses the reservation objects.
+	 *
+	 * If you hit this BUG() it means someone dropped their ref to the
+	 * dma-buf while still having pending operation to the buffer.
+	 */
+	BUG_ON(dmabuf->cb_shared.active || dmabuf->cb_excl.active);
+
+	dmabuf->ops->release(dmabuf);
+
+	mutex_lock(&db_list.lock);
+	list_del(&dmabuf->list_node);
+	mutex_unlock(&db_list.lock);
+
+	if (dmabuf->resv == (struct reservation_object *)&dmabuf[1])
+		reservation_object_fini(dmabuf->resv);
+
+	module_put(dmabuf->owner);
+	kfree(dmabuf);
+	return 0;
+}
+
+static int dma_buf_mmap_internal(struct file *file, struct vm_area_struct *vma)
+{
+	struct dma_buf *dmabuf;
+
+	if (!is_dma_buf_file(file))
+		return -EINVAL;
+
+	dmabuf = file->private_data;
+
+	/* check for overflowing the buffer's size */
+	if (vma->vm_pgoff + vma_pages(vma) >
+	    dmabuf->size >> PAGE_SHIFT)
+		return -EINVAL;
+
+	return dmabuf->ops->mmap(dmabuf, vma);
+}
+
+static loff_t dma_buf_llseek(struct file *file, loff_t offset, int whence)
+{
+	struct dma_buf *dmabuf;
+	loff_t base;
+
+	if (!is_dma_buf_file(file))
+		return -EBADF;
+
+	dmabuf = file->private_data;
+
+	/*
+	 * Only support discovering the end of the buffer, but also allow
+	 * SEEK_SET to maintain the idiomatic SEEK_END(0), SEEK_CUR(0)
+	 * pattern.
+	 */
+	if (whence == SEEK_END)
+		base = dmabuf->size;
+	else if (whence == SEEK_SET)
+		base = 0;
+	else
+		return -EINVAL;
+
+	if (offset != 0)
+		return -EINVAL;
+
+	return base + offset;
+}
+
+/**
+ * DOC: fence polling
+ *
+ * To support cross-device and cross-driver synchronization of buffer access,
+ * implicit fences (represented internally in the kernel with &struct dma_fence)
+ * can be attached to a &dma_buf. The glue for that and a few related things are
+ * provided in the &reservation_object structure.
+ *
+ * Userspace can query the state of these implicitly tracked fences using poll()
+ * and related system calls:
+ *
+ * - Checking for EPOLLIN, i.e. read access, can be used to query the state of the
+ *   most recent write or exclusive fence.
+ *
+ * - Checking for EPOLLOUT, i.e. write access, can be used to query the state of
+ *   all attached fences, shared and exclusive ones.
+ *
+ * Note that this only signals the completion of the respective fences, i.e. the
+ * DMA transfers are complete. Cache flushing and any other necessary
+ * preparations before CPU access can begin still need to happen.
+ */
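+
+/*
+ * A minimal userspace sketch of the polling described above, assuming
+ * dmabuf_fd is a dma-buf file descriptor obtained from an exporting driver
+ * (the function name is hypothetical):
+ *
+ *   #include <poll.h>
+ *
+ *   int wait_for_buffer_idle(int dmabuf_fd, int timeout_ms)
+ *   {
+ *           struct pollfd pfd = {
+ *                   .fd = dmabuf_fd,
+ *                   // POLLOUT waits for all fences (shared and exclusive),
+ *                   // POLLIN only for the most recent exclusive/write fence.
+ *                   .events = POLLOUT,
+ *           };
+ *
+ *           return poll(&pfd, 1, timeout_ms);
+ *   }
+ */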
+
+static void dma_buf_poll_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
+{
+	struct dma_buf_poll_cb_t *dcb = (struct dma_buf_poll_cb_t *)cb;
+	unsigned long flags;
+
+	spin_lock_irqsave(&dcb->poll->lock, flags);
+	wake_up_locked_poll(dcb->poll, dcb->active);
+	dcb->active = 0;
+	spin_unlock_irqrestore(&dcb->poll->lock, flags);
+}
+
+static __poll_t dma_buf_poll(struct file *file, poll_table *poll)
+{
+	struct dma_buf *dmabuf;
+	struct reservation_object *resv;
+	struct reservation_object_list *fobj;
+	struct dma_fence *fence_excl;
+	__poll_t events;
+	unsigned shared_count, seq;
+
+	dmabuf = file->private_data;
+	if (!dmabuf || !dmabuf->resv)
+		return EPOLLERR;
+
+	resv = dmabuf->resv;
+
+	poll_wait(file, &dmabuf->poll, poll);
+
+	events = poll_requested_events(poll) & (EPOLLIN | EPOLLOUT);
+	if (!events)
+		return 0;
+
+retry:
+	seq = read_seqcount_begin(&resv->seq);
+	rcu_read_lock();
+
+	fobj = rcu_dereference(resv->fence);
+	if (fobj)
+		shared_count = fobj->shared_count;
+	else
+		shared_count = 0;
+	fence_excl = rcu_dereference(resv->fence_excl);
+	if (read_seqcount_retry(&resv->seq, seq)) {
+		rcu_read_unlock();
+		goto retry;
+	}
+
+	if (fence_excl && (!(events & EPOLLOUT) || shared_count == 0)) {
+		struct dma_buf_poll_cb_t *dcb = &dmabuf->cb_excl;
+		__poll_t pevents = EPOLLIN;
+
+		if (shared_count == 0)
+			pevents |= EPOLLOUT;
+
+		spin_lock_irq(&dmabuf->poll.lock);
+		if (dcb->active) {
+			dcb->active |= pevents;
+			events &= ~pevents;
+		} else
+			dcb->active = pevents;
+		spin_unlock_irq(&dmabuf->poll.lock);
+
+		if (events & pevents) {
+			if (!dma_fence_get_rcu(fence_excl)) {
+				/* force a recheck */
+				events &= ~pevents;
+				dma_buf_poll_cb(NULL, &dcb->cb);
+			} else if (!dma_fence_add_callback(fence_excl, &dcb->cb,
+							   dma_buf_poll_cb)) {
+				events &= ~pevents;
+				dma_fence_put(fence_excl);
+			} else {
+				/*
+				 * No callback queued, wake up any additional
+				 * waiters.
+				 */
+				dma_fence_put(fence_excl);
+				dma_buf_poll_cb(NULL, &dcb->cb);
+			}
+		}
+	}
+
+	if ((events & EPOLLOUT) && shared_count > 0) {
+		struct dma_buf_poll_cb_t *dcb = &dmabuf->cb_shared;
+		int i;
+
+		/* Only queue a new callback if no event has fired yet */
+		spin_lock_irq(&dmabuf->poll.lock);
+		if (dcb->active)
+			events &= ~EPOLLOUT;
+		else
+			dcb->active = EPOLLOUT;
+		spin_unlock_irq(&dmabuf->poll.lock);
+
+		if (!(events & EPOLLOUT))
+			goto out;
+
+		for (i = 0; i < shared_count; ++i) {
+			struct dma_fence *fence = rcu_dereference(fobj->shared[i]);
+
+			if (!dma_fence_get_rcu(fence)) {
+				/*
+				 * fence refcount dropped to zero, this means
+				 * that fobj has been freed
+				 *
+				 * call dma_buf_poll_cb and force a recheck!
+				 */
+				events &= ~EPOLLOUT;
+				dma_buf_poll_cb(NULL, &dcb->cb);
+				break;
+			}
+			if (!dma_fence_add_callback(fence, &dcb->cb,
+						    dma_buf_poll_cb)) {
+				dma_fence_put(fence);
+				events &= ~EPOLLOUT;
+				break;
+			}
+			dma_fence_put(fence);
+		}
+
+		/* No callback queued, wake up any additional waiters. */
+		if (i == shared_count)
+			dma_buf_poll_cb(NULL, &dcb->cb);
+	}
+
+out:
+	rcu_read_unlock();
+	return events;
+}
+
+static long dma_buf_ioctl(struct file *file,
+			  unsigned int cmd, unsigned long arg)
+{
+	struct dma_buf *dmabuf;
+	struct dma_buf_sync sync;
+	enum dma_data_direction direction;
+	int ret;
+
+	dmabuf = file->private_data;
+
+	switch (cmd) {
+	case DMA_BUF_IOCTL_SYNC:
+		if (copy_from_user(&sync, (void __user *) arg, sizeof(sync)))
+			return -EFAULT;
+
+		if (sync.flags & ~DMA_BUF_SYNC_VALID_FLAGS_MASK)
+			return -EINVAL;
+
+		switch (sync.flags & DMA_BUF_SYNC_RW) {
+		case DMA_BUF_SYNC_READ:
+			direction = DMA_FROM_DEVICE;
+			break;
+		case DMA_BUF_SYNC_WRITE:
+			direction = DMA_TO_DEVICE;
+			break;
+		case DMA_BUF_SYNC_RW:
+			direction = DMA_BIDIRECTIONAL;
+			break;
+		default:
+			return -EINVAL;
+		}
+
+		if (sync.flags & DMA_BUF_SYNC_END)
+			ret = dma_buf_end_cpu_access(dmabuf, direction);
+		else
+			ret = dma_buf_begin_cpu_access(dmabuf, direction);
+
+		return ret;
+	default:
+		return -ENOTTY;
+	}
+}
+
+static const struct file_operations dma_buf_fops = {
+	.release	= dma_buf_release,
+	.mmap		= dma_buf_mmap_internal,
+	.llseek		= dma_buf_llseek,
+	.poll		= dma_buf_poll,
+	.unlocked_ioctl	= dma_buf_ioctl,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl	= dma_buf_ioctl,
+#endif
+};
+
+/*
+ * is_dma_buf_file - Check if struct file* is associated with dma_buf
+ */
+static inline int is_dma_buf_file(struct file *file)
+{
+	return file->f_op == &dma_buf_fops;
+}
+
+/**
+ * DOC: dma buf device access
+ *
+ * For device DMA access to a shared DMA buffer the usual sequence of operations
+ * is fairly simple:
+ *
+ * 1. The exporter defines its exporter instance using
+ *    DEFINE_DMA_BUF_EXPORT_INFO() and calls dma_buf_export() to wrap a private
+ *    buffer object into a &dma_buf. It then exports that &dma_buf to userspace
+ *    as a file descriptor by calling dma_buf_fd().
+ *
+ * 2. Userspace passes this file descriptor to all drivers it wants this buffer
+ *    to be shared with: first the file descriptor is converted to a &dma_buf using
+ *    dma_buf_get(). Then the buffer is attached to the device using
+ *    dma_buf_attach().
+ *
+ *    Up to this stage the exporter is still free to migrate or reallocate the
+ *    backing storage.
+ *
+ * 3. Once the buffer is attached to all devices userspace can initiate DMA
+ *    access to the shared buffer. In the kernel this is done by calling
+ *    dma_buf_map_attachment() and dma_buf_unmap_attachment().
+ *
+ * 4. Once a driver is done with a shared buffer it needs to call
+ *    dma_buf_detach() (after cleaning up any mappings) and then release the
+ *    reference acquired with dma_buf_get() by calling dma_buf_put().
+ *
+ * For the detailed semantics exporters are expected to implement see
+ * &dma_buf_ops.
+ */
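+
+/*
+ * A minimal importer-side sketch of the sequence above, assuming @fd is a
+ * dma-buf file descriptor handed in from userspace and @dev is the importing
+ * device; this is illustrative only, with simplified error handling:
+ *
+ *   struct sg_table *import_dmabuf(int fd, struct device *dev,
+ *                                  struct dma_buf **dmabuf_ret,
+ *                                  struct dma_buf_attachment **attach_ret)
+ *   {
+ *           struct dma_buf *dmabuf = dma_buf_get(fd);
+ *           struct dma_buf_attachment *attach;
+ *           struct sg_table *sgt;
+ *
+ *           if (IS_ERR(dmabuf))
+ *                   return ERR_CAST(dmabuf);
+ *
+ *           attach = dma_buf_attach(dmabuf, dev);
+ *           if (IS_ERR(attach)) {
+ *                   dma_buf_put(dmabuf);
+ *                   return ERR_CAST(attach);
+ *           }
+ *
+ *           sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
+ *           if (IS_ERR(sgt)) {
+ *                   dma_buf_detach(dmabuf, attach);
+ *                   dma_buf_put(dmabuf);
+ *                   return sgt;
+ *           }
+ *
+ *           *dmabuf_ret = dmabuf;
+ *           *attach_ret = attach;
+ *           return sgt;
+ *   }
+ *
+ * Teardown reverses the order: dma_buf_unmap_attachment(), dma_buf_detach()
+ * and finally dma_buf_put().
+ */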
+
+/**
+ * dma_buf_export - Creates a new dma_buf, and associates an anon file
+ * with this buffer, so it can be exported.
+ * Also connect the allocator-specific data and ops to the buffer.
+ * Additionally, provide a name string for the exporter; useful in debugging.
+ *
+ * @exp_info:	[in]	holds all the export related information provided
+ *			by the exporter. see &struct dma_buf_export_info
+ *			for further details.
+ *
+ * Returns, on success, a newly created struct dma_buf object, which wraps the
+ * supplied private data and operations for dma_buf_ops. On failure (missing
+ * ops or an error allocating struct dma_buf), it returns a negative error
+ * wrapped in an ERR_PTR().
+ *
+ * For most cases the easiest way to create @exp_info is through the
+ * %DEFINE_DMA_BUF_EXPORT_INFO macro.
+ */
+struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info)
+{
+	struct dma_buf *dmabuf;
+	struct reservation_object *resv = exp_info->resv;
+	struct file *file;
+	size_t alloc_size = sizeof(struct dma_buf);
+	int ret;
+
+	if (!exp_info->resv)
+		alloc_size += sizeof(struct reservation_object);
+	else
+		/* prevent &dma_buf[1] == dma_buf->resv */
+		alloc_size += 1;
+
+	if (WARN_ON(!exp_info->priv
+			  || !exp_info->ops
+			  || !exp_info->ops->map_dma_buf
+			  || !exp_info->ops->unmap_dma_buf
+			  || !exp_info->ops->release
+			  || !exp_info->ops->map
+			  || !exp_info->ops->mmap)) {
+		return ERR_PTR(-EINVAL);
+	}
+
+	if (!try_module_get(exp_info->owner))
+		return ERR_PTR(-ENOENT);
+
+	dmabuf = kzalloc(alloc_size, GFP_KERNEL);
+	if (!dmabuf) {
+		ret = -ENOMEM;
+		goto err_module;
+	}
+
+	dmabuf->priv = exp_info->priv;
+	dmabuf->ops = exp_info->ops;
+	dmabuf->size = exp_info->size;
+	dmabuf->exp_name = exp_info->exp_name;
+	dmabuf->owner = exp_info->owner;
+	init_waitqueue_head(&dmabuf->poll);
+	dmabuf->cb_excl.poll = dmabuf->cb_shared.poll = &dmabuf->poll;
+	dmabuf->cb_excl.active = dmabuf->cb_shared.active = 0;
+
+	if (!resv) {
+		resv = (struct reservation_object *)&dmabuf[1];
+		reservation_object_init(resv);
+	}
+	dmabuf->resv = resv;
+
+	file = anon_inode_getfile("dmabuf", &dma_buf_fops, dmabuf,
+					exp_info->flags);
+	if (IS_ERR(file)) {
+		ret = PTR_ERR(file);
+		goto err_dmabuf;
+	}
+
+	file->f_mode |= FMODE_LSEEK;
+	dmabuf->file = file;
+
+	mutex_init(&dmabuf->lock);
+	INIT_LIST_HEAD(&dmabuf->attachments);
+
+	mutex_lock(&db_list.lock);
+	list_add(&dmabuf->list_node, &db_list.head);
+	mutex_unlock(&db_list.lock);
+
+	return dmabuf;
+
+err_dmabuf:
+	kfree(dmabuf);
+err_module:
+	module_put(exp_info->owner);
+	return ERR_PTR(ret);
+}
+EXPORT_SYMBOL_GPL(dma_buf_export);
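+
+/*
+ * A minimal exporter-side sketch. struct my_buffer and my_dmabuf_ops are
+ * hypothetical driver-private names; the ops must provide at least the
+ * callbacks checked by dma_buf_export() above:
+ *
+ *   int my_export_to_fd(struct my_buffer *buf)
+ *   {
+ *           DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
+ *           struct dma_buf *dmabuf;
+ *
+ *           exp_info.ops = &my_dmabuf_ops;
+ *           exp_info.size = buf->size;
+ *           exp_info.flags = O_CLOEXEC;
+ *           exp_info.priv = buf;
+ *
+ *           dmabuf = dma_buf_export(&exp_info);
+ *           if (IS_ERR(dmabuf))
+ *                   return PTR_ERR(dmabuf);
+ *
+ *           // Hand the buffer to userspace as a file descriptor.
+ *           return dma_buf_fd(dmabuf, O_CLOEXEC);
+ *   }
+ */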
+
+/**
+ * dma_buf_fd - returns a file descriptor for the given dma_buf
+ * @dmabuf:	[in]	pointer to dma_buf for which fd is required.
+ * @flags:      [in]    flags to give to fd
+ *
+ * On success, returns an associated 'fd'. Else, returns error.
+ */
+int dma_buf_fd(struct dma_buf *dmabuf, int flags)
+{
+	int fd;
+
+	if (!dmabuf || !dmabuf->file)
+		return -EINVAL;
+
+	fd = get_unused_fd_flags(flags);
+	if (fd < 0)
+		return fd;
+
+	fd_install(fd, dmabuf->file);
+
+	return fd;
+}
+EXPORT_SYMBOL_GPL(dma_buf_fd);
+
+/**
+ * dma_buf_get - returns the dma_buf structure related to an fd
+ * @fd:	[in]	fd associated with the dma_buf to be returned
+ *
+ * On success, returns the dma_buf structure associated with an fd; uses
+ * the file's refcounting done by fget() to increase the refcount. Returns an
+ * ERR_PTR otherwise.
+ */
+struct dma_buf *dma_buf_get(int fd)
+{
+	struct file *file;
+
+	file = fget(fd);
+
+	if (!file)
+		return ERR_PTR(-EBADF);
+
+	if (!is_dma_buf_file(file)) {
+		fput(file);
+		return ERR_PTR(-EINVAL);
+	}
+
+	return file->private_data;
+}
+EXPORT_SYMBOL_GPL(dma_buf_get);
+
+/**
+ * dma_buf_put - decreases refcount of the buffer
+ * @dmabuf:	[in]	buffer to reduce refcount of
+ *
+ * Uses file's refcounting done implicitly by fput().
+ *
+ * If, as a result of this call, the refcount becomes 0, the 'release' file
+ * operation related to this fd is called. It calls &dma_buf_ops.release vfunc
+ * in turn, and frees the memory allocated for dmabuf when exported.
+ */
+void dma_buf_put(struct dma_buf *dmabuf)
+{
+	if (WARN_ON(!dmabuf || !dmabuf->file))
+		return;
+
+	fput(dmabuf->file);
+}
+EXPORT_SYMBOL_GPL(dma_buf_put);
+
+/**
+ * dma_buf_attach - Add the device to dma_buf's attachments list; optionally,
+ * calls attach() of dma_buf_ops to allow device-specific attach functionality
+ * @dmabuf:	[in]	buffer to attach device to.
+ * @dev:	[in]	device to be attached.
+ *
+ * Returns struct dma_buf_attachment pointer for this attachment. Attachments
+ * must be cleaned up by calling dma_buf_detach().
+ *
+ * Returns:
+ *
+ * A pointer to newly created &dma_buf_attachment on success, or a negative
+ * error code wrapped into a pointer on failure.
+ *
+ * Note that this can fail if the backing storage of @dmabuf is in a place not
+ * accessible to @dev, and cannot be moved to a more suitable place. This is
+ * indicated with the error code -EBUSY.
+ */
+struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf,
+					  struct device *dev)
+{
+	struct dma_buf_attachment *attach;
+	int ret;
+
+	if (WARN_ON(!dmabuf || !dev))
+		return ERR_PTR(-EINVAL);
+
+	attach = kzalloc(sizeof(*attach), GFP_KERNEL);
+	if (!attach)
+		return ERR_PTR(-ENOMEM);
+
+	attach->dev = dev;
+	attach->dmabuf = dmabuf;
+
+	mutex_lock(&dmabuf->lock);
+
+	if (dmabuf->ops->attach) {
+		ret = dmabuf->ops->attach(dmabuf, attach);
+		if (ret)
+			goto err_attach;
+	}
+	list_add(&attach->node, &dmabuf->attachments);
+
+	mutex_unlock(&dmabuf->lock);
+	return attach;
+
+err_attach:
+	kfree(attach);
+	mutex_unlock(&dmabuf->lock);
+	return ERR_PTR(ret);
+}
+EXPORT_SYMBOL_GPL(dma_buf_attach);
+
+/**
+ * dma_buf_detach - Remove the given attachment from dmabuf's attachments list;
+ * optionally calls detach() of dma_buf_ops for device-specific detach
+ * @dmabuf:	[in]	buffer to detach from.
+ * @attach:	[in]	attachment to be detached; is free'd after this call.
+ *
+ * Clean up a device attachment obtained by calling dma_buf_attach().
+ */
+void dma_buf_detach(struct dma_buf *dmabuf, struct dma_buf_attachment *attach)
+{
+	if (WARN_ON(!dmabuf || !attach))
+		return;
+
+	mutex_lock(&dmabuf->lock);
+	list_del(&attach->node);
+	if (dmabuf->ops->detach)
+		dmabuf->ops->detach(dmabuf, attach);
+
+	mutex_unlock(&dmabuf->lock);
+	kfree(attach);
+}
+EXPORT_SYMBOL_GPL(dma_buf_detach);
+
+/**
+ * dma_buf_map_attachment - Returns the scatterlist table of the attachment;
+ * mapped into _device_ address space. Is a wrapper for map_dma_buf() of the
+ * dma_buf_ops.
+ * @attach:	[in]	attachment whose scatterlist is to be returned
+ * @direction:	[in]	direction of DMA transfer
+ *
+ * Returns sg_table containing the scatterlist to be returned; returns ERR_PTR
+ * on error. May return -EINTR if it is interrupted by a signal.
+ *
+ * A mapping must be unmapped by using dma_buf_unmap_attachment(). Note that
+ * the underlying backing storage is pinned for as long as a mapping exists,
+ * therefore users/importers should not hold onto a mapping for undue amounts of
+ * time.
+ */
+struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *attach,
+					enum dma_data_direction direction)
+{
+	struct sg_table *sg_table;
+
+	might_sleep();
+
+	if (WARN_ON(!attach || !attach->dmabuf))
+		return ERR_PTR(-EINVAL);
+
+	sg_table = attach->dmabuf->ops->map_dma_buf(attach, direction);
+	if (!sg_table)
+		sg_table = ERR_PTR(-ENOMEM);
+
+	return sg_table;
+}
+EXPORT_SYMBOL_GPL(dma_buf_map_attachment);
+
+/**
+ * dma_buf_unmap_attachment - unmaps and decreases usecount of the buffer; might
+ * deallocate the associated scatterlist. Is a wrapper for unmap_dma_buf() of
+ * dma_buf_ops.
+ * @attach:	[in]	attachment to unmap buffer from
+ * @sg_table:	[in]	scatterlist info of the buffer to unmap
+ * @direction:  [in]    direction of DMA transfer
+ *
+ * This unmaps a DMA mapping for @attach obtained by dma_buf_map_attachment().
+ */
+void dma_buf_unmap_attachment(struct dma_buf_attachment *attach,
+				struct sg_table *sg_table,
+				enum dma_data_direction direction)
+{
+	might_sleep();
+
+	if (WARN_ON(!attach || !attach->dmabuf || !sg_table))
+		return;
+
+	attach->dmabuf->ops->unmap_dma_buf(attach, sg_table,
+						direction);
+}
+EXPORT_SYMBOL_GPL(dma_buf_unmap_attachment);
+
+/**
+ * DOC: cpu access
+ *
+ * There are multiple reasons for supporting CPU access to a dma buffer object:
+ *
+ * - Fallback operations in the kernel, for example when a device is connected
+ *   over USB and the kernel needs to shuffle the data around first before
+ *   sending it away. Cache coherency is handled by braketing any transactions
+ *   with calls to dma_buf_begin_cpu_access() and dma_buf_end_cpu_access()
+ *   access.
+ *
+ *   To support dma_buf objects residing in highmem, cpu access is page-based,
+ *   using an API similar to kmap. Accessing a dma_buf is done in aligned chunks
+ *   of PAGE_SIZE size. Before accessing a chunk it needs to be mapped, which
+ *   returns a pointer in kernel virtual address space. Afterwards the chunk
+ *   needs to be unmapped again. There is no limit on how often a given chunk
+ *   can be mapped and unmapped, i.e. the importer does not need to call
+ *   begin_cpu_access again before mapping the same chunk again.
+ *
+ *   Interfaces::
+ *      void \*dma_buf_kmap(struct dma_buf \*, unsigned long);
+ *      void dma_buf_kunmap(struct dma_buf \*, unsigned long, void \*);
+ *
+ *   Implementing the functions is optional for exporters and for importers all
+ *   the restrictions of using kmap apply.
+ *
+ *   dma_buf kmap calls outside of the range specified in begin_cpu_access are
+ *   undefined. If the range is not PAGE_SIZE aligned, kmap needs to succeed on
+ *   the partial chunks at the beginning and end but may return stale or bogus
+ *   data outside of the range (in these partial chunks).
+ *
+ *   For some cases the overhead of kmap can be too high, so a vmap interface
+ *   is introduced. This interface should be used very carefully, as vmalloc
+ *   space is a limited resource on many architectures.
+ *
+ *   Interfaces::
+ *      void \*dma_buf_vmap(struct dma_buf \*dmabuf)
+ *      void dma_buf_vunmap(struct dma_buf \*dmabuf, void \*vaddr)
+ *
+ *   The vmap call can fail if there is no vmap support in the exporter, or if
+ *   it runs out of vmalloc space. Fallback to kmap should be implemented. Note
+ *   that the dma-buf layer keeps a reference count for all vmap access and
+ *   calls down into the exporter's vmap function only when no vmapping exists,
+ *   and only unmaps it once. Protection against concurrent vmap/vunmap calls is
+ *   provided by taking the dma_buf->lock mutex.
+ *
+ * - For full compatibility on the importer side with existing userspace
+ *   interfaces, which might already support mmap'ing buffers. This is needed in
+ *   many processing pipelines (e.g. feeding a software rendered image into a
+ *   hardware pipeline, thumbnail creation, snapshots, ...). Also, Android's ION
+ *   framework already supported this, and mmap support was needed for DMA buffer
+ *   file descriptors to replace ION buffers.
+ *
+ *   There are no special interfaces; userspace simply calls mmap on the dma-buf
+ *   fd. But as for CPU access there's a need to bracket the actual access,
+ *   which is handled by the ioctl (DMA_BUF_IOCTL_SYNC). Note that
+ *   DMA_BUF_IOCTL_SYNC can fail with -EAGAIN or -EINTR, in which case it must
+ *   be restarted.
+ *
+ *   Some systems might need some sort of cache coherency management e.g. when
+ *   CPU and GPU domains are being accessed through dma-buf at the same time.
+ *   To circumvent this problem there are begin/end coherency markers, that
+ *   forward directly to the existing dma-buf device drivers' vfunc hooks.
+ *   Userspace can make use of those markers through the DMA_BUF_IOCTL_SYNC
+ *   ioctl. The sequence would be used like the following:
+ *
+ *     - mmap dma-buf fd
+ *     - for each CPU drawing/upload cycle: 1. SYNC_START ioctl, 2. read/write
+ *       to the mmap area, 3. SYNC_END ioctl. This can be repeated as often as
+ *       you want (with the new data being consumed by, say, the GPU or the
+ *       scanout device)
+ *     - munmap once you don't need the buffer any more
+ *
+ *    For correctness and optimal performance, it is always required to use
+ *    SYNC_START and SYNC_END before and after, respectively, when accessing the
+ *    mapped address. Userspace cannot rely on coherent access, even when there
+ *    are systems where it just works without calling these ioctls.
+ *
+ * - And as a CPU fallback in userspace processing pipelines.
+ *
+ *   Similar to the motivation for kernel cpu access it is again important that
+ *   the userspace code of a given importing subsystem can use the same
+ *   interfaces with an imported dma-buf buffer object as with a native buffer
+ *   object. This is especially important for drm where the userspace part of
+ *   contemporary OpenGL, X, and other drivers is huge, and reworking them to
+ *   use a different way to mmap a buffer would be rather invasive.
+ *
+ *   The assumption in the current dma-buf interfaces is that redirecting the
+ *   initial mmap is all that's needed. A survey of some of the existing
+ *   subsystems shows that no driver seems to do any nefarious thing like
+ *   syncing up with outstanding asynchronous processing on the device or
+ *   allocating special resources at fault time. So hopefully this is good
+ *   enough, since adding interfaces to intercept pagefaults and allow pte
+ *   shootdowns would increase the complexity quite a bit.
+ *
+ *   Interface::
+ *      int dma_buf_mmap(struct dma_buf \*, struct vm_area_struct \*,
+ *		       unsigned long);
+ *
+ *   If the importing subsystem simply provides a special-purpose mmap call to
+ *   set up a mapping in userspace, calling do_mmap with dma_buf->file will
+ *   equally achieve that for a dma-buf object.
+ */
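+
+/*
+ * A userspace sketch of the mmap plus SYNC_START/SYNC_END sequence described
+ * above; illustrative only, with error handling omitted (a real program must
+ * also retry the ioctl on -EAGAIN/-EINTR):
+ *
+ *   #include <string.h>
+ *   #include <sys/ioctl.h>
+ *   #include <sys/mman.h>
+ *   #include <linux/dma-buf.h>
+ *
+ *   void cpu_write(int dmabuf_fd, const void *data, size_t size)
+ *   {
+ *           struct dma_buf_sync sync = { 0 };
+ *           void *map = mmap(NULL, size, PROT_READ | PROT_WRITE,
+ *                            MAP_SHARED, dmabuf_fd, 0);
+ *
+ *           sync.flags = DMA_BUF_SYNC_START | DMA_BUF_SYNC_WRITE;
+ *           ioctl(dmabuf_fd, DMA_BUF_IOCTL_SYNC, &sync);
+ *
+ *           memcpy(map, data, size);
+ *
+ *           sync.flags = DMA_BUF_SYNC_END | DMA_BUF_SYNC_WRITE;
+ *           ioctl(dmabuf_fd, DMA_BUF_IOCTL_SYNC, &sync);
+ *
+ *           munmap(map, size);
+ *   }
+ */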
+
+static int __dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
+				      enum dma_data_direction direction)
+{
+	bool write = (direction == DMA_BIDIRECTIONAL ||
+		      direction == DMA_TO_DEVICE);
+	struct reservation_object *resv = dmabuf->resv;
+	long ret;
+
+	/* Wait on any implicit rendering fences */
+	ret = reservation_object_wait_timeout_rcu(resv, write, true,
+						  MAX_SCHEDULE_TIMEOUT);
+	if (ret < 0)
+		return ret;
+
+	return 0;
+}
+
+/**
+ * dma_buf_begin_cpu_access - Must be called before accessing a dma_buf from the
+ * cpu in the kernel context. Calls begin_cpu_access to allow exporter-specific
+ * preparations. Coherency is only guaranteed for the specified access
+ * direction.
+ * @dmabuf:	[in]	buffer to prepare cpu access for.
+ * @direction:	[in]	direction of access (read and/or write).
+ *
+ * After the cpu access is complete the caller should call
+ * dma_buf_end_cpu_access(). Only when cpu access is braketed by both calls is
+ * it guaranteed to be coherent with other DMA access.
+ *
+ * Can return negative error values, returns 0 on success.
+ */
+int dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
+			     enum dma_data_direction direction)
+{
+	int ret = 0;
+
+	if (WARN_ON(!dmabuf))
+		return -EINVAL;
+
+	if (dmabuf->ops->begin_cpu_access)
+		ret = dmabuf->ops->begin_cpu_access(dmabuf, direction);
+
+	/* Ensure that all fences are waited upon - but we first allow
+	 * the native handler the chance to do so more efficiently if it
+	 * chooses. A double invocation here will be a reasonably cheap no-op.
+	 */
+	if (ret == 0)
+		ret = __dma_buf_begin_cpu_access(dmabuf, direction);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(dma_buf_begin_cpu_access);
+
+/**
+ * dma_buf_end_cpu_access - Must be called after accessing a dma_buf from the
+ * cpu in the kernel context. Calls end_cpu_access to allow exporter-specific
+ * actions. Coherency is only guaranteed for the specified access
+ * direction.
+ * @dmabuf:	[in]	buffer to complete cpu access for.
+ * @direction:	[in]	direction of access (read and/or write).
+ *
+ * This terminates CPU access started with dma_buf_begin_cpu_access().
+ *
+ * Can return negative error values, returns 0 on success.
+ */
+int dma_buf_end_cpu_access(struct dma_buf *dmabuf,
+			   enum dma_data_direction direction)
+{
+	int ret = 0;
+
+	WARN_ON(!dmabuf);
+
+	if (dmabuf->ops->end_cpu_access)
+		ret = dmabuf->ops->end_cpu_access(dmabuf, direction);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(dma_buf_end_cpu_access);
+
+/**
+ * dma_buf_kmap - Map a page of the buffer object into kernel address space. The
+ * same restrictions as for kmap and friends apply.
+ * @dmabuf:	[in]	buffer to map page from.
+ * @page_num:	[in]	page in PAGE_SIZE units to map.
+ *
+ * This call must always succeed, any necessary preparations that might fail
+ * need to be done in begin_cpu_access.
+ */
+void *dma_buf_kmap(struct dma_buf *dmabuf, unsigned long page_num)
+{
+	WARN_ON(!dmabuf);
+
+	if (!dmabuf->ops->map)
+		return NULL;
+	return dmabuf->ops->map(dmabuf, page_num);
+}
+EXPORT_SYMBOL_GPL(dma_buf_kmap);
+
+/**
+ * dma_buf_kunmap - Unmap a page obtained by dma_buf_kmap.
+ * @dmabuf:	[in]	buffer to unmap page from.
+ * @page_num:	[in]	page in PAGE_SIZE units to unmap.
+ * @vaddr:	[in]	kernel space pointer obtained from dma_buf_kmap.
+ *
+ * This call must always succeed.
+ */
+void dma_buf_kunmap(struct dma_buf *dmabuf, unsigned long page_num,
+		    void *vaddr)
+{
+	WARN_ON(!dmabuf);
+
+	if (dmabuf->ops->unmap)
+		dmabuf->ops->unmap(dmabuf, page_num, vaddr);
+}
+EXPORT_SYMBOL_GPL(dma_buf_kunmap);
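+
+/*
+ * A kernel-side sketch bracketing kmap access with begin/end_cpu_access,
+ * assuming @dmabuf is a valid buffer the caller holds a reference to
+ * (illustrative only):
+ *
+ *   int cpu_fill_first_page(struct dma_buf *dmabuf, u8 value)
+ *   {
+ *           void *vaddr;
+ *           int ret;
+ *
+ *           ret = dma_buf_begin_cpu_access(dmabuf, DMA_TO_DEVICE);
+ *           if (ret)
+ *                   return ret;
+ *
+ *           vaddr = dma_buf_kmap(dmabuf, 0);
+ *           if (vaddr) {
+ *                   memset(vaddr, value, PAGE_SIZE);
+ *                   dma_buf_kunmap(dmabuf, 0, vaddr);
+ *           }
+ *
+ *           return dma_buf_end_cpu_access(dmabuf, DMA_TO_DEVICE);
+ *   }
+ */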
+
+
+/**
+ * dma_buf_mmap - Setup up a userspace mmap with the given vma
+ * @dmabuf:	[in]	buffer that should back the vma
+ * @vma:	[in]	vma for the mmap
+ * @pgoff:	[in]	offset in pages where this mmap should start within the
+ *			dma-buf buffer.
+ *
+ * This function adjusts the passed in vma so that it points at the file of the
+ * dma_buf operation. It also adjusts the starting pgoff and does bounds
+ * checking on the size of the vma. Then it calls the exporter's mmap function to
+ * set up the mapping.
+ *
+ * Can return negative error values, returns 0 on success.
+ */
+int dma_buf_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma,
+		 unsigned long pgoff)
+{
+	struct file *oldfile;
+	int ret;
+
+	if (WARN_ON(!dmabuf || !vma))
+		return -EINVAL;
+
+	/* check for offset overflow */
+	if (pgoff + vma_pages(vma) < pgoff)
+		return -EOVERFLOW;
+
+	/* check for overflowing the buffer's size */
+	if (pgoff + vma_pages(vma) >
+	    dmabuf->size >> PAGE_SHIFT)
+		return -EINVAL;
+
+	/* readjust the vma */
+	get_file(dmabuf->file);
+	oldfile = vma->vm_file;
+	vma->vm_file = dmabuf->file;
+	vma->vm_pgoff = pgoff;
+
+	ret = dmabuf->ops->mmap(dmabuf, vma);
+	if (ret) {
+		/* restore old parameters on failure */
+		vma->vm_file = oldfile;
+		fput(dmabuf->file);
+	} else {
+		if (oldfile)
+			fput(oldfile);
+	}
+	return ret;
+
+}
+EXPORT_SYMBOL_GPL(dma_buf_mmap);
+
+/**
+ * dma_buf_vmap - Create virtual mapping for the buffer object into kernel
+ * address space. Same restrictions as for vmap and friends apply.
+ * @dmabuf:	[in]	buffer to vmap
+ *
+ * This call may fail due to lack of virtual mapping address space.
+ * These calls are optional in drivers. The intended use for them
+ * is for mapping objects linearly into kernel space for frequently used objects.
+ * Please attempt to use kmap/kunmap before thinking about these interfaces.
+ *
+ * Returns NULL on error.
+ */
+void *dma_buf_vmap(struct dma_buf *dmabuf)
+{
+	void *ptr;
+
+	if (WARN_ON(!dmabuf))
+		return NULL;
+
+	if (!dmabuf->ops->vmap)
+		return NULL;
+
+	mutex_lock(&dmabuf->lock);
+	if (dmabuf->vmapping_counter) {
+		dmabuf->vmapping_counter++;
+		BUG_ON(!dmabuf->vmap_ptr);
+		ptr = dmabuf->vmap_ptr;
+		goto out_unlock;
+	}
+
+	BUG_ON(dmabuf->vmap_ptr);
+
+	ptr = dmabuf->ops->vmap(dmabuf);
+	if (WARN_ON_ONCE(IS_ERR(ptr)))
+		ptr = NULL;
+	if (!ptr)
+		goto out_unlock;
+
+	dmabuf->vmap_ptr = ptr;
+	dmabuf->vmapping_counter = 1;
+
+out_unlock:
+	mutex_unlock(&dmabuf->lock);
+	return ptr;
+}
+EXPORT_SYMBOL_GPL(dma_buf_vmap);
+
+/**
+ * dma_buf_vunmap - Unmap a vmap obtained by dma_buf_vmap.
+ * @dmabuf:	[in]	buffer to vunmap
+ * @vaddr:	[in]	vmap to vunmap
+ */
+void dma_buf_vunmap(struct dma_buf *dmabuf, void *vaddr)
+{
+	if (WARN_ON(!dmabuf))
+		return;
+
+	BUG_ON(!dmabuf->vmap_ptr);
+	BUG_ON(dmabuf->vmapping_counter == 0);
+	BUG_ON(dmabuf->vmap_ptr != vaddr);
+
+	mutex_lock(&dmabuf->lock);
+	if (--dmabuf->vmapping_counter == 0) {
+		if (dmabuf->ops->vunmap)
+			dmabuf->ops->vunmap(dmabuf, vaddr);
+		dmabuf->vmap_ptr = NULL;
+	}
+	mutex_unlock(&dmabuf->lock);
+}
+EXPORT_SYMBOL_GPL(dma_buf_vunmap);
+
+#ifdef CONFIG_DEBUG_FS
+static int dma_buf_debug_show(struct seq_file *s, void *unused)
+{
+	int ret;
+	struct dma_buf *buf_obj;
+	struct dma_buf_attachment *attach_obj;
+	struct reservation_object *robj;
+	struct reservation_object_list *fobj;
+	struct dma_fence *fence;
+	unsigned seq;
+	int count = 0, attach_count, shared_count, i;
+	size_t size = 0;
+
+	ret = mutex_lock_interruptible(&db_list.lock);
+
+	if (ret)
+		return ret;
+
+	seq_puts(s, "\nDma-buf Objects:\n");
+	seq_printf(s, "%-8s\t%-8s\t%-8s\t%-8s\texp_name\n",
+		   "size", "flags", "mode", "count");
+
+	list_for_each_entry(buf_obj, &db_list.head, list_node) {
+		ret = mutex_lock_interruptible(&buf_obj->lock);
+
+		if (ret) {
+			seq_puts(s,
+				 "\tERROR locking buffer object: skipping\n");
+			continue;
+		}
+
+		seq_printf(s, "%08zu\t%08x\t%08x\t%08ld\t%s\n",
+				buf_obj->size,
+				buf_obj->file->f_flags, buf_obj->file->f_mode,
+				file_count(buf_obj->file),
+				buf_obj->exp_name);
+
+		robj = buf_obj->resv;
+		while (true) {
+			seq = read_seqcount_begin(&robj->seq);
+			rcu_read_lock();
+			fobj = rcu_dereference(robj->fence);
+			shared_count = fobj ? fobj->shared_count : 0;
+			fence = rcu_dereference(robj->fence_excl);
+			if (!read_seqcount_retry(&robj->seq, seq))
+				break;
+			rcu_read_unlock();
+		}
+
+		if (fence)
+			seq_printf(s, "\tExclusive fence: %s %s %ssignalled\n",
+				   fence->ops->get_driver_name(fence),
+				   fence->ops->get_timeline_name(fence),
+				   dma_fence_is_signaled(fence) ? "" : "un");
+		for (i = 0; i < shared_count; i++) {
+			fence = rcu_dereference(fobj->shared[i]);
+			if (!dma_fence_get_rcu(fence))
+				continue;
+			seq_printf(s, "\tShared fence: %s %s %ssignalled\n",
+				   fence->ops->get_driver_name(fence),
+				   fence->ops->get_timeline_name(fence),
+				   dma_fence_is_signaled(fence) ? "" : "un");
+		}
+		rcu_read_unlock();
+
+		seq_puts(s, "\tAttached Devices:\n");
+		attach_count = 0;
+
+		list_for_each_entry(attach_obj, &buf_obj->attachments, node) {
+			seq_printf(s, "\t%s\n", dev_name(attach_obj->dev));
+			attach_count++;
+		}
+
+		seq_printf(s, "Total %d devices attached\n\n",
+				attach_count);
+
+		count++;
+		size += buf_obj->size;
+		mutex_unlock(&buf_obj->lock);
+	}
+
+	seq_printf(s, "\nTotal %d objects, %zu bytes\n", count, size);
+
+	mutex_unlock(&db_list.lock);
+	return 0;
+}
+
+static int dma_buf_debug_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, dma_buf_debug_show, NULL);
+}
+
+static const struct file_operations dma_buf_debug_fops = {
+	.open           = dma_buf_debug_open,
+	.read           = seq_read,
+	.llseek         = seq_lseek,
+	.release        = single_release,
+};
+
+static struct dentry *dma_buf_debugfs_dir;
+
+static int dma_buf_init_debugfs(void)
+{
+	struct dentry *d;
+	int err = 0;
+
+	d = debugfs_create_dir("dma_buf", NULL);
+	if (IS_ERR(d))
+		return PTR_ERR(d);
+
+	dma_buf_debugfs_dir = d;
+
+	d = debugfs_create_file("bufinfo", S_IRUGO, dma_buf_debugfs_dir,
+				NULL, &dma_buf_debug_fops);
+	if (IS_ERR(d)) {
+		pr_debug("dma_buf: debugfs: failed to create node bufinfo\n");
+		debugfs_remove_recursive(dma_buf_debugfs_dir);
+		dma_buf_debugfs_dir = NULL;
+		err = PTR_ERR(d);
+	}
+
+	return err;
+}
+
+static void dma_buf_uninit_debugfs(void)
+{
+	debugfs_remove_recursive(dma_buf_debugfs_dir);
+}
+#else
+static inline int dma_buf_init_debugfs(void)
+{
+	return 0;
+}
+static inline void dma_buf_uninit_debugfs(void)
+{
+}
+#endif
+
+static int __init dma_buf_init(void)
+{
+	mutex_init(&db_list.lock);
+	INIT_LIST_HEAD(&db_list.head);
+	dma_buf_init_debugfs();
+	return 0;
+}
+subsys_initcall(dma_buf_init);
+
+static void __exit dma_buf_deinit(void)
+{
+	dma_buf_uninit_debugfs();
+}
+__exitcall(dma_buf_deinit);
diff --git a/drivers/dma-buf/dma-fence-array.c b/drivers/dma-buf/dma-fence-array.c
new file mode 100644
index 0000000..a8c2544
--- /dev/null
+++ b/drivers/dma-buf/dma-fence-array.c
@@ -0,0 +1,181 @@
+/*
+ * dma-fence-array: aggregate fences to be waited together
+ *
+ * Copyright (C) 2016 Collabora Ltd
+ * Copyright (C) 2016 Advanced Micro Devices, Inc.
+ * Authors:
+ *	Gustavo Padovan <gustavo@padovan.org>
+ *	Christian König <christian.koenig@amd.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ */
+
+#include <linux/export.h>
+#include <linux/slab.h>
+#include <linux/dma-fence-array.h>
+
+static const char *dma_fence_array_get_driver_name(struct dma_fence *fence)
+{
+	return "dma_fence_array";
+}
+
+static const char *dma_fence_array_get_timeline_name(struct dma_fence *fence)
+{
+	return "unbound";
+}
+
+static void irq_dma_fence_array_work(struct irq_work *wrk)
+{
+	struct dma_fence_array *array = container_of(wrk, typeof(*array), work);
+
+	dma_fence_signal(&array->base);
+	dma_fence_put(&array->base);
+}
+
+static void dma_fence_array_cb_func(struct dma_fence *f,
+				    struct dma_fence_cb *cb)
+{
+	struct dma_fence_array_cb *array_cb =
+		container_of(cb, struct dma_fence_array_cb, cb);
+	struct dma_fence_array *array = array_cb->array;
+
+	if (atomic_dec_and_test(&array->num_pending))
+		irq_work_queue(&array->work);
+	else
+		dma_fence_put(&array->base);
+}
+
+static bool dma_fence_array_enable_signaling(struct dma_fence *fence)
+{
+	struct dma_fence_array *array = to_dma_fence_array(fence);
+	struct dma_fence_array_cb *cb = (void *)(&array[1]);
+	unsigned i;
+
+	for (i = 0; i < array->num_fences; ++i) {
+		cb[i].array = array;
+		/*
+		 * As we may report that the fence is signaled before all
+		 * callbacks are complete, we need to take an additional
+		 * reference count on the array so that we do not free it too
+		 * early. The core fence handling will only hold the reference
+		 * until we signal the array as complete (but that is now
+		 * insufficient).
+		 */
+		dma_fence_get(&array->base);
+		if (dma_fence_add_callback(array->fences[i], &cb[i].cb,
+					   dma_fence_array_cb_func)) {
+			dma_fence_put(&array->base);
+			if (atomic_dec_and_test(&array->num_pending))
+				return false;
+		}
+	}
+
+	return true;
+}
+
+static bool dma_fence_array_signaled(struct dma_fence *fence)
+{
+	struct dma_fence_array *array = to_dma_fence_array(fence);
+
+	return atomic_read(&array->num_pending) <= 0;
+}
+
+static void dma_fence_array_release(struct dma_fence *fence)
+{
+	struct dma_fence_array *array = to_dma_fence_array(fence);
+	unsigned i;
+
+	for (i = 0; i < array->num_fences; ++i)
+		dma_fence_put(array->fences[i]);
+
+	kfree(array->fences);
+	dma_fence_free(fence);
+}
+
+const struct dma_fence_ops dma_fence_array_ops = {
+	.get_driver_name = dma_fence_array_get_driver_name,
+	.get_timeline_name = dma_fence_array_get_timeline_name,
+	.enable_signaling = dma_fence_array_enable_signaling,
+	.signaled = dma_fence_array_signaled,
+	.release = dma_fence_array_release,
+};
+EXPORT_SYMBOL(dma_fence_array_ops);
+
+/**
+ * dma_fence_array_create - Create a custom fence array
+ * @num_fences:		[in]	number of fences to add in the array
+ * @fences:		[in]	array containing the fences
+ * @context:		[in]	fence context to use
+ * @seqno:		[in]	sequence number to use
+ * @signal_on_any:	[in]	signal on any fence in the array
+ *
+ * Allocate a dma_fence_array object and initialize the base fence with
+ * dma_fence_init().
+ * In case of error it returns NULL.
+ *
+ * The caller should allocate the fences array with num_fences size
+ * and fill it with the fences it wants to add to the object. Ownership of this
+ * array is taken and dma_fence_put() is used on each fence on release.
+ *
+ * If @signal_on_any is true the fence array signals if any fence in the array
+ * signals, otherwise it signals when all fences in the array signal.
+ */
+struct dma_fence_array *dma_fence_array_create(int num_fences,
+					       struct dma_fence **fences,
+					       u64 context, unsigned seqno,
+					       bool signal_on_any)
+{
+	struct dma_fence_array *array;
+	size_t size = sizeof(*array);
+
+	/* Allocate the callback structures behind the array. */
+	size += num_fences * sizeof(struct dma_fence_array_cb);
+	array = kzalloc(size, GFP_KERNEL);
+	if (!array)
+		return NULL;
+
+	spin_lock_init(&array->lock);
+	dma_fence_init(&array->base, &dma_fence_array_ops, &array->lock,
+		       context, seqno);
+	init_irq_work(&array->work, irq_dma_fence_array_work);
+
+	array->num_fences = num_fences;
+	atomic_set(&array->num_pending, signal_on_any ? 1 : num_fences);
+	array->fences = fences;
+
+	return array;
+}
+EXPORT_SYMBOL(dma_fence_array_create);
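+
+/*
+ * A sketch of merging two fences into one that signals when both have
+ * signaled. Ownership of the fences array passes to the dma_fence_array,
+ * which puts each fence and frees the array on release:
+ *
+ *   struct dma_fence *merge_two(struct dma_fence *a, struct dma_fence *b,
+ *                               u64 context)
+ *   {
+ *           struct dma_fence_array *array;
+ *           struct dma_fence **fences;
+ *
+ *           fences = kmalloc_array(2, sizeof(*fences), GFP_KERNEL);
+ *           if (!fences)
+ *                   return NULL;
+ *
+ *           fences[0] = dma_fence_get(a);
+ *           fences[1] = dma_fence_get(b);
+ *
+ *           array = dma_fence_array_create(2, fences, context, 1, false);
+ *           if (!array) {
+ *                   dma_fence_put(a);
+ *                   dma_fence_put(b);
+ *                   kfree(fences);
+ *                   return NULL;
+ *           }
+ *           return &array->base;
+ *   }
+ */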
+
+/**
+ * dma_fence_match_context - Check if all fences are from the given context
+ * @fence:		[in]	fence or fence array
+ * @context:		[in]	fence context to check all fences against
+ *
+ * Checks the provided fence or, for a fence array, all fences in the array
+ * against the given context. Returns false if any fence is from a different
+ * context.
+ */
+bool dma_fence_match_context(struct dma_fence *fence, u64 context)
+{
+	struct dma_fence_array *array = to_dma_fence_array(fence);
+	unsigned i;
+
+	if (!dma_fence_is_array(fence))
+		return fence->context == context;
+
+	for (i = 0; i < array->num_fences; i++) {
+		if (array->fences[i]->context != context)
+			return false;
+	}
+
+	return true;
+}
+EXPORT_SYMBOL(dma_fence_match_context);
diff --git a/drivers/dma-buf/dma-fence.c b/drivers/dma-buf/dma-fence.c
new file mode 100644
index 0000000..1551ca7
--- /dev/null
+++ b/drivers/dma-buf/dma-fence.c
@@ -0,0 +1,634 @@
+/*
+ * Fence mechanism for dma-buf and to allow for asynchronous dma access
+ *
+ * Copyright (C) 2012 Canonical Ltd
+ * Copyright (C) 2012 Texas Instruments
+ *
+ * Authors:
+ * Rob Clark <robdclark@gmail.com>
+ * Maarten Lankhorst <maarten.lankhorst@canonical.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ */
+
+#include <linux/slab.h>
+#include <linux/export.h>
+#include <linux/atomic.h>
+#include <linux/dma-fence.h>
+#include <linux/sched/signal.h>
+
+#define CREATE_TRACE_POINTS
+#include <trace/events/dma_fence.h>
+
+EXPORT_TRACEPOINT_SYMBOL(dma_fence_emit);
+EXPORT_TRACEPOINT_SYMBOL(dma_fence_enable_signal);
+
+/*
+ * fence context counter: each execution context should have its own
+ * fence context, this allows checking if fences belong to the same
+ * context or not. One device can have multiple separate contexts,
+ * and they're used if some engine can run independently of another.
+ */
+static atomic64_t dma_fence_context_counter = ATOMIC64_INIT(0);
+
+/**
+ * DOC: DMA fences overview
+ *
+ * DMA fences, represented by &struct dma_fence, are the kernel internal
+ * synchronization primitive for DMA operations like GPU rendering, video
+ * encoding/decoding, or displaying buffers on a screen.
+ *
+ * A fence is initialized using dma_fence_init() and completed using
+ * dma_fence_signal(). Fences are associated with a context, allocated through
+ * dma_fence_context_alloc(), and all fences on the same context are
+ * fully ordered.
+ *
+ * Since the purpose of fences is to facilitate cross-device and
+ * cross-application synchronization, there are multiple ways to use one:
+ *
+ * - Individual fences can be exposed as a &sync_file, accessed as a file
+ *   descriptor from userspace, created by calling sync_file_create(). This is
+ *   called explicit fencing, since userspace passes around explicit
+ *   synchronization points.
+ *
+ * - Some subsystems also have their own explicit fencing primitives, like
+ *   &drm_syncobj. Compared to &sync_file, a &drm_syncobj allows the underlying
+ *   fence to be updated.
+ *
+ * - Then there's also implicit fencing, where the synchronization points are
+ *   implicitly passed around as part of shared &dma_buf instances. Such
+ *   implicit fences are stored in &struct reservation_object through the
+ *   &dma_buf.resv pointer.
+ */
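+
+/*
+ * A minimal sketch of a driver-private fence built on this API. my_fence_ops
+ * and struct my_engine are hypothetical; a real driver would call
+ * dma_fence_signal() from its completion interrupt and manage seqno/locking
+ * to suit its hardware:
+ *
+ *   static const char *my_driver_name(struct dma_fence *f)
+ *   {
+ *           return "my-driver";
+ *   }
+ *
+ *   static const char *my_timeline_name(struct dma_fence *f)
+ *   {
+ *           return "my-engine";
+ *   }
+ *
+ *   static const struct dma_fence_ops my_fence_ops = {
+ *           .get_driver_name = my_driver_name,
+ *           .get_timeline_name = my_timeline_name,
+ *   };
+ *
+ *   struct dma_fence *my_engine_submit(struct my_engine *eng)
+ *   {
+ *           struct dma_fence *fence = kzalloc(sizeof(*fence), GFP_KERNEL);
+ *
+ *           if (!fence)
+ *                   return NULL;
+ *
+ *           // eng->context comes from dma_fence_context_alloc(1) at init.
+ *           dma_fence_init(fence, &my_fence_ops, &eng->fence_lock,
+ *                          eng->context, ++eng->seqno);
+ *           return fence;
+ *   }
+ */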
+
+/**
+ * dma_fence_context_alloc - allocate an array of fence contexts
+ * @num: amount of contexts to allocate
+ *
+ * This function will return the first index of the number of fence contexts
+ * allocated.  The fence context is used for setting &dma_fence.context to a
+ * unique number by passing the context to dma_fence_init().
+ */
+u64 dma_fence_context_alloc(unsigned num)
+{
+	WARN_ON(!num);
+	return atomic64_add_return(num, &dma_fence_context_counter) - num;
+}
+EXPORT_SYMBOL(dma_fence_context_alloc);
+
+/**
+ * dma_fence_signal_locked - signal completion of a fence
+ * @fence: the fence to signal
+ *
+ * Signal completion for software callbacks on a fence, this will unblock
+ * dma_fence_wait() calls and run all the callbacks added with
+ * dma_fence_add_callback(). Can be called multiple times, but since a fence
+ * can only go from the unsignaled to the signaled state and not back, it will
+ * only be effective the first time.
+ *
+ * Unlike dma_fence_signal(), this function must be called with &dma_fence.lock
+ * held.
+ *
+ * Returns 0 on success and a negative error value when @fence has been
+ * signalled already.
+ */
+int dma_fence_signal_locked(struct dma_fence *fence)
+{
+	struct dma_fence_cb *cur, *tmp;
+	int ret = 0;
+
+	if (WARN_ON(!fence))
+		return -EINVAL;
+
+	lockdep_assert_held(fence->lock);
+
+	if (test_and_set_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
+		ret = -EINVAL;
+
+		/*
+		 * we might have raced with the unlocked dma_fence_signal,
+		 * still run through all callbacks
+		 */
+	} else {
+		fence->timestamp = ktime_get();
+		set_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &fence->flags);
+		trace_dma_fence_signaled(fence);
+	}
+
+	list_for_each_entry_safe(cur, tmp, &fence->cb_list, node) {
+		list_del_init(&cur->node);
+		cur->func(fence, cur);
+	}
+	return ret;
+}
+EXPORT_SYMBOL(dma_fence_signal_locked);
+
+/**
+ * dma_fence_signal - signal completion of a fence
+ * @fence: the fence to signal
+ *
+ * Signal completion for software callbacks on a fence, this will unblock
+ * dma_fence_wait() calls and run all the callbacks added with
+ * dma_fence_add_callback(). Can be called multiple times, but since a fence
+ * can only go from the unsignaled to the signaled state and not back, it will
+ * only be effective the first time.
+ *
+ * Returns 0 on success and a negative error value when @fence has been
+ * signalled already.
+ */
+int dma_fence_signal(struct dma_fence *fence)
+{
+	unsigned long flags;
+
+	if (!fence)
+		return -EINVAL;
+
+	if (test_and_set_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
+		return -EINVAL;
+
+	fence->timestamp = ktime_get();
+	set_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &fence->flags);
+	trace_dma_fence_signaled(fence);
+
+	if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &fence->flags)) {
+		struct dma_fence_cb *cur, *tmp;
+
+		spin_lock_irqsave(fence->lock, flags);
+		list_for_each_entry_safe(cur, tmp, &fence->cb_list, node) {
+			list_del_init(&cur->node);
+			cur->func(fence, cur);
+		}
+		spin_unlock_irqrestore(fence->lock, flags);
+	}
+	return 0;
+}
+EXPORT_SYMBOL(dma_fence_signal);
+
+/**
+ * dma_fence_wait_timeout - sleep until the fence gets signaled
+ * or until timeout elapses
+ * @fence: the fence to wait on
+ * @intr: if true, do an interruptible wait
+ * @timeout: timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT
+ *
+ * Returns -ERESTARTSYS if interrupted, 0 if the wait timed out, or the
+ * remaining timeout in jiffies on success. Other error values may be
+ * returned on custom implementations.
+ *
+ * Performs a synchronous wait on this fence. It is assumed the caller
+ * directly or indirectly (buf-mgr between reservation and committing)
+ * holds a reference to the fence, otherwise the fence might be
+ * freed before return, resulting in undefined behavior.
+ *
+ * See also dma_fence_wait() and dma_fence_wait_any_timeout().
+ */
+signed long
+dma_fence_wait_timeout(struct dma_fence *fence, bool intr, signed long timeout)
+{
+	signed long ret;
+
+	if (WARN_ON(timeout < 0))
+		return -EINVAL;
+
+	trace_dma_fence_wait_start(fence);
+	if (fence->ops->wait)
+		ret = fence->ops->wait(fence, intr, timeout);
+	else
+		ret = dma_fence_default_wait(fence, intr, timeout);
+	trace_dma_fence_wait_end(fence);
+	return ret;
+}
+EXPORT_SYMBOL(dma_fence_wait_timeout);
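+
+/*
+ * Typical use, assuming the caller holds a reference to @fence (the wrapper
+ * function is hypothetical):
+ *
+ *   long wait_up_to_one_second(struct dma_fence *fence)
+ *   {
+ *           signed long ret;
+ *
+ *           ret = dma_fence_wait_timeout(fence, true,
+ *                                        msecs_to_jiffies(1000));
+ *           if (ret == 0)
+ *                   return -ETIMEDOUT;      // timed out
+ *           if (ret < 0)
+ *                   return ret;             // interrupted or driver error
+ *           return 0;                       // fence signaled in time
+ *   }
+ */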
+
+/**
+ * dma_fence_release - default release function for fences
+ * @kref: &dma_fence.refcount
+ *
+ * This is the default release function for &dma_fence. Drivers shouldn't call
+ * this directly, but instead call dma_fence_put().
+ */
+void dma_fence_release(struct kref *kref)
+{
+	struct dma_fence *fence =
+		container_of(kref, struct dma_fence, refcount);
+
+	trace_dma_fence_destroy(fence);
+
+	/* Failed to signal before release, could be a refcounting issue */
+	WARN_ON(!list_empty(&fence->cb_list));
+
+	if (fence->ops->release)
+		fence->ops->release(fence);
+	else
+		dma_fence_free(fence);
+}
+EXPORT_SYMBOL(dma_fence_release);
+
+/**
+ * dma_fence_free - default release function for &dma_fence.
+ * @fence: fence to release
+ *
+ * This is the default implementation for &dma_fence_ops.release. It calls
+ * kfree_rcu() on @fence.
+ */
+void dma_fence_free(struct dma_fence *fence)
+{
+	kfree_rcu(fence, rcu);
+}
+EXPORT_SYMBOL(dma_fence_free);
+
+/**
+ * dma_fence_enable_sw_signaling - enable signaling on fence
+ * @fence: the fence to enable
+ *
+ * This will request sw signaling to be enabled, to make the fence
+ * complete as soon as possible. This calls &dma_fence_ops.enable_signaling
+ * internally.
+ */
+void dma_fence_enable_sw_signaling(struct dma_fence *fence)
+{
+	unsigned long flags;
+
+	if (!test_and_set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
+			      &fence->flags) &&
+	    !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags) &&
+	    fence->ops->enable_signaling) {
+		trace_dma_fence_enable_signal(fence);
+
+		spin_lock_irqsave(fence->lock, flags);
+
+		if (!fence->ops->enable_signaling(fence))
+			dma_fence_signal_locked(fence);
+
+		spin_unlock_irqrestore(fence->lock, flags);
+	}
+}
+EXPORT_SYMBOL(dma_fence_enable_sw_signaling);
+
+/**
+ * dma_fence_add_callback - add a callback to be called when the fence
+ * is signaled
+ * @fence: the fence to wait on
+ * @cb: the callback to register
+ * @func: the function to call
+ *
+ * @cb will be initialized by dma_fence_add_callback(), no initialization
+ * by the caller is required. Any number of callbacks can be registered
+ * to a fence, but a callback can only be registered to one fence at a time.
+ *
+ * Note that the callback can be called from an atomic context.  If
+ * fence is already signaled, this function will return -ENOENT (and
+ * *not* call the callback).
+ *
+ * Add a software callback to the fence. The same restrictions apply to the
+ * refcount as for dma_fence_wait(); however, the caller doesn't need to
+ * keep a refcount to the fence after dma_fence_add_callback() has returned:
+ * when software access is enabled, the creator of the fence is required to keep
+ * the fence alive until after it signals with dma_fence_signal(). The callback
+ * itself can be called from irq context.
+ *
+ * Returns 0 in case of success, -ENOENT if the fence is already signaled
+ * and -EINVAL in case of error.
+ */
+int dma_fence_add_callback(struct dma_fence *fence, struct dma_fence_cb *cb,
+			   dma_fence_func_t func)
+{
+	unsigned long flags;
+	int ret = 0;
+	bool was_set;
+
+	if (WARN_ON(!fence || !func))
+		return -EINVAL;
+
+	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
+		INIT_LIST_HEAD(&cb->node);
+		return -ENOENT;
+	}
+
+	spin_lock_irqsave(fence->lock, flags);
+
+	was_set = test_and_set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
+				   &fence->flags);
+
+	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
+		ret = -ENOENT;
+	else if (!was_set && fence->ops->enable_signaling) {
+		trace_dma_fence_enable_signal(fence);
+
+		if (!fence->ops->enable_signaling(fence)) {
+			dma_fence_signal_locked(fence);
+			ret = -ENOENT;
+		}
+	}
+
+	if (!ret) {
+		cb->func = func;
+		list_add_tail(&cb->node, &fence->cb_list);
+	} else
+		INIT_LIST_HEAD(&cb->node);
+	spin_unlock_irqrestore(fence->lock, flags);
+
+	return ret;
+}
+EXPORT_SYMBOL(dma_fence_add_callback);
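+
+/*
+ * A short sketch of registering a callback; struct my_waiter and
+ * my_fence_cb() are hypothetical. The callback can run in IRQ context, so it
+ * must not sleep:
+ *
+ *   struct my_waiter {
+ *           struct dma_fence_cb cb;
+ *           struct completion done;
+ *   };
+ *
+ *   static void my_fence_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
+ *   {
+ *           struct my_waiter *w = container_of(cb, struct my_waiter, cb);
+ *
+ *           complete(&w->done);
+ *   }
+ *
+ *   // ... in the caller, with a reference held on @fence:
+ *   init_completion(&waiter.done);
+ *   if (dma_fence_add_callback(fence, &waiter.cb, my_fence_cb))
+ *           complete(&waiter.done);         // already signaled (-ENOENT)
+ */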
+
+/**
+ * dma_fence_get_status - returns the status upon completion
+ * @fence: the dma_fence to query
+ *
+ * This wraps dma_fence_get_status_locked() to return the error status
+ * condition on a signaled fence. See dma_fence_get_status_locked() for more
+ * details.
+ *
+ * Returns 0 if the fence has not yet been signaled, 1 if the fence has
+ * been signaled without an error condition, or a negative error code
+ * if the fence has completed with an error.
+ */
+int dma_fence_get_status(struct dma_fence *fence)
+{
+	unsigned long flags;
+	int status;
+
+	spin_lock_irqsave(fence->lock, flags);
+	status = dma_fence_get_status_locked(fence);
+	spin_unlock_irqrestore(fence->lock, flags);
+
+	return status;
+}
+EXPORT_SYMBOL(dma_fence_get_status);
+
+/**
+ * dma_fence_remove_callback - remove a callback from the signaling list
+ * @fence: the fence to wait on
+ * @cb: the callback to remove
+ *
+ * Remove a previously queued callback from the fence. This function returns
+ * true if the callback is successfully removed, or false if the fence has
+ * already been signaled.
+ *
+ * *WARNING*:
+ * Cancelling a callback should only be done if you really know what you're
+ * doing, since deadlocks and race conditions could occur all too easily. For
+ * this reason, it should only ever be done on hardware lockup recovery,
+ * with a reference held to the fence.
+ *
+ * Behaviour is undefined if @cb has not been added to @fence using
+ * dma_fence_add_callback() beforehand.
+ */
+bool
+dma_fence_remove_callback(struct dma_fence *fence, struct dma_fence_cb *cb)
+{
+	unsigned long flags;
+	bool ret;
+
+	spin_lock_irqsave(fence->lock, flags);
+
+	ret = !list_empty(&cb->node);
+	if (ret)
+		list_del_init(&cb->node);
+
+	spin_unlock_irqrestore(fence->lock, flags);
+
+	return ret;
+}
+EXPORT_SYMBOL(dma_fence_remove_callback);
+
+struct default_wait_cb {
+	struct dma_fence_cb base;
+	struct task_struct *task;
+};
+
+static void
+dma_fence_default_wait_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
+{
+	struct default_wait_cb *wait =
+		container_of(cb, struct default_wait_cb, base);
+
+	wake_up_state(wait->task, TASK_NORMAL);
+}
+
+/**
+ * dma_fence_default_wait - default sleep until the fence gets signaled
+ * or until timeout elapses
+ * @fence: the fence to wait on
+ * @intr: if true, do an interruptible wait
+ * @timeout: timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT
+ *
+ * Returns -ERESTARTSYS if interrupted, 0 if the wait timed out, or the
+ * remaining timeout in jiffies on success. If @timeout is zero, one is
+ * returned if the fence is already signaled, for consistency with other
+ * functions taking a jiffies timeout.
+ */
+signed long
+dma_fence_default_wait(struct dma_fence *fence, bool intr, signed long timeout)
+{
+	struct default_wait_cb cb;
+	unsigned long flags;
+	signed long ret = timeout ? timeout : 1;
+	bool was_set;
+
+	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
+		return ret;
+
+	spin_lock_irqsave(fence->lock, flags);
+
+	if (intr && signal_pending(current)) {
+		ret = -ERESTARTSYS;
+		goto out;
+	}
+
+	was_set = test_and_set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
+				   &fence->flags);
+
+	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
+		goto out;
+
+	if (!was_set && fence->ops->enable_signaling) {
+		trace_dma_fence_enable_signal(fence);
+
+		if (!fence->ops->enable_signaling(fence)) {
+			dma_fence_signal_locked(fence);
+			goto out;
+		}
+	}
+
+	if (!timeout) {
+		ret = 0;
+		goto out;
+	}
+
+	cb.base.func = dma_fence_default_wait_cb;
+	cb.task = current;
+	list_add(&cb.base.node, &fence->cb_list);
+
+	while (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags) && ret > 0) {
+		if (intr)
+			__set_current_state(TASK_INTERRUPTIBLE);
+		else
+			__set_current_state(TASK_UNINTERRUPTIBLE);
+		spin_unlock_irqrestore(fence->lock, flags);
+
+		ret = schedule_timeout(ret);
+
+		spin_lock_irqsave(fence->lock, flags);
+		if (ret > 0 && intr && signal_pending(current))
+			ret = -ERESTARTSYS;
+	}
+
+	if (!list_empty(&cb.base.node))
+		list_del(&cb.base.node);
+	__set_current_state(TASK_RUNNING);
+
+out:
+	spin_unlock_irqrestore(fence->lock, flags);
+	return ret;
+}
+EXPORT_SYMBOL(dma_fence_default_wait);
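+
+/*
+ * Example usage (illustrative sketch; example_fence_ops and its name
+ * callbacks are hypothetical): a driver with no special wait requirements
+ * can simply point its &dma_fence_ops.wait hook at this helper.
+ *
+ *	static const struct dma_fence_ops example_fence_ops = {
+ *		.get_driver_name = example_get_driver_name,
+ *		.get_timeline_name = example_get_timeline_name,
+ *		.wait = dma_fence_default_wait,
+ *	};
+ */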
+
+static bool
+dma_fence_test_signaled_any(struct dma_fence **fences, uint32_t count,
+			    uint32_t *idx)
+{
+	int i;
+
+	for (i = 0; i < count; ++i) {
+		struct dma_fence *fence = fences[i];
+		if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
+			if (idx)
+				*idx = i;
+			return true;
+		}
+	}
+	return false;
+}
+
+/**
+ * dma_fence_wait_any_timeout - sleep until any fence gets signaled
+ * or until timeout elapses
+ * @fences: array of fences to wait on
+ * @count: number of fences to wait on
+ * @intr: if true, do an interruptible wait
+ * @timeout: timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT
+ * @idx: used to store the first signaled fence index, meaningful only on
+ *	positive return
+ *
+ * Returns -EINVAL on custom fence wait implementation, -ERESTARTSYS if
+ * interrupted, 0 if the wait timed out, or the remaining timeout in jiffies
+ * on success.
+ *
+ * Synchronously waits for the first fence in the array to be signaled. The
+ * caller needs to hold a reference to all fences in the array, otherwise a
+ * fence might be freed before return, resulting in undefined behavior.
+ *
+ * See also dma_fence_wait() and dma_fence_wait_timeout().
+ */
+signed long
+dma_fence_wait_any_timeout(struct dma_fence **fences, uint32_t count,
+			   bool intr, signed long timeout, uint32_t *idx)
+{
+	struct default_wait_cb *cb;
+	signed long ret = timeout;
+	unsigned i;
+
+	if (WARN_ON(!fences || !count || timeout < 0))
+		return -EINVAL;
+
+	if (timeout == 0) {
+		for (i = 0; i < count; ++i)
+			if (dma_fence_is_signaled(fences[i])) {
+				if (idx)
+					*idx = i;
+				return 1;
+			}
+
+		return 0;
+	}
+
+	cb = kcalloc(count, sizeof(struct default_wait_cb), GFP_KERNEL);
+	if (cb == NULL) {
+		ret = -ENOMEM;
+		goto err_free_cb;
+	}
+
+	for (i = 0; i < count; ++i) {
+		struct dma_fence *fence = fences[i];
+
+		cb[i].task = current;
+		if (dma_fence_add_callback(fence, &cb[i].base,
+					   dma_fence_default_wait_cb)) {
+			/* This fence is already signaled */
+			if (idx)
+				*idx = i;
+			goto fence_rm_cb;
+		}
+	}
+
+	while (ret > 0) {
+		if (intr)
+			set_current_state(TASK_INTERRUPTIBLE);
+		else
+			set_current_state(TASK_UNINTERRUPTIBLE);
+
+		if (dma_fence_test_signaled_any(fences, count, idx))
+			break;
+
+		ret = schedule_timeout(ret);
+
+		if (ret > 0 && intr && signal_pending(current))
+			ret = -ERESTARTSYS;
+	}
+
+	__set_current_state(TASK_RUNNING);
+
+fence_rm_cb:
+	while (i-- > 0)
+		dma_fence_remove_callback(fences[i], &cb[i].base);
+
+err_free_cb:
+	kfree(cb);
+
+	return ret;
+}
+EXPORT_SYMBOL(dma_fence_wait_any_timeout);
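+
+/*
+ * Example usage (illustrative sketch; fence_a and fence_b are assumed to be
+ * fences the caller holds references on): wait up to one second for
+ * whichever of the two signals first.
+ *
+ *	struct dma_fence *fences[2] = { fence_a, fence_b };
+ *	uint32_t first;
+ *	signed long ret;
+ *
+ *	ret = dma_fence_wait_any_timeout(fences, 2, true, HZ, &first);
+ *	if (ret > 0)
+ *		pr_debug("fence %u signaled, %ld jiffies left\n", first, ret);
+ *	else if (ret == 0)
+ *		pr_debug("timed out\n");
+ *	// ret < 0: -ERESTARTSYS or -EINVAL
+ */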
+
+/**
+ * dma_fence_init - Initialize a custom fence.
+ * @fence: the fence to initialize
+ * @ops: the dma_fence_ops for operations on this fence
+ * @lock: the irqsafe spinlock to use for locking this fence
+ * @context: the execution context this fence is run on
+ * @seqno: a linear increasing sequence number for this context
+ *
+ * Initializes an allocated fence. The caller doesn't have to keep its
+ * refcount after committing with this fence, but it will need to hold a
+ * refcount again if &dma_fence_ops.enable_signaling gets called.
+ *
+ * context and seqno are used for easy comparison between fences, allowing
+ * callers to check which fence is later by simply using dma_fence_later().
+ */
+void
+dma_fence_init(struct dma_fence *fence, const struct dma_fence_ops *ops,
+	       spinlock_t *lock, u64 context, unsigned seqno)
+{
+	BUG_ON(!lock);
+	BUG_ON(!ops || !ops->get_driver_name || !ops->get_timeline_name);
+
+	kref_init(&fence->refcount);
+	fence->ops = ops;
+	INIT_LIST_HEAD(&fence->cb_list);
+	fence->lock = lock;
+	fence->context = context;
+	fence->seqno = seqno;
+	fence->flags = 0UL;
+	fence->error = 0;
+
+	trace_dma_fence_init(fence);
+}
+EXPORT_SYMBOL(dma_fence_init);
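+
+/*
+ * Example usage (minimal illustrative sketch; all example_* names are
+ * hypothetical): the BUG_ON above only demands the two name callbacks, an
+ * irqsafe spinlock and a context/seqno pair. Real drivers usually provide
+ * more of the ops.
+ *
+ *	static DEFINE_SPINLOCK(example_lock);
+ *
+ *	static const char *example_driver_name(struct dma_fence *f)
+ *	{
+ *		return "example";
+ *	}
+ *
+ *	static const char *example_timeline_name(struct dma_fence *f)
+ *	{
+ *		return "example-timeline";
+ *	}
+ *
+ *	static const struct dma_fence_ops example_ops = {
+ *		.get_driver_name = example_driver_name,
+ *		.get_timeline_name = example_timeline_name,
+ *	};
+ *
+ *	u64 ctx = dma_fence_context_alloc(1);
+ *	struct dma_fence *f = kzalloc(sizeof(*f), GFP_KERNEL);
+ *
+ *	if (f)
+ *		dma_fence_init(f, &example_ops, &example_lock, ctx, 1);
+ */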
diff --git a/drivers/dma-buf/reservation.c b/drivers/dma-buf/reservation.c
new file mode 100644
index 0000000..6c95f61
--- /dev/null
+++ b/drivers/dma-buf/reservation.c
@@ -0,0 +1,638 @@
+/*
+ * Copyright (C) 2012-2014 Canonical Ltd (Maarten Lankhorst)
+ *
+ * Based on bo.c which bears the following copyright notice,
+ * but is dual licensed:
+ *
+ * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+/*
+ * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
+ */
+
+#include <linux/reservation.h>
+#include <linux/export.h>
+
+/**
+ * DOC: Reservation Object Overview
+ *
+ * The reservation object provides a mechanism to manage shared and
+ * exclusive fences associated with a buffer.  A reservation object
+ * can have one exclusive fence attached (normally associated with
+ * write operations) or N shared fences (read operations).  The RCU
+ * mechanism is used to protect read access to fences from locked
+ * write-side updates.
+ */
+
+DEFINE_WD_CLASS(reservation_ww_class);
+EXPORT_SYMBOL(reservation_ww_class);
+
+struct lock_class_key reservation_seqcount_class;
+EXPORT_SYMBOL(reservation_seqcount_class);
+
+const char reservation_seqcount_string[] = "reservation_seqcount";
+EXPORT_SYMBOL(reservation_seqcount_string);
+
+/**
+ * reservation_object_reserve_shared - Reserve space to add a shared
+ * fence to a reservation_object.
+ * @obj: reservation object
+ *
+ * Should be called before reservation_object_add_shared_fence().  Must
+ * be called with obj->lock held.
+ *
+ * RETURNS
+ * Zero for success, or -errno
+ */
+int reservation_object_reserve_shared(struct reservation_object *obj)
+{
+	struct reservation_object_list *fobj, *old;
+	u32 max;
+
+	old = reservation_object_get_list(obj);
+
+	if (old && old->shared_max) {
+		if (old->shared_count < old->shared_max) {
+			/* perform an in-place update */
+			kfree(obj->staged);
+			obj->staged = NULL;
+			return 0;
+		} else
+			max = old->shared_max * 2;
+	} else
+		max = 4;
+
+	/*
+	 * resize obj->staged or allocate if it doesn't exist,
+	 * noop if already correct size
+	 */
+	fobj = krealloc(obj->staged, offsetof(typeof(*fobj), shared[max]),
+			GFP_KERNEL);
+	if (!fobj)
+		return -ENOMEM;
+
+	obj->staged = fobj;
+	fobj->shared_max = max;
+	return 0;
+}
+EXPORT_SYMBOL(reservation_object_reserve_shared);
+
+static void
+reservation_object_add_shared_inplace(struct reservation_object *obj,
+				      struct reservation_object_list *fobj,
+				      struct dma_fence *fence)
+{
+	struct dma_fence *signaled = NULL;
+	u32 i, signaled_idx;
+
+	dma_fence_get(fence);
+
+	preempt_disable();
+	write_seqcount_begin(&obj->seq);
+
+	for (i = 0; i < fobj->shared_count; ++i) {
+		struct dma_fence *old_fence;
+
+		old_fence = rcu_dereference_protected(fobj->shared[i],
+						reservation_object_held(obj));
+
+		if (old_fence->context == fence->context) {
+			/* memory barrier is added by write_seqcount_begin */
+			RCU_INIT_POINTER(fobj->shared[i], fence);
+			write_seqcount_end(&obj->seq);
+			preempt_enable();
+
+			dma_fence_put(old_fence);
+			return;
+		}
+
+		if (!signaled && dma_fence_is_signaled(old_fence)) {
+			signaled = old_fence;
+			signaled_idx = i;
+		}
+	}
+
+	/*
+	 * memory barrier is added by write_seqcount_begin,
+	 * fobj->shared_count is protected by this lock too
+	 */
+	if (signaled) {
+		RCU_INIT_POINTER(fobj->shared[signaled_idx], fence);
+	} else {
+		BUG_ON(fobj->shared_count >= fobj->shared_max);
+		RCU_INIT_POINTER(fobj->shared[fobj->shared_count], fence);
+		fobj->shared_count++;
+	}
+
+	write_seqcount_end(&obj->seq);
+	preempt_enable();
+
+	dma_fence_put(signaled);
+}
+
+static void
+reservation_object_add_shared_replace(struct reservation_object *obj,
+				      struct reservation_object_list *old,
+				      struct reservation_object_list *fobj,
+				      struct dma_fence *fence)
+{
+	unsigned i, j, k;
+
+	dma_fence_get(fence);
+
+	if (!old) {
+		RCU_INIT_POINTER(fobj->shared[0], fence);
+		fobj->shared_count = 1;
+		goto done;
+	}
+
+	/*
+	 * no need to bump fence refcounts, rcu_read access
+	 * requires the use of kref_get_unless_zero, and the
+	 * references from the old struct are carried over to
+	 * the new.
+	 */
+	for (i = 0, j = 0, k = fobj->shared_max; i < old->shared_count; ++i) {
+		struct dma_fence *check;
+
+		check = rcu_dereference_protected(old->shared[i],
+						reservation_object_held(obj));
+
+		if (check->context == fence->context ||
+		    dma_fence_is_signaled(check))
+			RCU_INIT_POINTER(fobj->shared[--k], check);
+		else
+			RCU_INIT_POINTER(fobj->shared[j++], check);
+	}
+	fobj->shared_count = j;
+	RCU_INIT_POINTER(fobj->shared[fobj->shared_count], fence);
+	fobj->shared_count++;
+
+done:
+	preempt_disable();
+	write_seqcount_begin(&obj->seq);
+	/*
+	 * RCU_INIT_POINTER can be used here,
+	 * seqcount provides the necessary barriers
+	 */
+	RCU_INIT_POINTER(obj->fence, fobj);
+	write_seqcount_end(&obj->seq);
+	preempt_enable();
+
+	if (!old)
+		return;
+
+	/* Drop the references to the signaled fences */
+	for (i = k; i < fobj->shared_max; ++i) {
+		struct dma_fence *f;
+
+		f = rcu_dereference_protected(fobj->shared[i],
+					      reservation_object_held(obj));
+		dma_fence_put(f);
+	}
+	kfree_rcu(old, rcu);
+}
+
+/**
+ * reservation_object_add_shared_fence - Add a fence to a shared slot
+ * @obj: the reservation object
+ * @fence: the shared fence to add
+ *
+ * Add a fence to a shared slot. obj->lock must be held, and
+ * reservation_object_reserve_shared() must have been called first.
+ */
+void reservation_object_add_shared_fence(struct reservation_object *obj,
+					 struct dma_fence *fence)
+{
+	struct reservation_object_list *old, *fobj = obj->staged;
+
+	old = reservation_object_get_list(obj);
+	obj->staged = NULL;
+
+	if (!fobj)
+		reservation_object_add_shared_inplace(obj, old, fence);
+	else
+		reservation_object_add_shared_replace(obj, old, fobj, fence);
+}
+EXPORT_SYMBOL(reservation_object_add_shared_fence);
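+
+/*
+ * Example usage (illustrative sketch, using the reservation_object_lock()/
+ * reservation_object_unlock() helpers from <linux/reservation.h>): reserve a
+ * slot before publishing the fence.
+ *
+ *	reservation_object_lock(obj, NULL);
+ *	ret = reservation_object_reserve_shared(obj);
+ *	if (!ret)
+ *		reservation_object_add_shared_fence(obj, fence);
+ *	reservation_object_unlock(obj);
+ */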
+
+/**
+ * reservation_object_add_excl_fence - Add an exclusive fence.
+ * @obj: the reservation object
+ * @fence: the exclusive fence to add
+ *
+ * Add a fence to the exclusive slot.  The obj->lock must be held.
+ */
+void reservation_object_add_excl_fence(struct reservation_object *obj,
+				       struct dma_fence *fence)
+{
+	struct dma_fence *old_fence = reservation_object_get_excl(obj);
+	struct reservation_object_list *old;
+	u32 i = 0;
+
+	old = reservation_object_get_list(obj);
+	if (old)
+		i = old->shared_count;
+
+	if (fence)
+		dma_fence_get(fence);
+
+	preempt_disable();
+	write_seqcount_begin(&obj->seq);
+	/* write_seqcount_begin provides the necessary memory barrier */
+	RCU_INIT_POINTER(obj->fence_excl, fence);
+	if (old)
+		old->shared_count = 0;
+	write_seqcount_end(&obj->seq);
+	preempt_enable();
+
+	/* inplace update, no shared fences */
+	while (i--)
+		dma_fence_put(rcu_dereference_protected(old->shared[i],
+						reservation_object_held(obj)));
+
+	dma_fence_put(old_fence);
+}
+EXPORT_SYMBOL(reservation_object_add_excl_fence);
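+
+/*
+ * Example usage (illustrative sketch): publishing a new write fence, which
+ * also drops any shared fences currently attached.
+ *
+ *	reservation_object_lock(obj, NULL);
+ *	reservation_object_add_excl_fence(obj, fence);
+ *	reservation_object_unlock(obj);
+ */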
+
+/**
+ * reservation_object_copy_fences - Copy all fences from src to dst.
+ * @dst: the destination reservation object
+ * @src: the source reservation object
+ *
+ * Copy all fences from src to dst. dst->lock must be held.
+ */
+int reservation_object_copy_fences(struct reservation_object *dst,
+				   struct reservation_object *src)
+{
+	struct reservation_object_list *src_list, *dst_list;
+	struct dma_fence *old, *new;
+	size_t size;
+	unsigned i;
+
+	rcu_read_lock();
+	src_list = rcu_dereference(src->fence);
+
+retry:
+	if (src_list) {
+		unsigned shared_count = src_list->shared_count;
+
+		size = offsetof(typeof(*src_list), shared[shared_count]);
+		rcu_read_unlock();
+
+		dst_list = kmalloc(size, GFP_KERNEL);
+		if (!dst_list)
+			return -ENOMEM;
+
+		rcu_read_lock();
+		src_list = rcu_dereference(src->fence);
+		if (!src_list || src_list->shared_count > shared_count) {
+			kfree(dst_list);
+			goto retry;
+		}
+
+		dst_list->shared_count = 0;
+		dst_list->shared_max = shared_count;
+		for (i = 0; i < src_list->shared_count; ++i) {
+			struct dma_fence *fence;
+
+			fence = rcu_dereference(src_list->shared[i]);
+			if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
+				     &fence->flags))
+				continue;
+
+			if (!dma_fence_get_rcu(fence)) {
+				kfree(dst_list);
+				src_list = rcu_dereference(src->fence);
+				goto retry;
+			}
+
+			if (dma_fence_is_signaled(fence)) {
+				dma_fence_put(fence);
+				continue;
+			}
+
+			rcu_assign_pointer(dst_list->shared[dst_list->shared_count++], fence);
+		}
+	} else {
+		dst_list = NULL;
+	}
+
+	new = dma_fence_get_rcu_safe(&src->fence_excl);
+	rcu_read_unlock();
+
+	kfree(dst->staged);
+	dst->staged = NULL;
+
+	src_list = reservation_object_get_list(dst);
+	old = reservation_object_get_excl(dst);
+
+	preempt_disable();
+	write_seqcount_begin(&dst->seq);
+	/* write_seqcount_begin provides the necessary memory barrier */
+	RCU_INIT_POINTER(dst->fence_excl, new);
+	RCU_INIT_POINTER(dst->fence, dst_list);
+	write_seqcount_end(&dst->seq);
+	preempt_enable();
+
+	if (src_list)
+		kfree_rcu(src_list, rcu);
+	dma_fence_put(old);
+
+	return 0;
+}
+EXPORT_SYMBOL(reservation_object_copy_fences);
+
+/**
+ * reservation_object_get_fences_rcu - Get an object's shared and exclusive
+ * fences without update side lock held
+ * @obj: the reservation object
+ * @pfence_excl: the returned exclusive fence (or NULL)
+ * @pshared_count: the number of shared fences returned
+ * @pshared: the array of shared fence ptrs returned (array is krealloc'd to
+ * the required size, and must be freed by caller)
+ *
+ * Retrieve all fences from the reservation object. If the pointer for the
+ * exclusive fence is not specified the fence is put into the array of the
+ * shared fences as well. Returns either zero or -ENOMEM.
+ */
+int reservation_object_get_fences_rcu(struct reservation_object *obj,
+				      struct dma_fence **pfence_excl,
+				      unsigned *pshared_count,
+				      struct dma_fence ***pshared)
+{
+	struct dma_fence **shared = NULL;
+	struct dma_fence *fence_excl;
+	unsigned int shared_count;
+	int ret = 1;
+
+	do {
+		struct reservation_object_list *fobj;
+		unsigned int i, seq;
+		size_t sz = 0;
+
+		shared_count = i = 0;
+
+		rcu_read_lock();
+		seq = read_seqcount_begin(&obj->seq);
+
+		fence_excl = rcu_dereference(obj->fence_excl);
+		if (fence_excl && !dma_fence_get_rcu(fence_excl))
+			goto unlock;
+
+		fobj = rcu_dereference(obj->fence);
+		if (fobj)
+			sz += sizeof(*shared) * fobj->shared_max;
+
+		if (!pfence_excl && fence_excl)
+			sz += sizeof(*shared);
+
+		if (sz) {
+			struct dma_fence **nshared;
+
+			nshared = krealloc(shared, sz,
+					   GFP_NOWAIT | __GFP_NOWARN);
+			if (!nshared) {
+				rcu_read_unlock();
+				nshared = krealloc(shared, sz, GFP_KERNEL);
+				if (nshared) {
+					shared = nshared;
+					continue;
+				}
+
+				ret = -ENOMEM;
+				break;
+			}
+			shared = nshared;
+			shared_count = fobj ? fobj->shared_count : 0;
+			for (i = 0; i < shared_count; ++i) {
+				shared[i] = rcu_dereference(fobj->shared[i]);
+				if (!dma_fence_get_rcu(shared[i]))
+					break;
+			}
+
+			if (!pfence_excl && fence_excl) {
+				shared[i] = fence_excl;
+				fence_excl = NULL;
+				++i;
+				++shared_count;
+			}
+		}
+
+		if (i != shared_count || read_seqcount_retry(&obj->seq, seq)) {
+			while (i--)
+				dma_fence_put(shared[i]);
+			dma_fence_put(fence_excl);
+			goto unlock;
+		}
+
+		ret = 0;
+unlock:
+		rcu_read_unlock();
+	} while (ret);
+
+	if (!shared_count) {
+		kfree(shared);
+		shared = NULL;
+	}
+
+	*pshared_count = shared_count;
+	*pshared = shared;
+	if (pfence_excl)
+		*pfence_excl = fence_excl;
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(reservation_object_get_fences_rcu);
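+
+/*
+ * Example usage (illustrative sketch): the caller owns one reference per
+ * returned fence plus the array itself, and is expected to drop both.
+ *
+ *	struct dma_fence *excl, **shared;
+ *	unsigned int count, i;
+ *	int ret;
+ *
+ *	ret = reservation_object_get_fences_rcu(obj, &excl, &count, &shared);
+ *	if (ret)
+ *		return ret;	// -ENOMEM
+ *
+ *	// ... inspect or wait on excl and shared[0..count-1] ...
+ *
+ *	for (i = 0; i < count; i++)
+ *		dma_fence_put(shared[i]);
+ *	kfree(shared);
+ *	dma_fence_put(excl);	// excl may be NULL, which is a no-op
+ */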
+
+/**
+ * reservation_object_wait_timeout_rcu - Wait on a reservation object's
+ * shared and/or exclusive fences.
+ * @obj: the reservation object
+ * @wait_all: if true, wait on all fences, else wait on just exclusive fence
+ * @intr: if true, do interruptible wait
+ * @timeout: timeout value in jiffies or zero to return immediately
+ *
+ * RETURNS
+ * Returns -ERESTARTSYS if interrupted, 0 if the wait timed out, or
+ * greater than zero on success.
+ */
+long reservation_object_wait_timeout_rcu(struct reservation_object *obj,
+					 bool wait_all, bool intr,
+					 unsigned long timeout)
+{
+	struct dma_fence *fence;
+	unsigned seq, shared_count;
+	long ret = timeout ? timeout : 1;
+	int i;
+
+retry:
+	shared_count = 0;
+	seq = read_seqcount_begin(&obj->seq);
+	rcu_read_lock();
+	i = -1;
+
+	fence = rcu_dereference(obj->fence_excl);
+	if (fence && !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
+		if (!dma_fence_get_rcu(fence))
+			goto unlock_retry;
+
+		if (dma_fence_is_signaled(fence)) {
+			dma_fence_put(fence);
+			fence = NULL;
+		}
+
+	} else {
+		fence = NULL;
+	}
+
+	if (wait_all) {
+		struct reservation_object_list *fobj =
+						rcu_dereference(obj->fence);
+
+		if (fobj)
+			shared_count = fobj->shared_count;
+
+		for (i = 0; !fence && i < shared_count; ++i) {
+			struct dma_fence *lfence = rcu_dereference(fobj->shared[i]);
+
+			if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
+				     &lfence->flags))
+				continue;
+
+			if (!dma_fence_get_rcu(lfence))
+				goto unlock_retry;
+
+			if (dma_fence_is_signaled(lfence)) {
+				dma_fence_put(lfence);
+				continue;
+			}
+
+			fence = lfence;
+			break;
+		}
+	}
+
+	rcu_read_unlock();
+	if (fence) {
+		if (read_seqcount_retry(&obj->seq, seq)) {
+			dma_fence_put(fence);
+			goto retry;
+		}
+
+		ret = dma_fence_wait_timeout(fence, intr, ret);
+		dma_fence_put(fence);
+		if (ret > 0 && wait_all && (i + 1 < shared_count))
+			goto retry;
+	}
+	return ret;
+
+unlock_retry:
+	rcu_read_unlock();
+	goto retry;
+}
+EXPORT_SYMBOL_GPL(reservation_object_wait_timeout_rcu);
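+
+/*
+ * Example usage (illustrative sketch): block for up to one second on every
+ * fence attached to a buffer's reservation object.
+ *
+ *	long ret = reservation_object_wait_timeout_rcu(obj, true, true, HZ);
+ *
+ *	if (ret == 0)
+ *		return -ETIMEDOUT;	// still busy
+ *	if (ret < 0)
+ *		return ret;		// -ERESTARTSYS
+ *	// ret > 0: all fences signaled
+ */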
+
+
+static inline int
+reservation_object_test_signaled_single(struct dma_fence *passed_fence)
+{
+	struct dma_fence *fence, *lfence = passed_fence;
+	int ret = 1;
+
+	if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &lfence->flags)) {
+		fence = dma_fence_get_rcu(lfence);
+		if (!fence)
+			return -1;
+
+		ret = !!dma_fence_is_signaled(fence);
+		dma_fence_put(fence);
+	}
+	return ret;
+}
+
+/**
+ * reservation_object_test_signaled_rcu - Test if a reservation object's
+ * fences have been signaled.
+ * @obj: the reservation object
+ * @test_all: if true, test all fences, otherwise only test the exclusive
+ * fence
+ *
+ * RETURNS
+ * true if all fences signaled, else false
+ */
+bool reservation_object_test_signaled_rcu(struct reservation_object *obj,
+					  bool test_all)
+{
+	unsigned seq, shared_count;
+	int ret;
+
+	rcu_read_lock();
+retry:
+	ret = true;
+	shared_count = 0;
+	seq = read_seqcount_begin(&obj->seq);
+
+	if (test_all) {
+		unsigned i;
+
+		struct reservation_object_list *fobj =
+						rcu_dereference(obj->fence);
+
+		if (fobj)
+			shared_count = fobj->shared_count;
+
+		for (i = 0; i < shared_count; ++i) {
+			struct dma_fence *fence = rcu_dereference(fobj->shared[i]);
+
+			ret = reservation_object_test_signaled_single(fence);
+			if (ret < 0)
+				goto retry;
+			else if (!ret)
+				break;
+		}
+
+		if (read_seqcount_retry(&obj->seq, seq))
+			goto retry;
+	}
+
+	if (!shared_count) {
+		struct dma_fence *fence_excl = rcu_dereference(obj->fence_excl);
+
+		if (fence_excl) {
+			ret = reservation_object_test_signaled_single(
+								fence_excl);
+			if (ret < 0)
+				goto retry;
+
+			if (read_seqcount_retry(&obj->seq, seq))
+				goto retry;
+		}
+	}
+
+	rcu_read_unlock();
+	return ret;
+}
+EXPORT_SYMBOL_GPL(reservation_object_test_signaled_rcu);
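+
+/*
+ * Example usage (illustrative sketch): a non-blocking idle check, e.g. for a
+ * driver's "is this buffer busy?" query.
+ *
+ *	bool idle = reservation_object_test_signaled_rcu(obj, true);
+ */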
diff --git a/drivers/dma-buf/seqno-fence.c b/drivers/dma-buf/seqno-fence.c
new file mode 100644
index 0000000..f47112a
--- /dev/null
+++ b/drivers/dma-buf/seqno-fence.c
@@ -0,0 +1,79 @@
+/*
+ * seqno-fence, using a dma-buf to synchronize fencing
+ *
+ * Copyright (C) 2012 Texas Instruments
+ * Copyright (C) 2012-2014 Canonical Ltd
+ * Authors:
+ *   Rob Clark <robdclark@gmail.com>
+ *   Maarten Lankhorst <maarten.lankhorst@canonical.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ */
+
+#include <linux/slab.h>
+#include <linux/export.h>
+#include <linux/seqno-fence.h>
+
+static const char *seqno_fence_get_driver_name(struct dma_fence *fence)
+{
+	struct seqno_fence *seqno_fence = to_seqno_fence(fence);
+
+	return seqno_fence->ops->get_driver_name(fence);
+}
+
+static const char *seqno_fence_get_timeline_name(struct dma_fence *fence)
+{
+	struct seqno_fence *seqno_fence = to_seqno_fence(fence);
+
+	return seqno_fence->ops->get_timeline_name(fence);
+}
+
+static bool seqno_enable_signaling(struct dma_fence *fence)
+{
+	struct seqno_fence *seqno_fence = to_seqno_fence(fence);
+
+	return seqno_fence->ops->enable_signaling(fence);
+}
+
+static bool seqno_signaled(struct dma_fence *fence)
+{
+	struct seqno_fence *seqno_fence = to_seqno_fence(fence);
+
+	return seqno_fence->ops->signaled && seqno_fence->ops->signaled(fence);
+}
+
+static void seqno_release(struct dma_fence *fence)
+{
+	struct seqno_fence *f = to_seqno_fence(fence);
+
+	dma_buf_put(f->sync_buf);
+	if (f->ops->release)
+		f->ops->release(fence);
+	else
+		dma_fence_free(&f->base);
+}
+
+static signed long seqno_wait(struct dma_fence *fence, bool intr,
+			      signed long timeout)
+{
+	struct seqno_fence *f = to_seqno_fence(fence);
+
+	return f->ops->wait(fence, intr, timeout);
+}
+
+const struct dma_fence_ops seqno_fence_ops = {
+	.get_driver_name = seqno_fence_get_driver_name,
+	.get_timeline_name = seqno_fence_get_timeline_name,
+	.enable_signaling = seqno_enable_signaling,
+	.signaled = seqno_signaled,
+	.wait = seqno_wait,
+	.release = seqno_release,
+};
+EXPORT_SYMBOL(seqno_fence_ops);
diff --git a/drivers/dma-buf/sw_sync.c b/drivers/dma-buf/sw_sync.c
new file mode 100644
index 0000000..53c1d6d
--- /dev/null
+++ b/drivers/dma-buf/sw_sync.c
@@ -0,0 +1,423 @@
+/*
+ * Sync File validation framework
+ *
+ * Copyright (C) 2012 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/file.h>
+#include <linux/fs.h>
+#include <linux/uaccess.h>
+#include <linux/slab.h>
+#include <linux/sync_file.h>
+
+#include "sync_debug.h"
+
+#define CREATE_TRACE_POINTS
+#include "sync_trace.h"
+
+/*
+ * SW SYNC validation framework
+ *
+ * A sync object driver that uses a 32bit counter to coordinate
+ * synchronization.  Useful when there is no hardware primitive backing
+ * the synchronization.
+ *
+ * To start the framework just open:
+ *
+ * <debugfs>/sync/sw_sync
+ *
+ * That will create a sync timeline; all fences created under this timeline
+ * file descriptor will belong to this timeline.
+ *
+ * The 'sw_sync' file can be opened many times to create different
+ * timelines.
+ *
+ * Fences can be created with SW_SYNC_IOC_CREATE_FENCE ioctl with struct
+ * sw_sync_create_fence_data as parameter.
+ *
+ * To increment the timeline counter, SW_SYNC_IOC_INC ioctl should be used
+ * with the increment as u32. This will update the last signaled value
+ * from the timeline and signal any fence that has a seqno smaller or equal
+ * to it.
+ *
+ * struct sw_sync_create_fence_data
+ * @value:	the seqno to initialise the fence with
+ * @name:	the name of the new sync point
+ * @fence:	return the fd of the new sync_file with the created fence
+ */
+struct sw_sync_create_fence_data {
+	__u32	value;
+	char	name[32];
+	__s32	fence; /* fd of new fence */
+};
+
+#define SW_SYNC_IOC_MAGIC	'W'
+
+#define SW_SYNC_IOC_CREATE_FENCE	_IOWR(SW_SYNC_IOC_MAGIC, 0,\
+		struct sw_sync_create_fence_data)
+
+#define SW_SYNC_IOC_INC			_IOW(SW_SYNC_IOC_MAGIC, 1, __u32)
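+
+/*
+ * Example userspace usage (illustrative sketch): userspace needs its own
+ * copy of the struct and ioctl definitions above, and this assumes debugfs
+ * is mounted at /sys/kernel/debug.
+ *
+ *	int tl = open("/sys/kernel/debug/sync/sw_sync", O_RDWR);
+ *	struct sw_sync_create_fence_data data = { .value = 1, .name = "pt1" };
+ *	__u32 inc = 1;
+ *
+ *	ioctl(tl, SW_SYNC_IOC_CREATE_FENCE, &data);	// data.fence: sync_file fd
+ *	ioctl(tl, SW_SYNC_IOC_INC, &inc);		// signals the fence above
+ */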
+
+static const struct dma_fence_ops timeline_fence_ops;
+
+static inline struct sync_pt *dma_fence_to_sync_pt(struct dma_fence *fence)
+{
+	if (fence->ops != &timeline_fence_ops)
+		return NULL;
+	return container_of(fence, struct sync_pt, base);
+}
+
+/**
+ * sync_timeline_create() - creates a sync object
+ * @name:	sync_timeline name
+ *
+ * Creates a new sync_timeline. Returns the sync_timeline object or NULL in
+ * case of error.
+ */
+static struct sync_timeline *sync_timeline_create(const char *name)
+{
+	struct sync_timeline *obj;
+
+	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
+	if (!obj)
+		return NULL;
+
+	kref_init(&obj->kref);
+	obj->context = dma_fence_context_alloc(1);
+	strlcpy(obj->name, name, sizeof(obj->name));
+
+	obj->pt_tree = RB_ROOT;
+	INIT_LIST_HEAD(&obj->pt_list);
+	spin_lock_init(&obj->lock);
+
+	sync_timeline_debug_add(obj);
+
+	return obj;
+}
+
+static void sync_timeline_free(struct kref *kref)
+{
+	struct sync_timeline *obj =
+		container_of(kref, struct sync_timeline, kref);
+
+	sync_timeline_debug_remove(obj);
+
+	kfree(obj);
+}
+
+static void sync_timeline_get(struct sync_timeline *obj)
+{
+	kref_get(&obj->kref);
+}
+
+static void sync_timeline_put(struct sync_timeline *obj)
+{
+	kref_put(&obj->kref, sync_timeline_free);
+}
+
+static const char *timeline_fence_get_driver_name(struct dma_fence *fence)
+{
+	return "sw_sync";
+}
+
+static const char *timeline_fence_get_timeline_name(struct dma_fence *fence)
+{
+	struct sync_timeline *parent = dma_fence_parent(fence);
+
+	return parent->name;
+}
+
+static void timeline_fence_release(struct dma_fence *fence)
+{
+	struct sync_pt *pt = dma_fence_to_sync_pt(fence);
+	struct sync_timeline *parent = dma_fence_parent(fence);
+
+	if (!list_empty(&pt->link)) {
+		unsigned long flags;
+
+		spin_lock_irqsave(fence->lock, flags);
+		if (!list_empty(&pt->link)) {
+			list_del(&pt->link);
+			rb_erase(&pt->node, &parent->pt_tree);
+		}
+		spin_unlock_irqrestore(fence->lock, flags);
+	}
+
+	sync_timeline_put(parent);
+	dma_fence_free(fence);
+}
+
+static bool timeline_fence_signaled(struct dma_fence *fence)
+{
+	struct sync_timeline *parent = dma_fence_parent(fence);
+
+	return !__dma_fence_is_later(fence->seqno, parent->value);
+}
+
+static bool timeline_fence_enable_signaling(struct dma_fence *fence)
+{
+	return true;
+}
+
+static void timeline_fence_value_str(struct dma_fence *fence,
+				    char *str, int size)
+{
+	snprintf(str, size, "%d", fence->seqno);
+}
+
+static void timeline_fence_timeline_value_str(struct dma_fence *fence,
+					     char *str, int size)
+{
+	struct sync_timeline *parent = dma_fence_parent(fence);
+
+	snprintf(str, size, "%d", parent->value);
+}
+
+static const struct dma_fence_ops timeline_fence_ops = {
+	.get_driver_name = timeline_fence_get_driver_name,
+	.get_timeline_name = timeline_fence_get_timeline_name,
+	.enable_signaling = timeline_fence_enable_signaling,
+	.signaled = timeline_fence_signaled,
+	.release = timeline_fence_release,
+	.fence_value_str = timeline_fence_value_str,
+	.timeline_value_str = timeline_fence_timeline_value_str,
+};
+
+/**
+ * sync_timeline_signal() - signal a status change on a sync_timeline
+ * @obj:	sync_timeline to signal
+ * @inc:	num to increment on timeline->value
+ *
+ * A sync implementation should call this any time one of its fences
+ * has signaled or has an error condition.
+ */
+static void sync_timeline_signal(struct sync_timeline *obj, unsigned int inc)
+{
+	struct sync_pt *pt, *next;
+
+	trace_sync_timeline(obj);
+
+	spin_lock_irq(&obj->lock);
+
+	obj->value += inc;
+
+	list_for_each_entry_safe(pt, next, &obj->pt_list, link) {
+		if (!timeline_fence_signaled(&pt->base))
+			break;
+
+		list_del_init(&pt->link);
+		rb_erase(&pt->node, &obj->pt_tree);
+
+		/*
+		 * A signal callback may release the last reference to this
+		 * fence, causing it to be freed. That operation has to be
+		 * last to avoid a use after free inside this loop, and must
+		 * be after we remove the fence from the timeline in order to
+		 * prevent deadlocking on timeline->lock inside
+		 * timeline_fence_release().
+		 */
+		dma_fence_signal_locked(&pt->base);
+	}
+
+	spin_unlock_irq(&obj->lock);
+}
+
+/**
+ * sync_pt_create() - creates a sync pt
+ * @obj:	parent sync_timeline
+ * @value:	value of the fence
+ *
+ * Creates a new sync_pt (fence) as a child of @obj with seqno @value.
+ * Returns the sync_pt object or NULL in case of error.
+ */
+static struct sync_pt *sync_pt_create(struct sync_timeline *obj,
+				      unsigned int value)
+{
+	struct sync_pt *pt;
+
+	pt = kzalloc(sizeof(*pt), GFP_KERNEL);
+	if (!pt)
+		return NULL;
+
+	sync_timeline_get(obj);
+	dma_fence_init(&pt->base, &timeline_fence_ops, &obj->lock,
+		       obj->context, value);
+	INIT_LIST_HEAD(&pt->link);
+
+	spin_lock_irq(&obj->lock);
+	if (!dma_fence_is_signaled_locked(&pt->base)) {
+		struct rb_node **p = &obj->pt_tree.rb_node;
+		struct rb_node *parent = NULL;
+
+		while (*p) {
+			struct sync_pt *other;
+			int cmp;
+
+			parent = *p;
+			other = rb_entry(parent, typeof(*pt), node);
+			cmp = value - other->base.seqno;
+			if (cmp > 0) {
+				p = &parent->rb_right;
+			} else if (cmp < 0) {
+				p = &parent->rb_left;
+			} else {
+				if (dma_fence_get_rcu(&other->base)) {
+					dma_fence_put(&pt->base);
+					pt = other;
+					goto unlock;
+				}
+				p = &parent->rb_left;
+			}
+		}
+		rb_link_node(&pt->node, parent, p);
+		rb_insert_color(&pt->node, &obj->pt_tree);
+
+		parent = rb_next(&pt->node);
+		list_add_tail(&pt->link,
+			      parent ? &rb_entry(parent, typeof(*pt), node)->link : &obj->pt_list);
+	}
+unlock:
+	spin_unlock_irq(&obj->lock);
+
+	return pt;
+}
+
+/*
+ * *WARNING*
+ *
+ * improper use of this can result in deadlocking kernel drivers from userspace.
+ */
+
+/* opening sw_sync creates a new sync obj */
+static int sw_sync_debugfs_open(struct inode *inode, struct file *file)
+{
+	struct sync_timeline *obj;
+	char task_comm[TASK_COMM_LEN];
+
+	get_task_comm(task_comm, current);
+
+	obj = sync_timeline_create(task_comm);
+	if (!obj)
+		return -ENOMEM;
+
+	file->private_data = obj;
+
+	return 0;
+}
+
+static int sw_sync_debugfs_release(struct inode *inode, struct file *file)
+{
+	struct sync_timeline *obj = file->private_data;
+	struct sync_pt *pt, *next;
+
+	spin_lock_irq(&obj->lock);
+
+	list_for_each_entry_safe(pt, next, &obj->pt_list, link) {
+		dma_fence_set_error(&pt->base, -ENOENT);
+		dma_fence_signal_locked(&pt->base);
+	}
+
+	spin_unlock_irq(&obj->lock);
+
+	sync_timeline_put(obj);
+	return 0;
+}
+
+static long sw_sync_ioctl_create_fence(struct sync_timeline *obj,
+				       unsigned long arg)
+{
+	int fd = get_unused_fd_flags(O_CLOEXEC);
+	int err;
+	struct sync_pt *pt;
+	struct sync_file *sync_file;
+	struct sw_sync_create_fence_data data;
+
+	if (fd < 0)
+		return fd;
+
+	if (copy_from_user(&data, (void __user *)arg, sizeof(data))) {
+		err = -EFAULT;
+		goto err;
+	}
+
+	pt = sync_pt_create(obj, data.value);
+	if (!pt) {
+		err = -ENOMEM;
+		goto err;
+	}
+
+	sync_file = sync_file_create(&pt->base);
+	dma_fence_put(&pt->base);
+	if (!sync_file) {
+		err = -ENOMEM;
+		goto err;
+	}
+
+	data.fence = fd;
+	if (copy_to_user((void __user *)arg, &data, sizeof(data))) {
+		fput(sync_file->file);
+		err = -EFAULT;
+		goto err;
+	}
+
+	fd_install(fd, sync_file->file);
+
+	return 0;
+
+err:
+	put_unused_fd(fd);
+	return err;
+}
+
+static long sw_sync_ioctl_inc(struct sync_timeline *obj, unsigned long arg)
+{
+	u32 value;
+
+	if (copy_from_user(&value, (void __user *)arg, sizeof(value)))
+		return -EFAULT;
+
+	while (value > INT_MAX)  {
+		sync_timeline_signal(obj, INT_MAX);
+		value -= INT_MAX;
+	}
+
+	sync_timeline_signal(obj, value);
+
+	return 0;
+}
+
+static long sw_sync_ioctl(struct file *file, unsigned int cmd,
+			  unsigned long arg)
+{
+	struct sync_timeline *obj = file->private_data;
+
+	switch (cmd) {
+	case SW_SYNC_IOC_CREATE_FENCE:
+		return sw_sync_ioctl_create_fence(obj, arg);
+
+	case SW_SYNC_IOC_INC:
+		return sw_sync_ioctl_inc(obj, arg);
+
+	default:
+		return -ENOTTY;
+	}
+}
+
+const struct file_operations sw_sync_debugfs_fops = {
+	.open           = sw_sync_debugfs_open,
+	.release        = sw_sync_debugfs_release,
+	.unlocked_ioctl = sw_sync_ioctl,
+	.compat_ioctl	= sw_sync_ioctl,
+};
diff --git a/drivers/dma-buf/sync_debug.c b/drivers/dma-buf/sync_debug.c
new file mode 100644
index 0000000..c4c8ecb
--- /dev/null
+++ b/drivers/dma-buf/sync_debug.c
@@ -0,0 +1,235 @@
+/*
+ * Sync File validation framework and debug information
+ *
+ * Copyright (C) 2012 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/debugfs.h>
+#include "sync_debug.h"
+
+static struct dentry *dbgfs;
+
+static LIST_HEAD(sync_timeline_list_head);
+static DEFINE_SPINLOCK(sync_timeline_list_lock);
+static LIST_HEAD(sync_file_list_head);
+static DEFINE_SPINLOCK(sync_file_list_lock);
+
+void sync_timeline_debug_add(struct sync_timeline *obj)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&sync_timeline_list_lock, flags);
+	list_add_tail(&obj->sync_timeline_list, &sync_timeline_list_head);
+	spin_unlock_irqrestore(&sync_timeline_list_lock, flags);
+}
+
+void sync_timeline_debug_remove(struct sync_timeline *obj)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&sync_timeline_list_lock, flags);
+	list_del(&obj->sync_timeline_list);
+	spin_unlock_irqrestore(&sync_timeline_list_lock, flags);
+}
+
+void sync_file_debug_add(struct sync_file *sync_file)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&sync_file_list_lock, flags);
+	list_add_tail(&sync_file->sync_file_list, &sync_file_list_head);
+	spin_unlock_irqrestore(&sync_file_list_lock, flags);
+}
+
+void sync_file_debug_remove(struct sync_file *sync_file)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&sync_file_list_lock, flags);
+	list_del(&sync_file->sync_file_list);
+	spin_unlock_irqrestore(&sync_file_list_lock, flags);
+}
+
+static const char *sync_status_str(int status)
+{
+	if (status < 0)
+		return "error";
+
+	if (status > 0)
+		return "signaled";
+
+	return "active";
+}
+
+static void sync_print_fence(struct seq_file *s,
+			     struct dma_fence *fence, bool show)
+{
+	struct sync_timeline *parent = dma_fence_parent(fence);
+	int status;
+
+	status = dma_fence_get_status_locked(fence);
+
+	seq_printf(s, "  %s%sfence %s",
+		   show ? parent->name : "",
+		   show ? "_" : "",
+		   sync_status_str(status));
+
+	if (test_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &fence->flags)) {
+		struct timespec64 ts64 =
+			ktime_to_timespec64(fence->timestamp);
+
+		seq_printf(s, "@%lld.%09ld", (s64)ts64.tv_sec, ts64.tv_nsec);
+	}
+
+	if (fence->ops->timeline_value_str &&
+		fence->ops->fence_value_str) {
+		char value[64];
+		bool success;
+
+		fence->ops->fence_value_str(fence, value, sizeof(value));
+		success = strlen(value);
+
+		if (success) {
+			seq_printf(s, ": %s", value);
+
+			fence->ops->timeline_value_str(fence, value,
+						       sizeof(value));
+
+			if (strlen(value))
+				seq_printf(s, " / %s", value);
+		}
+	}
+
+	seq_putc(s, '\n');
+}
+
+static void sync_print_obj(struct seq_file *s, struct sync_timeline *obj)
+{
+	struct list_head *pos;
+
+	seq_printf(s, "%s: %d\n", obj->name, obj->value);
+
+	spin_lock_irq(&obj->lock);
+	list_for_each(pos, &obj->pt_list) {
+		struct sync_pt *pt = container_of(pos, struct sync_pt, link);
+		sync_print_fence(s, &pt->base, false);
+	}
+	spin_unlock_irq(&obj->lock);
+}
+
+static void sync_print_sync_file(struct seq_file *s,
+				  struct sync_file *sync_file)
+{
+	char buf[128];
+	int i;
+
+	seq_printf(s, "[%p] %s: %s\n", sync_file,
+		   sync_file_get_name(sync_file, buf, sizeof(buf)),
+		   sync_status_str(dma_fence_get_status(sync_file->fence)));
+
+	if (dma_fence_is_array(sync_file->fence)) {
+		struct dma_fence_array *array = to_dma_fence_array(sync_file->fence);
+
+		for (i = 0; i < array->num_fences; ++i)
+			sync_print_fence(s, array->fences[i], true);
+	} else {
+		sync_print_fence(s, sync_file->fence, true);
+	}
+}
+
+static int sync_debugfs_show(struct seq_file *s, void *unused)
+{
+	struct list_head *pos;
+
+	seq_puts(s, "objs:\n--------------\n");
+
+	spin_lock_irq(&sync_timeline_list_lock);
+	list_for_each(pos, &sync_timeline_list_head) {
+		struct sync_timeline *obj =
+			container_of(pos, struct sync_timeline,
+				     sync_timeline_list);
+
+		sync_print_obj(s, obj);
+		seq_putc(s, '\n');
+	}
+	spin_unlock_irq(&sync_timeline_list_lock);
+
+	seq_puts(s, "fences:\n--------------\n");
+
+	spin_lock_irq(&sync_file_list_lock);
+	list_for_each(pos, &sync_file_list_head) {
+		struct sync_file *sync_file =
+			container_of(pos, struct sync_file, sync_file_list);
+
+		sync_print_sync_file(s, sync_file);
+		seq_putc(s, '\n');
+	}
+	spin_unlock_irq(&sync_file_list_lock);
+	return 0;
+}
+
+static int sync_info_debugfs_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, sync_debugfs_show, inode->i_private);
+}
+
+static const struct file_operations sync_info_debugfs_fops = {
+	.open           = sync_info_debugfs_open,
+	.read           = seq_read,
+	.llseek         = seq_lseek,
+	.release        = single_release,
+};
+
+static __init int sync_debugfs_init(void)
+{
+	dbgfs = debugfs_create_dir("sync", NULL);
+
+	/*
+	 * The debugfs files won't ever get removed and thus, there is
+	 * no need to protect them against removal races. The use of
+	 * debugfs_create_file_unsafe() is actually safe here.
+	 */
+	debugfs_create_file_unsafe("info", 0444, dbgfs, NULL,
+				   &sync_info_debugfs_fops);
+	debugfs_create_file_unsafe("sw_sync", 0644, dbgfs, NULL,
+				   &sw_sync_debugfs_fops);
+
+	return 0;
+}
+late_initcall(sync_debugfs_init);
+
+#define DUMP_CHUNK 256
+static char sync_dump_buf[64 * 1024];
+void sync_dump(void)
+{
+	struct seq_file s = {
+		.buf = sync_dump_buf,
+		.size = sizeof(sync_dump_buf) - 1,
+	};
+	int i;
+
+	sync_debugfs_show(&s, NULL);
+
+	for (i = 0; i < s.count; i += DUMP_CHUNK) {
+		if ((s.count - i) > DUMP_CHUNK) {
+			char c = s.buf[i + DUMP_CHUNK];
+
+			s.buf[i + DUMP_CHUNK] = 0;
+			pr_cont("%s", s.buf + i);
+			s.buf[i + DUMP_CHUNK] = c;
+		} else {
+			s.buf[s.count] = 0;
+			pr_cont("%s", s.buf + i);
+		}
+	}
+}
diff --git a/drivers/dma-buf/sync_debug.h b/drivers/dma-buf/sync_debug.h
new file mode 100644
index 0000000..05e33f9
--- /dev/null
+++ b/drivers/dma-buf/sync_debug.h
@@ -0,0 +1,73 @@
+/*
+ * Sync File validation framework and debug information
+ *
+ * Copyright (C) 2012 Google, Inc.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _LINUX_SYNC_H
+#define _LINUX_SYNC_H
+
+#include <linux/list.h>
+#include <linux/rbtree.h>
+#include <linux/spinlock.h>
+#include <linux/dma-fence.h>
+
+#include <linux/sync_file.h>
+#include <uapi/linux/sync_file.h>
+
+/**
+ * struct sync_timeline - sync object
+ * @kref:		reference count on the sync_timeline.
+ * @name:		name of the sync_timeline. Useful for debugging
+ * @lock:		lock protecting @pt_list and @value
+ * @pt_tree:		rbtree of active (unsignaled/errored) sync_pts
+ * @pt_list:		list of active (unsignaled/errored) sync_pts
+ * @sync_timeline_list:	membership in global sync_timeline_list
+ */
+struct sync_timeline {
+	struct kref		kref;
+	char			name[32];
+
+	/* protected by lock */
+	u64			context;
+	int			value;
+
+	struct rb_root		pt_tree;
+	struct list_head	pt_list;
+	spinlock_t		lock;
+
+	struct list_head	sync_timeline_list;
+};
+
+static inline struct sync_timeline *dma_fence_parent(struct dma_fence *fence)
+{
+	return container_of(fence->lock, struct sync_timeline, lock);
+}
+
+/**
+ * struct sync_pt - sync_pt object
+ * @base: base fence object
+ * @link: link on the sync timeline's list
+ * @node: node in the sync timeline's tree
+ */
+struct sync_pt {
+	struct dma_fence base;
+	struct list_head link;
+	struct rb_node node;
+};
+
+extern const struct file_operations sw_sync_debugfs_fops;
+
+void sync_timeline_debug_add(struct sync_timeline *obj);
+void sync_timeline_debug_remove(struct sync_timeline *obj);
+void sync_file_debug_add(struct sync_file *fence);
+void sync_file_debug_remove(struct sync_file *fence);
+void sync_dump(void);
+
+#endif /* _LINUX_SYNC_H */
diff --git a/drivers/dma-buf/sync_file.c b/drivers/dma-buf/sync_file.c
new file mode 100644
index 0000000..35dd064
--- /dev/null
+++ b/drivers/dma-buf/sync_file.c
@@ -0,0 +1,492 @@
+/*
+ * drivers/dma-buf/sync_file.c
+ *
+ * Copyright (C) 2012 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/export.h>
+#include <linux/file.h>
+#include <linux/fs.h>
+#include <linux/kernel.h>
+#include <linux/poll.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <linux/anon_inodes.h>
+#include <linux/sync_file.h>
+#include <uapi/linux/sync_file.h>
+
+static const struct file_operations sync_file_fops;
+
+static struct sync_file *sync_file_alloc(void)
+{
+	struct sync_file *sync_file;
+
+	sync_file = kzalloc(sizeof(*sync_file), GFP_KERNEL);
+	if (!sync_file)
+		return NULL;
+
+	sync_file->file = anon_inode_getfile("sync_file", &sync_file_fops,
+					     sync_file, 0);
+	if (IS_ERR(sync_file->file))
+		goto err;
+
+	init_waitqueue_head(&sync_file->wq);
+
+	INIT_LIST_HEAD(&sync_file->cb.node);
+
+	return sync_file;
+
+err:
+	kfree(sync_file);
+	return NULL;
+}
+
+static void fence_check_cb_func(struct dma_fence *f, struct dma_fence_cb *cb)
+{
+	struct sync_file *sync_file;
+
+	sync_file = container_of(cb, struct sync_file, cb);
+
+	wake_up_all(&sync_file->wq);
+}
+
+/**
+ * sync_file_create() - creates a sync file
+ * @fence:	fence to add to the sync_file
+ *
+ * Creates a sync_file containing @fence. This function acquires an additional
+ * reference of @fence for the newly-created &sync_file, if it succeeds. The
+ * sync_file can be released with fput(sync_file->file). Returns the
+ * sync_file or NULL in case of error.
+ */
+struct sync_file *sync_file_create(struct dma_fence *fence)
+{
+	struct sync_file *sync_file;
+
+	sync_file = sync_file_alloc();
+	if (!sync_file)
+		return NULL;
+
+	sync_file->fence = dma_fence_get(fence);
+
+	return sync_file;
+}
+EXPORT_SYMBOL(sync_file_create);
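+
+/*
+ * Example usage (illustrative sketch, mirroring sw_sync_ioctl_create_fence()):
+ * exporting a driver fence to userspace as a sync_file fd.
+ *
+ *	struct sync_file *sync_file;
+ *	int fd = get_unused_fd_flags(O_CLOEXEC);
+ *
+ *	if (fd < 0)
+ *		return fd;
+ *	sync_file = sync_file_create(fence);
+ *	if (!sync_file) {
+ *		put_unused_fd(fd);
+ *		return -ENOMEM;
+ *	}
+ *	fd_install(fd, sync_file->file);	// the fd now owns the sync_file
+ */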
+
+static struct sync_file *sync_file_fdget(int fd)
+{
+	struct file *file = fget(fd);
+
+	if (!file)
+		return NULL;
+
+	if (file->f_op != &sync_file_fops)
+		goto err;
+
+	return file->private_data;
+
+err:
+	fput(file);
+	return NULL;
+}
+
+/**
+ * sync_file_get_fence - get the fence related to the sync_file fd
+ * @fd:		sync_file fd to get the fence from
+ *
+ * Ensures @fd references a valid sync_file and returns a fence that
+ * represents all fences in the sync_file. On error NULL is returned.
+ */
+struct dma_fence *sync_file_get_fence(int fd)
+{
+	struct sync_file *sync_file;
+	struct dma_fence *fence;
+
+	sync_file = sync_file_fdget(fd);
+	if (!sync_file)
+		return NULL;
+
+	fence = dma_fence_get(sync_file->fence);
+	fput(sync_file->file);
+
+	return fence;
+}
+EXPORT_SYMBOL(sync_file_get_fence);
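+
+/*
+ * Example usage (illustrative sketch): importing an in-fence from a
+ * userspace-supplied fd and waiting on it before touching the buffer.
+ *
+ *	struct dma_fence *in_fence = sync_file_get_fence(fd);
+ *	long ret;
+ *
+ *	if (!in_fence)
+ *		return -EINVAL;
+ *	ret = dma_fence_wait(in_fence, true);	// interruptible wait
+ *	dma_fence_put(in_fence);
+ *	if (ret)
+ *		return ret;
+ */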
+
+/**
+ * sync_file_get_name - get the name of the sync_file
+ * @sync_file:		sync_file to get the fence from
+ * @buf:		destination buffer to copy sync_file name into
+ * @len:		available size of destination buffer.
+ *
+ * Each sync_file may have a name assigned either by the user (when merging
+ * sync_files together) or created from the fence it contains. In the latter
+ * case construction of the name is deferred until use, and so requires
+ * sync_file_get_name().
+ *
+ * Returns: a string representing the name.
+ */
+char *sync_file_get_name(struct sync_file *sync_file, char *buf, int len)
+{
+	if (sync_file->user_name[0]) {
+		strlcpy(buf, sync_file->user_name, len);
+	} else {
+		struct dma_fence *fence = sync_file->fence;
+
+		snprintf(buf, len, "%s-%s%llu-%d",
+			 fence->ops->get_driver_name(fence),
+			 fence->ops->get_timeline_name(fence),
+			 fence->context,
+			 fence->seqno);
+	}
+
+	return buf;
+}
+
+static int sync_file_set_fence(struct sync_file *sync_file,
+			       struct dma_fence **fences, int num_fences)
+{
+	struct dma_fence_array *array;
+
+	/*
+	 * The references for the fences in the new sync_file are taken
+	 * in add_fence() during the merge procedure, so for num_fences == 1
+	 * we already own a new reference to the fence. For num_fences > 1
+	 * we own the reference of the dma_fence_array creation.
+	 */
+	if (num_fences == 1) {
+		sync_file->fence = fences[0];
+		kfree(fences);
+	} else {
+		array = dma_fence_array_create(num_fences, fences,
+					       dma_fence_context_alloc(1),
+					       1, false);
+		if (!array)
+			return -ENOMEM;
+
+		sync_file->fence = &array->base;
+	}
+
+	return 0;
+}
+
+static struct dma_fence **get_fences(struct sync_file *sync_file,
+				     int *num_fences)
+{
+	if (dma_fence_is_array(sync_file->fence)) {
+		struct dma_fence_array *array = to_dma_fence_array(sync_file->fence);
+
+		*num_fences = array->num_fences;
+		return array->fences;
+	}
+
+	*num_fences = 1;
+	return &sync_file->fence;
+}
+
+static void add_fence(struct dma_fence **fences,
+		      int *i, struct dma_fence *fence)
+{
+	fences[*i] = fence;
+
+	if (!dma_fence_is_signaled(fence)) {
+		dma_fence_get(fence);
+		(*i)++;
+	}
+}
+
+/**
+ * sync_file_merge() - merge two sync_files
+ * @name:	name of new fence
+ * @a:		sync_file a
+ * @b:		sync_file b
+ *
+ * Creates a new sync_file which contains copies of all the fences in both
+ * @a and @b.  @a and @b remain valid, independent sync_files. Returns the
+ * new merged sync_file or NULL in case of error.
+ */
+static struct sync_file *sync_file_merge(const char *name, struct sync_file *a,
+					 struct sync_file *b)
+{
+	struct sync_file *sync_file;
+	struct dma_fence **fences, **nfences, **a_fences, **b_fences;
+	int i, i_a, i_b, num_fences, a_num_fences, b_num_fences;
+
+	sync_file = sync_file_alloc();
+	if (!sync_file)
+		return NULL;
+
+	a_fences = get_fences(a, &a_num_fences);
+	b_fences = get_fences(b, &b_num_fences);
+	if (a_num_fences > INT_MAX - b_num_fences)
+		return NULL;
+
+	num_fences = a_num_fences + b_num_fences;
+
+	fences = kcalloc(num_fences, sizeof(*fences), GFP_KERNEL);
+	if (!fences)
+		goto err;
+
+	/*
+	 * Assume sync_file a and b are both ordered and have no
+	 * duplicates with the same context.
+	 *
+	 * If a sync_file can only be created with sync_file_merge
+	 * and sync_file_create, this is a reasonable assumption.
+	 */
+	for (i = i_a = i_b = 0; i_a < a_num_fences && i_b < b_num_fences; ) {
+		struct dma_fence *pt_a = a_fences[i_a];
+		struct dma_fence *pt_b = b_fences[i_b];
+
+		if (pt_a->context < pt_b->context) {
+			add_fence(fences, &i, pt_a);
+
+			i_a++;
+		} else if (pt_a->context > pt_b->context) {
+			add_fence(fences, &i, pt_b);
+
+			i_b++;
+		} else {
+			if (pt_a->seqno - pt_b->seqno <= INT_MAX)
+				add_fence(fences, &i, pt_a);
+			else
+				add_fence(fences, &i, pt_b);
+
+			i_a++;
+			i_b++;
+		}
+	}
+
+	for (; i_a < a_num_fences; i_a++)
+		add_fence(fences, &i, a_fences[i_a]);
+
+	for (; i_b < b_num_fences; i_b++)
+		add_fence(fences, &i, b_fences[i_b]);
+
+	if (i == 0)
+		fences[i++] = dma_fence_get(a_fences[0]);
+
+	if (num_fences > i) {
+		nfences = krealloc(fences, i * sizeof(*fences),
+				  GFP_KERNEL);
+		if (!nfences)
+			goto err;
+
+		fences = nfences;
+	}
+
+	if (sync_file_set_fence(sync_file, fences, i) < 0) {
+		kfree(fences);
+		goto err;
+	}
+
+	strlcpy(sync_file->user_name, name, sizeof(sync_file->user_name));
+	return sync_file;
+
+err:
+	fput(sync_file->file);
+	return NULL;
+
+}
+
+static int sync_file_release(struct inode *inode, struct file *file)
+{
+	struct sync_file *sync_file = file->private_data;
+
+	if (test_bit(POLL_ENABLED, &sync_file->flags))
+		dma_fence_remove_callback(sync_file->fence, &sync_file->cb);
+	dma_fence_put(sync_file->fence);
+	kfree(sync_file);
+
+	return 0;
+}
+
+static __poll_t sync_file_poll(struct file *file, poll_table *wait)
+{
+	struct sync_file *sync_file = file->private_data;
+
+	poll_wait(file, &sync_file->wq, wait);
+
+	if (list_empty(&sync_file->cb.node) &&
+	    !test_and_set_bit(POLL_ENABLED, &sync_file->flags)) {
+		if (dma_fence_add_callback(sync_file->fence, &sync_file->cb,
+					   fence_check_cb_func) < 0)
+			wake_up_all(&sync_file->wq);
+	}
+
+	return dma_fence_is_signaled(sync_file->fence) ? EPOLLIN : 0;
+}
+
+static long sync_file_ioctl_merge(struct sync_file *sync_file,
+				  unsigned long arg)
+{
+	int fd = get_unused_fd_flags(O_CLOEXEC);
+	int err;
+	struct sync_file *fence2, *fence3;
+	struct sync_merge_data data;
+
+	if (fd < 0)
+		return fd;
+
+	if (copy_from_user(&data, (void __user *)arg, sizeof(data))) {
+		err = -EFAULT;
+		goto err_put_fd;
+	}
+
+	if (data.flags || data.pad) {
+		err = -EINVAL;
+		goto err_put_fd;
+	}
+
+	fence2 = sync_file_fdget(data.fd2);
+	if (!fence2) {
+		err = -ENOENT;
+		goto err_put_fd;
+	}
+
+	data.name[sizeof(data.name) - 1] = '\0';
+	fence3 = sync_file_merge(data.name, sync_file, fence2);
+	if (!fence3) {
+		err = -ENOMEM;
+		goto err_put_fence2;
+	}
+
+	data.fence = fd;
+	if (copy_to_user((void __user *)arg, &data, sizeof(data))) {
+		err = -EFAULT;
+		goto err_put_fence3;
+	}
+
+	fd_install(fd, fence3->file);
+	fput(fence2->file);
+	return 0;
+
+err_put_fence3:
+	fput(fence3->file);
+
+err_put_fence2:
+	fput(fence2->file);
+
+err_put_fd:
+	put_unused_fd(fd);
+	return err;
+}
+
+static int sync_fill_fence_info(struct dma_fence *fence,
+				 struct sync_fence_info *info)
+{
+	strlcpy(info->obj_name, fence->ops->get_timeline_name(fence),
+		sizeof(info->obj_name));
+	strlcpy(info->driver_name, fence->ops->get_driver_name(fence),
+		sizeof(info->driver_name));
+
+	info->status = dma_fence_get_status(fence);
+	while (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags) &&
+	       !test_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &fence->flags))
+		cpu_relax();
+	info->timestamp_ns =
+		test_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &fence->flags) ?
+		ktime_to_ns(fence->timestamp) :
+		ktime_set(0, 0);
+
+	return info->status;
+}
+
+static long sync_file_ioctl_fence_info(struct sync_file *sync_file,
+				       unsigned long arg)
+{
+	struct sync_file_info info;
+	struct sync_fence_info *fence_info = NULL;
+	struct dma_fence **fences;
+	__u32 size;
+	int num_fences, ret, i;
+
+	if (copy_from_user(&info, (void __user *)arg, sizeof(info)))
+		return -EFAULT;
+
+	if (info.flags || info.pad)
+		return -EINVAL;
+
+	fences = get_fences(sync_file, &num_fences);
+
+	/*
+	 * Passing num_fences = 0 means that userspace doesn't want to
+	 * retrieve any sync_fence_info. If num_fences = 0 we skip filling
+	 * sync_fence_info and return the actual number of fences on
+	 * info->num_fences.
+	 */
+	if (!info.num_fences) {
+		info.status = dma_fence_is_signaled(sync_file->fence);
+		goto no_fences;
+	} else {
+		info.status = 1;
+	}
+
+	if (info.num_fences < num_fences)
+		return -EINVAL;
+
+	size = num_fences * sizeof(*fence_info);
+	fence_info = kzalloc(size, GFP_KERNEL);
+	if (!fence_info)
+		return -ENOMEM;
+
+	for (i = 0; i < num_fences; i++) {
+		int status = sync_fill_fence_info(fences[i], &fence_info[i]);
+		info.status = info.status <= 0 ? info.status : status;
+	}
+
+	if (copy_to_user(u64_to_user_ptr(info.sync_fence_info), fence_info,
+			 size)) {
+		ret = -EFAULT;
+		goto out;
+	}
+
+no_fences:
+	sync_file_get_name(sync_file, info.name, sizeof(info.name));
+	info.num_fences = num_fences;
+
+	if (copy_to_user((void __user *)arg, &info, sizeof(info)))
+		ret = -EFAULT;
+	else
+		ret = 0;
+
+out:
+	kfree(fence_info);
+
+	return ret;
+}
+
+static long sync_file_ioctl(struct file *file, unsigned int cmd,
+			    unsigned long arg)
+{
+	struct sync_file *sync_file = file->private_data;
+
+	switch (cmd) {
+	case SYNC_IOC_MERGE:
+		return sync_file_ioctl_merge(sync_file, arg);
+
+	case SYNC_IOC_FILE_INFO:
+		return sync_file_ioctl_fence_info(sync_file, arg);
+
+	default:
+		return -ENOTTY;
+	}
+}
+
+static const struct file_operations sync_file_fops = {
+	.release = sync_file_release,
+	.poll = sync_file_poll,
+	.unlocked_ioctl = sync_file_ioctl,
+	.compat_ioctl = sync_file_ioctl,
+};
diff --git a/drivers/dma-buf/sync_trace.h b/drivers/dma-buf/sync_trace.h
new file mode 100644
index 0000000..06e468a
--- /dev/null
+++ b/drivers/dma-buf/sync_trace.h
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#undef TRACE_SYSTEM
+#define TRACE_INCLUDE_PATH ../../drivers/dma-buf
+#define TRACE_SYSTEM sync_trace
+
+#if !defined(_TRACE_SYNC_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_SYNC_H
+
+#include "sync_debug.h"
+#include <linux/tracepoint.h>
+
+TRACE_EVENT(sync_timeline,
+	TP_PROTO(struct sync_timeline *timeline),
+
+	TP_ARGS(timeline),
+
+	TP_STRUCT__entry(
+			__string(name, timeline->name)
+			__field(u32, value)
+	),
+
+	TP_fast_assign(
+			__assign_str(name, timeline->name);
+			__entry->value = timeline->value;
+	),
+
+	TP_printk("name=%s value=%d", __get_str(name), __entry->value)
+);
+
+#endif /* if !defined(_TRACE_SYNC_H) || defined(TRACE_HEADER_MULTI_READ) */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>