v4.19.13 snapshot.
diff --git a/drivers/media/common/videobuf2/Kconfig b/drivers/media/common/videobuf2/Kconfig
new file mode 100644
index 0000000..4ed11b4
--- /dev/null
+++ b/drivers/media/common/videobuf2/Kconfig
@@ -0,0 +1,32 @@
+# Used by drivers that need Videobuf2 modules
+config VIDEOBUF2_CORE
+	select DMA_SHARED_BUFFER
+	tristate
+
+config VIDEOBUF2_V4L2
+	tristate
+
+config VIDEOBUF2_MEMOPS
+	tristate
+	select FRAME_VECTOR
+
+config VIDEOBUF2_DMA_CONTIG
+	tristate
+	select VIDEOBUF2_CORE
+	select VIDEOBUF2_MEMOPS
+	select DMA_SHARED_BUFFER
+
+config VIDEOBUF2_VMALLOC
+	tristate
+	select VIDEOBUF2_CORE
+	select VIDEOBUF2_MEMOPS
+	select DMA_SHARED_BUFFER
+
+config VIDEOBUF2_DMA_SG
+	tristate
+	select VIDEOBUF2_CORE
+	select VIDEOBUF2_MEMOPS
+
+config VIDEOBUF2_DVB
+	tristate
+	select VIDEOBUF2_CORE
diff --git a/drivers/media/common/videobuf2/Makefile b/drivers/media/common/videobuf2/Makefile
new file mode 100644
index 0000000..77bebe8
--- /dev/null
+++ b/drivers/media/common/videobuf2/Makefile
@@ -0,0 +1,14 @@
+# SPDX-License-Identifier: GPL-2.0
+videobuf2-common-objs := videobuf2-core.o
+
+ifeq ($(CONFIG_TRACEPOINTS),y)
+  videobuf2-common-objs += vb2-trace.o
+endif
+
+obj-$(CONFIG_VIDEOBUF2_CORE) += videobuf2-common.o
+obj-$(CONFIG_VIDEOBUF2_V4L2) += videobuf2-v4l2.o
+obj-$(CONFIG_VIDEOBUF2_MEMOPS) += videobuf2-memops.o
+obj-$(CONFIG_VIDEOBUF2_VMALLOC) += videobuf2-vmalloc.o
+obj-$(CONFIG_VIDEOBUF2_DMA_CONTIG) += videobuf2-dma-contig.o
+obj-$(CONFIG_VIDEOBUF2_DMA_SG) += videobuf2-dma-sg.o
+obj-$(CONFIG_VIDEOBUF2_DVB) += videobuf2-dvb.o
diff --git a/drivers/media/common/videobuf2/vb2-trace.c b/drivers/media/common/videobuf2/vb2-trace.c
new file mode 100644
index 0000000..4c0f39d
--- /dev/null
+++ b/drivers/media/common/videobuf2/vb2-trace.c
@@ -0,0 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <media/videobuf2-core.h>
+
+#define CREATE_TRACE_POINTS
+#include <trace/events/vb2.h>
+
+EXPORT_TRACEPOINT_SYMBOL_GPL(vb2_buf_done);
+EXPORT_TRACEPOINT_SYMBOL_GPL(vb2_buf_queue);
+EXPORT_TRACEPOINT_SYMBOL_GPL(vb2_dqbuf);
+EXPORT_TRACEPOINT_SYMBOL_GPL(vb2_qbuf);
diff --git a/drivers/media/common/videobuf2/videobuf2-core.c b/drivers/media/common/videobuf2/videobuf2-core.c
new file mode 100644
index 0000000..16c7b20
--- /dev/null
+++ b/drivers/media/common/videobuf2/videobuf2-core.c
@@ -0,0 +1,2635 @@
+/*
+ * videobuf2-core.c - video buffer 2 core framework
+ *
+ * Copyright (C) 2010 Samsung Electronics
+ *
+ * Author: Pawel Osciak <pawel@osciak.com>
+ *	   Marek Szyprowski <m.szyprowski@samsung.com>
+ *
+ * The vb2_thread implementation was based on code from videobuf-dvb.c:
+ *	(c) 2004 Gerd Knorr <kraxel@bytesex.org> [SUSE Labs]
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mm.h>
+#include <linux/poll.h>
+#include <linux/slab.h>
+#include <linux/sched.h>
+#include <linux/freezer.h>
+#include <linux/kthread.h>
+
+#include <media/videobuf2-core.h>
+#include <media/v4l2-mc.h>
+
+#include <trace/events/vb2.h>
+
+static int debug;
+module_param(debug, int, 0644);
+
+#define dprintk(level, fmt, arg...)				\
+	do {							\
+		if (debug >= level)				\
+			pr_info("%s: " fmt, __func__, ## arg);	\
+	} while (0)
+
+#ifdef CONFIG_VIDEO_ADV_DEBUG
+
+/*
+ * If advanced debugging is on, then count how often each op is called
+ * successfully, which can either be per-buffer or per-queue.
+ *
+ * This makes it easy to check that the 'init' and 'cleanup'
+ * (and variations thereof) stay balanced.
+ */
+
+#define log_memop(vb, op)						\
+	dprintk(2, "call_memop(%p, %d, %s)%s\n",			\
+		(vb)->vb2_queue, (vb)->index, #op,			\
+		(vb)->vb2_queue->mem_ops->op ? "" : " (nop)")
+
+#define call_memop(vb, op, args...)					\
+({									\
+	struct vb2_queue *_q = (vb)->vb2_queue;				\
+	int err;							\
+									\
+	log_memop(vb, op);						\
+	err = _q->mem_ops->op ? _q->mem_ops->op(args) : 0;		\
+	if (!err)							\
+		(vb)->cnt_mem_ ## op++;					\
+	err;								\
+})
+
+#define call_ptr_memop(vb, op, args...)					\
+({									\
+	struct vb2_queue *_q = (vb)->vb2_queue;				\
+	void *ptr;							\
+									\
+	log_memop(vb, op);						\
+	ptr = _q->mem_ops->op ? _q->mem_ops->op(args) : NULL;		\
+	if (!IS_ERR_OR_NULL(ptr))					\
+		(vb)->cnt_mem_ ## op++;					\
+	ptr;								\
+})
+
+#define call_void_memop(vb, op, args...)				\
+({									\
+	struct vb2_queue *_q = (vb)->vb2_queue;				\
+									\
+	log_memop(vb, op);						\
+	if (_q->mem_ops->op)						\
+		_q->mem_ops->op(args);					\
+	(vb)->cnt_mem_ ## op++;						\
+})
+
+#define log_qop(q, op)							\
+	dprintk(2, "call_qop(%p, %s)%s\n", q, #op,			\
+		(q)->ops->op ? "" : " (nop)")
+
+#define call_qop(q, op, args...)					\
+({									\
+	int err;							\
+									\
+	log_qop(q, op);							\
+	err = (q)->ops->op ? (q)->ops->op(args) : 0;			\
+	if (!err)							\
+		(q)->cnt_ ## op++;					\
+	err;								\
+})
+
+#define call_void_qop(q, op, args...)					\
+({									\
+	log_qop(q, op);							\
+	if ((q)->ops->op)						\
+		(q)->ops->op(args);					\
+	(q)->cnt_ ## op++;						\
+})
+
+#define log_vb_qop(vb, op, args...)					\
+	dprintk(2, "call_vb_qop(%p, %d, %s)%s\n",			\
+		(vb)->vb2_queue, (vb)->index, #op,			\
+		(vb)->vb2_queue->ops->op ? "" : " (nop)")
+
+#define call_vb_qop(vb, op, args...)					\
+({									\
+	int err;							\
+									\
+	log_vb_qop(vb, op);						\
+	err = (vb)->vb2_queue->ops->op ?				\
+		(vb)->vb2_queue->ops->op(args) : 0;			\
+	if (!err)							\
+		(vb)->cnt_ ## op++;					\
+	err;								\
+})
+
+#define call_void_vb_qop(vb, op, args...)				\
+({									\
+	log_vb_qop(vb, op);						\
+	if ((vb)->vb2_queue->ops->op)					\
+		(vb)->vb2_queue->ops->op(args);				\
+	(vb)->cnt_ ## op++;						\
+})
+
+#else
+
+#define call_memop(vb, op, args...)					\
+	((vb)->vb2_queue->mem_ops->op ?					\
+		(vb)->vb2_queue->mem_ops->op(args) : 0)
+
+#define call_ptr_memop(vb, op, args...)					\
+	((vb)->vb2_queue->mem_ops->op ?					\
+		(vb)->vb2_queue->mem_ops->op(args) : NULL)
+
+#define call_void_memop(vb, op, args...)				\
+	do {								\
+		if ((vb)->vb2_queue->mem_ops->op)			\
+			(vb)->vb2_queue->mem_ops->op(args);		\
+	} while (0)
+
+#define call_qop(q, op, args...)					\
+	((q)->ops->op ? (q)->ops->op(args) : 0)
+
+#define call_void_qop(q, op, args...)					\
+	do {								\
+		if ((q)->ops->op)					\
+			(q)->ops->op(args);				\
+	} while (0)
+
+#define call_vb_qop(vb, op, args...)					\
+	((vb)->vb2_queue->ops->op ? (vb)->vb2_queue->ops->op(args) : 0)
+
+#define call_void_vb_qop(vb, op, args...)				\
+	do {								\
+		if ((vb)->vb2_queue->ops->op)				\
+			(vb)->vb2_queue->ops->op(args);			\
+	} while (0)
+
+#endif
+
+#define call_bufop(q, op, args...)					\
+({									\
+	int ret = 0;							\
+	if (q && q->buf_ops && q->buf_ops->op)				\
+		ret = q->buf_ops->op(args);				\
+	ret;								\
+})
+
+#define call_void_bufop(q, op, args...)					\
+({									\
+	if (q && q->buf_ops && q->buf_ops->op)				\
+		q->buf_ops->op(args);					\
+})
+
+static void __vb2_queue_cancel(struct vb2_queue *q);
+static void __enqueue_in_driver(struct vb2_buffer *vb);
+
+/*
+ * __vb2_buf_mem_alloc() - allocate video memory for the given buffer
+ */
+static int __vb2_buf_mem_alloc(struct vb2_buffer *vb)
+{
+	struct vb2_queue *q = vb->vb2_queue;
+	void *mem_priv;
+	int plane;
+	int ret = -ENOMEM;
+
+	/*
+	 * Allocate memory for all planes in this buffer.
+	 * NOTE: mmapped areas should be page-aligned.
+	 */
+	for (plane = 0; plane < vb->num_planes; ++plane) {
+		unsigned long size = PAGE_ALIGN(vb->planes[plane].length);
+
+		mem_priv = call_ptr_memop(vb, alloc,
+				q->alloc_devs[plane] ? : q->dev,
+				q->dma_attrs, size, q->dma_dir, q->gfp_flags);
+		if (IS_ERR_OR_NULL(mem_priv)) {
+			if (mem_priv)
+				ret = PTR_ERR(mem_priv);
+			goto free;
+		}
+
+		/* Associate allocator private data with this plane */
+		vb->planes[plane].mem_priv = mem_priv;
+	}
+
+	return 0;
+free:
+	/* Free already allocated memory if one of the allocations failed */
+	for (; plane > 0; --plane) {
+		call_void_memop(vb, put, vb->planes[plane - 1].mem_priv);
+		vb->planes[plane - 1].mem_priv = NULL;
+	}
+
+	return ret;
+}
+
+/*
+ * __vb2_buf_mem_free() - free memory of the given buffer
+ */
+static void __vb2_buf_mem_free(struct vb2_buffer *vb)
+{
+	unsigned int plane;
+
+	for (plane = 0; plane < vb->num_planes; ++plane) {
+		call_void_memop(vb, put, vb->planes[plane].mem_priv);
+		vb->planes[plane].mem_priv = NULL;
+		dprintk(3, "freed plane %d of buffer %d\n", plane, vb->index);
+	}
+}
+
+/*
+ * __vb2_buf_userptr_put() - release userspace memory associated with
+ * a USERPTR buffer
+ */
+static void __vb2_buf_userptr_put(struct vb2_buffer *vb)
+{
+	unsigned int plane;
+
+	for (plane = 0; plane < vb->num_planes; ++plane) {
+		if (vb->planes[plane].mem_priv)
+			call_void_memop(vb, put_userptr, vb->planes[plane].mem_priv);
+		vb->planes[plane].mem_priv = NULL;
+	}
+}
+
+/*
+ * __vb2_plane_dmabuf_put() - release memory associated with
+ * a DMABUF shared plane
+ */
+static void __vb2_plane_dmabuf_put(struct vb2_buffer *vb, struct vb2_plane *p)
+{
+	if (!p->mem_priv)
+		return;
+
+	if (p->dbuf_mapped)
+		call_void_memop(vb, unmap_dmabuf, p->mem_priv);
+
+	call_void_memop(vb, detach_dmabuf, p->mem_priv);
+	dma_buf_put(p->dbuf);
+	p->mem_priv = NULL;
+	p->dbuf = NULL;
+	p->dbuf_mapped = 0;
+}
+
+/*
+ * __vb2_buf_dmabuf_put() - release memory associated with
+ * a DMABUF shared buffer
+ */
+static void __vb2_buf_dmabuf_put(struct vb2_buffer *vb)
+{
+	unsigned int plane;
+
+	for (plane = 0; plane < vb->num_planes; ++plane)
+		__vb2_plane_dmabuf_put(vb, &vb->planes[plane]);
+}
+
+/*
+ * __setup_offsets() - set up unique offsets ("cookies") for every plane in
+ * the buffer.
+ */
+static void __setup_offsets(struct vb2_buffer *vb)
+{
+	struct vb2_queue *q = vb->vb2_queue;
+	unsigned int plane;
+	unsigned long off = 0;
+
+	if (vb->index) {
+		struct vb2_buffer *prev = q->bufs[vb->index - 1];
+		struct vb2_plane *p = &prev->planes[prev->num_planes - 1];
+
+		off = PAGE_ALIGN(p->m.offset + p->length);
+	}
+
+	for (plane = 0; plane < vb->num_planes; ++plane) {
+		vb->planes[plane].m.offset = off;
+
+		dprintk(3, "buffer %d, plane %d offset 0x%08lx\n",
+				vb->index, plane, off);
+
+		off += vb->planes[plane].length;
+		off = PAGE_ALIGN(off);
+	}
+}
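The per-plane offsets assigned here act as opaque "cookies": userspace obtains them via querybuf and passes them back as the mmap() offset, which __find_plane_by_offset() and vb2_mmap() below resolve to a plane. As a rough illustration (not part of this patch; the single-plane capture setup and error handling are assumptions), a userspace round trip might look like:

/* Illustrative userspace sketch: map an MMAP buffer via its offset cookie. */
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <linux/videodev2.h>

static void *map_buffer(int fd, unsigned int index, size_t *len)
{
	struct v4l2_buffer buf;

	memset(&buf, 0, sizeof(buf));
	buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	buf.memory = V4L2_MEMORY_MMAP;
	buf.index = index;
	if (ioctl(fd, VIDIOC_QUERYBUF, &buf) < 0)
		return MAP_FAILED;

	*len = buf.length;
	/* buf.m.offset is the per-plane cookie computed by __setup_offsets() */
	return mmap(NULL, buf.length, PROT_READ | PROT_WRITE, MAP_SHARED,
		    fd, buf.m.offset);
}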
+
+/*
+ * __vb2_queue_alloc() - allocate videobuf buffer structures and (for MMAP type)
+ * video buffer memory for all buffers/planes on the queue and initialize the
+ * queue
+ *
+ * Returns the number of buffers successfully allocated.
+ */
+static int __vb2_queue_alloc(struct vb2_queue *q, enum vb2_memory memory,
+			     unsigned int num_buffers, unsigned int num_planes,
+			     const unsigned plane_sizes[VB2_MAX_PLANES])
+{
+	unsigned int buffer, plane;
+	struct vb2_buffer *vb;
+	int ret;
+
+	/* Ensure that q->num_buffers+num_buffers does not exceed VB2_MAX_FRAME */
+	num_buffers = min_t(unsigned int, num_buffers,
+			    VB2_MAX_FRAME - q->num_buffers);
+
+	for (buffer = 0; buffer < num_buffers; ++buffer) {
+		/* Allocate videobuf buffer structures */
+		vb = kzalloc(q->buf_struct_size, GFP_KERNEL);
+		if (!vb) {
+			dprintk(1, "memory alloc for buffer struct failed\n");
+			break;
+		}
+
+		vb->state = VB2_BUF_STATE_DEQUEUED;
+		vb->vb2_queue = q;
+		vb->num_planes = num_planes;
+		vb->index = q->num_buffers + buffer;
+		vb->type = q->type;
+		vb->memory = memory;
+		for (plane = 0; plane < num_planes; ++plane) {
+			vb->planes[plane].length = plane_sizes[plane];
+			vb->planes[plane].min_length = plane_sizes[plane];
+		}
+		q->bufs[vb->index] = vb;
+
+		/* Allocate video buffer memory for the MMAP type */
+		if (memory == VB2_MEMORY_MMAP) {
+			ret = __vb2_buf_mem_alloc(vb);
+			if (ret) {
+				dprintk(1, "failed allocating memory for buffer %d\n",
+					buffer);
+				q->bufs[vb->index] = NULL;
+				kfree(vb);
+				break;
+			}
+			__setup_offsets(vb);
+			/*
+			 * Call the driver-provided buffer initialization
+			 * callback, if given. An error in initialization
+			 * results in queue setup failure.
+			 */
+			ret = call_vb_qop(vb, buf_init, vb);
+			if (ret) {
+				dprintk(1, "buffer %d %p initialization failed\n",
+					buffer, vb);
+				__vb2_buf_mem_free(vb);
+				q->bufs[vb->index] = NULL;
+				kfree(vb);
+				break;
+			}
+		}
+	}
+
+	dprintk(1, "allocated %d buffers, %d plane(s) each\n",
+			buffer, num_planes);
+
+	return buffer;
+}
+
+/*
+ * __vb2_free_mem() - release all video buffer memory for a given queue
+ */
+static void __vb2_free_mem(struct vb2_queue *q, unsigned int buffers)
+{
+	unsigned int buffer;
+	struct vb2_buffer *vb;
+
+	for (buffer = q->num_buffers - buffers; buffer < q->num_buffers;
+	     ++buffer) {
+		vb = q->bufs[buffer];
+		if (!vb)
+			continue;
+
+		/* Free MMAP buffers or release USERPTR/DMABUF buffers */
+		if (q->memory == VB2_MEMORY_MMAP)
+			__vb2_buf_mem_free(vb);
+		else if (q->memory == VB2_MEMORY_DMABUF)
+			__vb2_buf_dmabuf_put(vb);
+		else
+			__vb2_buf_userptr_put(vb);
+	}
+}
+
+/*
+ * __vb2_queue_free() - free buffers at the end of the queue - video memory and
+ * related information; if no buffers are left, return the queue to an
+ * uninitialized state. May be called even if the queue has already been freed.
+ */
+static int __vb2_queue_free(struct vb2_queue *q, unsigned int buffers)
+{
+	unsigned int buffer;
+
+	/*
+	 * Sanity check: when preparing a buffer the queue lock is released for
+	 * a short while (see __buf_prepare for the details), which would allow
+	 * a race with a reqbufs which can call this function. Removing the
+	 * buffers from underneath __buf_prepare is obviously a bad idea, so we
+	 * check if any of the buffers is in the state PREPARING, and if so we
+	 * just return -EAGAIN.
+	 */
+	for (buffer = q->num_buffers - buffers; buffer < q->num_buffers;
+	     ++buffer) {
+		if (q->bufs[buffer] == NULL)
+			continue;
+		if (q->bufs[buffer]->state == VB2_BUF_STATE_PREPARING) {
+			dprintk(1, "preparing buffers, cannot free\n");
+			return -EAGAIN;
+		}
+	}
+
+	/* Call driver-provided cleanup function for each buffer, if provided */
+	for (buffer = q->num_buffers - buffers; buffer < q->num_buffers;
+	     ++buffer) {
+		struct vb2_buffer *vb = q->bufs[buffer];
+
+		if (vb && vb->planes[0].mem_priv)
+			call_void_vb_qop(vb, buf_cleanup, vb);
+	}
+
+	/* Release video buffer memory */
+	__vb2_free_mem(q, buffers);
+
+#ifdef CONFIG_VIDEO_ADV_DEBUG
+	/*
+	 * Check that all the calls were balanced during the lifetime of this
+	 * queue. If not (or if the debug level is 1 or higher), then dump the
+	 * counters to the kernel log.
+	 */
+	if (q->num_buffers) {
+		bool unbalanced = q->cnt_start_streaming != q->cnt_stop_streaming ||
+				  q->cnt_wait_prepare != q->cnt_wait_finish;
+
+		if (unbalanced || debug) {
+			pr_info("counters for queue %p:%s\n", q,
+				unbalanced ? " UNBALANCED!" : "");
+			pr_info("     setup: %u start_streaming: %u stop_streaming: %u\n",
+				q->cnt_queue_setup, q->cnt_start_streaming,
+				q->cnt_stop_streaming);
+			pr_info("     wait_prepare: %u wait_finish: %u\n",
+				q->cnt_wait_prepare, q->cnt_wait_finish);
+		}
+		q->cnt_queue_setup = 0;
+		q->cnt_wait_prepare = 0;
+		q->cnt_wait_finish = 0;
+		q->cnt_start_streaming = 0;
+		q->cnt_stop_streaming = 0;
+	}
+	for (buffer = 0; buffer < q->num_buffers; ++buffer) {
+		struct vb2_buffer *vb = q->bufs[buffer];
+		bool unbalanced = vb->cnt_mem_alloc != vb->cnt_mem_put ||
+				  vb->cnt_mem_prepare != vb->cnt_mem_finish ||
+				  vb->cnt_mem_get_userptr != vb->cnt_mem_put_userptr ||
+				  vb->cnt_mem_attach_dmabuf != vb->cnt_mem_detach_dmabuf ||
+				  vb->cnt_mem_map_dmabuf != vb->cnt_mem_unmap_dmabuf ||
+				  vb->cnt_buf_queue != vb->cnt_buf_done ||
+				  vb->cnt_buf_prepare != vb->cnt_buf_finish ||
+				  vb->cnt_buf_init != vb->cnt_buf_cleanup;
+
+		if (unbalanced || debug) {
+			pr_info("   counters for queue %p, buffer %d:%s\n",
+				q, buffer, unbalanced ? " UNBALANCED!" : "");
+			pr_info("     buf_init: %u buf_cleanup: %u buf_prepare: %u buf_finish: %u\n",
+				vb->cnt_buf_init, vb->cnt_buf_cleanup,
+				vb->cnt_buf_prepare, vb->cnt_buf_finish);
+			pr_info("     buf_queue: %u buf_done: %u\n",
+				vb->cnt_buf_queue, vb->cnt_buf_done);
+			pr_info("     alloc: %u put: %u prepare: %u finish: %u mmap: %u\n",
+				vb->cnt_mem_alloc, vb->cnt_mem_put,
+				vb->cnt_mem_prepare, vb->cnt_mem_finish,
+				vb->cnt_mem_mmap);
+			pr_info("     get_userptr: %u put_userptr: %u\n",
+				vb->cnt_mem_get_userptr, vb->cnt_mem_put_userptr);
+			pr_info("     attach_dmabuf: %u detach_dmabuf: %u map_dmabuf: %u unmap_dmabuf: %u\n",
+				vb->cnt_mem_attach_dmabuf, vb->cnt_mem_detach_dmabuf,
+				vb->cnt_mem_map_dmabuf, vb->cnt_mem_unmap_dmabuf);
+			pr_info("     get_dmabuf: %u num_users: %u vaddr: %u cookie: %u\n",
+				vb->cnt_mem_get_dmabuf,
+				vb->cnt_mem_num_users,
+				vb->cnt_mem_vaddr,
+				vb->cnt_mem_cookie);
+		}
+	}
+#endif
+
+	/* Free videobuf buffers */
+	for (buffer = q->num_buffers - buffers; buffer < q->num_buffers;
+	     ++buffer) {
+		kfree(q->bufs[buffer]);
+		q->bufs[buffer] = NULL;
+	}
+
+	q->num_buffers -= buffers;
+	if (!q->num_buffers) {
+		q->memory = VB2_MEMORY_UNKNOWN;
+		INIT_LIST_HEAD(&q->queued_list);
+	}
+	return 0;
+}
+
+bool vb2_buffer_in_use(struct vb2_queue *q, struct vb2_buffer *vb)
+{
+	unsigned int plane;
+	for (plane = 0; plane < vb->num_planes; ++plane) {
+		void *mem_priv = vb->planes[plane].mem_priv;
+		/*
+		 * If num_users() has not been provided, call_memop
+		 * will return 0; apparently nobody cares about this
+		 * case anyway. If num_users() returns more than 1,
+		 * we are not the only user of the plane's memory.
+		 */
+		if (mem_priv && call_memop(vb, num_users, mem_priv) > 1)
+			return true;
+	}
+	return false;
+}
+EXPORT_SYMBOL(vb2_buffer_in_use);
+
+/*
+ * __buffers_in_use() - return true if any buffers on the queue are in use and
+ * the queue cannot be freed (by means of a REQBUFS(0) call)
+ */
+static bool __buffers_in_use(struct vb2_queue *q)
+{
+	unsigned int buffer;
+	for (buffer = 0; buffer < q->num_buffers; ++buffer) {
+		if (vb2_buffer_in_use(q, q->bufs[buffer]))
+			return true;
+	}
+	return false;
+}
+
+void vb2_core_querybuf(struct vb2_queue *q, unsigned int index, void *pb)
+{
+	call_void_bufop(q, fill_user_buffer, q->bufs[index], pb);
+}
+EXPORT_SYMBOL_GPL(vb2_core_querybuf);
+
+/*
+ * __verify_userptr_ops() - verify that all memory operations required for
+ * USERPTR queue type have been provided
+ */
+static int __verify_userptr_ops(struct vb2_queue *q)
+{
+	if (!(q->io_modes & VB2_USERPTR) || !q->mem_ops->get_userptr ||
+	    !q->mem_ops->put_userptr)
+		return -EINVAL;
+
+	return 0;
+}
+
+/*
+ * __verify_mmap_ops() - verify that all memory operations required for
+ * MMAP queue type have been provided
+ */
+static int __verify_mmap_ops(struct vb2_queue *q)
+{
+	if (!(q->io_modes & VB2_MMAP) || !q->mem_ops->alloc ||
+	    !q->mem_ops->put || !q->mem_ops->mmap)
+		return -EINVAL;
+
+	return 0;
+}
+
+/*
+ * __verify_dmabuf_ops() - verify that all memory operations required for
+ * DMABUF queue type have been provided
+ */
+static int __verify_dmabuf_ops(struct vb2_queue *q)
+{
+	if (!(q->io_modes & VB2_DMABUF) || !q->mem_ops->attach_dmabuf ||
+	    !q->mem_ops->detach_dmabuf  || !q->mem_ops->map_dmabuf ||
+	    !q->mem_ops->unmap_dmabuf)
+		return -EINVAL;
+
+	return 0;
+}
+
+int vb2_verify_memory_type(struct vb2_queue *q,
+		enum vb2_memory memory, unsigned int type)
+{
+	if (memory != VB2_MEMORY_MMAP && memory != VB2_MEMORY_USERPTR &&
+	    memory != VB2_MEMORY_DMABUF) {
+		dprintk(1, "unsupported memory type\n");
+		return -EINVAL;
+	}
+
+	if (type != q->type) {
+		dprintk(1, "requested type is incorrect\n");
+		return -EINVAL;
+	}
+
+	/*
+	 * Make sure all the required memory ops for given memory type
+	 * are available.
+	 */
+	if (memory == VB2_MEMORY_MMAP && __verify_mmap_ops(q)) {
+		dprintk(1, "MMAP for current setup unsupported\n");
+		return -EINVAL;
+	}
+
+	if (memory == VB2_MEMORY_USERPTR && __verify_userptr_ops(q)) {
+		dprintk(1, "USERPTR for current setup unsupported\n");
+		return -EINVAL;
+	}
+
+	if (memory == VB2_MEMORY_DMABUF && __verify_dmabuf_ops(q)) {
+		dprintk(1, "DMABUF for current setup unsupported\n");
+		return -EINVAL;
+	}
+
+	/*
+	 * Place the busy tests at the end: -EBUSY can be ignored when
+	 * create_bufs is called with count == 0, but count == 0 should still
+	 * do the memory and type validation.
+	 */
+	if (vb2_fileio_is_active(q)) {
+		dprintk(1, "file io in progress\n");
+		return -EBUSY;
+	}
+	return 0;
+}
+EXPORT_SYMBOL(vb2_verify_memory_type);
+
+int vb2_core_reqbufs(struct vb2_queue *q, enum vb2_memory memory,
+		unsigned int *count)
+{
+	unsigned int num_buffers, allocated_buffers, num_planes = 0;
+	unsigned plane_sizes[VB2_MAX_PLANES] = { };
+	int ret;
+
+	if (q->streaming) {
+		dprintk(1, "streaming active\n");
+		return -EBUSY;
+	}
+
+	if (*count == 0 || q->num_buffers != 0 ||
+	    (q->memory != VB2_MEMORY_UNKNOWN && q->memory != memory)) {
+		/*
+		 * We already have buffers allocated, so first check if they
+		 * are not in use and can be freed.
+		 */
+		mutex_lock(&q->mmap_lock);
+		if (q->memory == VB2_MEMORY_MMAP && __buffers_in_use(q)) {
+			mutex_unlock(&q->mmap_lock);
+			dprintk(1, "memory in use, cannot free\n");
+			return -EBUSY;
+		}
+
+		/*
+		 * Call queue_cancel to clean up any buffers in the PREPARED or
+		 * QUEUED state which is possible if buffers were prepared or
+		 * queued without ever calling STREAMON.
+		 */
+		__vb2_queue_cancel(q);
+		ret = __vb2_queue_free(q, q->num_buffers);
+		mutex_unlock(&q->mmap_lock);
+		if (ret)
+			return ret;
+
+		/*
+		 * In case of REQBUFS(0) return immediately without calling
+		 * driver's queue_setup() callback and allocating resources.
+		 */
+		if (*count == 0)
+			return 0;
+	}
+
+	/*
+	 * Make sure the requested values and current defaults are sane.
+	 */
+	WARN_ON(q->min_buffers_needed > VB2_MAX_FRAME);
+	num_buffers = max_t(unsigned int, *count, q->min_buffers_needed);
+	num_buffers = min_t(unsigned int, num_buffers, VB2_MAX_FRAME);
+	memset(q->alloc_devs, 0, sizeof(q->alloc_devs));
+	q->memory = memory;
+
+	/*
+	 * Ask the driver how many buffers and planes per buffer it requires.
+	 * Driver also sets the size and allocator context for each plane.
+	 */
+	ret = call_qop(q, queue_setup, q, &num_buffers, &num_planes,
+		       plane_sizes, q->alloc_devs);
+	if (ret)
+		return ret;
+
+	/* Finally, allocate buffers and video memory */
+	allocated_buffers =
+		__vb2_queue_alloc(q, memory, num_buffers, num_planes, plane_sizes);
+	if (allocated_buffers == 0) {
+		dprintk(1, "memory allocation failed\n");
+		return -ENOMEM;
+	}
+
+	/*
+	 * There is no point in continuing if we can't allocate the minimum
+	 * number of buffers needed by this vb2_queue.
+	 */
+	if (allocated_buffers < q->min_buffers_needed)
+		ret = -ENOMEM;
+
+	/*
+	 * Check if driver can handle the allocated number of buffers.
+	 */
+	if (!ret && allocated_buffers < num_buffers) {
+		num_buffers = allocated_buffers;
+		/*
+		 * num_planes was set by the previous queue_setup(). Since a
+		 * non-zero value signals to queue_setup() that it is being
+		 * called from create_bufs() rather than reqbufs(), zero it
+		 * here so the driver sees the reqbufs() case.
+		 */
+		num_planes = 0;
+
+		ret = call_qop(q, queue_setup, q, &num_buffers,
+			       &num_planes, plane_sizes, q->alloc_devs);
+
+		if (!ret && allocated_buffers < num_buffers)
+			ret = -ENOMEM;
+
+		/*
+		 * Either the driver has accepted a smaller number of buffers,
+		 * or .queue_setup() returned an error
+		 */
+	}
+
+	mutex_lock(&q->mmap_lock);
+	q->num_buffers = allocated_buffers;
+
+	if (ret < 0) {
+		/*
+		 * Note: __vb2_queue_free() will subtract 'allocated_buffers'
+		 * from q->num_buffers.
+		 */
+		__vb2_queue_free(q, allocated_buffers);
+		mutex_unlock(&q->mmap_lock);
+		return ret;
+	}
+	mutex_unlock(&q->mmap_lock);
+
+	/*
+	 * Return the number of successfully allocated buffers
+	 * to the userspace.
+	 */
+	*count = allocated_buffers;
+	q->waiting_for_buffers = !q->is_output;
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(vb2_core_reqbufs);
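vb2_core_reqbufs() negotiates the final buffer count with the driver through the queue_setup() op: on the first call *num_planes is zero and the driver fills in the plane count and sizes, while on the second call above it may only accept or reject the (possibly reduced) count. A hedged driver-side sketch of that contract (struct my_dev, its sizeimage field and the policy shown are hypothetical, not taken from this patch):

/* Illustrative driver sketch of the queue_setup() contract. */
struct my_dev {
	unsigned int sizeimage;		/* hypothetical current format size */
};

static int my_queue_setup(struct vb2_queue *q, unsigned int *num_buffers,
			  unsigned int *num_planes, unsigned int sizes[],
			  struct device *alloc_devs[])
{
	struct my_dev *dev = vb2_get_drv_priv(q);

	if (*num_planes) {
		/* create_bufs() case: sizes[] was provided, only verify it */
		return sizes[0] < dev->sizeimage ? -EINVAL : 0;
	}

	/* reqbufs() case: report one plane of the current format size */
	*num_planes = 1;
	sizes[0] = dev->sizeimage;
	return 0;
}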
+
+int vb2_core_create_bufs(struct vb2_queue *q, enum vb2_memory memory,
+		unsigned int *count, unsigned requested_planes,
+		const unsigned requested_sizes[])
+{
+	unsigned int num_planes = 0, num_buffers, allocated_buffers;
+	unsigned plane_sizes[VB2_MAX_PLANES] = { };
+	int ret;
+
+	if (q->num_buffers == VB2_MAX_FRAME) {
+		dprintk(1, "maximum number of buffers already allocated\n");
+		return -ENOBUFS;
+	}
+
+	if (!q->num_buffers) {
+		memset(q->alloc_devs, 0, sizeof(q->alloc_devs));
+		q->memory = memory;
+		q->waiting_for_buffers = !q->is_output;
+	}
+
+	num_buffers = min(*count, VB2_MAX_FRAME - q->num_buffers);
+
+	if (requested_planes && requested_sizes) {
+		num_planes = requested_planes;
+		memcpy(plane_sizes, requested_sizes, sizeof(plane_sizes));
+	}
+
+	/*
+	 * Ask the driver whether the requested number of buffers, planes per
+	 * buffer and their sizes are acceptable.
+	 */
+	ret = call_qop(q, queue_setup, q, &num_buffers,
+		       &num_planes, plane_sizes, q->alloc_devs);
+	if (ret)
+		return ret;
+
+	/* Finally, allocate buffers and video memory */
+	allocated_buffers = __vb2_queue_alloc(q, memory, num_buffers,
+				num_planes, plane_sizes);
+	if (allocated_buffers == 0) {
+		dprintk(1, "memory allocation failed\n");
+		return -ENOMEM;
+	}
+
+	/*
+	 * Check if the driver can handle the number of buffers allocated so far.
+	 */
+	if (allocated_buffers < num_buffers) {
+		num_buffers = allocated_buffers;
+
+		/*
+		 * q->num_buffers contains the total number of buffers that the
+		 * queue driver has set up.
+		 */
+		ret = call_qop(q, queue_setup, q, &num_buffers,
+			       &num_planes, plane_sizes, q->alloc_devs);
+
+		if (!ret && allocated_buffers < num_buffers)
+			ret = -ENOMEM;
+
+		/*
+		 * Either the driver has accepted a smaller number of buffers,
+		 * or .queue_setup() returned an error
+		 */
+	}
+
+	mutex_lock(&q->mmap_lock);
+	q->num_buffers += allocated_buffers;
+
+	if (ret < 0) {
+		/*
+		 * Note: __vb2_queue_free() will subtract 'allocated_buffers'
+		 * from q->num_buffers.
+		 */
+		__vb2_queue_free(q, allocated_buffers);
+		mutex_unlock(&q->mmap_lock);
+		return -ENOMEM;
+	}
+	mutex_unlock(&q->mmap_lock);
+
+	/*
+	 * Return the number of successfully allocated buffers
+	 * to the userspace.
+	 */
+	*count = allocated_buffers;
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(vb2_core_create_bufs);
+
+void *vb2_plane_vaddr(struct vb2_buffer *vb, unsigned int plane_no)
+{
+	if (plane_no >= vb->num_planes || !vb->planes[plane_no].mem_priv)
+		return NULL;
+
+	return call_ptr_memop(vb, vaddr, vb->planes[plane_no].mem_priv);
+
+}
+EXPORT_SYMBOL_GPL(vb2_plane_vaddr);
+
+void *vb2_plane_cookie(struct vb2_buffer *vb, unsigned int plane_no)
+{
+	if (plane_no >= vb->num_planes || !vb->planes[plane_no].mem_priv)
+		return NULL;
+
+	return call_ptr_memop(vb, cookie, vb->planes[plane_no].mem_priv);
+}
+EXPORT_SYMBOL_GPL(vb2_plane_cookie);
+
+void vb2_buffer_done(struct vb2_buffer *vb, enum vb2_buffer_state state)
+{
+	struct vb2_queue *q = vb->vb2_queue;
+	unsigned long flags;
+	unsigned int plane;
+
+	if (WARN_ON(vb->state != VB2_BUF_STATE_ACTIVE))
+		return;
+
+	if (WARN_ON(state != VB2_BUF_STATE_DONE &&
+		    state != VB2_BUF_STATE_ERROR &&
+		    state != VB2_BUF_STATE_QUEUED &&
+		    state != VB2_BUF_STATE_REQUEUEING))
+		state = VB2_BUF_STATE_ERROR;
+
+#ifdef CONFIG_VIDEO_ADV_DEBUG
+	/*
+	 * Although this is not a callback, it still has to balance
+	 * with the buf_queue op. So update this counter manually.
+	 */
+	vb->cnt_buf_done++;
+#endif
+	dprintk(4, "done processing on buffer %d, state: %d\n",
+			vb->index, state);
+
+	if (state != VB2_BUF_STATE_QUEUED &&
+	    state != VB2_BUF_STATE_REQUEUEING) {
+		/* sync buffers */
+		for (plane = 0; plane < vb->num_planes; ++plane)
+			call_void_memop(vb, finish, vb->planes[plane].mem_priv);
+	}
+
+	spin_lock_irqsave(&q->done_lock, flags);
+	if (state == VB2_BUF_STATE_QUEUED ||
+	    state == VB2_BUF_STATE_REQUEUEING) {
+		vb->state = VB2_BUF_STATE_QUEUED;
+	} else {
+		/* Add the buffer to the done buffers list */
+		list_add_tail(&vb->done_entry, &q->done_list);
+		vb->state = state;
+	}
+	atomic_dec(&q->owned_by_drv_count);
+	spin_unlock_irqrestore(&q->done_lock, flags);
+
+	trace_vb2_buf_done(q, vb);
+
+	switch (state) {
+	case VB2_BUF_STATE_QUEUED:
+		return;
+	case VB2_BUF_STATE_REQUEUEING:
+		if (q->start_streaming_called)
+			__enqueue_in_driver(vb);
+		return;
+	default:
+		/* Inform any processes that may be waiting for buffers */
+		wake_up(&q->done_wq);
+		break;
+	}
+}
+EXPORT_SYMBOL_GPL(vb2_buffer_done);
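vb2_buffer_done() is the driver's way of handing a buffer received through buf_queue back to vb2, usually from an interrupt handler or DMA completion callback. A hedged completion-path sketch (struct my_dev with its slock, cur_buf and sizeimage members is hypothetical):

/* Illustrative driver sketch of a completion handler. */
#include <linux/spinlock.h>
#include <media/videobuf2-core.h>

struct my_dev {				/* hypothetical driver context */
	spinlock_t slock;
	struct vb2_buffer *cur_buf;	/* buffer handed over in buf_queue */
	unsigned int sizeimage;
};

static void my_dev_frame_done(struct my_dev *dev, bool error)
{
	struct vb2_buffer *vb;

	spin_lock(&dev->slock);
	vb = dev->cur_buf;
	dev->cur_buf = NULL;
	spin_unlock(&dev->slock);

	if (!vb)
		return;

	/* record how much data the hardware actually wrote */
	vb2_set_plane_payload(vb, 0, dev->sizeimage);
	vb2_buffer_done(vb, error ? VB2_BUF_STATE_ERROR : VB2_BUF_STATE_DONE);
}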
+
+void vb2_discard_done(struct vb2_queue *q)
+{
+	struct vb2_buffer *vb;
+	unsigned long flags;
+
+	spin_lock_irqsave(&q->done_lock, flags);
+	list_for_each_entry(vb, &q->done_list, done_entry)
+		vb->state = VB2_BUF_STATE_ERROR;
+	spin_unlock_irqrestore(&q->done_lock, flags);
+}
+EXPORT_SYMBOL_GPL(vb2_discard_done);
+
+/*
+ * __prepare_mmap() - prepare an MMAP buffer
+ */
+static int __prepare_mmap(struct vb2_buffer *vb, const void *pb)
+{
+	int ret = 0;
+
+	if (pb)
+		ret = call_bufop(vb->vb2_queue, fill_vb2_buffer,
+				 vb, pb, vb->planes);
+	return ret ? ret : call_vb_qop(vb, buf_prepare, vb);
+}
+
+/*
+ * __prepare_userptr() - prepare a USERPTR buffer
+ */
+static int __prepare_userptr(struct vb2_buffer *vb, const void *pb)
+{
+	struct vb2_plane planes[VB2_MAX_PLANES];
+	struct vb2_queue *q = vb->vb2_queue;
+	void *mem_priv;
+	unsigned int plane;
+	int ret = 0;
+	bool reacquired = vb->planes[0].mem_priv == NULL;
+
+	memset(planes, 0, sizeof(planes[0]) * vb->num_planes);
+	/* Copy relevant information provided by the userspace */
+	if (pb) {
+		ret = call_bufop(vb->vb2_queue, fill_vb2_buffer,
+				 vb, pb, planes);
+		if (ret)
+			return ret;
+	}
+
+	for (plane = 0; plane < vb->num_planes; ++plane) {
+		/* Skip the plane if already verified */
+		if (vb->planes[plane].m.userptr &&
+			vb->planes[plane].m.userptr == planes[plane].m.userptr
+			&& vb->planes[plane].length == planes[plane].length)
+			continue;
+
+		dprintk(3, "userspace address for plane %d changed, reacquiring memory\n",
+			plane);
+
+		/* Check if the provided plane buffer is large enough */
+		if (planes[plane].length < vb->planes[plane].min_length) {
+			dprintk(1, "provided buffer size %u is less than setup size %u for plane %d\n",
+						planes[plane].length,
+						vb->planes[plane].min_length,
+						plane);
+			ret = -EINVAL;
+			goto err;
+		}
+
+		/* Release previously acquired memory if present */
+		if (vb->planes[plane].mem_priv) {
+			if (!reacquired) {
+				reacquired = true;
+				call_void_vb_qop(vb, buf_cleanup, vb);
+			}
+			call_void_memop(vb, put_userptr, vb->planes[plane].mem_priv);
+		}
+
+		vb->planes[plane].mem_priv = NULL;
+		vb->planes[plane].bytesused = 0;
+		vb->planes[plane].length = 0;
+		vb->planes[plane].m.userptr = 0;
+		vb->planes[plane].data_offset = 0;
+
+		/* Acquire each plane's memory */
+		mem_priv = call_ptr_memop(vb, get_userptr,
+				q->alloc_devs[plane] ? : q->dev,
+				planes[plane].m.userptr,
+				planes[plane].length, q->dma_dir);
+		if (IS_ERR(mem_priv)) {
+			dprintk(1, "failed acquiring userspace memory for plane %d\n",
+				plane);
+			ret = PTR_ERR(mem_priv);
+			goto err;
+		}
+		vb->planes[plane].mem_priv = mem_priv;
+	}
+
+	/*
+	 * Now that everything is in order, copy relevant information
+	 * provided by userspace.
+	 */
+	for (plane = 0; plane < vb->num_planes; ++plane) {
+		vb->planes[plane].bytesused = planes[plane].bytesused;
+		vb->planes[plane].length = planes[plane].length;
+		vb->planes[plane].m.userptr = planes[plane].m.userptr;
+		vb->planes[plane].data_offset = planes[plane].data_offset;
+	}
+
+	if (reacquired) {
+		/*
+		 * One or more planes changed, so we must call buf_init to do
+		 * the driver-specific initialization on the newly acquired
+		 * buffer, if provided.
+		 */
+		ret = call_vb_qop(vb, buf_init, vb);
+		if (ret) {
+			dprintk(1, "buffer initialization failed\n");
+			goto err;
+		}
+	}
+
+	ret = call_vb_qop(vb, buf_prepare, vb);
+	if (ret) {
+		dprintk(1, "buffer preparation failed\n");
+		call_void_vb_qop(vb, buf_cleanup, vb);
+		goto err;
+	}
+
+	return 0;
+err:
+	/* In case of errors, release planes that were already acquired */
+	for (plane = 0; plane < vb->num_planes; ++plane) {
+		if (vb->planes[plane].mem_priv)
+			call_void_memop(vb, put_userptr,
+				vb->planes[plane].mem_priv);
+		vb->planes[plane].mem_priv = NULL;
+		vb->planes[plane].m.userptr = 0;
+		vb->planes[plane].length = 0;
+	}
+
+	return ret;
+}
+
+/*
+ * __prepare_dmabuf() - prepare a DMABUF buffer
+ */
+static int __prepare_dmabuf(struct vb2_buffer *vb, const void *pb)
+{
+	struct vb2_plane planes[VB2_MAX_PLANES];
+	struct vb2_queue *q = vb->vb2_queue;
+	void *mem_priv;
+	unsigned int plane;
+	int ret = 0;
+	bool reacquired = vb->planes[0].mem_priv == NULL;
+
+	memset(planes, 0, sizeof(planes[0]) * vb->num_planes);
+	/* Copy relevant information provided by the userspace */
+	if (pb) {
+		ret = call_bufop(vb->vb2_queue, fill_vb2_buffer,
+				 vb, pb, planes);
+		if (ret)
+			return ret;
+	}
+
+	for (plane = 0; plane < vb->num_planes; ++plane) {
+		struct dma_buf *dbuf = dma_buf_get(planes[plane].m.fd);
+
+		if (IS_ERR_OR_NULL(dbuf)) {
+			dprintk(1, "invalid dmabuf fd for plane %d\n",
+				plane);
+			ret = -EINVAL;
+			goto err;
+		}
+
+		/* use DMABUF size if length is not provided */
+		if (planes[plane].length == 0)
+			planes[plane].length = dbuf->size;
+
+		if (planes[plane].length < vb->planes[plane].min_length) {
+			dprintk(1, "invalid dmabuf length %u for plane %d, minimum length %u\n",
+				planes[plane].length, plane,
+				vb->planes[plane].min_length);
+			dma_buf_put(dbuf);
+			ret = -EINVAL;
+			goto err;
+		}
+
+		/* Skip the plane if already verified */
+		if (dbuf == vb->planes[plane].dbuf &&
+			vb->planes[plane].length == planes[plane].length) {
+			dma_buf_put(dbuf);
+			continue;
+		}
+
+		dprintk(3, "buffer for plane %d changed\n", plane);
+
+		if (!reacquired) {
+			reacquired = true;
+			call_void_vb_qop(vb, buf_cleanup, vb);
+		}
+
+		/* Release previously acquired memory if present */
+		__vb2_plane_dmabuf_put(vb, &vb->planes[plane]);
+		vb->planes[plane].bytesused = 0;
+		vb->planes[plane].length = 0;
+		vb->planes[plane].m.fd = 0;
+		vb->planes[plane].data_offset = 0;
+
+		/* Acquire each plane's memory */
+		mem_priv = call_ptr_memop(vb, attach_dmabuf,
+				q->alloc_devs[plane] ? : q->dev,
+				dbuf, planes[plane].length, q->dma_dir);
+		if (IS_ERR(mem_priv)) {
+			dprintk(1, "failed to attach dmabuf\n");
+			ret = PTR_ERR(mem_priv);
+			dma_buf_put(dbuf);
+			goto err;
+		}
+
+		vb->planes[plane].dbuf = dbuf;
+		vb->planes[plane].mem_priv = mem_priv;
+	}
+
+	/*
+	 * This pins the buffer(s) with dma_buf_map_attachment(). It's done
+	 * here, while queueing the buffer(s), instead of just before the DMA,
+	 * so userspace knows sooner rather than later if the dma-buf map fails.
+	 */
+	for (plane = 0; plane < vb->num_planes; ++plane) {
+		ret = call_memop(vb, map_dmabuf, vb->planes[plane].mem_priv);
+		if (ret) {
+			dprintk(1, "failed to map dmabuf for plane %d\n",
+				plane);
+			goto err;
+		}
+		vb->planes[plane].dbuf_mapped = 1;
+	}
+
+	/*
+	 * Now that everything is in order, copy relevant information
+	 * provided by userspace.
+	 */
+	for (plane = 0; plane < vb->num_planes; ++plane) {
+		vb->planes[plane].bytesused = planes[plane].bytesused;
+		vb->planes[plane].length = planes[plane].length;
+		vb->planes[plane].m.fd = planes[plane].m.fd;
+		vb->planes[plane].data_offset = planes[plane].data_offset;
+	}
+
+	if (reacquired) {
+		/*
+		 * Call driver-specific initialization on the newly acquired buffer,
+		 * if provided.
+		 */
+		ret = call_vb_qop(vb, buf_init, vb);
+		if (ret) {
+			dprintk(1, "buffer initialization failed\n");
+			goto err;
+		}
+	}
+
+	ret = call_vb_qop(vb, buf_prepare, vb);
+	if (ret) {
+		dprintk(1, "buffer preparation failed\n");
+		call_void_vb_qop(vb, buf_cleanup, vb);
+		goto err;
+	}
+
+	return 0;
+err:
+	/* In case of errors, release planes that were already acquired */
+	__vb2_buf_dmabuf_put(vb);
+
+	return ret;
+}
+
+/*
+ * __enqueue_in_driver() - enqueue a vb2_buffer in driver for processing
+ */
+static void __enqueue_in_driver(struct vb2_buffer *vb)
+{
+	struct vb2_queue *q = vb->vb2_queue;
+
+	vb->state = VB2_BUF_STATE_ACTIVE;
+	atomic_inc(&q->owned_by_drv_count);
+
+	trace_vb2_buf_queue(q, vb);
+
+	call_void_vb_qop(vb, buf_queue, vb);
+}
+
+static int __buf_prepare(struct vb2_buffer *vb, const void *pb)
+{
+	struct vb2_queue *q = vb->vb2_queue;
+	unsigned int plane;
+	int ret;
+
+	if (q->error) {
+		dprintk(1, "fatal error occurred on queue\n");
+		return -EIO;
+	}
+
+	vb->state = VB2_BUF_STATE_PREPARING;
+
+	switch (q->memory) {
+	case VB2_MEMORY_MMAP:
+		ret = __prepare_mmap(vb, pb);
+		break;
+	case VB2_MEMORY_USERPTR:
+		ret = __prepare_userptr(vb, pb);
+		break;
+	case VB2_MEMORY_DMABUF:
+		ret = __prepare_dmabuf(vb, pb);
+		break;
+	default:
+		WARN(1, "Invalid queue type\n");
+		ret = -EINVAL;
+	}
+
+	if (ret) {
+		dprintk(1, "buffer preparation failed: %d\n", ret);
+		vb->state = VB2_BUF_STATE_DEQUEUED;
+		return ret;
+	}
+
+	/* sync buffers */
+	for (plane = 0; plane < vb->num_planes; ++plane)
+		call_void_memop(vb, prepare, vb->planes[plane].mem_priv);
+
+	vb->state = VB2_BUF_STATE_PREPARED;
+
+	return 0;
+}
+
+int vb2_core_prepare_buf(struct vb2_queue *q, unsigned int index, void *pb)
+{
+	struct vb2_buffer *vb;
+	int ret;
+
+	vb = q->bufs[index];
+	if (vb->state != VB2_BUF_STATE_DEQUEUED) {
+		dprintk(1, "invalid buffer state %d\n",
+			vb->state);
+		return -EINVAL;
+	}
+
+	ret = __buf_prepare(vb, pb);
+	if (ret)
+		return ret;
+
+	/* Fill buffer information for the userspace */
+	call_void_bufop(q, fill_user_buffer, vb, pb);
+
+	dprintk(2, "prepare of buffer %d succeeded\n", vb->index);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(vb2_core_prepare_buf);
+
+/*
+ * vb2_start_streaming() - Attempt to start streaming.
+ * @q:		videobuf2 queue
+ *
+ * Attempt to start streaming. When this function is called there must be
+ * at least q->min_buffers_needed buffers queued up (i.e. the minimum
+ * number of buffers required for the DMA engine to function). If the
+ * @start_streaming op fails it is supposed to return all the driver-owned
+ * buffers back to vb2 in state QUEUED. Check if that happened and if
+ * not warn and reclaim them forcefully.
+ */
+static int vb2_start_streaming(struct vb2_queue *q)
+{
+	struct vb2_buffer *vb;
+	int ret;
+
+	/*
+	 * If any buffers were queued before streamon,
+	 * we can now pass them to driver for processing.
+	 */
+	list_for_each_entry(vb, &q->queued_list, queued_entry)
+		__enqueue_in_driver(vb);
+
+	/* Tell the driver to start streaming */
+	q->start_streaming_called = 1;
+	ret = call_qop(q, start_streaming, q,
+		       atomic_read(&q->owned_by_drv_count));
+	if (!ret)
+		return 0;
+
+	q->start_streaming_called = 0;
+
+	dprintk(1, "driver refused to start streaming\n");
+	/*
+	 * If you see this warning, then the driver isn't cleaning up properly
+	 * after a failed start_streaming(). See the start_streaming()
+	 * documentation in videobuf2-core.h for more information on how buffers
+	 * should be returned to vb2 in start_streaming().
+	 */
+	if (WARN_ON(atomic_read(&q->owned_by_drv_count))) {
+		unsigned i;
+
+		/*
+		 * Forcefully reclaim buffers if the driver did not
+		 * correctly return them to vb2.
+		 */
+		for (i = 0; i < q->num_buffers; ++i) {
+			vb = q->bufs[i];
+			if (vb->state == VB2_BUF_STATE_ACTIVE)
+				vb2_buffer_done(vb, VB2_BUF_STATE_QUEUED);
+		}
+		/* Must be zero now */
+		WARN_ON(atomic_read(&q->owned_by_drv_count));
+	}
+	/*
+	 * If done_list is not empty, then start_streaming() didn't call
+	 * vb2_buffer_done(vb, VB2_BUF_STATE_QUEUED) but STATE_ERROR or
+	 * STATE_DONE.
+	 */
+	WARN_ON(!list_empty(&q->done_list));
+	return ret;
+}
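The WARN_ON above encodes the start_streaming() contract spelled out in the comment: if the driver cannot start, it must give every buffer it was handed back to vb2 in the QUEUED state so they can be enqueued again later. A hedged sketch of a compliant start_streaming() (struct my_buf, the dev->queued_bufs list and my_hw_start() are hypothetical):

/* Illustrative driver sketch of start_streaming() failure handling. */
struct my_buf {
	struct vb2_buffer vb;		/* embedded vb2 buffer, must be first */
	struct list_head list;		/* entry on the driver's queued list */
};

static int my_start_streaming(struct vb2_queue *q, unsigned int count)
{
	struct my_dev *dev = vb2_get_drv_priv(q);
	struct my_buf *buf, *tmp;
	int ret;

	ret = my_hw_start(dev);		/* hypothetical hardware start */
	if (!ret)
		return 0;

	/* On failure, return all queued buffers to vb2 in QUEUED state. */
	list_for_each_entry_safe(buf, tmp, &dev->queued_bufs, list) {
		list_del(&buf->list);
		vb2_buffer_done(&buf->vb, VB2_BUF_STATE_QUEUED);
	}
	return ret;
}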
+
+int vb2_core_qbuf(struct vb2_queue *q, unsigned int index, void *pb)
+{
+	struct vb2_buffer *vb;
+	int ret;
+
+	if (q->error) {
+		dprintk(1, "fatal error occurred on queue\n");
+		return -EIO;
+	}
+
+	vb = q->bufs[index];
+
+	switch (vb->state) {
+	case VB2_BUF_STATE_DEQUEUED:
+		ret = __buf_prepare(vb, pb);
+		if (ret)
+			return ret;
+		break;
+	case VB2_BUF_STATE_PREPARED:
+		break;
+	case VB2_BUF_STATE_PREPARING:
+		dprintk(1, "buffer still being prepared\n");
+		return -EINVAL;
+	default:
+		dprintk(1, "invalid buffer state %d\n", vb->state);
+		return -EINVAL;
+	}
+
+	/*
+	 * Add to the queued buffers list; a buffer will stay on it until
+	 * dequeued in dqbuf.
+	 */
+	list_add_tail(&vb->queued_entry, &q->queued_list);
+	q->queued_count++;
+	q->waiting_for_buffers = false;
+	vb->state = VB2_BUF_STATE_QUEUED;
+
+	if (pb)
+		call_void_bufop(q, copy_timestamp, vb, pb);
+
+	trace_vb2_qbuf(q, vb);
+
+	/*
+	 * If already streaming, give the buffer to the driver for processing.
+	 * If not, the buffer will be given to the driver on the next streamon.
+	 */
+	if (q->start_streaming_called)
+		__enqueue_in_driver(vb);
+
+	/* Fill buffer information for the userspace */
+	if (pb)
+		call_void_bufop(q, fill_user_buffer, vb, pb);
+
+	/*
+	 * If streamon has been called, and we haven't yet called
+	 * start_streaming() since not enough buffers were queued, and
+	 * we now have reached the minimum number of queued buffers,
+	 * then we can finally call start_streaming().
+	 */
+	if (q->streaming && !q->start_streaming_called &&
+	    q->queued_count >= q->min_buffers_needed) {
+		ret = vb2_start_streaming(q);
+		if (ret)
+			return ret;
+	}
+
+	dprintk(2, "qbuf of buffer %d succeeded\n", vb->index);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(vb2_core_qbuf);
+
+/*
+ * __vb2_wait_for_done_vb() - wait for a buffer to become available
+ * for dequeuing
+ *
+ * Will sleep if necessary when nonblocking == false.
+ */
+static int __vb2_wait_for_done_vb(struct vb2_queue *q, int nonblocking)
+{
+	/*
+	 * All operations on done_list are performed under the done_lock
+	 * spinlock. However, buffers may be removed from it and returned
+	 * to userspace only while holding both the driver's lock and the
+	 * done_lock spinlock. Thus we can be sure that, as long as we hold
+	 * the driver's lock, the list will not become empty once we have
+	 * observed it to be non-empty.
+	 */
+
+	for (;;) {
+		int ret;
+
+		if (!q->streaming) {
+			dprintk(1, "streaming off, will not wait for buffers\n");
+			return -EINVAL;
+		}
+
+		if (q->error) {
+			dprintk(1, "Queue in error state, will not wait for buffers\n");
+			return -EIO;
+		}
+
+		if (q->last_buffer_dequeued) {
+			dprintk(3, "last buffer dequeued already, will not wait for buffers\n");
+			return -EPIPE;
+		}
+
+		if (!list_empty(&q->done_list)) {
+			/*
+			 * Found a buffer that we were waiting for.
+			 */
+			break;
+		}
+
+		if (nonblocking) {
+			dprintk(3, "nonblocking and no buffers to dequeue, will not wait\n");
+			return -EAGAIN;
+		}
+
+		/*
+		 * We are streaming and blocking, wait for another buffer to
+		 * become ready or for streamoff. Driver's lock is released to
+		 * allow streamoff or qbuf to be called while waiting.
+		 */
+		call_void_qop(q, wait_prepare, q);
+
+		/*
+		 * All locks have been released, it is safe to sleep now.
+		 */
+		dprintk(3, "will sleep waiting for buffers\n");
+		ret = wait_event_interruptible(q->done_wq,
+				!list_empty(&q->done_list) || !q->streaming ||
+				q->error);
+
+		/*
+		 * We need to re-evaluate both conditions after reacquiring
+		 * the locks, or return an error if one occurred.
+		 */
+		call_void_qop(q, wait_finish, q);
+		if (ret) {
+			dprintk(1, "sleep was interrupted\n");
+			return ret;
+		}
+	}
+	return 0;
+}
+
+/*
+ * __vb2_get_done_vb() - get a buffer ready for dequeuing
+ *
+ * Will sleep if necessary when nonblocking == false.
+ */
+static int __vb2_get_done_vb(struct vb2_queue *q, struct vb2_buffer **vb,
+			     void *pb, int nonblocking)
+{
+	unsigned long flags;
+	int ret = 0;
+
+	/*
+	 * Wait for at least one buffer to become available on the done_list.
+	 */
+	ret = __vb2_wait_for_done_vb(q, nonblocking);
+	if (ret)
+		return ret;
+
+	/*
+	 * Driver's lock has been held since we last verified that done_list
+	 * is not empty, so no need for another list_empty(done_list) check.
+	 */
+	spin_lock_irqsave(&q->done_lock, flags);
+	*vb = list_first_entry(&q->done_list, struct vb2_buffer, done_entry);
+	/*
+	 * Only remove the buffer from done_list if all planes can be
+	 * handled. Some cases such as V4L2 file I/O and DVB have pb
+	 * == NULL; skip the check then as there's nothing to verify.
+	 */
+	if (pb)
+		ret = call_bufop(q, verify_planes_array, *vb, pb);
+	if (!ret)
+		list_del(&(*vb)->done_entry);
+	spin_unlock_irqrestore(&q->done_lock, flags);
+
+	return ret;
+}
+
+int vb2_wait_for_all_buffers(struct vb2_queue *q)
+{
+	if (!q->streaming) {
+		dprintk(1, "streaming off, will not wait for buffers\n");
+		return -EINVAL;
+	}
+
+	if (q->start_streaming_called)
+		wait_event(q->done_wq, !atomic_read(&q->owned_by_drv_count));
+	return 0;
+}
+EXPORT_SYMBOL_GPL(vb2_wait_for_all_buffers);
+
+/*
+ * __vb2_dqbuf() - bring back the buffer to the DEQUEUED state
+ */
+static void __vb2_dqbuf(struct vb2_buffer *vb)
+{
+	struct vb2_queue *q = vb->vb2_queue;
+	unsigned int i;
+
+	/* nothing to do if the buffer is already dequeued */
+	if (vb->state == VB2_BUF_STATE_DEQUEUED)
+		return;
+
+	vb->state = VB2_BUF_STATE_DEQUEUED;
+
+	/* unmap DMABUF buffer */
+	if (q->memory == VB2_MEMORY_DMABUF)
+		for (i = 0; i < vb->num_planes; ++i) {
+			if (!vb->planes[i].dbuf_mapped)
+				continue;
+			call_void_memop(vb, unmap_dmabuf, vb->planes[i].mem_priv);
+			vb->planes[i].dbuf_mapped = 0;
+		}
+}
+
+int vb2_core_dqbuf(struct vb2_queue *q, unsigned int *pindex, void *pb,
+		   bool nonblocking)
+{
+	struct vb2_buffer *vb = NULL;
+	int ret;
+
+	ret = __vb2_get_done_vb(q, &vb, pb, nonblocking);
+	if (ret < 0)
+		return ret;
+
+	switch (vb->state) {
+	case VB2_BUF_STATE_DONE:
+		dprintk(3, "returning done buffer\n");
+		break;
+	case VB2_BUF_STATE_ERROR:
+		dprintk(3, "returning done buffer with errors\n");
+		break;
+	default:
+		dprintk(1, "invalid buffer state\n");
+		return -EINVAL;
+	}
+
+	call_void_vb_qop(vb, buf_finish, vb);
+
+	if (pindex)
+		*pindex = vb->index;
+
+	/* Fill buffer information for the userspace */
+	if (pb)
+		call_void_bufop(q, fill_user_buffer, vb, pb);
+
+	/* Remove from videobuf queue */
+	list_del(&vb->queued_entry);
+	q->queued_count--;
+
+	trace_vb2_dqbuf(q, vb);
+
+	/* go back to dequeued state */
+	__vb2_dqbuf(vb);
+
+	dprintk(2, "dqbuf of buffer %d, with state %d\n",
+			vb->index, vb->state);
+
+	return 0;
+
+}
+EXPORT_SYMBOL_GPL(vb2_core_dqbuf);
+
+/*
+ * __vb2_queue_cancel() - cancel and stop (pause) streaming
+ *
+ * Removes all queued buffers from the driver's queue and all buffers queued by
+ * userspace from videobuf's queue. Returns the queue to the state after reqbufs.
+ */
+static void __vb2_queue_cancel(struct vb2_queue *q)
+{
+	unsigned int i;
+
+	/*
+	 * Tell driver to stop all transactions and release all queued
+	 * buffers.
+	 */
+	if (q->start_streaming_called)
+		call_void_qop(q, stop_streaming, q);
+
+	/*
+	 * If you see this warning, then the driver isn't cleaning up properly
+	 * in stop_streaming(). See the stop_streaming() documentation in
+	 * videobuf2-core.h for more information on how buffers should be returned
+	 * to vb2 in stop_streaming().
+	 */
+	if (WARN_ON(atomic_read(&q->owned_by_drv_count))) {
+		for (i = 0; i < q->num_buffers; ++i)
+			if (q->bufs[i]->state == VB2_BUF_STATE_ACTIVE) {
+				pr_warn("driver bug: stop_streaming operation is leaving buf %p in active state\n",
+					q->bufs[i]);
+				vb2_buffer_done(q->bufs[i], VB2_BUF_STATE_ERROR);
+			}
+		/* Must be zero now */
+		WARN_ON(atomic_read(&q->owned_by_drv_count));
+	}
+
+	q->streaming = 0;
+	q->start_streaming_called = 0;
+	q->queued_count = 0;
+	q->error = 0;
+
+	/*
+	 * Remove all buffers from videobuf's list...
+	 */
+	INIT_LIST_HEAD(&q->queued_list);
+	/*
+	 * ...and done list; userspace will not receive any buffers it
+	 * has not already dequeued before initiating cancel.
+	 */
+	INIT_LIST_HEAD(&q->done_list);
+	atomic_set(&q->owned_by_drv_count, 0);
+	wake_up_all(&q->done_wq);
+
+	/*
+	 * Reinitialize all buffers for next use.
+	 * Make sure to call buf_finish for any queued buffers. Normally
+	 * that's done in dqbuf, but that's not going to happen when we
+	 * cancel the whole queue. Note: this code belongs here, not in
+	 * __vb2_dqbuf() since in vb2_core_dqbuf() there is a critical
+	 * call to __fill_user_buffer() after buf_finish(). That order can't
+	 * be changed, so we can't move the buf_finish() to __vb2_dqbuf().
+	 */
+	for (i = 0; i < q->num_buffers; ++i) {
+		struct vb2_buffer *vb = q->bufs[i];
+
+		if (vb->state == VB2_BUF_STATE_PREPARED ||
+		    vb->state == VB2_BUF_STATE_QUEUED) {
+			unsigned int plane;
+
+			for (plane = 0; plane < vb->num_planes; ++plane)
+				call_void_memop(vb, finish,
+						vb->planes[plane].mem_priv);
+		}
+
+		if (vb->state != VB2_BUF_STATE_DEQUEUED) {
+			vb->state = VB2_BUF_STATE_PREPARED;
+			call_void_vb_qop(vb, buf_finish, vb);
+		}
+		__vb2_dqbuf(vb);
+	}
+}
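The same ownership rule applies when streaming stops: before stop_streaming() returns, the driver must hand back every buffer it still owns, using VB2_BUF_STATE_ERROR since the data is discarded; otherwise the WARN_ON in __vb2_queue_cancel() fires and vb2 reclaims the buffers forcefully. A hedged sketch (dev->queued_bufs and my_hw_stop() are hypothetical, as in the start_streaming() sketch above):

/* Illustrative driver sketch of stop_streaming(). */
static void my_stop_streaming(struct vb2_queue *q)
{
	struct my_dev *dev = vb2_get_drv_priv(q);
	struct my_buf *buf, *tmp;

	my_hw_stop(dev);		/* hypothetical: halt DMA first */

	list_for_each_entry_safe(buf, tmp, &dev->queued_bufs, list) {
		list_del(&buf->list);
		vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
	}
}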
+
+int vb2_core_streamon(struct vb2_queue *q, unsigned int type)
+{
+	int ret;
+
+	if (type != q->type) {
+		dprintk(1, "invalid stream type\n");
+		return -EINVAL;
+	}
+
+	if (q->streaming) {
+		dprintk(3, "already streaming\n");
+		return 0;
+	}
+
+	if (!q->num_buffers) {
+		dprintk(1, "no buffers have been allocated\n");
+		return -EINVAL;
+	}
+
+	if (q->num_buffers < q->min_buffers_needed) {
+		dprintk(1, "need at least %u allocated buffers\n",
+				q->min_buffers_needed);
+		return -EINVAL;
+	}
+
+	/*
+	 * Tell driver to start streaming provided sufficient buffers
+	 * are available.
+	 */
+	if (q->queued_count >= q->min_buffers_needed) {
+		ret = v4l_vb2q_enable_media_source(q);
+		if (ret)
+			return ret;
+		ret = vb2_start_streaming(q);
+		if (ret)
+			return ret;
+	}
+
+	q->streaming = 1;
+
+	dprintk(3, "successful\n");
+	return 0;
+}
+EXPORT_SYMBOL_GPL(vb2_core_streamon);
+
+void vb2_queue_error(struct vb2_queue *q)
+{
+	q->error = 1;
+
+	wake_up_all(&q->done_wq);
+}
+EXPORT_SYMBOL_GPL(vb2_queue_error);
+
+int vb2_core_streamoff(struct vb2_queue *q, unsigned int type)
+{
+	if (type != q->type) {
+		dprintk(1, "invalid stream type\n");
+		return -EINVAL;
+	}
+
+	/*
+	 * Cancel will pause streaming and remove all buffers from the driver
+	 * and videobuf, effectively returning control over them to userspace.
+	 *
+	 * Note that we do this even if q->streaming == 0: if you prepare or
+	 * queue buffers, and then call streamoff without ever having called
+	 * streamon, you would still expect those buffers to be returned to
+	 * their normal dequeued state.
+	 */
+	__vb2_queue_cancel(q);
+	q->waiting_for_buffers = !q->is_output;
+	q->last_buffer_dequeued = false;
+
+	dprintk(3, "successful\n");
+	return 0;
+}
+EXPORT_SYMBOL_GPL(vb2_core_streamoff);
+
+/*
+ * __find_plane_by_offset() - find plane associated with the given offset off
+ */
+static int __find_plane_by_offset(struct vb2_queue *q, unsigned long off,
+			unsigned int *_buffer, unsigned int *_plane)
+{
+	struct vb2_buffer *vb;
+	unsigned int buffer, plane;
+
+	/*
+	 * Go over all buffers and their planes, comparing the given offset
+	 * with an offset assigned to each plane. If a match is found,
+	 * return its buffer and plane numbers.
+	 */
+	for (buffer = 0; buffer < q->num_buffers; ++buffer) {
+		vb = q->bufs[buffer];
+
+		for (plane = 0; plane < vb->num_planes; ++plane) {
+			if (vb->planes[plane].m.offset == off) {
+				*_buffer = buffer;
+				*_plane = plane;
+				return 0;
+			}
+		}
+	}
+
+	return -EINVAL;
+}
+
+int vb2_core_expbuf(struct vb2_queue *q, int *fd, unsigned int type,
+		unsigned int index, unsigned int plane, unsigned int flags)
+{
+	struct vb2_buffer *vb = NULL;
+	struct vb2_plane *vb_plane;
+	int ret;
+	struct dma_buf *dbuf;
+
+	if (q->memory != VB2_MEMORY_MMAP) {
+		dprintk(1, "queue is not currently set up for mmap\n");
+		return -EINVAL;
+	}
+
+	if (!q->mem_ops->get_dmabuf) {
+		dprintk(1, "queue does not support DMA buffer exporting\n");
+		return -EINVAL;
+	}
+
+	if (flags & ~(O_CLOEXEC | O_ACCMODE)) {
+		dprintk(1, "queue supports only O_CLOEXEC and access mode flags\n");
+		return -EINVAL;
+	}
+
+	if (type != q->type) {
+		dprintk(1, "invalid buffer type\n");
+		return -EINVAL;
+	}
+
+	if (index >= q->num_buffers) {
+		dprintk(1, "buffer index out of range\n");
+		return -EINVAL;
+	}
+
+	vb = q->bufs[index];
+
+	if (plane >= vb->num_planes) {
+		dprintk(1, "buffer plane out of range\n");
+		return -EINVAL;
+	}
+
+	if (vb2_fileio_is_active(q)) {
+		dprintk(1, "expbuf: file io in progress\n");
+		return -EBUSY;
+	}
+
+	vb_plane = &vb->planes[plane];
+
+	dbuf = call_ptr_memop(vb, get_dmabuf, vb_plane->mem_priv,
+				flags & O_ACCMODE);
+	if (IS_ERR_OR_NULL(dbuf)) {
+		dprintk(1, "failed to export buffer %d, plane %d\n",
+			index, plane);
+		return -EINVAL;
+	}
+
+	ret = dma_buf_fd(dbuf, flags & ~O_ACCMODE);
+	if (ret < 0) {
+		dprintk(3, "buffer %d, plane %d failed to export (%d)\n",
+			index, plane, ret);
+		dma_buf_put(dbuf);
+		return ret;
+	}
+
+	dprintk(3, "buffer %d, plane %d exported as %d descriptor\n",
+		index, plane, ret);
+	*fd = ret;
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(vb2_core_expbuf);
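From userspace this path is reached through VIDIOC_EXPBUF, which turns one plane of an MMAP buffer into a dma-buf file descriptor that can be imported elsewhere. A minimal userspace sketch (illustrative only; a single-plane capture queue is assumed):

/* Illustrative userspace sketch: export a plane as a dma-buf fd. */
#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

static int export_plane(int fd, unsigned int index, unsigned int plane)
{
	struct v4l2_exportbuffer exp;

	memset(&exp, 0, sizeof(exp));
	exp.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	exp.index = index;
	exp.plane = plane;
	exp.flags = O_CLOEXEC | O_RDWR;	/* only O_CLOEXEC/access mode allowed */

	if (ioctl(fd, VIDIOC_EXPBUF, &exp) < 0)
		return -1;
	return exp.fd;			/* dma-buf fd; close() when done */
}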
+
+int vb2_mmap(struct vb2_queue *q, struct vm_area_struct *vma)
+{
+	unsigned long off = vma->vm_pgoff << PAGE_SHIFT;
+	struct vb2_buffer *vb;
+	unsigned int buffer = 0, plane = 0;
+	int ret;
+	unsigned long length;
+
+	if (q->memory != VB2_MEMORY_MMAP) {
+		dprintk(1, "queue is not currently set up for mmap\n");
+		return -EINVAL;
+	}
+
+	/*
+	 * Check memory area access mode.
+	 */
+	if (!(vma->vm_flags & VM_SHARED)) {
+		dprintk(1, "invalid vma flags, VM_SHARED needed\n");
+		return -EINVAL;
+	}
+	if (q->is_output) {
+		if (!(vma->vm_flags & VM_WRITE)) {
+			dprintk(1, "invalid vma flags, VM_WRITE needed\n");
+			return -EINVAL;
+		}
+	} else {
+		if (!(vma->vm_flags & VM_READ)) {
+			dprintk(1, "invalid vma flags, VM_READ needed\n");
+			return -EINVAL;
+		}
+	}
+	if (vb2_fileio_is_active(q)) {
+		dprintk(1, "mmap: file io in progress\n");
+		return -EBUSY;
+	}
+
+	/*
+	 * Find the plane corresponding to the offset passed by userspace.
+	 */
+	ret = __find_plane_by_offset(q, off, &buffer, &plane);
+	if (ret)
+		return ret;
+
+	vb = q->bufs[buffer];
+
+	/*
+	 * MMAP requires page-aligned buffers.
+	 * The buffer length was page-aligned at __vb2_buf_mem_alloc(),
+	 * so we need to do the same here.
+	 */
+	length = PAGE_ALIGN(vb->planes[plane].length);
+	if (length < (vma->vm_end - vma->vm_start)) {
+		dprintk(1,
+			"MMAP invalid, as it would overflow buffer length\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&q->mmap_lock);
+	ret = call_memop(vb, mmap, vb->planes[plane].mem_priv, vma);
+	mutex_unlock(&q->mmap_lock);
+	if (ret)
+		return ret;
+
+	dprintk(3, "buffer %d, plane %d successfully mapped\n", buffer, plane);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(vb2_mmap);
+
+#ifndef CONFIG_MMU
+unsigned long vb2_get_unmapped_area(struct vb2_queue *q,
+				    unsigned long addr,
+				    unsigned long len,
+				    unsigned long pgoff,
+				    unsigned long flags)
+{
+	unsigned long off = pgoff << PAGE_SHIFT;
+	struct vb2_buffer *vb;
+	unsigned int buffer, plane;
+	void *vaddr;
+	int ret;
+
+	if (q->memory != VB2_MEMORY_MMAP) {
+		dprintk(1, "queue is not currently set up for mmap\n");
+		return -EINVAL;
+	}
+
+	/*
+	 * Find the plane corresponding to the offset passed by userspace.
+	 */
+	ret = __find_plane_by_offset(q, off, &buffer, &plane);
+	if (ret)
+		return ret;
+
+	vb = q->bufs[buffer];
+
+	vaddr = vb2_plane_vaddr(vb, plane);
+	return vaddr ? (unsigned long)vaddr : -EINVAL;
+}
+EXPORT_SYMBOL_GPL(vb2_get_unmapped_area);
+#endif
+
+int vb2_core_queue_init(struct vb2_queue *q)
+{
+	/*
+	 * Sanity check
+	 */
+	if (WARN_ON(!q)			  ||
+	    WARN_ON(!q->ops)		  ||
+	    WARN_ON(!q->mem_ops)	  ||
+	    WARN_ON(!q->type)		  ||
+	    WARN_ON(!q->io_modes)	  ||
+	    WARN_ON(!q->ops->queue_setup) ||
+	    WARN_ON(!q->ops->buf_queue))
+		return -EINVAL;
+
+	INIT_LIST_HEAD(&q->queued_list);
+	INIT_LIST_HEAD(&q->done_list);
+	spin_lock_init(&q->done_lock);
+	mutex_init(&q->mmap_lock);
+	init_waitqueue_head(&q->done_wq);
+
+	q->memory = VB2_MEMORY_UNKNOWN;
+
+	if (q->buf_struct_size == 0)
+		q->buf_struct_size = sizeof(struct vb2_buffer);
+
+	if (q->bidirectional)
+		q->dma_dir = DMA_BIDIRECTIONAL;
+	else
+		q->dma_dir = q->is_output ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(vb2_core_queue_init);
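+
+/*
+ * Illustrative sketch (editorial, not part of the original file) of the
+ * minimum a caller must fill in for the sanity checks above to pass; the
+ * "my_*" names are hypothetical and vb2_vmalloc_memops is just one possible
+ * choice of memory ops.
+ *
+ *	static const struct vb2_ops my_qops = {
+ *		.queue_setup	= my_queue_setup,
+ *		.buf_queue	= my_buf_queue,
+ *	};
+ *
+ *	q->type		= V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ *	q->io_modes	= VB2_MMAP | VB2_USERPTR;
+ *	q->ops		= &my_qops;
+ *	q->mem_ops	= &vb2_vmalloc_memops;
+ *	ret = vb2_core_queue_init(q);
+ */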
+
+static int __vb2_init_fileio(struct vb2_queue *q, int read);
+static int __vb2_cleanup_fileio(struct vb2_queue *q);
+void vb2_core_queue_release(struct vb2_queue *q)
+{
+	__vb2_cleanup_fileio(q);
+	__vb2_queue_cancel(q);
+	mutex_lock(&q->mmap_lock);
+	__vb2_queue_free(q, q->num_buffers);
+	mutex_unlock(&q->mmap_lock);
+}
+EXPORT_SYMBOL_GPL(vb2_core_queue_release);
+
+__poll_t vb2_core_poll(struct vb2_queue *q, struct file *file,
+		poll_table *wait)
+{
+	__poll_t req_events = poll_requested_events(wait);
+	struct vb2_buffer *vb = NULL;
+	unsigned long flags;
+
+	if (!q->is_output && !(req_events & (EPOLLIN | EPOLLRDNORM)))
+		return 0;
+	if (q->is_output && !(req_events & (EPOLLOUT | EPOLLWRNORM)))
+		return 0;
+
+	/*
+	 * Start file I/O emulator only if streaming API has not been used yet.
+	 */
+	if (q->num_buffers == 0 && !vb2_fileio_is_active(q)) {
+		if (!q->is_output && (q->io_modes & VB2_READ) &&
+				(req_events & (EPOLLIN | EPOLLRDNORM))) {
+			if (__vb2_init_fileio(q, 1))
+				return EPOLLERR;
+		}
+		if (q->is_output && (q->io_modes & VB2_WRITE) &&
+				(req_events & (EPOLLOUT | EPOLLWRNORM))) {
+			if (__vb2_init_fileio(q, 0))
+				return EPOLLERR;
+			/*
+			 * Write to OUTPUT queue can be done immediately.
+			 */
+			return EPOLLOUT | EPOLLWRNORM;
+		}
+	}
+
+	/*
+	 * There is nothing to wait for if the queue isn't streaming, or if the
+	 * error flag is set.
+	 */
+	if (!vb2_is_streaming(q) || q->error)
+		return EPOLLERR;
+
+	/*
+	 * If this quirk is set and QBUF hasn't been called yet then
+	 * return EPOLLERR as well. This only affects capture queues, output
+	 * queues will always initialize waiting_for_buffers to false.
+	 * This quirk is set by V4L2 for backwards compatibility reasons.
+	 */
+	if (q->quirk_poll_must_check_waiting_for_buffers &&
+	    q->waiting_for_buffers && (req_events & (EPOLLIN | EPOLLRDNORM)))
+		return EPOLLERR;
+
+	/*
+	 * For output streams you can call write() as long as there are fewer
+	 * buffers queued than there are buffers available.
+	 */
+	if (q->is_output && q->fileio && q->queued_count < q->num_buffers)
+		return EPOLLOUT | EPOLLWRNORM;
+
+	if (list_empty(&q->done_list)) {
+		/*
+		 * If the last buffer was dequeued from a capture queue,
+		 * return immediately. DQBUF will return -EPIPE.
+		 */
+		if (q->last_buffer_dequeued)
+			return EPOLLIN | EPOLLRDNORM;
+
+		poll_wait(file, &q->done_wq, wait);
+	}
+
+	/*
+	 * Take first buffer available for dequeuing.
+	 */
+	spin_lock_irqsave(&q->done_lock, flags);
+	if (!list_empty(&q->done_list))
+		vb = list_first_entry(&q->done_list, struct vb2_buffer,
+					done_entry);
+	spin_unlock_irqrestore(&q->done_lock, flags);
+
+	if (vb && (vb->state == VB2_BUF_STATE_DONE
+			|| vb->state == VB2_BUF_STATE_ERROR)) {
+		return (q->is_output) ?
+				EPOLLOUT | EPOLLWRNORM :
+				EPOLLIN | EPOLLRDNORM;
+	}
+	return 0;
+}
+EXPORT_SYMBOL_GPL(vb2_core_poll);
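+
+/*
+ * Illustrative sketch (editorial, not part of the original file): a wrapper
+ * layer typically forwards its poll file operation to vb2_core_poll(); the
+ * "my_dev" structure and "my_poll" name are hypothetical.
+ *
+ *	static __poll_t my_poll(struct file *file, poll_table *wait)
+ *	{
+ *		struct my_dev *dev = video_drvdata(file);
+ *
+ *		return vb2_core_poll(&dev->queue, file, wait);
+ *	}
+ */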
+
+/*
+ * struct vb2_fileio_buf - buffer context used by file io emulator
+ *
+ * vb2 provides a compatibility layer and emulator of file io (read and
+ * write) calls on top of the streaming API. This structure is used for
+ * tracking context related to the buffers.
+ */
+struct vb2_fileio_buf {
+	void *vaddr;
+	unsigned int size;
+	unsigned int pos;
+	unsigned int queued:1;
+};
+
+/*
+ * struct vb2_fileio_data - queue context used by file io emulator
+ *
+ * @cur_index:	the index of the buffer currently being read from or
+ *		written to. If equal to q->num_buffers then a new buffer
+ *		must be dequeued.
+ * @initial_index: in the read() case all buffers are queued up immediately
+ *		in __vb2_init_fileio() and __vb2_perform_fileio() just cycles
+ *		buffers. However, in the write() case no buffers are initially
+ *		queued, instead whenever a buffer is full it is queued up by
+ *		__vb2_perform_fileio(). Only once all available buffers have
+ *		been queued up will __vb2_perform_fileio() start to dequeue
+ *		buffers. This means that initially __vb2_perform_fileio()
+ *		needs to know what buffer index to use when it is queuing up
+ *		the buffers for the first time. That initial index is stored
+ *		in this field. Once it is equal to q->num_buffers all
+ *		available buffers have been queued and __vb2_perform_fileio()
+ *		should start the normal dequeue/queue cycle.
+ *
+ * vb2 provides a compatibility layer and emulator of file io (read and
+ * write) calls on top of the streaming API. For proper operation it requires
+ * this structure to save the driver state between each call of the read
+ * or write function.
+ */
+struct vb2_fileio_data {
+	unsigned int count;
+	unsigned int type;
+	unsigned int memory;
+	struct vb2_fileio_buf bufs[VB2_MAX_FRAME];
+	unsigned int cur_index;
+	unsigned int initial_index;
+	unsigned int q_count;
+	unsigned int dq_count;
+	unsigned read_once:1;
+	unsigned write_immediately:1;
+};
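+
+/*
+ * Illustrative walk-through (editorial, not from the original source) of the
+ * write() cycle described above, assuming q->num_buffers == 3: buffers 0, 1
+ * and 2 are filled and queued in turn while initial_index advances
+ * 0 -> 1 -> 2 -> 3; once initial_index == q->num_buffers, every further
+ * write() starts by dequeuing a buffer (cur_index == q->num_buffers),
+ * refills it and queues it again.
+ */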
+
+/*
+ * __vb2_init_fileio() - initialize file io emulator
+ * @q:		videobuf2 queue
+ * @read:	mode selector (1 means read, 0 means write)
+ */
+static int __vb2_init_fileio(struct vb2_queue *q, int read)
+{
+	struct vb2_fileio_data *fileio;
+	int i, ret;
+	unsigned int count = 0;
+
+	/*
+	 * Sanity check
+	 */
+	if (WARN_ON((read && !(q->io_modes & VB2_READ)) ||
+		    (!read && !(q->io_modes & VB2_WRITE))))
+		return -EINVAL;
+
+	/*
+	 * Check if device supports mapping buffers to kernel virtual space.
+	 */
+	if (!q->mem_ops->vaddr)
+		return -EBUSY;
+
+	/*
+	 * Check that the streaming API has not already been activated.
+	 */
+	if (q->streaming || q->num_buffers > 0)
+		return -EBUSY;
+
+	/*
+	 * Start with count 1, driver can increase it in queue_setup()
+	 */
+	count = 1;
+
+	dprintk(3, "setting up file io: mode %s, count %d, read_once %d, write_immediately %d\n",
+		(read) ? "read" : "write", count, q->fileio_read_once,
+		q->fileio_write_immediately);
+
+	fileio = kzalloc(sizeof(*fileio), GFP_KERNEL);
+	if (fileio == NULL)
+		return -ENOMEM;
+
+	fileio->read_once = q->fileio_read_once;
+	fileio->write_immediately = q->fileio_write_immediately;
+
+	/*
+	 * Request buffers and use MMAP type to force driver
+	 * to allocate buffers by itself.
+	 */
+	fileio->count = count;
+	fileio->memory = VB2_MEMORY_MMAP;
+	fileio->type = q->type;
+	q->fileio = fileio;
+	ret = vb2_core_reqbufs(q, fileio->memory, &fileio->count);
+	if (ret)
+		goto err_kfree;
+
+	/*
+	 * Check if plane_count is correct
+	 * (multiplane buffers are not supported).
+	 */
+	if (q->bufs[0]->num_planes != 1) {
+		ret = -EBUSY;
+		goto err_reqbufs;
+	}
+
+	/*
+	 * Get kernel address of each buffer.
+	 */
+	for (i = 0; i < q->num_buffers; i++) {
+		fileio->bufs[i].vaddr = vb2_plane_vaddr(q->bufs[i], 0);
+		if (fileio->bufs[i].vaddr == NULL) {
+			ret = -EINVAL;
+			goto err_reqbufs;
+		}
+		fileio->bufs[i].size = vb2_plane_size(q->bufs[i], 0);
+	}
+
+	/*
+	 * Read mode requires pre-queuing of all buffers.
+	 */
+	if (read) {
+		/*
+		 * Queue all buffers.
+		 */
+		for (i = 0; i < q->num_buffers; i++) {
+			ret = vb2_core_qbuf(q, i, NULL);
+			if (ret)
+				goto err_reqbufs;
+			fileio->bufs[i].queued = 1;
+		}
+		/*
+		 * All buffers have been queued, so mark that by setting
+		 * initial_index to q->num_buffers
+		 */
+		fileio->initial_index = q->num_buffers;
+		fileio->cur_index = q->num_buffers;
+	}
+
+	/*
+	 * Start streaming.
+	 */
+	ret = vb2_core_streamon(q, q->type);
+	if (ret)
+		goto err_reqbufs;
+
+	return ret;
+
+err_reqbufs:
+	fileio->count = 0;
+	vb2_core_reqbufs(q, fileio->memory, &fileio->count);
+
+err_kfree:
+	q->fileio = NULL;
+	kfree(fileio);
+	return ret;
+}
+
+/*
+ * __vb2_cleanup_fileio() - free resources used by the file io emulator
+ * @q:		videobuf2 queue
+ */
+static int __vb2_cleanup_fileio(struct vb2_queue *q)
+{
+	struct vb2_fileio_data *fileio = q->fileio;
+
+	if (fileio) {
+		vb2_core_streamoff(q, q->type);
+		q->fileio = NULL;
+		fileio->count = 0;
+		vb2_core_reqbufs(q, fileio->memory, &fileio->count);
+		kfree(fileio);
+		dprintk(3, "file io emulator closed\n");
+	}
+	return 0;
+}
+
+/*
+ * __vb2_perform_fileio() - perform a single file io (read or write) operation
+ * @q:		videobuf2 queue
+ * @data:	pointer to the target userspace buffer
+ * @count:	number of bytes to read or write
+ * @ppos:	file handle position tracking pointer
+ * @nonblock:	mode selector (1 means nonblocking, 0 means blocking)
+ * @read:	access mode selector (1 means read, 0 means write)
+ */
+static size_t __vb2_perform_fileio(struct vb2_queue *q, char __user *data, size_t count,
+		loff_t *ppos, int nonblock, int read)
+{
+	struct vb2_fileio_data *fileio;
+	struct vb2_fileio_buf *buf;
+	bool is_multiplanar = q->is_multiplanar;
+	/*
+	 * When using write() to write data to an output video node the vb2 core
+	 * should copy timestamps if V4L2_BUF_FLAG_TIMESTAMP_COPY is set. Nobody
+	 * else is able to provide this information with the write() operation.
+	 */
+	bool copy_timestamp = !read && q->copy_timestamp;
+	unsigned index;
+	int ret;
+
+	dprintk(3, "mode %s, offset %ld, count %zd, %sblocking\n",
+		read ? "read" : "write", (long)*ppos, count,
+		nonblock ? "non" : "");
+
+	if (!data)
+		return -EINVAL;
+
+	/*
+	 * Initialize emulator on first call.
+	 */
+	if (!vb2_fileio_is_active(q)) {
+		ret = __vb2_init_fileio(q, read);
+		dprintk(3, "vb2_init_fileio result: %d\n", ret);
+		if (ret)
+			return ret;
+	}
+	fileio = q->fileio;
+
+	/*
+	 * Check if we need to dequeue the buffer.
+	 */
+	index = fileio->cur_index;
+	if (index >= q->num_buffers) {
+		struct vb2_buffer *b;
+
+		/*
+		 * Call vb2_dqbuf to get buffer back.
+		 */
+		ret = vb2_core_dqbuf(q, &index, NULL, nonblock);
+		dprintk(5, "vb2_dqbuf result: %d\n", ret);
+		if (ret)
+			return ret;
+		fileio->dq_count += 1;
+
+		fileio->cur_index = index;
+		buf = &fileio->bufs[index];
+		b = q->bufs[index];
+
+		/*
+		 * Get number of bytes filled by the driver
+		 */
+		buf->pos = 0;
+		buf->queued = 0;
+		buf->size = read ? vb2_get_plane_payload(q->bufs[index], 0)
+				 : vb2_plane_size(q->bufs[index], 0);
+		/* Compensate for data_offset on read in the multiplanar case. */
+		if (is_multiplanar && read &&
+				b->planes[0].data_offset < buf->size) {
+			buf->pos = b->planes[0].data_offset;
+			buf->size -= buf->pos;
+		}
+	} else {
+		buf = &fileio->bufs[index];
+	}
+
+	/*
+	 * Clamp count so it does not run past the end of the buffer.
+	 */
+	if (buf->pos + count > buf->size) {
+		count = buf->size - buf->pos;
+		dprintk(5, "reducing read count: %zd\n", count);
+	}
+
+	/*
+	 * Transfer data to userspace.
+	 */
+	dprintk(3, "copying %zd bytes - buffer %d, offset %u\n",
+		count, index, buf->pos);
+	if (read)
+		ret = copy_to_user(data, buf->vaddr + buf->pos, count);
+	else
+		ret = copy_from_user(buf->vaddr + buf->pos, data, count);
+	if (ret) {
+		dprintk(3, "error copying data\n");
+		return -EFAULT;
+	}
+
+	/*
+	 * Update counters.
+	 */
+	buf->pos += count;
+	*ppos += count;
+
+	/*
+	 * Queue next buffer if required.
+	 */
+	if (buf->pos == buf->size || (!read && fileio->write_immediately)) {
+		struct vb2_buffer *b = q->bufs[index];
+
+		/*
+		 * Check if this is the last buffer to read.
+		 */
+		if (read && fileio->read_once && fileio->dq_count == 1) {
+			dprintk(3, "read limit reached\n");
+			return __vb2_cleanup_fileio(q);
+		}
+
+		/*
+		 * Call vb2_qbuf and give buffer to the driver.
+		 */
+		b->planes[0].bytesused = buf->pos;
+
+		if (copy_timestamp)
+			b->timestamp = ktime_get_ns();
+		ret = vb2_core_qbuf(q, index, NULL);
+		dprintk(5, "vb2_qbuf result: %d\n", ret);
+		if (ret)
+			return ret;
+
+		/*
+		 * Buffer has been queued, update the status
+		 */
+		buf->pos = 0;
+		buf->queued = 1;
+		buf->size = vb2_plane_size(q->bufs[index], 0);
+		fileio->q_count += 1;
+		/*
+		 * If we are queuing up buffers for the first time, then
+		 * increase initial_index by one.
+		 */
+		if (fileio->initial_index < q->num_buffers)
+			fileio->initial_index++;
+		/*
+		 * The next buffer to use is either a buffer that's going to be
+		 * queued for the first time (initial_index < q->num_buffers),
+		 * or initial_index equals q->num_buffers, meaning that next
+		 * time we will need to dequeue a buffer since we've now queued
+		 * up all the 'first time' buffers.
+		 */
+		fileio->cur_index = fileio->initial_index;
+	}
+
+	/*
+	 * Return proper number of bytes processed.
+	 */
+	if (ret == 0)
+		ret = count;
+	return ret;
+}
+
+size_t vb2_read(struct vb2_queue *q, char __user *data, size_t count,
+		loff_t *ppos, int nonblocking)
+{
+	return __vb2_perform_fileio(q, data, count, ppos, nonblocking, 1);
+}
+EXPORT_SYMBOL_GPL(vb2_read);
+
+size_t vb2_write(struct vb2_queue *q, const char __user *data, size_t count,
+		loff_t *ppos, int nonblocking)
+{
+	return __vb2_perform_fileio(q, (char __user *) data, count,
+							ppos, nonblocking, 0);
+}
+EXPORT_SYMBOL_GPL(vb2_write);
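+
+/*
+ * Illustrative sketch (editorial, not part of the original file): a driver
+ * read() file operation typically forwards to vb2_read() like this; the
+ * "my_dev" structure and "my_read" name are hypothetical.
+ *
+ *	static ssize_t my_read(struct file *file, char __user *buf,
+ *			       size_t count, loff_t *ppos)
+ *	{
+ *		struct my_dev *dev = video_drvdata(file);
+ *
+ *		return vb2_read(&dev->queue, buf, count, ppos,
+ *				file->f_flags & O_NONBLOCK);
+ *	}
+ */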
+
+struct vb2_threadio_data {
+	struct task_struct *thread;
+	vb2_thread_fnc fnc;
+	void *priv;
+	bool stop;
+};
+
+static int vb2_thread(void *data)
+{
+	struct vb2_queue *q = data;
+	struct vb2_threadio_data *threadio = q->threadio;
+	bool copy_timestamp = false;
+	unsigned prequeue = 0;
+	unsigned index = 0;
+	int ret = 0;
+
+	if (q->is_output) {
+		prequeue = q->num_buffers;
+		copy_timestamp = q->copy_timestamp;
+	}
+
+	set_freezable();
+
+	for (;;) {
+		struct vb2_buffer *vb;
+
+		/*
+		 * Call vb2_dqbuf to get buffer back.
+		 */
+		if (prequeue) {
+			vb = q->bufs[index++];
+			prequeue--;
+		} else {
+			call_void_qop(q, wait_finish, q);
+			if (!threadio->stop)
+				ret = vb2_core_dqbuf(q, &index, NULL, 0);
+			call_void_qop(q, wait_prepare, q);
+			dprintk(5, "file io: vb2_dqbuf result: %d\n", ret);
+			if (!ret)
+				vb = q->bufs[index];
+		}
+		if (ret || threadio->stop)
+			break;
+		try_to_freeze();
+
+		if (vb->state != VB2_BUF_STATE_ERROR)
+			if (threadio->fnc(vb, threadio->priv))
+				break;
+		call_void_qop(q, wait_finish, q);
+		if (copy_timestamp)
+			vb->timestamp = ktime_get_ns();
+		if (!threadio->stop)
+			ret = vb2_core_qbuf(q, vb->index, NULL);
+		call_void_qop(q, wait_prepare, q);
+		if (ret || threadio->stop)
+			break;
+	}
+
+	/* Hmm, linux becomes *very* unhappy without this ... */
+	while (!kthread_should_stop()) {
+		set_current_state(TASK_INTERRUPTIBLE);
+		schedule();
+	}
+	return 0;
+}
+
+/*
+ * This function should not be used for anything else but the videobuf2-dvb
+ * support. If you think you have another good use-case for this, then please
+ * contact the linux-media mailing list first.
+ */
+int vb2_thread_start(struct vb2_queue *q, vb2_thread_fnc fnc, void *priv,
+		     const char *thread_name)
+{
+	struct vb2_threadio_data *threadio;
+	int ret = 0;
+
+	if (q->threadio)
+		return -EBUSY;
+	if (vb2_is_busy(q))
+		return -EBUSY;
+	if (WARN_ON(q->fileio))
+		return -EBUSY;
+
+	threadio = kzalloc(sizeof(*threadio), GFP_KERNEL);
+	if (threadio == NULL)
+		return -ENOMEM;
+	threadio->fnc = fnc;
+	threadio->priv = priv;
+
+	ret = __vb2_init_fileio(q, !q->is_output);
+	dprintk(3, "file io: vb2_init_fileio result: %d\n", ret);
+	if (ret)
+		goto nomem;
+	q->threadio = threadio;
+	threadio->thread = kthread_run(vb2_thread, q, "vb2-%s", thread_name);
+	if (IS_ERR(threadio->thread)) {
+		ret = PTR_ERR(threadio->thread);
+		threadio->thread = NULL;
+		goto nothread;
+	}
+	return 0;
+
+nothread:
+	__vb2_cleanup_fileio(q);
+nomem:
+	kfree(threadio);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(vb2_thread_start);
+
+int vb2_thread_stop(struct vb2_queue *q)
+{
+	struct vb2_threadio_data *threadio = q->threadio;
+	int err;
+
+	if (threadio == NULL)
+		return 0;
+	threadio->stop = true;
+	/* Wake up all pending sleeps in the thread */
+	vb2_queue_error(q);
+	err = kthread_stop(threadio->thread);
+	__vb2_cleanup_fileio(q);
+	threadio->thread = NULL;
+	kfree(threadio);
+	q->threadio = NULL;
+	return err;
+}
+EXPORT_SYMBOL_GPL(vb2_thread_stop);
+
+MODULE_DESCRIPTION("Media buffer core framework");
+MODULE_AUTHOR("Pawel Osciak <pawel@osciak.com>, Marek Szyprowski");
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/common/videobuf2/videobuf2-dma-contig.c b/drivers/media/common/videobuf2/videobuf2-dma-contig.c
new file mode 100644
index 0000000..aff0ab7
--- /dev/null
+++ b/drivers/media/common/videobuf2/videobuf2-dma-contig.c
@@ -0,0 +1,786 @@
+/*
+ * videobuf2-dma-contig.c - DMA contig memory allocator for videobuf2
+ *
+ * Copyright (C) 2010 Samsung Electronics
+ *
+ * Author: Pawel Osciak <pawel@osciak.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ */
+
+#include <linux/dma-buf.h>
+#include <linux/module.h>
+#include <linux/refcount.h>
+#include <linux/scatterlist.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/dma-mapping.h>
+
+#include <media/videobuf2-v4l2.h>
+#include <media/videobuf2-dma-contig.h>
+#include <media/videobuf2-memops.h>
+
+struct vb2_dc_buf {
+	struct device			*dev;
+	void				*vaddr;
+	unsigned long			size;
+	void				*cookie;
+	dma_addr_t			dma_addr;
+	unsigned long			attrs;
+	enum dma_data_direction		dma_dir;
+	struct sg_table			*dma_sgt;
+	struct frame_vector		*vec;
+
+	/* MMAP related */
+	struct vb2_vmarea_handler	handler;
+	refcount_t			refcount;
+	struct sg_table			*sgt_base;
+
+	/* DMABUF related */
+	struct dma_buf_attachment	*db_attach;
+};
+
+/*********************************************/
+/*        scatterlist table functions        */
+/*********************************************/
+
+static unsigned long vb2_dc_get_contiguous_size(struct sg_table *sgt)
+{
+	struct scatterlist *s;
+	dma_addr_t expected = sg_dma_address(sgt->sgl);
+	unsigned int i;
+	unsigned long size = 0;
+
+	for_each_sg(sgt->sgl, s, sgt->nents, i) {
+		if (sg_dma_address(s) != expected)
+			break;
+		expected = sg_dma_address(s) + sg_dma_len(s);
+		size += sg_dma_len(s);
+	}
+	return size;
+}
+
+/*********************************************/
+/*         callbacks for all buffers         */
+/*********************************************/
+
+static void *vb2_dc_cookie(void *buf_priv)
+{
+	struct vb2_dc_buf *buf = buf_priv;
+
+	return &buf->dma_addr;
+}
+
+static void *vb2_dc_vaddr(void *buf_priv)
+{
+	struct vb2_dc_buf *buf = buf_priv;
+
+	if (!buf->vaddr && buf->db_attach)
+		buf->vaddr = dma_buf_vmap(buf->db_attach->dmabuf);
+
+	return buf->vaddr;
+}
+
+static unsigned int vb2_dc_num_users(void *buf_priv)
+{
+	struct vb2_dc_buf *buf = buf_priv;
+
+	return refcount_read(&buf->refcount);
+}
+
+static void vb2_dc_prepare(void *buf_priv)
+{
+	struct vb2_dc_buf *buf = buf_priv;
+	struct sg_table *sgt = buf->dma_sgt;
+
+	/* DMABUF exporter will flush the cache for us */
+	if (!sgt || buf->db_attach)
+		return;
+
+	dma_sync_sg_for_device(buf->dev, sgt->sgl, sgt->orig_nents,
+			       buf->dma_dir);
+}
+
+static void vb2_dc_finish(void *buf_priv)
+{
+	struct vb2_dc_buf *buf = buf_priv;
+	struct sg_table *sgt = buf->dma_sgt;
+
+	/* DMABUF exporter will flush the cache for us */
+	if (!sgt || buf->db_attach)
+		return;
+
+	dma_sync_sg_for_cpu(buf->dev, sgt->sgl, sgt->orig_nents, buf->dma_dir);
+}
+
+/*********************************************/
+/*        callbacks for MMAP buffers         */
+/*********************************************/
+
+static void vb2_dc_put(void *buf_priv)
+{
+	struct vb2_dc_buf *buf = buf_priv;
+
+	if (!refcount_dec_and_test(&buf->refcount))
+		return;
+
+	if (buf->sgt_base) {
+		sg_free_table(buf->sgt_base);
+		kfree(buf->sgt_base);
+	}
+	dma_free_attrs(buf->dev, buf->size, buf->cookie, buf->dma_addr,
+		       buf->attrs);
+	put_device(buf->dev);
+	kfree(buf);
+}
+
+static void *vb2_dc_alloc(struct device *dev, unsigned long attrs,
+			  unsigned long size, enum dma_data_direction dma_dir,
+			  gfp_t gfp_flags)
+{
+	struct vb2_dc_buf *buf;
+
+	if (WARN_ON(!dev))
+		return ERR_PTR(-EINVAL);
+
+	buf = kzalloc(sizeof *buf, GFP_KERNEL);
+	if (!buf)
+		return ERR_PTR(-ENOMEM);
+
+	if (attrs)
+		buf->attrs = attrs;
+	buf->cookie = dma_alloc_attrs(dev, size, &buf->dma_addr,
+					GFP_KERNEL | gfp_flags, buf->attrs);
+	if (!buf->cookie) {
+		dev_err(dev, "dma_alloc_coherent of size %ld failed\n", size);
+		kfree(buf);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	if ((buf->attrs & DMA_ATTR_NO_KERNEL_MAPPING) == 0)
+		buf->vaddr = buf->cookie;
+
+	/* Prevent the device from being released while the buffer is used */
+	buf->dev = get_device(dev);
+	buf->size = size;
+	buf->dma_dir = dma_dir;
+
+	buf->handler.refcount = &buf->refcount;
+	buf->handler.put = vb2_dc_put;
+	buf->handler.arg = buf;
+
+	refcount_set(&buf->refcount, 1);
+
+	return buf;
+}
+
+static int vb2_dc_mmap(void *buf_priv, struct vm_area_struct *vma)
+{
+	struct vb2_dc_buf *buf = buf_priv;
+	int ret;
+
+	if (!buf) {
+		printk(KERN_ERR "No buffer to map\n");
+		return -EINVAL;
+	}
+
+	/*
+	 * dma_mmap_* uses vm_pgoff as an in-buffer offset, but we want to
+	 * map the whole buffer
+	 */
+	vma->vm_pgoff = 0;
+
+	ret = dma_mmap_attrs(buf->dev, vma, buf->cookie,
+		buf->dma_addr, buf->size, buf->attrs);
+
+	if (ret) {
+		pr_err("Remapping memory failed, error: %d\n", ret);
+		return ret;
+	}
+
+	vma->vm_flags		|= VM_DONTEXPAND | VM_DONTDUMP;
+	vma->vm_private_data	= &buf->handler;
+	vma->vm_ops		= &vb2_common_vm_ops;
+
+	vma->vm_ops->open(vma);
+
+	pr_debug("%s: mapped dma addr 0x%08lx at 0x%08lx, size %ld\n",
+		__func__, (unsigned long)buf->dma_addr, vma->vm_start,
+		buf->size);
+
+	return 0;
+}
+
+/*********************************************/
+/*         DMABUF ops for exporters          */
+/*********************************************/
+
+struct vb2_dc_attachment {
+	struct sg_table sgt;
+	enum dma_data_direction dma_dir;
+};
+
+static int vb2_dc_dmabuf_ops_attach(struct dma_buf *dbuf,
+	struct dma_buf_attachment *dbuf_attach)
+{
+	struct vb2_dc_attachment *attach;
+	unsigned int i;
+	struct scatterlist *rd, *wr;
+	struct sg_table *sgt;
+	struct vb2_dc_buf *buf = dbuf->priv;
+	int ret;
+
+	attach = kzalloc(sizeof(*attach), GFP_KERNEL);
+	if (!attach)
+		return -ENOMEM;
+
+	sgt = &attach->sgt;
+	/* Copy the buf->sgt_base scatter list to the attachment, as we can't
+	 * map the same scatter list to multiple attachments at the same time.
+	 */
+	ret = sg_alloc_table(sgt, buf->sgt_base->orig_nents, GFP_KERNEL);
+	if (ret) {
+		kfree(attach);
+		return -ENOMEM;
+	}
+
+	rd = buf->sgt_base->sgl;
+	wr = sgt->sgl;
+	for (i = 0; i < sgt->orig_nents; ++i) {
+		sg_set_page(wr, sg_page(rd), rd->length, rd->offset);
+		rd = sg_next(rd);
+		wr = sg_next(wr);
+	}
+
+	attach->dma_dir = DMA_NONE;
+	dbuf_attach->priv = attach;
+
+	return 0;
+}
+
+static void vb2_dc_dmabuf_ops_detach(struct dma_buf *dbuf,
+	struct dma_buf_attachment *db_attach)
+{
+	struct vb2_dc_attachment *attach = db_attach->priv;
+	struct sg_table *sgt;
+
+	if (!attach)
+		return;
+
+	sgt = &attach->sgt;
+
+	/* release the scatterlist cache */
+	if (attach->dma_dir != DMA_NONE)
+		dma_unmap_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
+			attach->dma_dir);
+	sg_free_table(sgt);
+	kfree(attach);
+	db_attach->priv = NULL;
+}
+
+static struct sg_table *vb2_dc_dmabuf_ops_map(
+	struct dma_buf_attachment *db_attach, enum dma_data_direction dma_dir)
+{
+	struct vb2_dc_attachment *attach = db_attach->priv;
+	/* stealing dmabuf mutex to serialize map/unmap operations */
+	struct mutex *lock = &db_attach->dmabuf->lock;
+	struct sg_table *sgt;
+
+	mutex_lock(lock);
+
+	sgt = &attach->sgt;
+	/* return previously mapped sg table */
+	if (attach->dma_dir == dma_dir) {
+		mutex_unlock(lock);
+		return sgt;
+	}
+
+	/* release any previous cache */
+	if (attach->dma_dir != DMA_NONE) {
+		dma_unmap_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
+			attach->dma_dir);
+		attach->dma_dir = DMA_NONE;
+	}
+
+	/* mapping to the client with new direction */
+	sgt->nents = dma_map_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
+				dma_dir);
+	if (!sgt->nents) {
+		pr_err("failed to map scatterlist\n");
+		mutex_unlock(lock);
+		return ERR_PTR(-EIO);
+	}
+
+	attach->dma_dir = dma_dir;
+
+	mutex_unlock(lock);
+
+	return sgt;
+}
+
+static void vb2_dc_dmabuf_ops_unmap(struct dma_buf_attachment *db_attach,
+	struct sg_table *sgt, enum dma_data_direction dma_dir)
+{
+	/* nothing to be done here */
+}
+
+static void vb2_dc_dmabuf_ops_release(struct dma_buf *dbuf)
+{
+	/* drop reference obtained in vb2_dc_get_dmabuf */
+	vb2_dc_put(dbuf->priv);
+}
+
+static void *vb2_dc_dmabuf_ops_kmap(struct dma_buf *dbuf, unsigned long pgnum)
+{
+	struct vb2_dc_buf *buf = dbuf->priv;
+
+	return buf->vaddr ? buf->vaddr + pgnum * PAGE_SIZE : NULL;
+}
+
+static void *vb2_dc_dmabuf_ops_vmap(struct dma_buf *dbuf)
+{
+	struct vb2_dc_buf *buf = dbuf->priv;
+
+	return buf->vaddr;
+}
+
+static int vb2_dc_dmabuf_ops_mmap(struct dma_buf *dbuf,
+	struct vm_area_struct *vma)
+{
+	return vb2_dc_mmap(dbuf->priv, vma);
+}
+
+static const struct dma_buf_ops vb2_dc_dmabuf_ops = {
+	.attach = vb2_dc_dmabuf_ops_attach,
+	.detach = vb2_dc_dmabuf_ops_detach,
+	.map_dma_buf = vb2_dc_dmabuf_ops_map,
+	.unmap_dma_buf = vb2_dc_dmabuf_ops_unmap,
+	.map = vb2_dc_dmabuf_ops_kmap,
+	.vmap = vb2_dc_dmabuf_ops_vmap,
+	.mmap = vb2_dc_dmabuf_ops_mmap,
+	.release = vb2_dc_dmabuf_ops_release,
+};
+
+static struct sg_table *vb2_dc_get_base_sgt(struct vb2_dc_buf *buf)
+{
+	int ret;
+	struct sg_table *sgt;
+
+	sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
+	if (!sgt) {
+		dev_err(buf->dev, "failed to alloc sg table\n");
+		return NULL;
+	}
+
+	ret = dma_get_sgtable_attrs(buf->dev, sgt, buf->cookie, buf->dma_addr,
+		buf->size, buf->attrs);
+	if (ret < 0) {
+		dev_err(buf->dev, "failed to get scatterlist from DMA API\n");
+		kfree(sgt);
+		return NULL;
+	}
+
+	return sgt;
+}
+
+static struct dma_buf *vb2_dc_get_dmabuf(void *buf_priv, unsigned long flags)
+{
+	struct vb2_dc_buf *buf = buf_priv;
+	struct dma_buf *dbuf;
+	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
+
+	exp_info.ops = &vb2_dc_dmabuf_ops;
+	exp_info.size = buf->size;
+	exp_info.flags = flags;
+	exp_info.priv = buf;
+
+	if (!buf->sgt_base)
+		buf->sgt_base = vb2_dc_get_base_sgt(buf);
+
+	if (WARN_ON(!buf->sgt_base))
+		return NULL;
+
+	dbuf = dma_buf_export(&exp_info);
+	if (IS_ERR(dbuf))
+		return NULL;
+
+	/* dmabuf keeps reference to vb2 buffer */
+	refcount_inc(&buf->refcount);
+
+	return dbuf;
+}
+
+/*********************************************/
+/*       callbacks for USERPTR buffers       */
+/*********************************************/
+
+static void vb2_dc_put_userptr(void *buf_priv)
+{
+	struct vb2_dc_buf *buf = buf_priv;
+	struct sg_table *sgt = buf->dma_sgt;
+	int i;
+	struct page **pages;
+
+	if (sgt) {
+		/*
+		 * No need to sync to CPU, it's already synced to the CPU
+		 * since the finish() memop will have been called before this.
+		 */
+		dma_unmap_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents,
+				   buf->dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
+		pages = frame_vector_pages(buf->vec);
+		/* sgt should exist only if vector contains pages... */
+		BUG_ON(IS_ERR(pages));
+		if (buf->dma_dir == DMA_FROM_DEVICE ||
+		    buf->dma_dir == DMA_BIDIRECTIONAL)
+			for (i = 0; i < frame_vector_count(buf->vec); i++)
+				set_page_dirty_lock(pages[i]);
+		sg_free_table(sgt);
+		kfree(sgt);
+	}
+	vb2_destroy_framevec(buf->vec);
+	kfree(buf);
+}
+
+/*
+ * For some kinds of reserved memory there might be no struct page available,
+ * so all that can be done to support such 'pages' is to try to convert the
+ * pfn to a dma address or, as a last resort, just assume that
+ * dma address == physical address (as was assumed in earlier versions
+ * of videobuf2-dma-contig).
+ */
+
+#ifdef __arch_pfn_to_dma
+static inline dma_addr_t vb2_dc_pfn_to_dma(struct device *dev, unsigned long pfn)
+{
+	return (dma_addr_t)__arch_pfn_to_dma(dev, pfn);
+}
+#elif defined(__pfn_to_bus)
+static inline dma_addr_t vb2_dc_pfn_to_dma(struct device *dev, unsigned long pfn)
+{
+	return (dma_addr_t)__pfn_to_bus(pfn);
+}
+#elif defined(__pfn_to_phys)
+static inline dma_addr_t vb2_dc_pfn_to_dma(struct device *dev, unsigned long pfn)
+{
+	return (dma_addr_t)__pfn_to_phys(pfn);
+}
+#else
+static inline dma_addr_t vb2_dc_pfn_to_dma(struct device *dev, unsigned long pfn)
+{
+	/* really, we cannot do anything better at this point */
+	return (dma_addr_t)(pfn) << PAGE_SHIFT;
+}
+#endif
+
+static void *vb2_dc_get_userptr(struct device *dev, unsigned long vaddr,
+	unsigned long size, enum dma_data_direction dma_dir)
+{
+	struct vb2_dc_buf *buf;
+	struct frame_vector *vec;
+	unsigned int offset;
+	int n_pages, i;
+	int ret = 0;
+	struct sg_table *sgt;
+	unsigned long contig_size;
+	unsigned long dma_align = dma_get_cache_alignment();
+
+	/* Only cache aligned DMA transfers are reliable */
+	if (!IS_ALIGNED(vaddr | size, dma_align)) {
+		pr_debug("user data must be aligned to %lu bytes\n", dma_align);
+		return ERR_PTR(-EINVAL);
+	}
+
+	if (!size) {
+		pr_debug("size is zero\n");
+		return ERR_PTR(-EINVAL);
+	}
+
+	if (WARN_ON(!dev))
+		return ERR_PTR(-EINVAL);
+
+	buf = kzalloc(sizeof *buf, GFP_KERNEL);
+	if (!buf)
+		return ERR_PTR(-ENOMEM);
+
+	buf->dev = dev;
+	buf->dma_dir = dma_dir;
+
+	offset = lower_32_bits(offset_in_page(vaddr));
+	vec = vb2_create_framevec(vaddr, size, dma_dir == DMA_FROM_DEVICE ||
+					       dma_dir == DMA_BIDIRECTIONAL);
+	if (IS_ERR(vec)) {
+		ret = PTR_ERR(vec);
+		goto fail_buf;
+	}
+	buf->vec = vec;
+	n_pages = frame_vector_count(vec);
+	ret = frame_vector_to_pages(vec);
+	if (ret < 0) {
+		unsigned long *nums = frame_vector_pfns(vec);
+
+		/*
+		 * Failed to convert to pages... Check the memory is physically
+		 * contiguous and use direct mapping
+		 */
+		for (i = 1; i < n_pages; i++)
+			if (nums[i-1] + 1 != nums[i])
+				goto fail_pfnvec;
+		buf->dma_addr = vb2_dc_pfn_to_dma(buf->dev, nums[0]);
+		goto out;
+	}
+
+	sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
+	if (!sgt) {
+		pr_err("failed to allocate sg table\n");
+		ret = -ENOMEM;
+		goto fail_pfnvec;
+	}
+
+	ret = sg_alloc_table_from_pages(sgt, frame_vector_pages(vec), n_pages,
+		offset, size, GFP_KERNEL);
+	if (ret) {
+		pr_err("failed to initialize sg table\n");
+		goto fail_sgt;
+	}
+
+	/*
+	 * No need to sync to the device, this will happen later when the
+	 * prepare() memop is called.
+	 */
+	sgt->nents = dma_map_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents,
+				      buf->dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
+	if (sgt->nents <= 0) {
+		pr_err("failed to map scatterlist\n");
+		ret = -EIO;
+		goto fail_sgt_init;
+	}
+
+	contig_size = vb2_dc_get_contiguous_size(sgt);
+	if (contig_size < size) {
+		pr_err("contiguous mapping is too small %lu/%lu\n",
+			contig_size, size);
+		ret = -EFAULT;
+		goto fail_map_sg;
+	}
+
+	buf->dma_addr = sg_dma_address(sgt->sgl);
+	buf->dma_sgt = sgt;
+out:
+	buf->size = size;
+
+	return buf;
+
+fail_map_sg:
+	dma_unmap_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents,
+			   buf->dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
+
+fail_sgt_init:
+	sg_free_table(sgt);
+
+fail_sgt:
+	kfree(sgt);
+
+fail_pfnvec:
+	vb2_destroy_framevec(vec);
+
+fail_buf:
+	kfree(buf);
+
+	return ERR_PTR(ret);
+}
+
+/*********************************************/
+/*       callbacks for DMABUF buffers        */
+/*********************************************/
+
+static int vb2_dc_map_dmabuf(void *mem_priv)
+{
+	struct vb2_dc_buf *buf = mem_priv;
+	struct sg_table *sgt;
+	unsigned long contig_size;
+
+	if (WARN_ON(!buf->db_attach)) {
+		pr_err("trying to pin a non attached buffer\n");
+		return -EINVAL;
+	}
+
+	if (WARN_ON(buf->dma_sgt)) {
+		pr_err("dmabuf buffer is already pinned\n");
+		return 0;
+	}
+
+	/* get the associated scatterlist for this buffer */
+	sgt = dma_buf_map_attachment(buf->db_attach, buf->dma_dir);
+	if (IS_ERR(sgt)) {
+		pr_err("Error getting dmabuf scatterlist\n");
+		return -EINVAL;
+	}
+
+	/* checking if dmabuf is big enough to store contiguous chunk */
+	contig_size = vb2_dc_get_contiguous_size(sgt);
+	if (contig_size < buf->size) {
+		pr_err("contiguous chunk is too small %lu/%lu b\n",
+			contig_size, buf->size);
+		dma_buf_unmap_attachment(buf->db_attach, sgt, buf->dma_dir);
+		return -EFAULT;
+	}
+
+	buf->dma_addr = sg_dma_address(sgt->sgl);
+	buf->dma_sgt = sgt;
+	buf->vaddr = NULL;
+
+	return 0;
+}
+
+static void vb2_dc_unmap_dmabuf(void *mem_priv)
+{
+	struct vb2_dc_buf *buf = mem_priv;
+	struct sg_table *sgt = buf->dma_sgt;
+
+	if (WARN_ON(!buf->db_attach)) {
+		pr_err("trying to unpin a not attached buffer\n");
+		return;
+	}
+
+	if (WARN_ON(!sgt)) {
+		pr_err("dmabuf buffer is already unpinned\n");
+		return;
+	}
+
+	if (buf->vaddr) {
+		dma_buf_vunmap(buf->db_attach->dmabuf, buf->vaddr);
+		buf->vaddr = NULL;
+	}
+	dma_buf_unmap_attachment(buf->db_attach, sgt, buf->dma_dir);
+
+	buf->dma_addr = 0;
+	buf->dma_sgt = NULL;
+}
+
+static void vb2_dc_detach_dmabuf(void *mem_priv)
+{
+	struct vb2_dc_buf *buf = mem_priv;
+
+	/* if vb2 works correctly you should never detach a mapped buffer */
+	if (WARN_ON(buf->dma_addr))
+		vb2_dc_unmap_dmabuf(buf);
+
+	/* detach this attachment */
+	dma_buf_detach(buf->db_attach->dmabuf, buf->db_attach);
+	kfree(buf);
+}
+
+static void *vb2_dc_attach_dmabuf(struct device *dev, struct dma_buf *dbuf,
+	unsigned long size, enum dma_data_direction dma_dir)
+{
+	struct vb2_dc_buf *buf;
+	struct dma_buf_attachment *dba;
+
+	if (dbuf->size < size)
+		return ERR_PTR(-EFAULT);
+
+	if (WARN_ON(!dev))
+		return ERR_PTR(-EINVAL);
+
+	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
+	if (!buf)
+		return ERR_PTR(-ENOMEM);
+
+	buf->dev = dev;
+	/* create attachment for the dmabuf with the user device */
+	dba = dma_buf_attach(dbuf, buf->dev);
+	if (IS_ERR(dba)) {
+		pr_err("failed to attach dmabuf\n");
+		kfree(buf);
+		return dba;
+	}
+
+	buf->dma_dir = dma_dir;
+	buf->size = size;
+	buf->db_attach = dba;
+
+	return buf;
+}
+
+/*********************************************/
+/*       DMA CONTIG exported functions       */
+/*********************************************/
+
+const struct vb2_mem_ops vb2_dma_contig_memops = {
+	.alloc		= vb2_dc_alloc,
+	.put		= vb2_dc_put,
+	.get_dmabuf	= vb2_dc_get_dmabuf,
+	.cookie		= vb2_dc_cookie,
+	.vaddr		= vb2_dc_vaddr,
+	.mmap		= vb2_dc_mmap,
+	.get_userptr	= vb2_dc_get_userptr,
+	.put_userptr	= vb2_dc_put_userptr,
+	.prepare	= vb2_dc_prepare,
+	.finish		= vb2_dc_finish,
+	.map_dmabuf	= vb2_dc_map_dmabuf,
+	.unmap_dmabuf	= vb2_dc_unmap_dmabuf,
+	.attach_dmabuf	= vb2_dc_attach_dmabuf,
+	.detach_dmabuf	= vb2_dc_detach_dmabuf,
+	.num_users	= vb2_dc_num_users,
+};
+EXPORT_SYMBOL_GPL(vb2_dma_contig_memops);
+
+/**
+ * vb2_dma_contig_set_max_seg_size() - configure DMA max segment size
+ * @dev:	device for configuring DMA parameters
+ * @size:	size of DMA max segment size to set
+ *
+ * To allow mapping the scatter-list into a single chunk in the DMA
+ * address space, the device is required to have the DMA max segment
+ * size parameter set to a value larger than the buffer size. Otherwise,
+ * the DMA-mapping subsystem will split the mapping into max segment
+ * size chunks. This function sets the DMA max segment size
+ * parameter to let DMA-mapping map a buffer as a single chunk in DMA
+ * address space.
+ * This code assumes that the DMA-mapping subsystem will merge all
+ * scatterlist segments if this is really possible (for example when
+ * an IOMMU is available and enabled).
+ * Ideally, this parameter should be set by the generic bus code, but it
+ * is left with the default 64KiB value due to historical limitations in
+ * other subsystems (like limited USB host drivers) and there is no good
+ * place to set it to the proper value.
+ * This function should be called from drivers which are known to
+ * operate on platforms with an IOMMU and provide access to shared buffers
+ * (either USERPTR or DMABUF). This should be done before initializing the
+ * videobuf2 queue.
+ */
+int vb2_dma_contig_set_max_seg_size(struct device *dev, unsigned int size)
+{
+	if (!dev->dma_parms) {
+		dev->dma_parms = kzalloc(sizeof(*dev->dma_parms), GFP_KERNEL);
+		if (!dev->dma_parms)
+			return -ENOMEM;
+	}
+	if (dma_get_max_seg_size(dev) < size)
+		return dma_set_max_seg_size(dev, size);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(vb2_dma_contig_set_max_seg_size);
+
+/*
+ * vb2_dma_contig_clear_max_seg_size() - release resources for DMA parameters
+ * @dev:	device for configuring DMA parameters
+ *
+ * This function releases resources allocated to configure DMA parameters
+ * (see vb2_dma_contig_set_max_seg_size() function). It should be called from
+ * device drivers on driver remove.
+ */
+void vb2_dma_contig_clear_max_seg_size(struct device *dev)
+{
+	kfree(dev->dma_parms);
+	dev->dma_parms = NULL;
+}
+EXPORT_SYMBOL_GPL(vb2_dma_contig_clear_max_seg_size);
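+
+/*
+ * Illustrative sketch (editorial, not part of the original file) of the
+ * pairing described in the comments above: a hypothetical platform driver
+ * raises the max segment size in probe() and releases the allocation in
+ * remove(). DMA_BIT_MASK(32) is just a commonly used "large enough" value.
+ *
+ *	static int my_probe(struct platform_device *pdev)
+ *	{
+ *		int ret;
+ *
+ *		ret = vb2_dma_contig_set_max_seg_size(&pdev->dev,
+ *						      DMA_BIT_MASK(32));
+ *		if (ret)
+ *			return ret;
+ *		return my_setup_queues(pdev);	(hypothetical helper)
+ *	}
+ *
+ *	static int my_remove(struct platform_device *pdev)
+ *	{
+ *		vb2_dma_contig_clear_max_seg_size(&pdev->dev);
+ *		return 0;
+ *	}
+ */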
+
+MODULE_DESCRIPTION("DMA-contig memory handling routines for videobuf2");
+MODULE_AUTHOR("Pawel Osciak <pawel@osciak.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/common/videobuf2/videobuf2-dma-sg.c b/drivers/media/common/videobuf2/videobuf2-dma-sg.c
new file mode 100644
index 0000000..015e737
--- /dev/null
+++ b/drivers/media/common/videobuf2/videobuf2-dma-sg.c
@@ -0,0 +1,668 @@
+/*
+ * videobuf2-dma-sg.c - dma scatter/gather memory allocator for videobuf2
+ *
+ * Copyright (C) 2010 Samsung Electronics
+ *
+ * Author: Andrzej Pietrasiewicz <andrzej.p@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/mm.h>
+#include <linux/refcount.h>
+#include <linux/scatterlist.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+
+#include <media/videobuf2-v4l2.h>
+#include <media/videobuf2-memops.h>
+#include <media/videobuf2-dma-sg.h>
+
+static int debug;
+module_param(debug, int, 0644);
+
+#define dprintk(level, fmt, arg...)					\
+	do {								\
+		if (debug >= level)					\
+			printk(KERN_DEBUG "vb2-dma-sg: " fmt, ## arg);	\
+	} while (0)
+
+struct vb2_dma_sg_buf {
+	struct device			*dev;
+	void				*vaddr;
+	struct page			**pages;
+	struct frame_vector		*vec;
+	int				offset;
+	enum dma_data_direction		dma_dir;
+	struct sg_table			sg_table;
+	/*
+	 * This will point to sg_table when used with the MMAP or USERPTR
+	 * memory model, and to the dma_buf sglist when used with the
+	 * DMABUF memory model.
+	 */
+	struct sg_table			*dma_sgt;
+	size_t				size;
+	unsigned int			num_pages;
+	refcount_t			refcount;
+	struct vb2_vmarea_handler	handler;
+
+	struct dma_buf_attachment	*db_attach;
+};
+
+static void vb2_dma_sg_put(void *buf_priv);
+
+static int vb2_dma_sg_alloc_compacted(struct vb2_dma_sg_buf *buf,
+		gfp_t gfp_flags)
+{
+	unsigned int last_page = 0;
+	int size = buf->size;
+
+	while (size > 0) {
+		struct page *pages;
+		int order;
+		int i;
+
+		order = get_order(size);
+		/* Don't over-allocate */
+		if ((PAGE_SIZE << order) > size)
+			order--;
+
+		pages = NULL;
+		while (!pages) {
+			pages = alloc_pages(GFP_KERNEL | __GFP_ZERO |
+					__GFP_NOWARN | gfp_flags, order);
+			if (pages)
+				break;
+
+			if (order == 0) {
+				while (last_page--)
+					__free_page(buf->pages[last_page]);
+				return -ENOMEM;
+			}
+			order--;
+		}
+
+		split_page(pages, order);
+		for (i = 0; i < (1 << order); i++)
+			buf->pages[last_page++] = &pages[i];
+
+		size -= PAGE_SIZE << order;
+	}
+
+	return 0;
+}
+
+static void *vb2_dma_sg_alloc(struct device *dev, unsigned long dma_attrs,
+			      unsigned long size, enum dma_data_direction dma_dir,
+			      gfp_t gfp_flags)
+{
+	struct vb2_dma_sg_buf *buf;
+	struct sg_table *sgt;
+	int ret;
+	int num_pages;
+
+	if (WARN_ON(!dev))
+		return ERR_PTR(-EINVAL);
+
+	buf = kzalloc(sizeof *buf, GFP_KERNEL);
+	if (!buf)
+		return ERR_PTR(-ENOMEM);
+
+	buf->vaddr = NULL;
+	buf->dma_dir = dma_dir;
+	buf->offset = 0;
+	buf->size = size;
+	/* size is already page aligned */
+	buf->num_pages = size >> PAGE_SHIFT;
+	buf->dma_sgt = &buf->sg_table;
+
+	buf->pages = kvmalloc_array(buf->num_pages, sizeof(struct page *),
+				    GFP_KERNEL | __GFP_ZERO);
+	if (!buf->pages)
+		goto fail_pages_array_alloc;
+
+	ret = vb2_dma_sg_alloc_compacted(buf, gfp_flags);
+	if (ret)
+		goto fail_pages_alloc;
+
+	ret = sg_alloc_table_from_pages(buf->dma_sgt, buf->pages,
+			buf->num_pages, 0, size, GFP_KERNEL);
+	if (ret)
+		goto fail_table_alloc;
+
+	/* Prevent the device from being released while the buffer is used */
+	buf->dev = get_device(dev);
+
+	sgt = &buf->sg_table;
+	/*
+	 * No need to sync to the device, this will happen later when the
+	 * prepare() memop is called.
+	 */
+	sgt->nents = dma_map_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents,
+				      buf->dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
+	if (!sgt->nents)
+		goto fail_map;
+
+	buf->handler.refcount = &buf->refcount;
+	buf->handler.put = vb2_dma_sg_put;
+	buf->handler.arg = buf;
+
+	refcount_set(&buf->refcount, 1);
+
+	dprintk(1, "%s: Allocated buffer of %d pages\n",
+		__func__, buf->num_pages);
+	return buf;
+
+fail_map:
+	put_device(buf->dev);
+	sg_free_table(buf->dma_sgt);
+fail_table_alloc:
+	num_pages = buf->num_pages;
+	while (num_pages--)
+		__free_page(buf->pages[num_pages]);
+fail_pages_alloc:
+	kvfree(buf->pages);
+fail_pages_array_alloc:
+	kfree(buf);
+	return ERR_PTR(-ENOMEM);
+}
+
+static void vb2_dma_sg_put(void *buf_priv)
+{
+	struct vb2_dma_sg_buf *buf = buf_priv;
+	struct sg_table *sgt = &buf->sg_table;
+	int i = buf->num_pages;
+
+	if (refcount_dec_and_test(&buf->refcount)) {
+		dprintk(1, "%s: Freeing buffer of %d pages\n", __func__,
+			buf->num_pages);
+		dma_unmap_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents,
+				   buf->dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
+		if (buf->vaddr)
+			vm_unmap_ram(buf->vaddr, buf->num_pages);
+		sg_free_table(buf->dma_sgt);
+		while (--i >= 0)
+			__free_page(buf->pages[i]);
+		kvfree(buf->pages);
+		put_device(buf->dev);
+		kfree(buf);
+	}
+}
+
+static void vb2_dma_sg_prepare(void *buf_priv)
+{
+	struct vb2_dma_sg_buf *buf = buf_priv;
+	struct sg_table *sgt = buf->dma_sgt;
+
+	/* DMABUF exporter will flush the cache for us */
+	if (buf->db_attach)
+		return;
+
+	dma_sync_sg_for_device(buf->dev, sgt->sgl, sgt->orig_nents,
+			       buf->dma_dir);
+}
+
+static void vb2_dma_sg_finish(void *buf_priv)
+{
+	struct vb2_dma_sg_buf *buf = buf_priv;
+	struct sg_table *sgt = buf->dma_sgt;
+
+	/* DMABUF exporter will flush the cache for us */
+	if (buf->db_attach)
+		return;
+
+	dma_sync_sg_for_cpu(buf->dev, sgt->sgl, sgt->orig_nents, buf->dma_dir);
+}
+
+static void *vb2_dma_sg_get_userptr(struct device *dev, unsigned long vaddr,
+				    unsigned long size,
+				    enum dma_data_direction dma_dir)
+{
+	struct vb2_dma_sg_buf *buf;
+	struct sg_table *sgt;
+	struct frame_vector *vec;
+
+	if (WARN_ON(!dev))
+		return ERR_PTR(-EINVAL);
+
+	buf = kzalloc(sizeof *buf, GFP_KERNEL);
+	if (!buf)
+		return ERR_PTR(-ENOMEM);
+
+	buf->vaddr = NULL;
+	buf->dev = dev;
+	buf->dma_dir = dma_dir;
+	buf->offset = vaddr & ~PAGE_MASK;
+	buf->size = size;
+	buf->dma_sgt = &buf->sg_table;
+	vec = vb2_create_framevec(vaddr, size, dma_dir == DMA_FROM_DEVICE ||
+					       dma_dir == DMA_BIDIRECTIONAL);
+	if (IS_ERR(vec))
+		goto userptr_fail_pfnvec;
+	buf->vec = vec;
+
+	buf->pages = frame_vector_pages(vec);
+	if (IS_ERR(buf->pages))
+		goto userptr_fail_sgtable;
+	buf->num_pages = frame_vector_count(vec);
+
+	if (sg_alloc_table_from_pages(buf->dma_sgt, buf->pages,
+			buf->num_pages, buf->offset, size, 0))
+		goto userptr_fail_sgtable;
+
+	sgt = &buf->sg_table;
+	/*
+	 * No need to sync to the device, this will happen later when the
+	 * prepare() memop is called.
+	 */
+	sgt->nents = dma_map_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents,
+				      buf->dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
+	if (!sgt->nents)
+		goto userptr_fail_map;
+
+	return buf;
+
+userptr_fail_map:
+	sg_free_table(&buf->sg_table);
+userptr_fail_sgtable:
+	vb2_destroy_framevec(vec);
+userptr_fail_pfnvec:
+	kfree(buf);
+	return ERR_PTR(-ENOMEM);
+}
+
+/*
+ * @put_userptr: inform the allocator that a USERPTR buffer will no longer
+ *		 be used
+ */
+static void vb2_dma_sg_put_userptr(void *buf_priv)
+{
+	struct vb2_dma_sg_buf *buf = buf_priv;
+	struct sg_table *sgt = &buf->sg_table;
+	int i = buf->num_pages;
+
+	dprintk(1, "%s: Releasing userspace buffer of %d pages\n",
+	       __func__, buf->num_pages);
+	dma_unmap_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents, buf->dma_dir,
+			   DMA_ATTR_SKIP_CPU_SYNC);
+	if (buf->vaddr)
+		vm_unmap_ram(buf->vaddr, buf->num_pages);
+	sg_free_table(buf->dma_sgt);
+	if (buf->dma_dir == DMA_FROM_DEVICE ||
+	    buf->dma_dir == DMA_BIDIRECTIONAL)
+		while (--i >= 0)
+			set_page_dirty_lock(buf->pages[i]);
+	vb2_destroy_framevec(buf->vec);
+	kfree(buf);
+}
+
+static void *vb2_dma_sg_vaddr(void *buf_priv)
+{
+	struct vb2_dma_sg_buf *buf = buf_priv;
+
+	BUG_ON(!buf);
+
+	if (!buf->vaddr) {
+		if (buf->db_attach)
+			buf->vaddr = dma_buf_vmap(buf->db_attach->dmabuf);
+		else
+			buf->vaddr = vm_map_ram(buf->pages,
+					buf->num_pages, -1, PAGE_KERNEL);
+	}
+
+	/* add offset in case userptr is not page-aligned */
+	return buf->vaddr ? buf->vaddr + buf->offset : NULL;
+}
+
+static unsigned int vb2_dma_sg_num_users(void *buf_priv)
+{
+	struct vb2_dma_sg_buf *buf = buf_priv;
+
+	return refcount_read(&buf->refcount);
+}
+
+static int vb2_dma_sg_mmap(void *buf_priv, struct vm_area_struct *vma)
+{
+	struct vb2_dma_sg_buf *buf = buf_priv;
+	unsigned long uaddr = vma->vm_start;
+	unsigned long usize = vma->vm_end - vma->vm_start;
+	int i = 0;
+
+	if (!buf) {
+		printk(KERN_ERR "No memory to map\n");
+		return -EINVAL;
+	}
+
+	do {
+		int ret;
+
+		ret = vm_insert_page(vma, uaddr, buf->pages[i++]);
+		if (ret) {
+			printk(KERN_ERR "Remapping memory, error: %d\n", ret);
+			return ret;
+		}
+
+		uaddr += PAGE_SIZE;
+		usize -= PAGE_SIZE;
+	} while (usize > 0);
+
+
+	/*
+	 * Use common vm_area operations to track buffer refcount.
+	 */
+	vma->vm_private_data	= &buf->handler;
+	vma->vm_ops		= &vb2_common_vm_ops;
+
+	vma->vm_ops->open(vma);
+
+	return 0;
+}
+
+/*********************************************/
+/*         DMABUF ops for exporters          */
+/*********************************************/
+
+struct vb2_dma_sg_attachment {
+	struct sg_table sgt;
+	enum dma_data_direction dma_dir;
+};
+
+static int vb2_dma_sg_dmabuf_ops_attach(struct dma_buf *dbuf,
+	struct dma_buf_attachment *dbuf_attach)
+{
+	struct vb2_dma_sg_attachment *attach;
+	unsigned int i;
+	struct scatterlist *rd, *wr;
+	struct sg_table *sgt;
+	struct vb2_dma_sg_buf *buf = dbuf->priv;
+	int ret;
+
+	attach = kzalloc(sizeof(*attach), GFP_KERNEL);
+	if (!attach)
+		return -ENOMEM;
+
+	sgt = &attach->sgt;
+	/* Copy the buf->dma_sgt scatter list to the attachment, as we can't
+	 * map the same scatter list to multiple attachments at the same time.
+	 */
+	ret = sg_alloc_table(sgt, buf->dma_sgt->orig_nents, GFP_KERNEL);
+	if (ret) {
+		kfree(attach);
+		return -ENOMEM;
+	}
+
+	rd = buf->dma_sgt->sgl;
+	wr = sgt->sgl;
+	for (i = 0; i < sgt->orig_nents; ++i) {
+		sg_set_page(wr, sg_page(rd), rd->length, rd->offset);
+		rd = sg_next(rd);
+		wr = sg_next(wr);
+	}
+
+	attach->dma_dir = DMA_NONE;
+	dbuf_attach->priv = attach;
+
+	return 0;
+}
+
+static void vb2_dma_sg_dmabuf_ops_detach(struct dma_buf *dbuf,
+	struct dma_buf_attachment *db_attach)
+{
+	struct vb2_dma_sg_attachment *attach = db_attach->priv;
+	struct sg_table *sgt;
+
+	if (!attach)
+		return;
+
+	sgt = &attach->sgt;
+
+	/* release the scatterlist cache */
+	if (attach->dma_dir != DMA_NONE)
+		dma_unmap_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
+			attach->dma_dir);
+	sg_free_table(sgt);
+	kfree(attach);
+	db_attach->priv = NULL;
+}
+
+static struct sg_table *vb2_dma_sg_dmabuf_ops_map(
+	struct dma_buf_attachment *db_attach, enum dma_data_direction dma_dir)
+{
+	struct vb2_dma_sg_attachment *attach = db_attach->priv;
+	/* stealing dmabuf mutex to serialize map/unmap operations */
+	struct mutex *lock = &db_attach->dmabuf->lock;
+	struct sg_table *sgt;
+
+	mutex_lock(lock);
+
+	sgt = &attach->sgt;
+	/* return previously mapped sg table */
+	if (attach->dma_dir == dma_dir) {
+		mutex_unlock(lock);
+		return sgt;
+	}
+
+	/* release any previous cache */
+	if (attach->dma_dir != DMA_NONE) {
+		dma_unmap_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
+			attach->dma_dir);
+		attach->dma_dir = DMA_NONE;
+	}
+
+	/* mapping to the client with new direction */
+	sgt->nents = dma_map_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
+				dma_dir);
+	if (!sgt->nents) {
+		pr_err("failed to map scatterlist\n");
+		mutex_unlock(lock);
+		return ERR_PTR(-EIO);
+	}
+
+	attach->dma_dir = dma_dir;
+
+	mutex_unlock(lock);
+
+	return sgt;
+}
+
+static void vb2_dma_sg_dmabuf_ops_unmap(struct dma_buf_attachment *db_attach,
+	struct sg_table *sgt, enum dma_data_direction dma_dir)
+{
+	/* nothing to be done here */
+}
+
+static void vb2_dma_sg_dmabuf_ops_release(struct dma_buf *dbuf)
+{
+	/* drop reference obtained in vb2_dma_sg_get_dmabuf */
+	vb2_dma_sg_put(dbuf->priv);
+}
+
+static void *vb2_dma_sg_dmabuf_ops_kmap(struct dma_buf *dbuf, unsigned long pgnum)
+{
+	struct vb2_dma_sg_buf *buf = dbuf->priv;
+
+	return buf->vaddr ? buf->vaddr + pgnum * PAGE_SIZE : NULL;
+}
+
+static void *vb2_dma_sg_dmabuf_ops_vmap(struct dma_buf *dbuf)
+{
+	struct vb2_dma_sg_buf *buf = dbuf->priv;
+
+	return vb2_dma_sg_vaddr(buf);
+}
+
+static int vb2_dma_sg_dmabuf_ops_mmap(struct dma_buf *dbuf,
+	struct vm_area_struct *vma)
+{
+	return vb2_dma_sg_mmap(dbuf->priv, vma);
+}
+
+static const struct dma_buf_ops vb2_dma_sg_dmabuf_ops = {
+	.attach = vb2_dma_sg_dmabuf_ops_attach,
+	.detach = vb2_dma_sg_dmabuf_ops_detach,
+	.map_dma_buf = vb2_dma_sg_dmabuf_ops_map,
+	.unmap_dma_buf = vb2_dma_sg_dmabuf_ops_unmap,
+	.map = vb2_dma_sg_dmabuf_ops_kmap,
+	.vmap = vb2_dma_sg_dmabuf_ops_vmap,
+	.mmap = vb2_dma_sg_dmabuf_ops_mmap,
+	.release = vb2_dma_sg_dmabuf_ops_release,
+};
+
+static struct dma_buf *vb2_dma_sg_get_dmabuf(void *buf_priv, unsigned long flags)
+{
+	struct vb2_dma_sg_buf *buf = buf_priv;
+	struct dma_buf *dbuf;
+	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
+
+	exp_info.ops = &vb2_dma_sg_dmabuf_ops;
+	exp_info.size = buf->size;
+	exp_info.flags = flags;
+	exp_info.priv = buf;
+
+	if (WARN_ON(!buf->dma_sgt))
+		return NULL;
+
+	dbuf = dma_buf_export(&exp_info);
+	if (IS_ERR(dbuf))
+		return NULL;
+
+	/* dmabuf keeps reference to vb2 buffer */
+	refcount_inc(&buf->refcount);
+
+	return dbuf;
+}
+
+/*********************************************/
+/*       callbacks for DMABUF buffers        */
+/*********************************************/
+
+static int vb2_dma_sg_map_dmabuf(void *mem_priv)
+{
+	struct vb2_dma_sg_buf *buf = mem_priv;
+	struct sg_table *sgt;
+
+	if (WARN_ON(!buf->db_attach)) {
+		pr_err("trying to pin a non attached buffer\n");
+		return -EINVAL;
+	}
+
+	if (WARN_ON(buf->dma_sgt)) {
+		pr_err("dmabuf buffer is already pinned\n");
+		return 0;
+	}
+
+	/* get the associated scatterlist for this buffer */
+	sgt = dma_buf_map_attachment(buf->db_attach, buf->dma_dir);
+	if (IS_ERR(sgt)) {
+		pr_err("Error getting dmabuf scatterlist\n");
+		return -EINVAL;
+	}
+
+	buf->dma_sgt = sgt;
+	buf->vaddr = NULL;
+
+	return 0;
+}
+
+static void vb2_dma_sg_unmap_dmabuf(void *mem_priv)
+{
+	struct vb2_dma_sg_buf *buf = mem_priv;
+	struct sg_table *sgt = buf->dma_sgt;
+
+	if (WARN_ON(!buf->db_attach)) {
+		pr_err("trying to unpin a not attached buffer\n");
+		return;
+	}
+
+	if (WARN_ON(!sgt)) {
+		pr_err("dmabuf buffer is already unpinned\n");
+		return;
+	}
+
+	if (buf->vaddr) {
+		dma_buf_vunmap(buf->db_attach->dmabuf, buf->vaddr);
+		buf->vaddr = NULL;
+	}
+	dma_buf_unmap_attachment(buf->db_attach, sgt, buf->dma_dir);
+
+	buf->dma_sgt = NULL;
+}
+
+static void vb2_dma_sg_detach_dmabuf(void *mem_priv)
+{
+	struct vb2_dma_sg_buf *buf = mem_priv;
+
+	/* if vb2 works correctly you should never detach a mapped buffer */
+	if (WARN_ON(buf->dma_sgt))
+		vb2_dma_sg_unmap_dmabuf(buf);
+
+	/* detach this attachment */
+	dma_buf_detach(buf->db_attach->dmabuf, buf->db_attach);
+	kfree(buf);
+}
+
+static void *vb2_dma_sg_attach_dmabuf(struct device *dev, struct dma_buf *dbuf,
+	unsigned long size, enum dma_data_direction dma_dir)
+{
+	struct vb2_dma_sg_buf *buf;
+	struct dma_buf_attachment *dba;
+
+	if (WARN_ON(!dev))
+		return ERR_PTR(-EINVAL);
+
+	if (dbuf->size < size)
+		return ERR_PTR(-EFAULT);
+
+	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
+	if (!buf)
+		return ERR_PTR(-ENOMEM);
+
+	buf->dev = dev;
+	/* create attachment for the dmabuf with the user device */
+	dba = dma_buf_attach(dbuf, buf->dev);
+	if (IS_ERR(dba)) {
+		pr_err("failed to attach dmabuf\n");
+		kfree(buf);
+		return dba;
+	}
+
+	buf->dma_dir = dma_dir;
+	buf->size = size;
+	buf->db_attach = dba;
+
+	return buf;
+}
+
+static void *vb2_dma_sg_cookie(void *buf_priv)
+{
+	struct vb2_dma_sg_buf *buf = buf_priv;
+
+	return buf->dma_sgt;
+}
+
+const struct vb2_mem_ops vb2_dma_sg_memops = {
+	.alloc		= vb2_dma_sg_alloc,
+	.put		= vb2_dma_sg_put,
+	.get_userptr	= vb2_dma_sg_get_userptr,
+	.put_userptr	= vb2_dma_sg_put_userptr,
+	.prepare	= vb2_dma_sg_prepare,
+	.finish		= vb2_dma_sg_finish,
+	.vaddr		= vb2_dma_sg_vaddr,
+	.mmap		= vb2_dma_sg_mmap,
+	.num_users	= vb2_dma_sg_num_users,
+	.get_dmabuf	= vb2_dma_sg_get_dmabuf,
+	.map_dmabuf	= vb2_dma_sg_map_dmabuf,
+	.unmap_dmabuf	= vb2_dma_sg_unmap_dmabuf,
+	.attach_dmabuf	= vb2_dma_sg_attach_dmabuf,
+	.detach_dmabuf	= vb2_dma_sg_detach_dmabuf,
+	.cookie		= vb2_dma_sg_cookie,
+};
+EXPORT_SYMBOL_GPL(vb2_dma_sg_memops);
+
+MODULE_DESCRIPTION("dma scatter/gather memory handling routines for videobuf2");
+MODULE_AUTHOR("Andrzej Pietrasiewicz");
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/common/videobuf2/videobuf2-dvb.c b/drivers/media/common/videobuf2/videobuf2-dvb.c
new file mode 100644
index 0000000..9f38b42
--- /dev/null
+++ b/drivers/media/common/videobuf2/videobuf2-dvb.c
@@ -0,0 +1,345 @@
+/*
+ *
+ * some helper functions for simple DVB cards which simply DMA the
+ * complete transport stream and let the computer sort everything else
+ * (i.e. we are using the software demux, ...).  Also uses the
+ * video-buf framework to manage DMA buffers.
+ *
+ * (c) 2004 Gerd Knorr <kraxel@bytesex.org> [SUSE Labs]
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/slab.h>
+
+#include <media/videobuf2-dvb.h>
+
+/* ------------------------------------------------------------------ */
+
+MODULE_AUTHOR("Gerd Knorr <kraxel@bytesex.org> [SuSE Labs]");
+MODULE_LICENSE("GPL");
+
+/* ------------------------------------------------------------------ */
+
+static int dvb_fnc(struct vb2_buffer *vb, void *priv)
+{
+	struct vb2_dvb *dvb = priv;
+
+	dvb_dmx_swfilter(&dvb->demux, vb2_plane_vaddr(vb, 0),
+				      vb2_get_plane_payload(vb, 0));
+	return 0;
+}
+
+static int vb2_dvb_start_feed(struct dvb_demux_feed *feed)
+{
+	struct dvb_demux *demux = feed->demux;
+	struct vb2_dvb *dvb = demux->priv;
+	int rc = 0;
+
+	if (!demux->dmx.frontend)
+		return -EINVAL;
+
+	mutex_lock(&dvb->lock);
+	dvb->nfeeds++;
+
+	if (!dvb->dvbq.threadio) {
+		rc = vb2_thread_start(&dvb->dvbq, dvb_fnc, dvb, dvb->name);
+		if (rc)
+			dvb->nfeeds--;
+	}
+	if (!rc)
+		rc = dvb->nfeeds;
+	mutex_unlock(&dvb->lock);
+	return rc;
+}
+
+static int vb2_dvb_stop_feed(struct dvb_demux_feed *feed)
+{
+	struct dvb_demux *demux = feed->demux;
+	struct vb2_dvb *dvb = demux->priv;
+	int err = 0;
+
+	mutex_lock(&dvb->lock);
+	dvb->nfeeds--;
+	if (0 == dvb->nfeeds)
+		err = vb2_thread_stop(&dvb->dvbq);
+	mutex_unlock(&dvb->lock);
+	return err;
+}
+
+static int vb2_dvb_register_adapter(struct vb2_dvb_frontends *fe,
+			  struct module *module,
+			  void *adapter_priv,
+			  struct device *device,
+			  struct media_device *mdev,
+			  char *adapter_name,
+			  short *adapter_nr,
+			  int mfe_shared)
+{
+	int result;
+
+	mutex_init(&fe->lock);
+
+	/* register adapter */
+	result = dvb_register_adapter(&fe->adapter, adapter_name, module,
+		device, adapter_nr);
+	if (result < 0) {
+		pr_warn("%s: dvb_register_adapter failed (errno = %d)\n",
+		       adapter_name, result);
+	}
+	fe->adapter.priv = adapter_priv;
+	fe->adapter.mfe_shared = mfe_shared;
+#ifdef CONFIG_MEDIA_CONTROLLER_DVB
+	if (mdev)
+		fe->adapter.mdev = mdev;
+#endif
+	return result;
+}
+
+static int vb2_dvb_register_frontend(struct dvb_adapter *adapter,
+	struct vb2_dvb *dvb)
+{
+	int result;
+
+	/* register frontend */
+	result = dvb_register_frontend(adapter, dvb->frontend);
+	if (result < 0) {
+		pr_warn("%s: dvb_register_frontend failed (errno = %d)\n",
+		       dvb->name, result);
+		goto fail_frontend;
+	}
+
+	/* register demux stuff */
+	dvb->demux.dmx.capabilities =
+		DMX_TS_FILTERING | DMX_SECTION_FILTERING |
+		DMX_MEMORY_BASED_FILTERING;
+	dvb->demux.priv       = dvb;
+	dvb->demux.filternum  = 256;
+	dvb->demux.feednum    = 256;
+	dvb->demux.start_feed = vb2_dvb_start_feed;
+	dvb->demux.stop_feed  = vb2_dvb_stop_feed;
+	result = dvb_dmx_init(&dvb->demux);
+	if (result < 0) {
+		pr_warn("%s: dvb_dmx_init failed (errno = %d)\n",
+		       dvb->name, result);
+		goto fail_dmx;
+	}
+
+	dvb->dmxdev.filternum    = 256;
+	dvb->dmxdev.demux        = &dvb->demux.dmx;
+	dvb->dmxdev.capabilities = 0;
+	result = dvb_dmxdev_init(&dvb->dmxdev, adapter);
+
+	if (result < 0) {
+		pr_warn("%s: dvb_dmxdev_init failed (errno = %d)\n",
+		       dvb->name, result);
+		goto fail_dmxdev;
+	}
+
+	dvb->fe_hw.source = DMX_FRONTEND_0;
+	result = dvb->demux.dmx.add_frontend(&dvb->demux.dmx, &dvb->fe_hw);
+	if (result < 0) {
+		pr_warn("%s: add_frontend failed (DMX_FRONTEND_0, errno = %d)\n",
+		       dvb->name, result);
+		goto fail_fe_hw;
+	}
+
+	dvb->fe_mem.source = DMX_MEMORY_FE;
+	result = dvb->demux.dmx.add_frontend(&dvb->demux.dmx, &dvb->fe_mem);
+	if (result < 0) {
+		pr_warn("%s: add_frontend failed (DMX_MEMORY_FE, errno = %d)\n",
+		       dvb->name, result);
+		goto fail_fe_mem;
+	}
+
+	result = dvb->demux.dmx.connect_frontend(&dvb->demux.dmx, &dvb->fe_hw);
+	if (result < 0) {
+		pr_warn("%s: connect_frontend failed (errno = %d)\n",
+		       dvb->name, result);
+		goto fail_fe_conn;
+	}
+
+	/* register network adapter */
+	result = dvb_net_init(adapter, &dvb->net, &dvb->demux.dmx);
+	if (result < 0) {
+		pr_warn("%s: dvb_net_init failed (errno = %d)\n",
+		       dvb->name, result);
+		goto fail_fe_conn;
+	}
+	return 0;
+
+fail_fe_conn:
+	dvb->demux.dmx.remove_frontend(&dvb->demux.dmx, &dvb->fe_mem);
+fail_fe_mem:
+	dvb->demux.dmx.remove_frontend(&dvb->demux.dmx, &dvb->fe_hw);
+fail_fe_hw:
+	dvb_dmxdev_release(&dvb->dmxdev);
+fail_dmxdev:
+	dvb_dmx_release(&dvb->demux);
+fail_dmx:
+	dvb_unregister_frontend(dvb->frontend);
+fail_frontend:
+	dvb_frontend_detach(dvb->frontend);
+	dvb->frontend = NULL;
+
+	return result;
+}
+
+/* ------------------------------------------------------------------ */
+/* Register a single adapter and one or more frontends */
+int vb2_dvb_register_bus(struct vb2_dvb_frontends *f,
+			 struct module *module,
+			 void *adapter_priv,
+			 struct device *device,
+			 struct media_device *mdev,
+			 short *adapter_nr,
+			 int mfe_shared)
+{
+	struct list_head *list, *q;
+	struct vb2_dvb_frontend *fe;
+	int res;
+
+	fe = vb2_dvb_get_frontend(f, 1);
+	if (!fe) {
+		pr_warn("Unable to register an adapter that has no frontends\n");
+		return -EINVAL;
+	}
+
+	/* Bring up the adapter */
+	res = vb2_dvb_register_adapter(f, module, adapter_priv, device, mdev,
+		fe->dvb.name, adapter_nr, mfe_shared);
+	if (res < 0) {
+		pr_warn("vb2_dvb_register_adapter failed (errno = %d)\n", res);
+		return res;
+	}
+
+	/* Attach all of the frontends to the adapter */
+	mutex_lock(&f->lock);
+	list_for_each_safe(list, q, &f->felist) {
+		fe = list_entry(list, struct vb2_dvb_frontend, felist);
+		res = vb2_dvb_register_frontend(&f->adapter, &fe->dvb);
+		if (res < 0) {
+			pr_warn("%s: vb2_dvb_register_frontend failed (errno = %d)\n",
+				fe->dvb.name, res);
+			goto err;
+		}
+		res = dvb_create_media_graph(&f->adapter, false);
+		if (res < 0)
+			goto err;
+	}
+
+	mutex_unlock(&f->lock);
+	return 0;
+
+err:
+	mutex_unlock(&f->lock);
+	vb2_dvb_unregister_bus(f);
+	return res;
+}
+EXPORT_SYMBOL(vb2_dvb_register_bus);
+
+void vb2_dvb_unregister_bus(struct vb2_dvb_frontends *f)
+{
+	vb2_dvb_dealloc_frontends(f);
+
+	dvb_unregister_adapter(&f->adapter);
+}
+EXPORT_SYMBOL(vb2_dvb_unregister_bus);
+
+struct vb2_dvb_frontend *vb2_dvb_get_frontend(
+	struct vb2_dvb_frontends *f, int id)
+{
+	struct list_head *list, *q;
+	struct vb2_dvb_frontend *fe, *ret = NULL;
+
+	mutex_lock(&f->lock);
+
+	list_for_each_safe(list, q, &f->felist) {
+		fe = list_entry(list, struct vb2_dvb_frontend, felist);
+		if (fe->id == id) {
+			ret = fe;
+			break;
+		}
+	}
+
+	mutex_unlock(&f->lock);
+
+	return ret;
+}
+EXPORT_SYMBOL(vb2_dvb_get_frontend);
+
+int vb2_dvb_find_frontend(struct vb2_dvb_frontends *f,
+	struct dvb_frontend *p)
+{
+	struct list_head *list, *q;
+	struct vb2_dvb_frontend *fe = NULL;
+	int ret = 0;
+
+	mutex_lock(&f->lock);
+
+	list_for_each_safe(list, q, &f->felist) {
+		fe = list_entry(list, struct vb2_dvb_frontend, felist);
+		if (fe->dvb.frontend == p) {
+			ret = fe->id;
+			break;
+		}
+	}
+
+	mutex_unlock(&f->lock);
+
+	return ret;
+}
+EXPORT_SYMBOL(vb2_dvb_find_frontend);
+
+struct vb2_dvb_frontend *vb2_dvb_alloc_frontend(
+	struct vb2_dvb_frontends *f, int id)
+{
+	struct vb2_dvb_frontend *fe;
+
+	fe = kzalloc(sizeof(struct vb2_dvb_frontend), GFP_KERNEL);
+	if (fe == NULL)
+		return NULL;
+
+	fe->id = id;
+	mutex_init(&fe->dvb.lock);
+
+	mutex_lock(&f->lock);
+	list_add_tail(&fe->felist, &f->felist);
+	mutex_unlock(&f->lock);
+	return fe;
+}
+EXPORT_SYMBOL(vb2_dvb_alloc_frontend);
+
+void vb2_dvb_dealloc_frontends(struct vb2_dvb_frontends *f)
+{
+	struct list_head *list, *q;
+	struct vb2_dvb_frontend *fe;
+
+	mutex_lock(&f->lock);
+	list_for_each_safe(list, q, &f->felist) {
+		fe = list_entry(list, struct vb2_dvb_frontend, felist);
+		if (fe->dvb.net.dvbdev) {
+			dvb_net_release(&fe->dvb.net);
+			fe->dvb.demux.dmx.remove_frontend(&fe->dvb.demux.dmx,
+				&fe->dvb.fe_mem);
+			fe->dvb.demux.dmx.remove_frontend(&fe->dvb.demux.dmx,
+				&fe->dvb.fe_hw);
+			dvb_dmxdev_release(&fe->dvb.dmxdev);
+			dvb_dmx_release(&fe->dvb.demux);
+			dvb_unregister_frontend(fe->dvb.frontend);
+		}
+		if (fe->dvb.frontend)
+			/* always allocated, may have been reset */
+			dvb_frontend_detach(fe->dvb.frontend);
+		list_del(list); /* remove list entry */
+		kfree(fe);	/* free frontend allocation */
+	}
+	mutex_unlock(&f->lock);
+}
+EXPORT_SYMBOL(vb2_dvb_dealloc_frontends);
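Usage sketch (not part of the patch): a bridge driver typically allocates one vb2_dvb_frontend per demodulator, attaches the demod and its TS vb2 queue, and then registers everything in one call. The struct mydrv_dev, the mydrv_attach_demod() helper and the error-handling layout below are assumptions for illustration only.

#include <linux/module.h>
#include <media/videobuf2-dvb.h>

DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr);

struct mydrv_dev {				/* hypothetical driver state */
	struct vb2_dvb_frontends frontends;
};

/*
 * Hypothetical: attaches the demodulator to fe->dvb.frontend, sets
 * fe->dvb.name and initializes fe->dvb.dvbq with an allocator that supports
 * vb2_plane_vaddr() (the software demux reads the TS through the kernel
 * mapping, see dvb_fnc() above).
 */
int mydrv_attach_demod(struct mydrv_dev *mydev, struct vb2_dvb *dvb);

static int mydrv_dvb_init(struct mydrv_dev *mydev, struct device *dev)
{
	struct vb2_dvb_frontend *fe;
	int ret;

	mutex_init(&mydev->frontends.lock);
	INIT_LIST_HEAD(&mydev->frontends.felist);

	fe = vb2_dvb_alloc_frontend(&mydev->frontends, 1);
	if (!fe)
		return -ENOMEM;

	ret = mydrv_attach_demod(mydev, &fe->dvb);
	if (ret)
		goto err_dealloc;

	/* Registers the adapter, frontend, demux, dmxdev and net devices. */
	ret = vb2_dvb_register_bus(&mydev->frontends, THIS_MODULE, mydev,
				   dev, NULL, adapter_nr, 0);
	if (ret)
		goto err_dealloc;
	return 0;

err_dealloc:
	vb2_dvb_dealloc_frontends(&mydev->frontends);
	return ret;
}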
diff --git a/drivers/media/common/videobuf2/videobuf2-memops.c b/drivers/media/common/videobuf2/videobuf2-memops.c
new file mode 100644
index 0000000..89e5198
--- /dev/null
+++ b/drivers/media/common/videobuf2/videobuf2-memops.c
@@ -0,0 +1,135 @@
+/*
+ * videobuf2-memops.c - generic memory handling routines for videobuf2
+ *
+ * Copyright (C) 2010 Samsung Electronics
+ *
+ * Author: Pawel Osciak <pawel@osciak.com>
+ *	   Marek Szyprowski <m.szyprowski@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ */
+
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/dma-mapping.h>
+#include <linux/vmalloc.h>
+#include <linux/mm.h>
+#include <linux/sched.h>
+#include <linux/file.h>
+
+#include <media/videobuf2-v4l2.h>
+#include <media/videobuf2-memops.h>
+
+/**
+ * vb2_create_framevec() - map virtual addresses to pfns
+ * @start:	Virtual user address where we start mapping
+ * @length:	Length of a range to map
+ * @write:	Should we map for writing into the area
+ *
+ * This function allocates and fills in a vector with pfns corresponding to
+ * the virtual address range passed in arguments. If the pfns have
+ * corresponding pages, page references are also grabbed to pin the pages in
+ * memory. The function returns a pointer to the vector on success and an
+ * error pointer in case of failure. The returned vector needs to be freed
+ * via vb2_destroy_framevec().
+ */
+struct frame_vector *vb2_create_framevec(unsigned long start,
+					 unsigned long length,
+					 bool write)
+{
+	int ret;
+	unsigned long first, last;
+	unsigned long nr;
+	struct frame_vector *vec;
+	unsigned int flags = FOLL_FORCE;
+
+	if (write)
+		flags |= FOLL_WRITE;
+
+	first = start >> PAGE_SHIFT;
+	last = (start + length - 1) >> PAGE_SHIFT;
+	nr = last - first + 1;
+	vec = frame_vector_create(nr);
+	if (!vec)
+		return ERR_PTR(-ENOMEM);
+	ret = get_vaddr_frames(start & PAGE_MASK, nr, flags, vec);
+	if (ret < 0)
+		goto out_destroy;
+	/* We accept only a complete set of PFNs */
+	if (ret != nr) {
+		ret = -EFAULT;
+		goto out_release;
+	}
+	return vec;
+out_release:
+	put_vaddr_frames(vec);
+out_destroy:
+	frame_vector_destroy(vec);
+	return ERR_PTR(ret);
+}
+EXPORT_SYMBOL(vb2_create_framevec);
+
+/**
+ * vb2_destroy_framevec() - release vector of mapped pfns
+ * @vec:	vector of pfns / pages to release
+ *
+ * This releases references to all pages in the vector @vec (if corresponding
+ * pfns are backed by pages) and frees the passed vector.
+ */
+void vb2_destroy_framevec(struct frame_vector *vec)
+{
+	put_vaddr_frames(vec);
+	frame_vector_destroy(vec);
+}
+EXPORT_SYMBOL(vb2_destroy_framevec);
+
+/**
+ * vb2_common_vm_open() - increase refcount of the vma
+ * @vma:	virtual memory region for the mapping
+ *
+ * This function adds another user to the provided vma. It expects
+ * struct vb2_vmarea_handler pointer in vma->vm_private_data.
+ */
+static void vb2_common_vm_open(struct vm_area_struct *vma)
+{
+	struct vb2_vmarea_handler *h = vma->vm_private_data;
+
+	pr_debug("%s: %p, refcount: %d, vma: %08lx-%08lx\n",
+	       __func__, h, refcount_read(h->refcount), vma->vm_start,
+	       vma->vm_end);
+
+	refcount_inc(h->refcount);
+}
+
+/**
+ * vb2_common_vm_close() - decrease refcount of the vma
+ * @vma:	virtual memory region for the mapping
+ *
+ * This function releases the user from the provided vma. It expects
+ * struct vb2_vmarea_handler pointer in vma->vm_private_data.
+ */
+static void vb2_common_vm_close(struct vm_area_struct *vma)
+{
+	struct vb2_vmarea_handler *h = vma->vm_private_data;
+
+	pr_debug("%s: %p, refcount: %d, vma: %08lx-%08lx\n",
+	       __func__, h, refcount_read(h->refcount), vma->vm_start,
+	       vma->vm_end);
+
+	h->put(h->arg);
+}
+
+/*
+ * vb2_common_vm_ops - common vm_ops used for tracking refcount of mmaped
+ * video buffers
+ */
+const struct vm_operations_struct vb2_common_vm_ops = {
+	.open = vb2_common_vm_open,
+	.close = vb2_common_vm_close,
+};
+EXPORT_SYMBOL_GPL(vb2_common_vm_ops);
+
+MODULE_DESCRIPTION("common memory handling routines for videobuf2");
+MODULE_AUTHOR("Pawel Osciak <pawel@osciak.com>");
+MODULE_LICENSE("GPL");
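The kerneldoc of vb2_create_framevec() above describes the pin/unpin contract used by the USERPTR paths of the allocators. A minimal sketch of that contract follows (not part of the patch); the demo_* name is illustrative only, and the real users appear in videobuf2-vmalloc.c below and in videobuf2-dma-sg.c.

#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <media/videobuf2-memops.h>

/*
 * Illustrative only: pin a userspace range the way an allocator's
 * get_userptr() implementation does, then release it again.
 */
static int demo_pin_and_release(unsigned long vaddr, unsigned long size,
				bool write)
{
	struct frame_vector *vec;

	vec = vb2_create_framevec(vaddr, size, write);
	if (IS_ERR(vec))
		return PTR_ERR(vec);	/* -ENOMEM, -EFAULT, ... */

	/*
	 * frame_vector_pages() yields struct page pointers when the range is
	 * backed by normal pages; frame_vector_pfns() must be used for
	 * PFNMAP/IO ranges (see vb2_vmalloc_get_userptr() below).
	 */
	pr_info("pinned %u frames at 0x%lx\n", frame_vector_count(vec), vaddr);

	vb2_destroy_framevec(vec);
	return 0;
}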
diff --git a/drivers/media/common/videobuf2/videobuf2-v4l2.c b/drivers/media/common/videobuf2/videobuf2-v4l2.c
new file mode 100644
index 0000000..886a2d8
--- /dev/null
+++ b/drivers/media/common/videobuf2/videobuf2-v4l2.c
@@ -0,0 +1,966 @@
+/*
+ * videobuf2-v4l2.c - V4L2 driver helper framework
+ *
+ * Copyright (C) 2010 Samsung Electronics
+ *
+ * Author: Pawel Osciak <pawel@osciak.com>
+ *	   Marek Szyprowski <m.szyprowski@samsung.com>
+ *
+ * The vb2_thread implementation was based on code from videobuf-dvb.c:
+ *	(c) 2004 Gerd Knorr <kraxel@bytesex.org> [SUSE Labs]
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ */
+
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mm.h>
+#include <linux/poll.h>
+#include <linux/slab.h>
+#include <linux/sched.h>
+#include <linux/freezer.h>
+#include <linux/kthread.h>
+
+#include <media/v4l2-dev.h>
+#include <media/v4l2-fh.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-common.h>
+
+#include <media/videobuf2-v4l2.h>
+
+static int debug;
+module_param(debug, int, 0644);
+
+#define dprintk(level, fmt, arg...)					      \
+	do {								      \
+		if (debug >= level)					      \
+			pr_info("vb2-v4l2: %s: " fmt, __func__, ## arg); \
+	} while (0)
+
+/* Flags that are set by the vb2 core */
+#define V4L2_BUFFER_MASK_FLAGS	(V4L2_BUF_FLAG_MAPPED | V4L2_BUF_FLAG_QUEUED | \
+				 V4L2_BUF_FLAG_DONE | V4L2_BUF_FLAG_ERROR | \
+				 V4L2_BUF_FLAG_PREPARED | \
+				 V4L2_BUF_FLAG_TIMESTAMP_MASK)
+/* Output buffer flags that should be passed on to the driver */
+#define V4L2_BUFFER_OUT_FLAGS	(V4L2_BUF_FLAG_PFRAME | V4L2_BUF_FLAG_BFRAME | \
+				 V4L2_BUF_FLAG_KEYFRAME | V4L2_BUF_FLAG_TIMECODE)
+
+/*
+ * __verify_planes_array() - verify that the planes array passed in struct
+ * v4l2_buffer from userspace can be safely used
+ */
+static int __verify_planes_array(struct vb2_buffer *vb, const struct v4l2_buffer *b)
+{
+	if (!V4L2_TYPE_IS_MULTIPLANAR(b->type))
+		return 0;
+
+	/* Is memory for copying plane information present? */
+	if (b->m.planes == NULL) {
+		dprintk(1, "multi-planar buffer passed but planes array not provided\n");
+		return -EINVAL;
+	}
+
+	if (b->length < vb->num_planes || b->length > VB2_MAX_PLANES) {
+		dprintk(1, "incorrect planes array length, expected %d, got %d\n",
+			vb->num_planes, b->length);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int __verify_planes_array_core(struct vb2_buffer *vb, const void *pb)
+{
+	return __verify_planes_array(vb, pb);
+}
+
+/*
+ * __verify_length() - Verify that the bytesused value for each plane fits in
+ * the plane length and that the data offset doesn't exceed the bytesused value.
+ */
+static int __verify_length(struct vb2_buffer *vb, const struct v4l2_buffer *b)
+{
+	unsigned int length;
+	unsigned int bytesused;
+	unsigned int plane;
+
+	if (!V4L2_TYPE_IS_OUTPUT(b->type))
+		return 0;
+
+	if (V4L2_TYPE_IS_MULTIPLANAR(b->type)) {
+		for (plane = 0; plane < vb->num_planes; ++plane) {
+			length = (b->memory == VB2_MEMORY_USERPTR ||
+				  b->memory == VB2_MEMORY_DMABUF)
+			       ? b->m.planes[plane].length
+				: vb->planes[plane].length;
+			bytesused = b->m.planes[plane].bytesused
+				  ? b->m.planes[plane].bytesused : length;
+
+			if (b->m.planes[plane].bytesused > length)
+				return -EINVAL;
+
+			if (b->m.planes[plane].data_offset > 0 &&
+			    b->m.planes[plane].data_offset >= bytesused)
+				return -EINVAL;
+		}
+	} else {
+		length = (b->memory == VB2_MEMORY_USERPTR)
+			? b->length : vb->planes[0].length;
+
+		if (b->bytesused > length)
+			return -EINVAL;
+	}
+
+	return 0;
+}
+
+static void __copy_timestamp(struct vb2_buffer *vb, const void *pb)
+{
+	const struct v4l2_buffer *b = pb;
+	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+	struct vb2_queue *q = vb->vb2_queue;
+
+	if (q->is_output) {
+		/*
+		 * For output buffers copy the timestamp if needed,
+		 * and the timecode field and flag if needed.
+		 */
+		if (q->copy_timestamp)
+			vb->timestamp = timeval_to_ns(&b->timestamp);
+		vbuf->flags |= b->flags & V4L2_BUF_FLAG_TIMECODE;
+		if (b->flags & V4L2_BUF_FLAG_TIMECODE)
+			vbuf->timecode = b->timecode;
+	}
+};
+
+static void vb2_warn_zero_bytesused(struct vb2_buffer *vb)
+{
+	static bool check_once;
+
+	if (check_once)
+		return;
+
+	check_once = true;
+	WARN_ON(1);
+
+	pr_warn("use of bytesused == 0 is deprecated and will be removed in the future,\n");
+	if (vb->vb2_queue->allow_zero_bytesused)
+		pr_warn("use VIDIOC_DECODER_CMD(V4L2_DEC_CMD_STOP) instead.\n");
+	else
+		pr_warn("use the actual size instead.\n");
+}
+
+static int vb2_queue_or_prepare_buf(struct vb2_queue *q, struct v4l2_buffer *b,
+				    const char *opname)
+{
+	if (b->type != q->type) {
+		dprintk(1, "%s: invalid buffer type\n", opname);
+		return -EINVAL;
+	}
+
+	if (b->index >= q->num_buffers) {
+		dprintk(1, "%s: buffer index out of range\n", opname);
+		return -EINVAL;
+	}
+
+	if (q->bufs[b->index] == NULL) {
+		/* Should never happen */
+		dprintk(1, "%s: buffer is NULL\n", opname);
+		return -EINVAL;
+	}
+
+	if (b->memory != q->memory) {
+		dprintk(1, "%s: invalid memory type\n", opname);
+		return -EINVAL;
+	}
+
+	return __verify_planes_array(q->bufs[b->index], b);
+}
+
+/*
+ * __fill_v4l2_buffer() - fill in a struct v4l2_buffer with information to be
+ * returned to userspace
+ */
+static void __fill_v4l2_buffer(struct vb2_buffer *vb, void *pb)
+{
+	struct v4l2_buffer *b = pb;
+	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+	struct vb2_queue *q = vb->vb2_queue;
+	unsigned int plane;
+
+	/* Copy back data such as timestamp, flags, etc. */
+	b->index = vb->index;
+	b->type = vb->type;
+	b->memory = vb->memory;
+	b->bytesused = 0;
+
+	b->flags = vbuf->flags;
+	b->field = vbuf->field;
+	b->timestamp = ns_to_timeval(vb->timestamp);
+	b->timecode = vbuf->timecode;
+	b->sequence = vbuf->sequence;
+	b->reserved2 = 0;
+	b->reserved = 0;
+
+	if (q->is_multiplanar) {
+		/*
+		 * Fill in plane-related data if userspace provided an array
+		 * for it. The caller has already verified memory and size.
+		 */
+		b->length = vb->num_planes;
+		for (plane = 0; plane < vb->num_planes; ++plane) {
+			struct v4l2_plane *pdst = &b->m.planes[plane];
+			struct vb2_plane *psrc = &vb->planes[plane];
+
+			pdst->bytesused = psrc->bytesused;
+			pdst->length = psrc->length;
+			if (q->memory == VB2_MEMORY_MMAP)
+				pdst->m.mem_offset = psrc->m.offset;
+			else if (q->memory == VB2_MEMORY_USERPTR)
+				pdst->m.userptr = psrc->m.userptr;
+			else if (q->memory == VB2_MEMORY_DMABUF)
+				pdst->m.fd = psrc->m.fd;
+			pdst->data_offset = psrc->data_offset;
+			memset(pdst->reserved, 0, sizeof(pdst->reserved));
+		}
+	} else {
+		/*
+		 * We use length and offset in the internal planes array even
+		 * for single-planar buffers, but userspace does not.
+		 */
+		b->length = vb->planes[0].length;
+		b->bytesused = vb->planes[0].bytesused;
+		if (q->memory == VB2_MEMORY_MMAP)
+			b->m.offset = vb->planes[0].m.offset;
+		else if (q->memory == VB2_MEMORY_USERPTR)
+			b->m.userptr = vb->planes[0].m.userptr;
+		else if (q->memory == VB2_MEMORY_DMABUF)
+			b->m.fd = vb->planes[0].m.fd;
+	}
+
+	/*
+	 * Clear any buffer state related flags.
+	 */
+	b->flags &= ~V4L2_BUFFER_MASK_FLAGS;
+	b->flags |= q->timestamp_flags & V4L2_BUF_FLAG_TIMESTAMP_MASK;
+	if (!q->copy_timestamp) {
+		/*
+		 * For non-COPY timestamps, drop timestamp source bits
+		 * and obtain the timestamp source from the queue.
+		 */
+		b->flags &= ~V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
+		b->flags |= q->timestamp_flags & V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
+	}
+
+	switch (vb->state) {
+	case VB2_BUF_STATE_QUEUED:
+	case VB2_BUF_STATE_ACTIVE:
+		b->flags |= V4L2_BUF_FLAG_QUEUED;
+		break;
+	case VB2_BUF_STATE_ERROR:
+		b->flags |= V4L2_BUF_FLAG_ERROR;
+		/* fall through */
+	case VB2_BUF_STATE_DONE:
+		b->flags |= V4L2_BUF_FLAG_DONE;
+		break;
+	case VB2_BUF_STATE_PREPARED:
+		b->flags |= V4L2_BUF_FLAG_PREPARED;
+		break;
+	case VB2_BUF_STATE_PREPARING:
+	case VB2_BUF_STATE_DEQUEUED:
+	case VB2_BUF_STATE_REQUEUEING:
+		/* nothing */
+		break;
+	}
+
+	if (vb2_buffer_in_use(q, vb))
+		b->flags |= V4L2_BUF_FLAG_MAPPED;
+
+	if (!q->is_output &&
+		b->flags & V4L2_BUF_FLAG_DONE &&
+		b->flags & V4L2_BUF_FLAG_LAST)
+		q->last_buffer_dequeued = true;
+}
+
+/*
+ * __fill_vb2_buffer() - fill a vb2_buffer with information provided in a
+ * v4l2_buffer by the userspace. It also verifies that struct
+ * v4l2_buffer has a valid number of planes.
+ */
+static int __fill_vb2_buffer(struct vb2_buffer *vb,
+		const void *pb, struct vb2_plane *planes)
+{
+	struct vb2_queue *q = vb->vb2_queue;
+	const struct v4l2_buffer *b = pb;
+	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+	unsigned int plane;
+	int ret;
+
+	ret = __verify_length(vb, b);
+	if (ret < 0) {
+		dprintk(1, "plane parameters verification failed: %d\n", ret);
+		return ret;
+	}
+	if (b->field == V4L2_FIELD_ALTERNATE && q->is_output) {
+		/*
+		 * If the format's field is ALTERNATE, then the buffer's field
+		 * should be either TOP or BOTTOM, not ALTERNATE since that
+		 * makes no sense. The driver has to know whether the
+		 * buffer represents a top or a bottom field in order to
+		 * program any DMA correctly. Using ALTERNATE is wrong, since
+		 * that just says that it is either a top or a bottom field,
+		 * but not which of the two it is.
+		 */
+		dprintk(1, "the field is incorrectly set to ALTERNATE for an output buffer\n");
+		return -EINVAL;
+	}
+	vb->timestamp = 0;
+	vbuf->sequence = 0;
+
+	if (V4L2_TYPE_IS_MULTIPLANAR(b->type)) {
+		if (b->memory == VB2_MEMORY_USERPTR) {
+			for (plane = 0; plane < vb->num_planes; ++plane) {
+				planes[plane].m.userptr =
+					b->m.planes[plane].m.userptr;
+				planes[plane].length =
+					b->m.planes[plane].length;
+			}
+		}
+		if (b->memory == VB2_MEMORY_DMABUF) {
+			for (plane = 0; plane < vb->num_planes; ++plane) {
+				planes[plane].m.fd =
+					b->m.planes[plane].m.fd;
+				planes[plane].length =
+					b->m.planes[plane].length;
+			}
+		}
+
+		/* Fill in userspace-provided information for OUTPUT types */
+		if (V4L2_TYPE_IS_OUTPUT(b->type)) {
+			/*
+			 * Will have to go up to b->length when the API starts
+			 * accepting a variable number of planes.
+			 *
+			 * If bytesused == 0 for the output buffer, then fall
+			 * back to the full buffer size. In that case
+			 * userspace clearly never bothered to set it and
+			 * it's a safe assumption that they really meant to
+			 * use the full plane sizes.
+			 *
+			 * Some drivers, e.g. old codec drivers, use bytesused == 0
+			 * as a way to indicate that streaming is finished.
+			 * In that case, the driver should use the
+			 * allow_zero_bytesused flag to keep old userspace
+			 * applications working.
+			 */
+			for (plane = 0; plane < vb->num_planes; ++plane) {
+				struct vb2_plane *pdst = &planes[plane];
+				struct v4l2_plane *psrc = &b->m.planes[plane];
+
+				if (psrc->bytesused == 0)
+					vb2_warn_zero_bytesused(vb);
+
+				if (vb->vb2_queue->allow_zero_bytesused)
+					pdst->bytesused = psrc->bytesused;
+				else
+					pdst->bytesused = psrc->bytesused ?
+						psrc->bytesused : pdst->length;
+				pdst->data_offset = psrc->data_offset;
+			}
+		}
+	} else {
+		/*
+		 * Single-planar buffers do not use the planes array,
+		 * so fill in the relevant v4l2_buffer struct fields instead.
+		 * Internally vb2 uses the planes array for single-planar
+		 * buffers as well, for simplicity.
+		 *
+		 * If bytesused == 0 for the output buffer, then fall back
+		 * to the full buffer size as that's a sensible default.
+		 *
+		 * Some drivers, e.g. old codec drivers, use bytesused == 0 as
+		 * a way to indicate that streaming is finished. In that case,
+		 * the driver should use the allow_zero_bytesused flag to keep
+		 * old userspace applications working.
+		 */
+		if (b->memory == VB2_MEMORY_USERPTR) {
+			planes[0].m.userptr = b->m.userptr;
+			planes[0].length = b->length;
+		}
+
+		if (b->memory == VB2_MEMORY_DMABUF) {
+			planes[0].m.fd = b->m.fd;
+			planes[0].length = b->length;
+		}
+
+		if (V4L2_TYPE_IS_OUTPUT(b->type)) {
+			if (b->bytesused == 0)
+				vb2_warn_zero_bytesused(vb);
+
+			if (vb->vb2_queue->allow_zero_bytesused)
+				planes[0].bytesused = b->bytesused;
+			else
+				planes[0].bytesused = b->bytesused ?
+					b->bytesused : planes[0].length;
+		} else
+			planes[0].bytesused = 0;
+
+	}
+
+	/* Zero flags that the vb2 core handles */
+	vbuf->flags = b->flags & ~V4L2_BUFFER_MASK_FLAGS;
+	if (!vb->vb2_queue->copy_timestamp || !V4L2_TYPE_IS_OUTPUT(b->type)) {
+		/*
+		 * Non-COPY timestamps and non-OUTPUT queues will get
+		 * their timestamp and timestamp source flags from the
+		 * queue.
+		 */
+		vbuf->flags &= ~V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
+	}
+
+	if (V4L2_TYPE_IS_OUTPUT(b->type)) {
+		/*
+		 * For output buffers mask out the timecode flag:
+		 * this will be handled later in vb2_qbuf().
+		 * The 'field' is valid metadata for this output buffer
+		 * and so that needs to be copied here.
+		 */
+		vbuf->flags &= ~V4L2_BUF_FLAG_TIMECODE;
+		vbuf->field = b->field;
+	} else {
+		/* Zero any output buffer flags as this is a capture buffer */
+		vbuf->flags &= ~V4L2_BUFFER_OUT_FLAGS;
+		/* Zero last flag, this is a signal from driver to userspace */
+		vbuf->flags &= ~V4L2_BUF_FLAG_LAST;
+	}
+
+	return 0;
+}
+
+static const struct vb2_buf_ops v4l2_buf_ops = {
+	.verify_planes_array	= __verify_planes_array_core,
+	.fill_user_buffer	= __fill_v4l2_buffer,
+	.fill_vb2_buffer	= __fill_vb2_buffer,
+	.copy_timestamp		= __copy_timestamp,
+};
+
+/*
+ * vb2_querybuf() - query video buffer information
+ * @q:		videobuf queue
+ * @b:		buffer struct passed from userspace to vidioc_querybuf handler
+ *		in driver
+ *
+ * Should be called from vidioc_querybuf ioctl handler in driver.
+ * This function will verify the passed v4l2_buffer structure and fill the
+ * relevant information for the userspace.
+ *
+ * The return values from this function are intended to be directly returned
+ * from vidioc_querybuf handler in driver.
+ */
+int vb2_querybuf(struct vb2_queue *q, struct v4l2_buffer *b)
+{
+	struct vb2_buffer *vb;
+	int ret;
+
+	if (b->type != q->type) {
+		dprintk(1, "wrong buffer type\n");
+		return -EINVAL;
+	}
+
+	if (b->index >= q->num_buffers) {
+		dprintk(1, "buffer index out of range\n");
+		return -EINVAL;
+	}
+	vb = q->bufs[b->index];
+	ret = __verify_planes_array(vb, b);
+	if (!ret)
+		vb2_core_querybuf(q, b->index, b);
+	return ret;
+}
+EXPORT_SYMBOL(vb2_querybuf);
+
+int vb2_reqbufs(struct vb2_queue *q, struct v4l2_requestbuffers *req)
+{
+	int ret = vb2_verify_memory_type(q, req->memory, req->type);
+
+	return ret ? ret : vb2_core_reqbufs(q, req->memory, &req->count);
+}
+EXPORT_SYMBOL_GPL(vb2_reqbufs);
+
+int vb2_prepare_buf(struct vb2_queue *q, struct v4l2_buffer *b)
+{
+	int ret;
+
+	if (vb2_fileio_is_active(q)) {
+		dprintk(1, "file io in progress\n");
+		return -EBUSY;
+	}
+
+	ret = vb2_queue_or_prepare_buf(q, b, "prepare_buf");
+
+	return ret ? ret : vb2_core_prepare_buf(q, b->index, b);
+}
+EXPORT_SYMBOL_GPL(vb2_prepare_buf);
+
+int vb2_create_bufs(struct vb2_queue *q, struct v4l2_create_buffers *create)
+{
+	unsigned requested_planes = 1;
+	unsigned requested_sizes[VIDEO_MAX_PLANES];
+	struct v4l2_format *f = &create->format;
+	int ret = vb2_verify_memory_type(q, create->memory, f->type);
+	unsigned i;
+
+	create->index = q->num_buffers;
+	if (create->count == 0)
+		return ret != -EBUSY ? ret : 0;
+
+	switch (f->type) {
+	case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
+	case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
+		requested_planes = f->fmt.pix_mp.num_planes;
+		if (requested_planes == 0 ||
+		    requested_planes > VIDEO_MAX_PLANES)
+			return -EINVAL;
+		for (i = 0; i < requested_planes; i++)
+			requested_sizes[i] =
+				f->fmt.pix_mp.plane_fmt[i].sizeimage;
+		break;
+	case V4L2_BUF_TYPE_VIDEO_CAPTURE:
+	case V4L2_BUF_TYPE_VIDEO_OUTPUT:
+		requested_sizes[0] = f->fmt.pix.sizeimage;
+		break;
+	case V4L2_BUF_TYPE_VBI_CAPTURE:
+	case V4L2_BUF_TYPE_VBI_OUTPUT:
+		requested_sizes[0] = f->fmt.vbi.samples_per_line *
+			(f->fmt.vbi.count[0] + f->fmt.vbi.count[1]);
+		break;
+	case V4L2_BUF_TYPE_SLICED_VBI_CAPTURE:
+	case V4L2_BUF_TYPE_SLICED_VBI_OUTPUT:
+		requested_sizes[0] = f->fmt.sliced.io_size;
+		break;
+	case V4L2_BUF_TYPE_SDR_CAPTURE:
+	case V4L2_BUF_TYPE_SDR_OUTPUT:
+		requested_sizes[0] = f->fmt.sdr.buffersize;
+		break;
+	case V4L2_BUF_TYPE_META_CAPTURE:
+		requested_sizes[0] = f->fmt.meta.buffersize;
+		break;
+	default:
+		return -EINVAL;
+	}
+	for (i = 0; i < requested_planes; i++)
+		if (requested_sizes[i] == 0)
+			return -EINVAL;
+	return ret ? ret : vb2_core_create_bufs(q, create->memory,
+		&create->count, requested_planes, requested_sizes);
+}
+EXPORT_SYMBOL_GPL(vb2_create_bufs);
+
+int vb2_qbuf(struct vb2_queue *q, struct v4l2_buffer *b)
+{
+	int ret;
+
+	if (vb2_fileio_is_active(q)) {
+		dprintk(1, "file io in progress\n");
+		return -EBUSY;
+	}
+
+	ret = vb2_queue_or_prepare_buf(q, b, "qbuf");
+	return ret ? ret : vb2_core_qbuf(q, b->index, b);
+}
+EXPORT_SYMBOL_GPL(vb2_qbuf);
+
+int vb2_dqbuf(struct vb2_queue *q, struct v4l2_buffer *b, bool nonblocking)
+{
+	int ret;
+
+	if (vb2_fileio_is_active(q)) {
+		dprintk(1, "file io in progress\n");
+		return -EBUSY;
+	}
+
+	if (b->type != q->type) {
+		dprintk(1, "invalid buffer type\n");
+		return -EINVAL;
+	}
+
+	ret = vb2_core_dqbuf(q, NULL, b, nonblocking);
+
+	/*
+	 * After VIDIOC_DQBUF returns, the V4L2_BUF_FLAG_DONE flag must be
+	 * cleared.
+	 */
+	b->flags &= ~V4L2_BUF_FLAG_DONE;
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(vb2_dqbuf);
+
+int vb2_streamon(struct vb2_queue *q, enum v4l2_buf_type type)
+{
+	if (vb2_fileio_is_active(q)) {
+		dprintk(1, "file io in progress\n");
+		return -EBUSY;
+	}
+	return vb2_core_streamon(q, type);
+}
+EXPORT_SYMBOL_GPL(vb2_streamon);
+
+int vb2_streamoff(struct vb2_queue *q, enum v4l2_buf_type type)
+{
+	if (vb2_fileio_is_active(q)) {
+		dprintk(1, "file io in progress\n");
+		return -EBUSY;
+	}
+	return vb2_core_streamoff(q, type);
+}
+EXPORT_SYMBOL_GPL(vb2_streamoff);
+
+int vb2_expbuf(struct vb2_queue *q, struct v4l2_exportbuffer *eb)
+{
+	return vb2_core_expbuf(q, &eb->fd, eb->type, eb->index,
+				eb->plane, eb->flags);
+}
+EXPORT_SYMBOL_GPL(vb2_expbuf);
+
+int vb2_queue_init(struct vb2_queue *q)
+{
+	/*
+	 * Sanity check
+	 */
+	if (WARN_ON(!q)			  ||
+	    WARN_ON(q->timestamp_flags &
+		    ~(V4L2_BUF_FLAG_TIMESTAMP_MASK |
+		      V4L2_BUF_FLAG_TSTAMP_SRC_MASK)))
+		return -EINVAL;
+
+	/* Warn that the driver should choose an appropriate timestamp type */
+	WARN_ON((q->timestamp_flags & V4L2_BUF_FLAG_TIMESTAMP_MASK) ==
+		V4L2_BUF_FLAG_TIMESTAMP_UNKNOWN);
+
+	/* Warn that vb2_memory should match with v4l2_memory */
+	if (WARN_ON(VB2_MEMORY_MMAP != (int)V4L2_MEMORY_MMAP)
+		|| WARN_ON(VB2_MEMORY_USERPTR != (int)V4L2_MEMORY_USERPTR)
+		|| WARN_ON(VB2_MEMORY_DMABUF != (int)V4L2_MEMORY_DMABUF))
+		return -EINVAL;
+
+	if (q->buf_struct_size == 0)
+		q->buf_struct_size = sizeof(struct vb2_v4l2_buffer);
+
+	q->buf_ops = &v4l2_buf_ops;
+	q->is_multiplanar = V4L2_TYPE_IS_MULTIPLANAR(q->type);
+	q->is_output = V4L2_TYPE_IS_OUTPUT(q->type);
+	q->copy_timestamp = (q->timestamp_flags & V4L2_BUF_FLAG_TIMESTAMP_MASK)
+			== V4L2_BUF_FLAG_TIMESTAMP_COPY;
+	/*
+	 * For compatibility with vb1: if QBUF hasn't been called yet, then
+	 * return EPOLLERR as well. This only affects capture queues; output
+	 * queues will always initialize waiting_for_buffers to false.
+	 */
+	q->quirk_poll_must_check_waiting_for_buffers = true;
+
+	return vb2_core_queue_init(q);
+}
+EXPORT_SYMBOL_GPL(vb2_queue_init);
+
+void vb2_queue_release(struct vb2_queue *q)
+{
+	vb2_core_queue_release(q);
+}
+EXPORT_SYMBOL_GPL(vb2_queue_release);
+
+__poll_t vb2_poll(struct vb2_queue *q, struct file *file, poll_table *wait)
+{
+	struct video_device *vfd = video_devdata(file);
+	__poll_t req_events = poll_requested_events(wait);
+	__poll_t res = 0;
+
+	if (test_bit(V4L2_FL_USES_V4L2_FH, &vfd->flags)) {
+		struct v4l2_fh *fh = file->private_data;
+
+		if (v4l2_event_pending(fh))
+			res = EPOLLPRI;
+		else if (req_events & EPOLLPRI)
+			poll_wait(file, &fh->wait, wait);
+	}
+
+	return res | vb2_core_poll(q, file, wait);
+}
+EXPORT_SYMBOL_GPL(vb2_poll);
+
+/*
+ * The following functions are not part of the vb2 core API, but are helper
+ * functions that plug into struct v4l2_ioctl_ops, struct v4l2_file_operations
+ * and struct vb2_ops.
+ * They contain boilerplate code that most if not all drivers have to do
+ * and so they simplify the driver code.
+ */
+
+/* The queue is busy if there is an owner and you are not that owner. */
+static inline bool vb2_queue_is_busy(struct video_device *vdev, struct file *file)
+{
+	return vdev->queue->owner && vdev->queue->owner != file->private_data;
+}
+
+/* vb2 ioctl helpers */
+
+int vb2_ioctl_reqbufs(struct file *file, void *priv,
+			  struct v4l2_requestbuffers *p)
+{
+	struct video_device *vdev = video_devdata(file);
+	int res = vb2_verify_memory_type(vdev->queue, p->memory, p->type);
+
+	if (res)
+		return res;
+	if (vb2_queue_is_busy(vdev, file))
+		return -EBUSY;
+	res = vb2_core_reqbufs(vdev->queue, p->memory, &p->count);
+	/*
+	 * If count == 0, then the owner has released all buffers and is no
+	 * longer the owner of the queue. Otherwise we have a new owner.
+	 */
+	if (res == 0)
+		vdev->queue->owner = p->count ? file->private_data : NULL;
+	return res;
+}
+EXPORT_SYMBOL_GPL(vb2_ioctl_reqbufs);
+
+int vb2_ioctl_create_bufs(struct file *file, void *priv,
+			  struct v4l2_create_buffers *p)
+{
+	struct video_device *vdev = video_devdata(file);
+	int res = vb2_verify_memory_type(vdev->queue, p->memory,
+			p->format.type);
+
+	p->index = vdev->queue->num_buffers;
+	/*
+	 * If count == 0, then just check if memory and type are valid.
+	 * Any -EBUSY result from vb2_verify_memory_type can be mapped to 0.
+	 */
+	if (p->count == 0)
+		return res != -EBUSY ? res : 0;
+	if (res)
+		return res;
+	if (vb2_queue_is_busy(vdev, file))
+		return -EBUSY;
+
+	res = vb2_create_bufs(vdev->queue, p);
+	if (res == 0)
+		vdev->queue->owner = file->private_data;
+	return res;
+}
+EXPORT_SYMBOL_GPL(vb2_ioctl_create_bufs);
+
+int vb2_ioctl_prepare_buf(struct file *file, void *priv,
+			  struct v4l2_buffer *p)
+{
+	struct video_device *vdev = video_devdata(file);
+
+	if (vb2_queue_is_busy(vdev, file))
+		return -EBUSY;
+	return vb2_prepare_buf(vdev->queue, p);
+}
+EXPORT_SYMBOL_GPL(vb2_ioctl_prepare_buf);
+
+int vb2_ioctl_querybuf(struct file *file, void *priv, struct v4l2_buffer *p)
+{
+	struct video_device *vdev = video_devdata(file);
+
+	/* No need to call vb2_queue_is_busy(), anyone can query buffers. */
+	return vb2_querybuf(vdev->queue, p);
+}
+EXPORT_SYMBOL_GPL(vb2_ioctl_querybuf);
+
+int vb2_ioctl_qbuf(struct file *file, void *priv, struct v4l2_buffer *p)
+{
+	struct video_device *vdev = video_devdata(file);
+
+	if (vb2_queue_is_busy(vdev, file))
+		return -EBUSY;
+	return vb2_qbuf(vdev->queue, p);
+}
+EXPORT_SYMBOL_GPL(vb2_ioctl_qbuf);
+
+int vb2_ioctl_dqbuf(struct file *file, void *priv, struct v4l2_buffer *p)
+{
+	struct video_device *vdev = video_devdata(file);
+
+	if (vb2_queue_is_busy(vdev, file))
+		return -EBUSY;
+	return vb2_dqbuf(vdev->queue, p, file->f_flags & O_NONBLOCK);
+}
+EXPORT_SYMBOL_GPL(vb2_ioctl_dqbuf);
+
+int vb2_ioctl_streamon(struct file *file, void *priv, enum v4l2_buf_type i)
+{
+	struct video_device *vdev = video_devdata(file);
+
+	if (vb2_queue_is_busy(vdev, file))
+		return -EBUSY;
+	return vb2_streamon(vdev->queue, i);
+}
+EXPORT_SYMBOL_GPL(vb2_ioctl_streamon);
+
+int vb2_ioctl_streamoff(struct file *file, void *priv, enum v4l2_buf_type i)
+{
+	struct video_device *vdev = video_devdata(file);
+
+	if (vb2_queue_is_busy(vdev, file))
+		return -EBUSY;
+	return vb2_streamoff(vdev->queue, i);
+}
+EXPORT_SYMBOL_GPL(vb2_ioctl_streamoff);
+
+int vb2_ioctl_expbuf(struct file *file, void *priv, struct v4l2_exportbuffer *p)
+{
+	struct video_device *vdev = video_devdata(file);
+
+	if (vb2_queue_is_busy(vdev, file))
+		return -EBUSY;
+	return vb2_expbuf(vdev->queue, p);
+}
+EXPORT_SYMBOL_GPL(vb2_ioctl_expbuf);
+
+/* v4l2_file_operations helpers */
+
+int vb2_fop_mmap(struct file *file, struct vm_area_struct *vma)
+{
+	struct video_device *vdev = video_devdata(file);
+
+	return vb2_mmap(vdev->queue, vma);
+}
+EXPORT_SYMBOL_GPL(vb2_fop_mmap);
+
+int _vb2_fop_release(struct file *file, struct mutex *lock)
+{
+	struct video_device *vdev = video_devdata(file);
+
+	if (lock)
+		mutex_lock(lock);
+	if (file->private_data == vdev->queue->owner) {
+		vb2_queue_release(vdev->queue);
+		vdev->queue->owner = NULL;
+	}
+	if (lock)
+		mutex_unlock(lock);
+	return v4l2_fh_release(file);
+}
+EXPORT_SYMBOL_GPL(_vb2_fop_release);
+
+int vb2_fop_release(struct file *file)
+{
+	struct video_device *vdev = video_devdata(file);
+	struct mutex *lock = vdev->queue->lock ? vdev->queue->lock : vdev->lock;
+
+	return _vb2_fop_release(file, lock);
+}
+EXPORT_SYMBOL_GPL(vb2_fop_release);
+
+ssize_t vb2_fop_write(struct file *file, const char __user *buf,
+		size_t count, loff_t *ppos)
+{
+	struct video_device *vdev = video_devdata(file);
+	struct mutex *lock = vdev->queue->lock ? vdev->queue->lock : vdev->lock;
+	int err = -EBUSY;
+
+	if (!(vdev->queue->io_modes & VB2_WRITE))
+		return -EINVAL;
+	if (lock && mutex_lock_interruptible(lock))
+		return -ERESTARTSYS;
+	if (vb2_queue_is_busy(vdev, file))
+		goto exit;
+	err = vb2_write(vdev->queue, buf, count, ppos,
+		       file->f_flags & O_NONBLOCK);
+	if (vdev->queue->fileio)
+		vdev->queue->owner = file->private_data;
+exit:
+	if (lock)
+		mutex_unlock(lock);
+	return err;
+}
+EXPORT_SYMBOL_GPL(vb2_fop_write);
+
+ssize_t vb2_fop_read(struct file *file, char __user *buf,
+		size_t count, loff_t *ppos)
+{
+	struct video_device *vdev = video_devdata(file);
+	struct mutex *lock = vdev->queue->lock ? vdev->queue->lock : vdev->lock;
+	int err = -EBUSY;
+
+	if (!(vdev->queue->io_modes & VB2_READ))
+		return -EINVAL;
+	if (lock && mutex_lock_interruptible(lock))
+		return -ERESTARTSYS;
+	if (vb2_queue_is_busy(vdev, file))
+		goto exit;
+	err = vb2_read(vdev->queue, buf, count, ppos,
+		       file->f_flags & O_NONBLOCK);
+	if (vdev->queue->fileio)
+		vdev->queue->owner = file->private_data;
+exit:
+	if (lock)
+		mutex_unlock(lock);
+	return err;
+}
+EXPORT_SYMBOL_GPL(vb2_fop_read);
+
+__poll_t vb2_fop_poll(struct file *file, poll_table *wait)
+{
+	struct video_device *vdev = video_devdata(file);
+	struct vb2_queue *q = vdev->queue;
+	struct mutex *lock = q->lock ? q->lock : vdev->lock;
+	__poll_t res;
+	void *fileio;
+
+	/*
+	 * If this helper doesn't know how to lock, then you shouldn't be using
+	 * it but you should write your own.
+	 */
+	WARN_ON(!lock);
+
+	if (lock && mutex_lock_interruptible(lock))
+		return EPOLLERR;
+
+	fileio = q->fileio;
+
+	res = vb2_poll(vdev->queue, file, wait);
+
+	/* If fileio was started, then we have a new queue owner. */
+	if (!fileio && q->fileio)
+		q->owner = file->private_data;
+	if (lock)
+		mutex_unlock(lock);
+	return res;
+}
+EXPORT_SYMBOL_GPL(vb2_fop_poll);
+
+#ifndef CONFIG_MMU
+unsigned long vb2_fop_get_unmapped_area(struct file *file, unsigned long addr,
+		unsigned long len, unsigned long pgoff, unsigned long flags)
+{
+	struct video_device *vdev = video_devdata(file);
+
+	return vb2_get_unmapped_area(vdev->queue, addr, len, pgoff, flags);
+}
+EXPORT_SYMBOL_GPL(vb2_fop_get_unmapped_area);
+#endif
+
+/* vb2_ops helpers. Only use if vq->lock is non-NULL. */
+
+void vb2_ops_wait_prepare(struct vb2_queue *vq)
+{
+	mutex_unlock(vq->lock);
+}
+EXPORT_SYMBOL_GPL(vb2_ops_wait_prepare);
+
+void vb2_ops_wait_finish(struct vb2_queue *vq)
+{
+	mutex_lock(vq->lock);
+}
+EXPORT_SYMBOL_GPL(vb2_ops_wait_finish);
+
+MODULE_DESCRIPTION("Driver helper framework for Video for Linux 2");
+MODULE_AUTHOR("Pawel Osciak <pawel@osciak.com>, Marek Szyprowski");
+MODULE_LICENSE("GPL");
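The vb2_ioctl_* and vb2_fop_* helpers above exist so that a typical driver can delegate all buffer handling to vb2. A minimal wiring sketch follows (not part of the patch); the foo_* names, the omitted queue_setup()/buf_queue() callbacks and the choice of the vmalloc allocator are assumptions for illustration only.

#include <linux/module.h>
#include <media/v4l2-dev.h>
#include <media/v4l2-fh.h>
#include <media/v4l2-ioctl.h>
#include <media/videobuf2-v4l2.h>
#include <media/videobuf2-vmalloc.h>

/* Hypothetical driver vb2_ops; queue_setup(), buf_queue() etc. are omitted. */
static const struct vb2_ops foo_qops = {
	.wait_prepare	= vb2_ops_wait_prepare,	/* requires q->lock below */
	.wait_finish	= vb2_ops_wait_finish,
};

/* Buffer ioctls are delegated entirely to the vb2_ioctl_* helpers. */
static const struct v4l2_ioctl_ops foo_ioctl_ops = {
	.vidioc_reqbufs		= vb2_ioctl_reqbufs,
	.vidioc_create_bufs	= vb2_ioctl_create_bufs,
	.vidioc_prepare_buf	= vb2_ioctl_prepare_buf,
	.vidioc_querybuf	= vb2_ioctl_querybuf,
	.vidioc_qbuf		= vb2_ioctl_qbuf,
	.vidioc_dqbuf		= vb2_ioctl_dqbuf,
	.vidioc_expbuf		= vb2_ioctl_expbuf,
	.vidioc_streamon	= vb2_ioctl_streamon,
	.vidioc_streamoff	= vb2_ioctl_streamoff,
};

/* File operations are delegated to the vb2_fop_* helpers. */
static const struct v4l2_file_operations foo_fops = {
	.owner		= THIS_MODULE,
	.open		= v4l2_fh_open,
	.release	= vb2_fop_release,
	.read		= vb2_fop_read,
	.poll		= vb2_fop_poll,
	.mmap		= vb2_fop_mmap,
	.unlocked_ioctl	= video_ioctl2,
};

/*
 * The driver must also point its video_device::queue at @q and
 * video_device::lock at @lock so the helpers above can find them.
 */
static int foo_init_queue(struct vb2_queue *q, struct mutex *lock)
{
	q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	q->io_modes = VB2_MMAP | VB2_DMABUF | VB2_READ;
	q->ops = &foo_qops;
	q->mem_ops = &vb2_vmalloc_memops;
	q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
	q->lock = lock;		/* serializes queue ops for the helpers */
	return vb2_queue_init(q);
}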
diff --git a/drivers/media/common/videobuf2/videobuf2-vmalloc.c b/drivers/media/common/videobuf2/videobuf2-vmalloc.c
new file mode 100644
index 0000000..6dfbd5b
--- /dev/null
+++ b/drivers/media/common/videobuf2/videobuf2-vmalloc.c
@@ -0,0 +1,451 @@
+/*
+ * videobuf2-vmalloc.c - vmalloc memory allocator for videobuf2
+ *
+ * Copyright (C) 2010 Samsung Electronics
+ *
+ * Author: Pawel Osciak <pawel@osciak.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ */
+
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/mm.h>
+#include <linux/refcount.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+
+#include <media/videobuf2-v4l2.h>
+#include <media/videobuf2-vmalloc.h>
+#include <media/videobuf2-memops.h>
+
+struct vb2_vmalloc_buf {
+	void				*vaddr;
+	struct frame_vector		*vec;
+	enum dma_data_direction		dma_dir;
+	unsigned long			size;
+	refcount_t			refcount;
+	struct vb2_vmarea_handler	handler;
+	struct dma_buf			*dbuf;
+};
+
+static void vb2_vmalloc_put(void *buf_priv);
+
+static void *vb2_vmalloc_alloc(struct device *dev, unsigned long attrs,
+			       unsigned long size, enum dma_data_direction dma_dir,
+			       gfp_t gfp_flags)
+{
+	struct vb2_vmalloc_buf *buf;
+
+	buf = kzalloc(sizeof(*buf), GFP_KERNEL | gfp_flags);
+	if (!buf)
+		return ERR_PTR(-ENOMEM);
+
+	buf->size = size;
+	buf->vaddr = vmalloc_user(buf->size);
+	buf->dma_dir = dma_dir;
+	buf->handler.refcount = &buf->refcount;
+	buf->handler.put = vb2_vmalloc_put;
+	buf->handler.arg = buf;
+
+	if (!buf->vaddr) {
+		pr_debug("vmalloc of size %ld failed\n", buf->size);
+		kfree(buf);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	refcount_set(&buf->refcount, 1);
+	return buf;
+}
+
+static void vb2_vmalloc_put(void *buf_priv)
+{
+	struct vb2_vmalloc_buf *buf = buf_priv;
+
+	if (refcount_dec_and_test(&buf->refcount)) {
+		vfree(buf->vaddr);
+		kfree(buf);
+	}
+}
+
+static void *vb2_vmalloc_get_userptr(struct device *dev, unsigned long vaddr,
+				     unsigned long size,
+				     enum dma_data_direction dma_dir)
+{
+	struct vb2_vmalloc_buf *buf;
+	struct frame_vector *vec;
+	int n_pages, offset, i;
+	int ret = -ENOMEM;
+
+	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
+	if (!buf)
+		return ERR_PTR(-ENOMEM);
+
+	buf->dma_dir = dma_dir;
+	offset = vaddr & ~PAGE_MASK;
+	buf->size = size;
+	vec = vb2_create_framevec(vaddr, size, dma_dir == DMA_FROM_DEVICE ||
+					       dma_dir == DMA_BIDIRECTIONAL);
+	if (IS_ERR(vec)) {
+		ret = PTR_ERR(vec);
+		goto fail_pfnvec_create;
+	}
+	buf->vec = vec;
+	n_pages = frame_vector_count(vec);
+	if (frame_vector_to_pages(vec) < 0) {
+		unsigned long *nums = frame_vector_pfns(vec);
+
+		/*
+		 * We cannot get page pointers for these pfns. Check that the
+		 * memory is physically contiguous and use a direct mapping.
+		 */
+		for (i = 1; i < n_pages; i++)
+			if (nums[i-1] + 1 != nums[i])
+				goto fail_map;
+		buf->vaddr = (__force void *)
+			ioremap_nocache(__pfn_to_phys(nums[0]), size + offset);
+	} else {
+		buf->vaddr = vm_map_ram(frame_vector_pages(vec), n_pages, -1,
+					PAGE_KERNEL);
+	}
+
+	if (!buf->vaddr)
+		goto fail_map;
+	buf->vaddr += offset;
+	return buf;
+
+fail_map:
+	vb2_destroy_framevec(vec);
+fail_pfnvec_create:
+	kfree(buf);
+
+	return ERR_PTR(ret);
+}
+
+static void vb2_vmalloc_put_userptr(void *buf_priv)
+{
+	struct vb2_vmalloc_buf *buf = buf_priv;
+	unsigned long vaddr = (unsigned long)buf->vaddr & PAGE_MASK;
+	unsigned int i;
+	struct page **pages;
+	unsigned int n_pages;
+
+	if (!buf->vec->is_pfns) {
+		n_pages = frame_vector_count(buf->vec);
+		pages = frame_vector_pages(buf->vec);
+		if (vaddr)
+			vm_unmap_ram((void *)vaddr, n_pages);
+		if (buf->dma_dir == DMA_FROM_DEVICE ||
+		    buf->dma_dir == DMA_BIDIRECTIONAL)
+			for (i = 0; i < n_pages; i++)
+				set_page_dirty_lock(pages[i]);
+	} else {
+		iounmap((__force void __iomem *)buf->vaddr);
+	}
+	vb2_destroy_framevec(buf->vec);
+	kfree(buf);
+}
+
+static void *vb2_vmalloc_vaddr(void *buf_priv)
+{
+	struct vb2_vmalloc_buf *buf = buf_priv;
+
+	if (!buf->vaddr) {
+		pr_err("Address of an unallocated plane requested or cannot map user pointer\n");
+		return NULL;
+	}
+
+	return buf->vaddr;
+}
+
+static unsigned int vb2_vmalloc_num_users(void *buf_priv)
+{
+	struct vb2_vmalloc_buf *buf = buf_priv;
+	return refcount_read(&buf->refcount);
+}
+
+static int vb2_vmalloc_mmap(void *buf_priv, struct vm_area_struct *vma)
+{
+	struct vb2_vmalloc_buf *buf = buf_priv;
+	int ret;
+
+	if (!buf) {
+		pr_err("No memory to map\n");
+		return -EINVAL;
+	}
+
+	ret = remap_vmalloc_range(vma, buf->vaddr, 0);
+	if (ret) {
+		pr_err("Remapping vmalloc memory, error: %d\n", ret);
+		return ret;
+	}
+
+	/*
+	 * Make sure that vm_areas for 2 buffers won't be merged together
+	 */
+	vma->vm_flags		|= VM_DONTEXPAND;
+
+	/*
+	 * Use common vm_area operations to track buffer refcount.
+	 */
+	vma->vm_private_data	= &buf->handler;
+	vma->vm_ops		= &vb2_common_vm_ops;
+
+	vma->vm_ops->open(vma);
+
+	return 0;
+}
+
+#ifdef CONFIG_HAS_DMA
+/*********************************************/
+/*         DMABUF ops for exporters          */
+/*********************************************/
+
+struct vb2_vmalloc_attachment {
+	struct sg_table sgt;
+	enum dma_data_direction dma_dir;
+};
+
+static int vb2_vmalloc_dmabuf_ops_attach(struct dma_buf *dbuf,
+	struct dma_buf_attachment *dbuf_attach)
+{
+	struct vb2_vmalloc_attachment *attach;
+	struct vb2_vmalloc_buf *buf = dbuf->priv;
+	int num_pages = PAGE_ALIGN(buf->size) / PAGE_SIZE;
+	struct sg_table *sgt;
+	struct scatterlist *sg;
+	void *vaddr = buf->vaddr;
+	int ret;
+	int i;
+
+	attach = kzalloc(sizeof(*attach), GFP_KERNEL);
+	if (!attach)
+		return -ENOMEM;
+
+	sgt = &attach->sgt;
+	ret = sg_alloc_table(sgt, num_pages, GFP_KERNEL);
+	if (ret) {
+		kfree(attach);
+		return ret;
+	}
+	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
+		struct page *page = vmalloc_to_page(vaddr);
+
+		if (!page) {
+			sg_free_table(sgt);
+			kfree(attach);
+			return -ENOMEM;
+		}
+		sg_set_page(sg, page, PAGE_SIZE, 0);
+		vaddr += PAGE_SIZE;
+	}
+
+	attach->dma_dir = DMA_NONE;
+	dbuf_attach->priv = attach;
+	return 0;
+}
+
+static void vb2_vmalloc_dmabuf_ops_detach(struct dma_buf *dbuf,
+	struct dma_buf_attachment *db_attach)
+{
+	struct vb2_vmalloc_attachment *attach = db_attach->priv;
+	struct sg_table *sgt;
+
+	if (!attach)
+		return;
+
+	sgt = &attach->sgt;
+
+	/* release the scatterlist cache */
+	if (attach->dma_dir != DMA_NONE)
+		dma_unmap_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
+			attach->dma_dir);
+	sg_free_table(sgt);
+	kfree(attach);
+	db_attach->priv = NULL;
+}
+
+static struct sg_table *vb2_vmalloc_dmabuf_ops_map(
+	struct dma_buf_attachment *db_attach, enum dma_data_direction dma_dir)
+{
+	struct vb2_vmalloc_attachment *attach = db_attach->priv;
+	/* stealing dmabuf mutex to serialize map/unmap operations */
+	struct mutex *lock = &db_attach->dmabuf->lock;
+	struct sg_table *sgt;
+
+	mutex_lock(lock);
+
+	sgt = &attach->sgt;
+	/* return previously mapped sg table */
+	if (attach->dma_dir == dma_dir) {
+		mutex_unlock(lock);
+		return sgt;
+	}
+
+	/* release any previous cache */
+	if (attach->dma_dir != DMA_NONE) {
+		dma_unmap_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
+			attach->dma_dir);
+		attach->dma_dir = DMA_NONE;
+	}
+
+	/* mapping to the client with new direction */
+	sgt->nents = dma_map_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
+				dma_dir);
+	if (!sgt->nents) {
+		pr_err("failed to map scatterlist\n");
+		mutex_unlock(lock);
+		return ERR_PTR(-EIO);
+	}
+
+	attach->dma_dir = dma_dir;
+
+	mutex_unlock(lock);
+
+	return sgt;
+}
+
+static void vb2_vmalloc_dmabuf_ops_unmap(struct dma_buf_attachment *db_attach,
+	struct sg_table *sgt, enum dma_data_direction dma_dir)
+{
+	/* nothing to be done here */
+}
+
+static void vb2_vmalloc_dmabuf_ops_release(struct dma_buf *dbuf)
+{
+	/* drop reference obtained in vb2_vmalloc_get_dmabuf */
+	vb2_vmalloc_put(dbuf->priv);
+}
+
+static void *vb2_vmalloc_dmabuf_ops_kmap(struct dma_buf *dbuf, unsigned long pgnum)
+{
+	struct vb2_vmalloc_buf *buf = dbuf->priv;
+
+	return buf->vaddr + pgnum * PAGE_SIZE;
+}
+
+static void *vb2_vmalloc_dmabuf_ops_vmap(struct dma_buf *dbuf)
+{
+	struct vb2_vmalloc_buf *buf = dbuf->priv;
+
+	return buf->vaddr;
+}
+
+static int vb2_vmalloc_dmabuf_ops_mmap(struct dma_buf *dbuf,
+	struct vm_area_struct *vma)
+{
+	return vb2_vmalloc_mmap(dbuf->priv, vma);
+}
+
+static const struct dma_buf_ops vb2_vmalloc_dmabuf_ops = {
+	.attach = vb2_vmalloc_dmabuf_ops_attach,
+	.detach = vb2_vmalloc_dmabuf_ops_detach,
+	.map_dma_buf = vb2_vmalloc_dmabuf_ops_map,
+	.unmap_dma_buf = vb2_vmalloc_dmabuf_ops_unmap,
+	.map = vb2_vmalloc_dmabuf_ops_kmap,
+	.vmap = vb2_vmalloc_dmabuf_ops_vmap,
+	.mmap = vb2_vmalloc_dmabuf_ops_mmap,
+	.release = vb2_vmalloc_dmabuf_ops_release,
+};
+
+static struct dma_buf *vb2_vmalloc_get_dmabuf(void *buf_priv, unsigned long flags)
+{
+	struct vb2_vmalloc_buf *buf = buf_priv;
+	struct dma_buf *dbuf;
+	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
+
+	exp_info.ops = &vb2_vmalloc_dmabuf_ops;
+	exp_info.size = buf->size;
+	exp_info.flags = flags;
+	exp_info.priv = buf;
+
+	if (WARN_ON(!buf->vaddr))
+		return NULL;
+
+	dbuf = dma_buf_export(&exp_info);
+	if (IS_ERR(dbuf))
+		return NULL;
+
+	/* dmabuf keeps reference to vb2 buffer */
+	refcount_inc(&buf->refcount);
+
+	return dbuf;
+}
+#endif /* CONFIG_HAS_DMA */
+
+
+/*********************************************/
+/*       callbacks for DMABUF buffers        */
+/*********************************************/
+
+static int vb2_vmalloc_map_dmabuf(void *mem_priv)
+{
+	struct vb2_vmalloc_buf *buf = mem_priv;
+
+	buf->vaddr = dma_buf_vmap(buf->dbuf);
+
+	return buf->vaddr ? 0 : -EFAULT;
+}
+
+static void vb2_vmalloc_unmap_dmabuf(void *mem_priv)
+{
+	struct vb2_vmalloc_buf *buf = mem_priv;
+
+	dma_buf_vunmap(buf->dbuf, buf->vaddr);
+	buf->vaddr = NULL;
+}
+
+static void vb2_vmalloc_detach_dmabuf(void *mem_priv)
+{
+	struct vb2_vmalloc_buf *buf = mem_priv;
+
+	if (buf->vaddr)
+		dma_buf_vunmap(buf->dbuf, buf->vaddr);
+
+	kfree(buf);
+}
+
+static void *vb2_vmalloc_attach_dmabuf(struct device *dev, struct dma_buf *dbuf,
+	unsigned long size, enum dma_data_direction dma_dir)
+{
+	struct vb2_vmalloc_buf *buf;
+
+	if (dbuf->size < size)
+		return ERR_PTR(-EFAULT);
+
+	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
+	if (!buf)
+		return ERR_PTR(-ENOMEM);
+
+	buf->dbuf = dbuf;
+	buf->dma_dir = dma_dir;
+	buf->size = size;
+
+	return buf;
+}
+
+
+const struct vb2_mem_ops vb2_vmalloc_memops = {
+	.alloc		= vb2_vmalloc_alloc,
+	.put		= vb2_vmalloc_put,
+	.get_userptr	= vb2_vmalloc_get_userptr,
+	.put_userptr	= vb2_vmalloc_put_userptr,
+#ifdef CONFIG_HAS_DMA
+	.get_dmabuf	= vb2_vmalloc_get_dmabuf,
+#endif
+	.map_dmabuf	= vb2_vmalloc_map_dmabuf,
+	.unmap_dmabuf	= vb2_vmalloc_unmap_dmabuf,
+	.attach_dmabuf	= vb2_vmalloc_attach_dmabuf,
+	.detach_dmabuf	= vb2_vmalloc_detach_dmabuf,
+	.vaddr		= vb2_vmalloc_vaddr,
+	.mmap		= vb2_vmalloc_mmap,
+	.num_users	= vb2_vmalloc_num_users,
+};
+EXPORT_SYMBOL_GPL(vb2_vmalloc_memops);
+
+MODULE_DESCRIPTION("vmalloc memory handling routines for videobuf2");
+MODULE_AUTHOR("Pawel Osciak <pawel@osciak.com>");
+MODULE_LICENSE("GPL");
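Usage note (not part of the patch): drivers select this allocator when they need CPU access to buffer contents rather than device DMA, by setting q->mem_ops = &vb2_vmalloc_memops at queue-init time. The sketch below shows a hypothetical buf_queue callback for such a driver, filling plane 0 from the CPU; the demo_* name is illustrative only, and a real driver would normally complete the buffer from its interrupt handler or worker thread instead of inline.

#include <linux/ktime.h>
#include <linux/string.h>
#include <media/videobuf2-v4l2.h>

static void demo_buf_queue(struct vb2_buffer *vb)
{
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
	void *vaddr = vb2_plane_vaddr(vb, 0);		/* kernel mapping */
	unsigned long size = vb2_plane_size(vb, 0);

	/* Produce one frame into the vmalloc'ed plane from the CPU. */
	memset(vaddr, 0, size);

	vb2_set_plane_payload(vb, 0, size);
	vbuf->sequence = 0;		/* a real driver keeps a running count */
	vb->timestamp = ktime_get_ns();
	vb2_buffer_done(vb, VB2_BUF_STATE_DONE);
}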