v4.19.13 snapshot.
diff --git a/drivers/iio/buffer/Kconfig b/drivers/iio/buffer/Kconfig
new file mode 100644
index 0000000..338774c
--- /dev/null
+++ b/drivers/iio/buffer/Kconfig
@@ -0,0 +1,54 @@
+#
+# Industrial I/O generic buffer implementations
+#
+# When adding new entries keep the list in alphabetical order
+
+config IIO_BUFFER_CB
+	tristate "IIO callback buffer used for in-kernel push interfaces"
+	help
+	  Should be selected by any drivers that do in-kernel push
+	  usage.  That is, those where the data is pushed to the consumer.
+
+config IIO_BUFFER_DMA
+	tristate
+	help
+	  Provides the generic IIO DMA buffer infrastructure that can be used by
+	  drivers for devices with DMA support to implement the IIO buffer.
+
+	  Should be selected by drivers that want to use the generic DMA buffer
+	  infrastructure.
+
+config IIO_BUFFER_DMAENGINE
+	tristate
+	select IIO_BUFFER_DMA
+	help
+	  Provides a bonding of the generic IIO DMA buffer infrastructure with the
+	  DMAengine framework. This can be used by converter drivers with a DMA port
+	  connected to an external DMA controller which is supported by the
+	  DMAengine framework.
+
+	  Should be selected by drivers that want to use this functionality.
+
+config IIO_BUFFER_HW_CONSUMER
+	tristate "Industrial I/O HW buffering"
+	help
+	  Provides a way of bonding when an IIO device has a direct connection
+	  to another device in hardware. In this case, buffers for data
+	  transfers are handled by hardware.
+
+	  Should be selected by drivers that want to use the generic HW consumer
+	  interface.
+
+config IIO_KFIFO_BUF
+	tristate "Industrial I/O buffering based on kfifo"
+	help
+	  A simple fifo based on kfifo.  Note that this currently provides
+	  no buffer events so it is up to userspace to work out how
+	  often to read from the buffer.
+
+config IIO_TRIGGERED_BUFFER
+	tristate
+	select IIO_TRIGGER
+	select IIO_KFIFO_BUF
+	help
+	  Provides helper functions for setting up triggered buffers.
diff --git a/drivers/iio/buffer/Makefile b/drivers/iio/buffer/Makefile
new file mode 100644
index 0000000..1403eb2
--- /dev/null
+++ b/drivers/iio/buffer/Makefile
@@ -0,0 +1,12 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for the industrial I/O buffer implementations
+#
+
+# When adding new entries keep the list in alphabetical order
+obj-$(CONFIG_IIO_BUFFER_CB) += industrialio-buffer-cb.o
+obj-$(CONFIG_IIO_BUFFER_DMA) += industrialio-buffer-dma.o
+obj-$(CONFIG_IIO_BUFFER_DMAENGINE) += industrialio-buffer-dmaengine.o
+obj-$(CONFIG_IIO_BUFFER_HW_CONSUMER) += industrialio-hw-consumer.o
+obj-$(CONFIG_IIO_KFIFO_BUF) += kfifo_buf.o
+obj-$(CONFIG_IIO_TRIGGERED_BUFFER) += industrialio-triggered-buffer.o
diff --git a/drivers/iio/buffer/industrialio-buffer-cb.c b/drivers/iio/buffer/industrialio-buffer-cb.c
new file mode 100644
index 0000000..ea63c83
--- /dev/null
+++ b/drivers/iio/buffer/industrialio-buffer-cb.c
@@ -0,0 +1,154 @@
+/* The industrial I/O callback buffer
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <linux/export.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/buffer_impl.h>
+#include <linux/iio/consumer.h>
+
+struct iio_cb_buffer {
+	struct iio_buffer buffer;
+	int (*cb)(const void *data, void *private);
+	void *private;
+	struct iio_channel *channels;
+	struct iio_dev *indio_dev;
+};
+
+static struct iio_cb_buffer *buffer_to_cb_buffer(struct iio_buffer *buffer)
+{
+	return container_of(buffer, struct iio_cb_buffer, buffer);
+}
+
+static int iio_buffer_cb_store_to(struct iio_buffer *buffer, const void *data)
+{
+	struct iio_cb_buffer *cb_buff = buffer_to_cb_buffer(buffer);
+	return cb_buff->cb(data, cb_buff->private);
+}
+
+static void iio_buffer_cb_release(struct iio_buffer *buffer)
+{
+	struct iio_cb_buffer *cb_buff = buffer_to_cb_buffer(buffer);
+	kfree(cb_buff->buffer.scan_mask);
+	kfree(cb_buff);
+}
+
+static const struct iio_buffer_access_funcs iio_cb_access = {
+	.store_to = &iio_buffer_cb_store_to,
+	.release = &iio_buffer_cb_release,
+
+	.modes = INDIO_BUFFER_SOFTWARE | INDIO_BUFFER_TRIGGERED,
+};
+
+struct iio_cb_buffer *iio_channel_get_all_cb(struct device *dev,
+					     int (*cb)(const void *data,
+						       void *private),
+					     void *private)
+{
+	int ret;
+	struct iio_cb_buffer *cb_buff;
+	struct iio_channel *chan;
+
+	cb_buff = kzalloc(sizeof(*cb_buff), GFP_KERNEL);
+	if (cb_buff == NULL)
+		return ERR_PTR(-ENOMEM);
+
+	iio_buffer_init(&cb_buff->buffer);
+
+	cb_buff->private = private;
+	cb_buff->cb = cb;
+	cb_buff->buffer.access = &iio_cb_access;
+	INIT_LIST_HEAD(&cb_buff->buffer.demux_list);
+
+	cb_buff->channels = iio_channel_get_all(dev);
+	if (IS_ERR(cb_buff->channels)) {
+		ret = PTR_ERR(cb_buff->channels);
+		goto error_free_cb_buff;
+	}
+
+	cb_buff->indio_dev = cb_buff->channels[0].indio_dev;
+	cb_buff->buffer.scan_mask
+		= kcalloc(BITS_TO_LONGS(cb_buff->indio_dev->masklength),
+			  sizeof(long), GFP_KERNEL);
+	if (cb_buff->buffer.scan_mask == NULL) {
+		ret = -ENOMEM;
+		goto error_release_channels;
+	}
+	chan = &cb_buff->channels[0];
+	while (chan->indio_dev) {
+		if (chan->indio_dev != cb_buff->indio_dev) {
+			ret = -EINVAL;
+			goto error_free_scan_mask;
+		}
+		set_bit(chan->channel->scan_index,
+			cb_buff->buffer.scan_mask);
+		chan++;
+	}
+
+	return cb_buff;
+
+error_free_scan_mask:
+	kfree(cb_buff->buffer.scan_mask);
+error_release_channels:
+	iio_channel_release_all(cb_buff->channels);
+error_free_cb_buff:
+	kfree(cb_buff);
+	return ERR_PTR(ret);
+}
+EXPORT_SYMBOL_GPL(iio_channel_get_all_cb);
+
+int iio_channel_cb_set_buffer_watermark(struct iio_cb_buffer *cb_buff,
+					size_t watermark)
+{
+	if (!watermark)
+		return -EINVAL;
+	cb_buff->buffer.watermark = watermark;
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(iio_channel_cb_set_buffer_watermark);
+
+int iio_channel_start_all_cb(struct iio_cb_buffer *cb_buff)
+{
+	return iio_update_buffers(cb_buff->indio_dev, &cb_buff->buffer,
+				  NULL);
+}
+EXPORT_SYMBOL_GPL(iio_channel_start_all_cb);
+
+void iio_channel_stop_all_cb(struct iio_cb_buffer *cb_buff)
+{
+	iio_update_buffers(cb_buff->indio_dev, NULL, &cb_buff->buffer);
+}
+EXPORT_SYMBOL_GPL(iio_channel_stop_all_cb);
+
+void iio_channel_release_all_cb(struct iio_cb_buffer *cb_buff)
+{
+	iio_channel_release_all(cb_buff->channels);
+	iio_buffer_put(&cb_buff->buffer);
+}
+EXPORT_SYMBOL_GPL(iio_channel_release_all_cb);
+
+struct iio_channel
+*iio_channel_cb_get_channels(const struct iio_cb_buffer *cb_buffer)
+{
+	return cb_buffer->channels;
+}
+EXPORT_SYMBOL_GPL(iio_channel_cb_get_channels);
+
+struct iio_dev
+*iio_channel_cb_get_iio_dev(const struct iio_cb_buffer *cb_buffer)
+{
+	return cb_buffer->indio_dev;
+}
+EXPORT_SYMBOL_GPL(iio_channel_cb_get_iio_dev);
+
+MODULE_AUTHOR("Jonathan Cameron <jic23@kernel.org>");
+MODULE_DESCRIPTION("Industrial I/O callback buffer");
+MODULE_LICENSE("GPL");
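A hedged usage sketch (not part of the patch above): an in-kernel consumer driver obtains a callback buffer for all channels it maps to and then starts buffering; every pushed scan is delivered to the callback. The foo_* names are hypothetical.

#include <linux/device.h>
#include <linux/err.h>
#include <linux/iio/consumer.h>

/* Called once per scan pushed by the producing IIO device. */
static int foo_sample_cb(const void *data, void *private)
{
	/* "data" holds one scan, laid out according to the enabled scan mask. */
	dev_dbg((struct device *)private, "received one scan\n");
	return 0;
}

static int foo_start_capture(struct device *dev, struct iio_cb_buffer **cb)
{
	*cb = iio_channel_get_all_cb(dev, foo_sample_cb, dev);
	if (IS_ERR(*cb))
		return PTR_ERR(*cb);

	/* Enables buffering on the producing device; stop with
	 * iio_channel_stop_all_cb() and release with
	 * iio_channel_release_all_cb(). */
	return iio_channel_start_all_cb(*cb);
}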
diff --git a/drivers/iio/buffer/industrialio-buffer-dma.c b/drivers/iio/buffer/industrialio-buffer-dma.c
new file mode 100644
index 0000000..b32bf57
--- /dev/null
+++ b/drivers/iio/buffer/industrialio-buffer-dma.c
@@ -0,0 +1,684 @@
+/*
+ * Copyright 2013-2015 Analog Devices Inc.
+ *  Author: Lars-Peter Clausen <lars@metafoo.de>
+ *
+ * Licensed under the GPL-2.
+ */
+
+#include <linux/slab.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/workqueue.h>
+#include <linux/mutex.h>
+#include <linux/sched.h>
+#include <linux/poll.h>
+#include <linux/iio/buffer.h>
+#include <linux/iio/buffer_impl.h>
+#include <linux/iio/buffer-dma.h>
+#include <linux/dma-mapping.h>
+#include <linux/sizes.h>
+
+/*
+ * For DMA buffers the storage is sub-divided into so-called blocks. Each block
+ * has its own memory buffer. The size of the block is the granularity at which
+ * memory is exchanged between the hardware and the application. Increasing the
+ * basic unit of data exchange from one sample to one block decreases the
+ * management overhead that is associated with each sample. E.g. if the
+ * management overhead for one exchange is x and the unit of exchange is one
+ * sample, the overhead will be x for each sample, whereas when using a block
+ * which contains n samples the overhead per sample is reduced to x/n. This
+ * allows much higher sample rates to be achieved than can be sustained with
+ * the one-sample approach.
+ *
+ * Blocks are exchanged between the DMA controller and the application by
+ * means of two queues: the incoming queue and the outgoing queue. Blocks on the
+ * incoming queue are waiting for the DMA controller to pick them up and fill
+ * them with data. Blocks on the outgoing queue have been filled with data and
+ * are waiting for the application to dequeue them and read the data.
+ *
+ * A block can be in one of the following states:
+ *  * Owned by the application. In this state the application can read data from
+ *    the block.
+ *  * On the incoming list: Blocks on the incoming list are queued up to be
+ *    processed by the DMA controller.
+ *  * Owned by the DMA controller: The DMA controller is processing the block
+ *    and filling it with data.
+ *  * On the outgoing list: Blocks on the outgoing list have been successfully
+ *    processed by the DMA controller and contain data. They can be dequeued by
+ *    the application.
+ *  * Dead: A block that is dead has been marked to be freed. It might still
+ *    be owned by either the application or the DMA controller at the moment,
+ *    but once they are done processing it the block will be freed instead of
+ *    going to either the incoming or outgoing queue.
+ *
+ * In addition to this, blocks are reference counted and the memory associated
+ * with both the block structure as well as the storage memory for the block
+ * will be freed when the last reference to the block is dropped. This means a
+ * block must not be accessed without holding a reference.
+ *
+ * The iio_dma_buffer implementation provides a generic infrastructure for
+ * managing the blocks.
+ *
+ * A driver for a specific piece of hardware that has DMA capabilities needs to
+ * implement the submit() callback from the iio_dma_buffer_ops structure. This
+ * callback is supposed to initiate the DMA transfer copying data from the
+ * converter to the memory region of the block. Once the DMA transfer has been
+ * completed the driver must call iio_dma_buffer_block_done() for the completed
+ * block.
+ *
+ * Prior to this it must set the bytes_used field of the block to the
+ * actual number of bytes in the buffer. Typically this will be equal to the
+ * size of the block, but if the DMA hardware has certain alignment requirements
+ * for the transfer length it might choose to use less than the full size. In
+ * either case it is expected that bytes_used is a multiple of the bytes per
+ * datum, i.e. the block must not contain partial samples.
+ *
+ * The driver must call iio_dma_buffer_block_done() for each block it has
+ * received through its submit() callback, even if it does not actually
+ * perform a DMA transfer for the block, e.g. because the buffer was disabled
+ * before the block transfer was started. In this case it should set bytes_used
+ * to 0.
+ *
+ * In addition it is recommended that a driver implements the abort() callback.
+ * It will be called when the buffer is disabled and can be used to cancel
+ * pending and stop active transfers.
+ *
+ * The specific driver implementation should use the default callback
+ * implementations provided by this module for the iio_buffer_access_funcs
+ * struct. It may overload some callbacks with custom variants if the hardware
+ * has special requirements that are not handled by the generic functions. If a
+ * driver chooses to overload a callback it has to ensure that the generic
+ * callback is called from within the custom callback.
+ */
+
+static void iio_buffer_block_release(struct kref *kref)
+{
+	struct iio_dma_buffer_block *block = container_of(kref,
+		struct iio_dma_buffer_block, kref);
+
+	WARN_ON(block->state != IIO_BLOCK_STATE_DEAD);
+
+	dma_free_coherent(block->queue->dev, PAGE_ALIGN(block->size),
+					block->vaddr, block->phys_addr);
+
+	iio_buffer_put(&block->queue->buffer);
+	kfree(block);
+}
+
+static void iio_buffer_block_get(struct iio_dma_buffer_block *block)
+{
+	kref_get(&block->kref);
+}
+
+static void iio_buffer_block_put(struct iio_dma_buffer_block *block)
+{
+	kref_put(&block->kref, iio_buffer_block_release);
+}
+
+/*
+ * dma_free_coherent can sleep, hence we need to take some special care to be
+ * able to drop a reference from an atomic context.
+ */
+static LIST_HEAD(iio_dma_buffer_dead_blocks);
+static DEFINE_SPINLOCK(iio_dma_buffer_dead_blocks_lock);
+
+static void iio_dma_buffer_cleanup_worker(struct work_struct *work)
+{
+	struct iio_dma_buffer_block *block, *_block;
+	LIST_HEAD(block_list);
+
+	spin_lock_irq(&iio_dma_buffer_dead_blocks_lock);
+	list_splice_tail_init(&iio_dma_buffer_dead_blocks, &block_list);
+	spin_unlock_irq(&iio_dma_buffer_dead_blocks_lock);
+
+	list_for_each_entry_safe(block, _block, &block_list, head)
+		iio_buffer_block_release(&block->kref);
+}
+static DECLARE_WORK(iio_dma_buffer_cleanup_work, iio_dma_buffer_cleanup_worker);
+
+static void iio_buffer_block_release_atomic(struct kref *kref)
+{
+	struct iio_dma_buffer_block *block;
+	unsigned long flags;
+
+	block = container_of(kref, struct iio_dma_buffer_block, kref);
+
+	spin_lock_irqsave(&iio_dma_buffer_dead_blocks_lock, flags);
+	list_add_tail(&block->head, &iio_dma_buffer_dead_blocks);
+	spin_unlock_irqrestore(&iio_dma_buffer_dead_blocks_lock, flags);
+
+	schedule_work(&iio_dma_buffer_cleanup_work);
+}
+
+/*
+ * Version of iio_buffer_block_put() that can be called from atomic context
+ */
+static void iio_buffer_block_put_atomic(struct iio_dma_buffer_block *block)
+{
+	kref_put(&block->kref, iio_buffer_block_release_atomic);
+}
+
+static struct iio_dma_buffer_queue *iio_buffer_to_queue(struct iio_buffer *buf)
+{
+	return container_of(buf, struct iio_dma_buffer_queue, buffer);
+}
+
+static struct iio_dma_buffer_block *iio_dma_buffer_alloc_block(
+	struct iio_dma_buffer_queue *queue, size_t size)
+{
+	struct iio_dma_buffer_block *block;
+
+	block = kzalloc(sizeof(*block), GFP_KERNEL);
+	if (!block)
+		return NULL;
+
+	block->vaddr = dma_alloc_coherent(queue->dev, PAGE_ALIGN(size),
+		&block->phys_addr, GFP_KERNEL);
+	if (!block->vaddr) {
+		kfree(block);
+		return NULL;
+	}
+
+	block->size = size;
+	block->state = IIO_BLOCK_STATE_DEQUEUED;
+	block->queue = queue;
+	INIT_LIST_HEAD(&block->head);
+	kref_init(&block->kref);
+
+	iio_buffer_get(&queue->buffer);
+
+	return block;
+}
+
+static void _iio_dma_buffer_block_done(struct iio_dma_buffer_block *block)
+{
+	struct iio_dma_buffer_queue *queue = block->queue;
+
+	/*
+	 * If the block is dead the buffer has already been freed by the
+	 * application; just drop the reference, don't move it to outgoing.
+	 */
+	if (block->state != IIO_BLOCK_STATE_DEAD) {
+		block->state = IIO_BLOCK_STATE_DONE;
+		list_add_tail(&block->head, &queue->outgoing);
+	}
+}
+
+/**
+ * iio_dma_buffer_block_done() - Indicate that a block has been completed
+ * @block: The completed block
+ *
+ * Should be called when the DMA controller has finished handling the block to
+ * pass back ownership of the block to the queue.
+ */
+void iio_dma_buffer_block_done(struct iio_dma_buffer_block *block)
+{
+	struct iio_dma_buffer_queue *queue = block->queue;
+	unsigned long flags;
+
+	spin_lock_irqsave(&queue->list_lock, flags);
+	_iio_dma_buffer_block_done(block);
+	spin_unlock_irqrestore(&queue->list_lock, flags);
+
+	iio_buffer_block_put_atomic(block);
+	wake_up_interruptible_poll(&queue->buffer.pollq, EPOLLIN | EPOLLRDNORM);
+}
+EXPORT_SYMBOL_GPL(iio_dma_buffer_block_done);
+
+/**
+ * iio_dma_buffer_block_list_abort() - Indicate that a list block has been
+ *   aborted
+ * @queue: Queue for which to complete blocks.
+ * @list: List of aborted blocks. All blocks in this list must be from @queue.
+ *
+ * Typically called from the abort() callback after the DMA controller has been
+ * stopped. This will set bytes_used to 0 for each block in the list and then
+ * hand the blocks back to the queue.
+ */
+void iio_dma_buffer_block_list_abort(struct iio_dma_buffer_queue *queue,
+	struct list_head *list)
+{
+	struct iio_dma_buffer_block *block, *_block;
+	unsigned long flags;
+
+	spin_lock_irqsave(&queue->list_lock, flags);
+	list_for_each_entry_safe(block, _block, list, head) {
+		list_del(&block->head);
+		block->bytes_used = 0;
+		_iio_dma_buffer_block_done(block);
+		iio_buffer_block_put_atomic(block);
+	}
+	spin_unlock_irqrestore(&queue->list_lock, flags);
+
+	wake_up_interruptible_poll(&queue->buffer.pollq, EPOLLIN | EPOLLRDNORM);
+}
+EXPORT_SYMBOL_GPL(iio_dma_buffer_block_list_abort);
+
+static bool iio_dma_block_reusable(struct iio_dma_buffer_block *block)
+{
+	/*
+	 * If the core owns the block it can be re-used. This should be the
+	 * default case when enabling the buffer, unless the DMA controller does
+	 * not support abort and has not given back the block yet.
+	 */
+	switch (block->state) {
+	case IIO_BLOCK_STATE_DEQUEUED:
+	case IIO_BLOCK_STATE_QUEUED:
+	case IIO_BLOCK_STATE_DONE:
+		return true;
+	default:
+		return false;
+	}
+}
+
+/**
+ * iio_dma_buffer_request_update() - DMA buffer request_update callback
+ * @buffer: The buffer for which to request an update
+ *
+ * Should be used as the request_update callback for the
+ * iio_buffer_access_funcs struct for DMA buffers.
+ */
+int iio_dma_buffer_request_update(struct iio_buffer *buffer)
+{
+	struct iio_dma_buffer_queue *queue = iio_buffer_to_queue(buffer);
+	struct iio_dma_buffer_block *block;
+	bool try_reuse = false;
+	size_t size;
+	int ret = 0;
+	int i;
+
+	/*
+	 * Split the buffer into two even parts. This is used as a double
+	 * buffering scheme with usually one block at a time being used by the
+	 * DMA and the other one by the application.
+	 */
+	size = DIV_ROUND_UP(queue->buffer.bytes_per_datum *
+		queue->buffer.length, 2);
+
+	mutex_lock(&queue->lock);
+
+	/* Allocations are page aligned */
+	if (PAGE_ALIGN(queue->fileio.block_size) == PAGE_ALIGN(size))
+		try_reuse = true;
+
+	queue->fileio.block_size = size;
+	queue->fileio.active_block = NULL;
+
+	spin_lock_irq(&queue->list_lock);
+	for (i = 0; i < ARRAY_SIZE(queue->fileio.blocks); i++) {
+		block = queue->fileio.blocks[i];
+
+		/* If we can't re-use it free it */
+		if (block && (!iio_dma_block_reusable(block) || !try_reuse))
+			block->state = IIO_BLOCK_STATE_DEAD;
+	}
+
+	/*
+	 * At this point all blocks are either owned by the core or marked as
+	 * dead. This means we can reset the lists without having to fear
+	 * corruption.
+	 */
+	INIT_LIST_HEAD(&queue->outgoing);
+	spin_unlock_irq(&queue->list_lock);
+
+	INIT_LIST_HEAD(&queue->incoming);
+
+	for (i = 0; i < ARRAY_SIZE(queue->fileio.blocks); i++) {
+		if (queue->fileio.blocks[i]) {
+			block = queue->fileio.blocks[i];
+			if (block->state == IIO_BLOCK_STATE_DEAD) {
+				/* Could not reuse it */
+				iio_buffer_block_put(block);
+				block = NULL;
+			} else {
+				block->size = size;
+			}
+		} else {
+			block = NULL;
+		}
+
+		if (!block) {
+			block = iio_dma_buffer_alloc_block(queue, size);
+			if (!block) {
+				ret = -ENOMEM;
+				goto out_unlock;
+			}
+			queue->fileio.blocks[i] = block;
+		}
+
+		block->state = IIO_BLOCK_STATE_QUEUED;
+		list_add_tail(&block->head, &queue->incoming);
+	}
+
+out_unlock:
+	mutex_unlock(&queue->lock);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(iio_dma_buffer_request_update);
+
+static void iio_dma_buffer_submit_block(struct iio_dma_buffer_queue *queue,
+	struct iio_dma_buffer_block *block)
+{
+	int ret;
+
+	/*
+	 * If the hardware has already been removed we put the block into
+	 * limbo. It will neither be on the incoming nor outgoing list, nor will
+	 * it ever complete. It will just wait to be freed eventually.
+	 */
+	if (!queue->ops)
+		return;
+
+	block->state = IIO_BLOCK_STATE_ACTIVE;
+	iio_buffer_block_get(block);
+	ret = queue->ops->submit(queue, block);
+	if (ret) {
+		/*
+		 * This is a bit of a problem and there is not much we can do
+		 * other than to wait for the buffer to be disabled and re-enabled
+		 * and try again. But it should not really happen unless we run
+		 * out of memory or something similar.
+		 *
+		 * TODO: Implement support in the IIO core to allow buffers to
+		 * notify consumers that something went wrong and the buffer
+		 * should be disabled.
+		 */
+		iio_buffer_block_put(block);
+	}
+}
+
+/**
+ * iio_dma_buffer_enable() - Enable DMA buffer
+ * @buffer: IIO buffer to enable
+ * @indio_dev: IIO device the buffer is attached to
+ *
+ * Needs to be called when the device that the buffer is attached to starts
+ * sampling. Typically used as the iio_buffer_access_funcs enable callback.
+ *
+ * This will allocate the DMA buffers and start the DMA transfers.
+ */
+int iio_dma_buffer_enable(struct iio_buffer *buffer,
+	struct iio_dev *indio_dev)
+{
+	struct iio_dma_buffer_queue *queue = iio_buffer_to_queue(buffer);
+	struct iio_dma_buffer_block *block, *_block;
+
+	mutex_lock(&queue->lock);
+	queue->active = true;
+	list_for_each_entry_safe(block, _block, &queue->incoming, head) {
+		list_del(&block->head);
+		iio_dma_buffer_submit_block(queue, block);
+	}
+	mutex_unlock(&queue->lock);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(iio_dma_buffer_enable);
+
+/**
+ * iio_dma_buffer_disable() - Disable DMA buffer
+ * @buffer: IIO DMA buffer to disable
+ * @indio_dev: IIO device the buffer is attached to
+ *
+ * Needs to be called when the device that the buffer is attached to stops
+ * sampling. Typically used as the iio_buffer_access_funcs disable callback.
+ */
+int iio_dma_buffer_disable(struct iio_buffer *buffer,
+	struct iio_dev *indio_dev)
+{
+	struct iio_dma_buffer_queue *queue = iio_buffer_to_queue(buffer);
+
+	mutex_lock(&queue->lock);
+	queue->active = false;
+
+	if (queue->ops && queue->ops->abort)
+		queue->ops->abort(queue);
+	mutex_unlock(&queue->lock);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(iio_dma_buffer_disable);
+
+static void iio_dma_buffer_enqueue(struct iio_dma_buffer_queue *queue,
+	struct iio_dma_buffer_block *block)
+{
+	if (block->state == IIO_BLOCK_STATE_DEAD) {
+		iio_buffer_block_put(block);
+	} else if (queue->active) {
+		iio_dma_buffer_submit_block(queue, block);
+	} else {
+		block->state = IIO_BLOCK_STATE_QUEUED;
+		list_add_tail(&block->head, &queue->incoming);
+	}
+}
+
+static struct iio_dma_buffer_block *iio_dma_buffer_dequeue(
+	struct iio_dma_buffer_queue *queue)
+{
+	struct iio_dma_buffer_block *block;
+
+	spin_lock_irq(&queue->list_lock);
+	block = list_first_entry_or_null(&queue->outgoing, struct
+		iio_dma_buffer_block, head);
+	if (block != NULL) {
+		list_del(&block->head);
+		block->state = IIO_BLOCK_STATE_DEQUEUED;
+	}
+	spin_unlock_irq(&queue->list_lock);
+
+	return block;
+}
+
+/**
+ * iio_dma_buffer_read() - DMA buffer read callback
+ * @buffer: Buffer to read from
+ * @n: Number of bytes to read
+ * @user_buffer: Userspace buffer to copy the data to
+ *
+ * Should be used as the read_first_n callback for the iio_buffer_access_funcs
+ * struct for DMA buffers.
+ */
+int iio_dma_buffer_read(struct iio_buffer *buffer, size_t n,
+	char __user *user_buffer)
+{
+	struct iio_dma_buffer_queue *queue = iio_buffer_to_queue(buffer);
+	struct iio_dma_buffer_block *block;
+	int ret;
+
+	if (n < buffer->bytes_per_datum)
+		return -EINVAL;
+
+	mutex_lock(&queue->lock);
+
+	if (!queue->fileio.active_block) {
+		block = iio_dma_buffer_dequeue(queue);
+		if (block == NULL) {
+			ret = 0;
+			goto out_unlock;
+		}
+		queue->fileio.pos = 0;
+		queue->fileio.active_block = block;
+	} else {
+		block = queue->fileio.active_block;
+	}
+
+	n = rounddown(n, buffer->bytes_per_datum);
+	if (n > block->bytes_used - queue->fileio.pos)
+		n = block->bytes_used - queue->fileio.pos;
+
+	if (copy_to_user(user_buffer, block->vaddr + queue->fileio.pos, n)) {
+		ret = -EFAULT;
+		goto out_unlock;
+	}
+
+	queue->fileio.pos += n;
+
+	if (queue->fileio.pos == block->bytes_used) {
+		queue->fileio.active_block = NULL;
+		iio_dma_buffer_enqueue(queue, block);
+	}
+
+	ret = n;
+
+out_unlock:
+	mutex_unlock(&queue->lock);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(iio_dma_buffer_read);
+
+/**
+ * iio_dma_buffer_data_available() - DMA buffer data_available callback
+ * @buf: Buffer to check for data availability
+ *
+ * Should be used as the data_available callback for the iio_buffer_access_funcs
+ * struct for DMA buffers.
+ */
+size_t iio_dma_buffer_data_available(struct iio_buffer *buf)
+{
+	struct iio_dma_buffer_queue *queue = iio_buffer_to_queue(buf);
+	struct iio_dma_buffer_block *block;
+	size_t data_available = 0;
+
+	/*
+	 * For counting the available bytes we'll use the size of the block not
+	 * the number of actual bytes available in the block. Otherwise it is
+	 * possible that we end up with a value that is lower than the watermark
+	 * but won't increase since all blocks are in use.
+	 */
+
+	mutex_lock(&queue->lock);
+	if (queue->fileio.active_block)
+		data_available += queue->fileio.active_block->size;
+
+	spin_lock_irq(&queue->list_lock);
+	list_for_each_entry(block, &queue->outgoing, head)
+		data_available += block->size;
+	spin_unlock_irq(&queue->list_lock);
+	mutex_unlock(&queue->lock);
+
+	return data_available;
+}
+EXPORT_SYMBOL_GPL(iio_dma_buffer_data_available);
+
+/**
+ * iio_dma_buffer_set_bytes_per_datum() - DMA buffer set_bytes_per_datum callback
+ * @buffer: Buffer to set the bytes-per-datum for
+ * @bpd: The new bytes-per-datum value
+ *
+ * Should be used as the set_bytes_per_datum callback for the
+ * iio_buffer_access_funcs struct for DMA buffers.
+ */
+int iio_dma_buffer_set_bytes_per_datum(struct iio_buffer *buffer, size_t bpd)
+{
+	buffer->bytes_per_datum = bpd;
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(iio_dma_buffer_set_bytes_per_datum);
+
+/**
+ * iio_dma_buffer_set_length() - DMA buffer set_length callback
+ * @buffer: Buffer to set the length for
+ * @length: The new buffer length
+ *
+ * Should be used as the set_length callback for the iio_buffer_access_funcs
+ * struct for DMA buffers.
+ */
+int iio_dma_buffer_set_length(struct iio_buffer *buffer, unsigned int length)
+{
+	/* Avoid an invalid state */
+	if (length < 2)
+		length = 2;
+	buffer->length = length;
+	buffer->watermark = length / 2;
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(iio_dma_buffer_set_length);
+
+/**
+ * iio_dma_buffer_init() - Initialize DMA buffer queue
+ * @queue: Buffer to initialize
+ * @dev: DMA device
+ * @ops: DMA buffer queue callback operations
+ *
+ * The DMA device will be used by the queue to do DMA memory allocations, so it
+ * should refer to the device that will perform the DMA to ensure that
+ * allocations are done from a memory region that can be accessed by the device.
+ */
+int iio_dma_buffer_init(struct iio_dma_buffer_queue *queue,
+	struct device *dev, const struct iio_dma_buffer_ops *ops)
+{
+	iio_buffer_init(&queue->buffer);
+	queue->buffer.length = PAGE_SIZE;
+	queue->buffer.watermark = queue->buffer.length / 2;
+	queue->dev = dev;
+	queue->ops = ops;
+
+	INIT_LIST_HEAD(&queue->incoming);
+	INIT_LIST_HEAD(&queue->outgoing);
+
+	mutex_init(&queue->lock);
+	spin_lock_init(&queue->list_lock);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(iio_dma_buffer_init);
+
+/**
+ * iio_dma_buffer_exit() - Cleanup DMA buffer queue
+ * @queue: Buffer to cleanup
+ *
+ * After this function has completed it is safe to free any resources that are
+ * associated with the buffer and are accessed inside the callback operations.
+ */
+void iio_dma_buffer_exit(struct iio_dma_buffer_queue *queue)
+{
+	unsigned int i;
+
+	mutex_lock(&queue->lock);
+
+	spin_lock_irq(&queue->list_lock);
+	for (i = 0; i < ARRAY_SIZE(queue->fileio.blocks); i++) {
+		if (!queue->fileio.blocks[i])
+			continue;
+		queue->fileio.blocks[i]->state = IIO_BLOCK_STATE_DEAD;
+	}
+	INIT_LIST_HEAD(&queue->outgoing);
+	spin_unlock_irq(&queue->list_lock);
+
+	INIT_LIST_HEAD(&queue->incoming);
+
+	for (i = 0; i < ARRAY_SIZE(queue->fileio.blocks); i++) {
+		if (!queue->fileio.blocks[i])
+			continue;
+		iio_buffer_block_put(queue->fileio.blocks[i]);
+		queue->fileio.blocks[i] = NULL;
+	}
+	queue->fileio.active_block = NULL;
+	queue->ops = NULL;
+
+	mutex_unlock(&queue->lock);
+}
+EXPORT_SYMBOL_GPL(iio_dma_buffer_exit);
+
+/**
+ * iio_dma_buffer_release() - Release final buffer resources
+ * @queue: Buffer to release
+ *
+ * Frees resources that can't yet be freed in iio_dma_buffer_exit(). Should be
+ * called in the buffer's release callback implementation right before freeing
+ * the memory associated with the buffer.
+ */
+void iio_dma_buffer_release(struct iio_dma_buffer_queue *queue)
+{
+	mutex_destroy(&queue->lock);
+}
+EXPORT_SYMBOL_GPL(iio_dma_buffer_release);
+
+MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
+MODULE_DESCRIPTION("DMA buffer for the IIO framework");
+MODULE_LICENSE("GPL v2");
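To make the contract described in the comment at the top of this file concrete, here is a hedged sketch (not part of the patch; foo_hw_start_transfer() and foo_dma_abort() are placeholders) of a hardware-specific driver: its submit() callback starts a transfer into block->phys_addr, and its completion path sets bytes_used and hands the block back with iio_dma_buffer_block_done().

#include <linux/iio/buffer-dma.h>

/* Runs from the DMA completion path. */
static void foo_dma_transfer_done(void *data)
{
	struct iio_dma_buffer_block *block = data;

	/* Must be a multiple of bytes_per_datum; here the full block was filled. */
	block->bytes_used = block->size;
	iio_dma_buffer_block_done(block);
}

static int foo_dma_submit(struct iio_dma_buffer_queue *queue,
	struct iio_dma_buffer_block *block)
{
	/* foo_hw_start_transfer() stands in for programming the DMA controller
	 * and arranging for foo_dma_transfer_done(block) to run on completion. */
	return foo_hw_start_transfer(queue->dev, block->phys_addr, block->size,
				     foo_dma_transfer_done, block);
}

static const struct iio_dma_buffer_ops foo_dma_buffer_ops = {
	.submit = foo_dma_submit,
	.abort = foo_dma_abort,	/* recommended: cancel pending transfers */
};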
diff --git a/drivers/iio/buffer/industrialio-buffer-dmaengine.c b/drivers/iio/buffer/industrialio-buffer-dmaengine.c
new file mode 100644
index 0000000..2b5a320
--- /dev/null
+++ b/drivers/iio/buffer/industrialio-buffer-dmaengine.c
@@ -0,0 +1,211 @@
+/*
+ * Copyright 2014-2015 Analog Devices Inc.
+ *  Author: Lars-Peter Clausen <lars@metafoo.de>
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#include <linux/slab.h>
+#include <linux/kernel.h>
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/spinlock.h>
+#include <linux/err.h>
+
+#include <linux/iio/iio.h>
+#include <linux/iio/buffer.h>
+#include <linux/iio/buffer_impl.h>
+#include <linux/iio/buffer-dma.h>
+#include <linux/iio/buffer-dmaengine.h>
+
+/*
+ * The IIO DMAengine buffer combines the generic IIO DMA buffer infrastructure
+ * with the DMAengine framework. The generic IIO DMA buffer infrastructure is
+ * used to manage the buffer memory and implement the IIO buffer operations
+ * while the DMAengine framework is used to perform the DMA transfers. Combined,
+ * this results in a device-independent, fully functional DMA buffer
+ * implementation that can be used by device drivers for peripherals which are
+ * connected to a DMA controller that has a DMAengine driver implementation.
+ */
+
+struct dmaengine_buffer {
+	struct iio_dma_buffer_queue queue;
+
+	struct dma_chan *chan;
+	struct list_head active;
+
+	size_t align;
+	size_t max_size;
+};
+
+static struct dmaengine_buffer *iio_buffer_to_dmaengine_buffer(
+		struct iio_buffer *buffer)
+{
+	return container_of(buffer, struct dmaengine_buffer, queue.buffer);
+}
+
+static void iio_dmaengine_buffer_block_done(void *data)
+{
+	struct iio_dma_buffer_block *block = data;
+	unsigned long flags;
+
+	spin_lock_irqsave(&block->queue->list_lock, flags);
+	list_del(&block->head);
+	spin_unlock_irqrestore(&block->queue->list_lock, flags);
+	iio_dma_buffer_block_done(block);
+}
+
+static int iio_dmaengine_buffer_submit_block(struct iio_dma_buffer_queue *queue,
+	struct iio_dma_buffer_block *block)
+{
+	struct dmaengine_buffer *dmaengine_buffer =
+		iio_buffer_to_dmaengine_buffer(&queue->buffer);
+	struct dma_async_tx_descriptor *desc;
+	dma_cookie_t cookie;
+
+	block->bytes_used = min(block->size, dmaengine_buffer->max_size);
+	block->bytes_used = rounddown(block->bytes_used,
+			dmaengine_buffer->align);
+
+	desc = dmaengine_prep_slave_single(dmaengine_buffer->chan,
+		block->phys_addr, block->bytes_used, DMA_DEV_TO_MEM,
+		DMA_PREP_INTERRUPT);
+	if (!desc)
+		return -ENOMEM;
+
+	desc->callback = iio_dmaengine_buffer_block_done;
+	desc->callback_param = block;
+
+	cookie = dmaengine_submit(desc);
+	if (dma_submit_error(cookie))
+		return dma_submit_error(cookie);
+
+	spin_lock_irq(&dmaengine_buffer->queue.list_lock);
+	list_add_tail(&block->head, &dmaengine_buffer->active);
+	spin_unlock_irq(&dmaengine_buffer->queue.list_lock);
+
+	dma_async_issue_pending(dmaengine_buffer->chan);
+
+	return 0;
+}
+
+static void iio_dmaengine_buffer_abort(struct iio_dma_buffer_queue *queue)
+{
+	struct dmaengine_buffer *dmaengine_buffer =
+		iio_buffer_to_dmaengine_buffer(&queue->buffer);
+
+	dmaengine_terminate_sync(dmaengine_buffer->chan);
+	iio_dma_buffer_block_list_abort(queue, &dmaengine_buffer->active);
+}
+
+static void iio_dmaengine_buffer_release(struct iio_buffer *buf)
+{
+	struct dmaengine_buffer *dmaengine_buffer =
+		iio_buffer_to_dmaengine_buffer(buf);
+
+	iio_dma_buffer_release(&dmaengine_buffer->queue);
+	kfree(dmaengine_buffer);
+}
+
+static const struct iio_buffer_access_funcs iio_dmaengine_buffer_ops = {
+	.read_first_n = iio_dma_buffer_read,
+	.set_bytes_per_datum = iio_dma_buffer_set_bytes_per_datum,
+	.set_length = iio_dma_buffer_set_length,
+	.request_update = iio_dma_buffer_request_update,
+	.enable = iio_dma_buffer_enable,
+	.disable = iio_dma_buffer_disable,
+	.data_available = iio_dma_buffer_data_available,
+	.release = iio_dmaengine_buffer_release,
+
+	.modes = INDIO_BUFFER_HARDWARE,
+	.flags = INDIO_BUFFER_FLAG_FIXED_WATERMARK,
+};
+
+static const struct iio_dma_buffer_ops iio_dmaengine_default_ops = {
+	.submit = iio_dmaengine_buffer_submit_block,
+	.abort = iio_dmaengine_buffer_abort,
+};
+
+/**
+ * iio_dmaengine_buffer_alloc() - Allocate new buffer which uses DMAengine
+ * @dev: Parent device for the buffer
+ * @channel: DMA channel name, typically "rx".
+ *
+ * This allocates a new IIO buffer which internally uses the DMAengine framework
+ * to perform its transfers. The parent device will be used to request the DMA
+ * channel.
+ *
+ * Once done using the buffer, iio_dmaengine_buffer_free() should be used to
+ * release it.
+ */
+struct iio_buffer *iio_dmaengine_buffer_alloc(struct device *dev,
+	const char *channel)
+{
+	struct dmaengine_buffer *dmaengine_buffer;
+	unsigned int width, src_width, dest_width;
+	struct dma_slave_caps caps;
+	struct dma_chan *chan;
+	int ret;
+
+	dmaengine_buffer = kzalloc(sizeof(*dmaengine_buffer), GFP_KERNEL);
+	if (!dmaengine_buffer)
+		return ERR_PTR(-ENOMEM);
+
+	chan = dma_request_slave_channel_reason(dev, channel);
+	if (IS_ERR(chan)) {
+		ret = PTR_ERR(chan);
+		goto err_free;
+	}
+
+	ret = dma_get_slave_caps(chan, &caps);
+	if (ret < 0)
+		goto err_release;
+
+	/* Align to the larger of the minimum supported src/dst address widths */
+	if (caps.src_addr_widths)
+		src_width = __ffs(caps.src_addr_widths);
+	else
+		src_width = 1;
+	if (caps.dst_addr_widths)
+		dest_width = __ffs(caps.dst_addr_widths);
+	else
+		dest_width = 1;
+	width = max(src_width, dest_width);
+
+	INIT_LIST_HEAD(&dmaengine_buffer->active);
+	dmaengine_buffer->chan = chan;
+	dmaengine_buffer->align = width;
+	dmaengine_buffer->max_size = dma_get_max_seg_size(chan->device->dev);
+
+	iio_dma_buffer_init(&dmaengine_buffer->queue, chan->device->dev,
+		&iio_dmaengine_default_ops);
+
+	dmaengine_buffer->queue.buffer.access = &iio_dmaengine_buffer_ops;
+
+	return &dmaengine_buffer->queue.buffer;
+
+err_release:
+	dma_release_channel(chan);
+err_free:
+	kfree(dmaengine_buffer);
+	return ERR_PTR(ret);
+}
+EXPORT_SYMBOL(iio_dmaengine_buffer_alloc);
+
+/**
+ * iio_dmaengine_buffer_free() - Free dmaengine buffer
+ * @buffer: Buffer to free
+ *
+ * Frees a buffer previously allocated with iio_dmaengine_buffer_alloc().
+ */
+void iio_dmaengine_buffer_free(struct iio_buffer *buffer)
+{
+	struct dmaengine_buffer *dmaengine_buffer =
+		iio_buffer_to_dmaengine_buffer(buffer);
+
+	iio_dma_buffer_exit(&dmaengine_buffer->queue);
+	dma_release_channel(dmaengine_buffer->chan);
+
+	iio_buffer_put(buffer);
+}
+EXPORT_SYMBOL_GPL(iio_dmaengine_buffer_free);
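As a usage sketch (assumptions: a device-tree-described "rx" DMA channel and hypothetical foo_* names; not part of the patch), a converter driver would typically allocate the DMAengine buffer in probe(), attach it to its IIO device and advertise hardware buffering:

#include <linux/err.h>
#include <linux/iio/iio.h>
#include <linux/iio/buffer.h>
#include <linux/iio/buffer-dmaengine.h>

static int foo_adc_setup_buffer(struct device *dev, struct iio_dev *indio_dev)
{
	struct iio_buffer *buffer;

	buffer = iio_dmaengine_buffer_alloc(dev, "rx");
	if (IS_ERR(buffer))
		return PTR_ERR(buffer);

	indio_dev->modes |= INDIO_BUFFER_HARDWARE;
	iio_device_attach_buffer(indio_dev, buffer);

	return 0;	/* free with iio_dmaengine_buffer_free() on remove */
}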
diff --git a/drivers/iio/buffer/industrialio-hw-consumer.c b/drivers/iio/buffer/industrialio-hw-consumer.c
new file mode 100644
index 0000000..9516569
--- /dev/null
+++ b/drivers/iio/buffer/industrialio-hw-consumer.c
@@ -0,0 +1,247 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2017 Analog Devices Inc.
+ *  Author: Lars-Peter Clausen <lars@metafoo.de>
+ */
+
+#include <linux/err.h>
+#include <linux/export.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+
+#include <linux/iio/iio.h>
+#include <linux/iio/consumer.h>
+#include <linux/iio/hw-consumer.h>
+#include <linux/iio/buffer_impl.h>
+
+/**
+ * struct iio_hw_consumer - IIO hw consumer block
+ * @buffers: hardware buffers list head.
+ * @channels: IIO provider channels.
+ */
+struct iio_hw_consumer {
+	struct list_head buffers;
+	struct iio_channel *channels;
+};
+
+struct hw_consumer_buffer {
+	struct list_head head;
+	struct iio_dev *indio_dev;
+	struct iio_buffer buffer;
+	long scan_mask[];
+};
+
+static struct hw_consumer_buffer *iio_buffer_to_hw_consumer_buffer(
+	struct iio_buffer *buffer)
+{
+	return container_of(buffer, struct hw_consumer_buffer, buffer);
+}
+
+static void iio_hw_buf_release(struct iio_buffer *buffer)
+{
+	struct hw_consumer_buffer *hw_buf =
+		iio_buffer_to_hw_consumer_buffer(buffer);
+	kfree(hw_buf);
+}
+
+static const struct iio_buffer_access_funcs iio_hw_buf_access = {
+	.release = &iio_hw_buf_release,
+	.modes = INDIO_BUFFER_HARDWARE,
+};
+
+static struct hw_consumer_buffer *iio_hw_consumer_get_buffer(
+	struct iio_hw_consumer *hwc, struct iio_dev *indio_dev)
+{
+	size_t mask_size = BITS_TO_LONGS(indio_dev->masklength) * sizeof(long);
+	struct hw_consumer_buffer *buf;
+
+	list_for_each_entry(buf, &hwc->buffers, head) {
+		if (buf->indio_dev == indio_dev)
+			return buf;
+	}
+
+	buf = kzalloc(sizeof(*buf) + mask_size, GFP_KERNEL);
+	if (!buf)
+		return NULL;
+
+	buf->buffer.access = &iio_hw_buf_access;
+	buf->indio_dev = indio_dev;
+	buf->buffer.scan_mask = buf->scan_mask;
+
+	iio_buffer_init(&buf->buffer);
+	list_add_tail(&buf->head, &hwc->buffers);
+
+	return buf;
+}
+
+/**
+ * iio_hw_consumer_alloc() - Allocate IIO hardware consumer
+ * @dev: Pointer to consumer device.
+ *
+ * Returns a valid iio_hw_consumer on success or an ERR_PTR() on failure.
+ */
+struct iio_hw_consumer *iio_hw_consumer_alloc(struct device *dev)
+{
+	struct hw_consumer_buffer *buf;
+	struct iio_hw_consumer *hwc;
+	struct iio_channel *chan;
+	int ret;
+
+	hwc = kzalloc(sizeof(*hwc), GFP_KERNEL);
+	if (!hwc)
+		return ERR_PTR(-ENOMEM);
+
+	INIT_LIST_HEAD(&hwc->buffers);
+
+	hwc->channels = iio_channel_get_all(dev);
+	if (IS_ERR(hwc->channels)) {
+		ret = PTR_ERR(hwc->channels);
+		goto err_free_hwc;
+	}
+
+	chan = &hwc->channels[0];
+	while (chan->indio_dev) {
+		buf = iio_hw_consumer_get_buffer(hwc, chan->indio_dev);
+		if (!buf) {
+			ret = -ENOMEM;
+			goto err_put_buffers;
+		}
+		set_bit(chan->channel->scan_index, buf->buffer.scan_mask);
+		chan++;
+	}
+
+	return hwc;
+
+err_put_buffers:
+	list_for_each_entry(buf, &hwc->buffers, head)
+		iio_buffer_put(&buf->buffer);
+	iio_channel_release_all(hwc->channels);
+err_free_hwc:
+	kfree(hwc);
+	return ERR_PTR(ret);
+}
+EXPORT_SYMBOL_GPL(iio_hw_consumer_alloc);
+
+/**
+ * iio_hw_consumer_free() - Free IIO hardware consumer
+ * @hwc: hw consumer to free.
+ */
+void iio_hw_consumer_free(struct iio_hw_consumer *hwc)
+{
+	struct hw_consumer_buffer *buf, *n;
+
+	iio_channel_release_all(hwc->channels);
+	list_for_each_entry_safe(buf, n, &hwc->buffers, head)
+		iio_buffer_put(&buf->buffer);
+	kfree(hwc);
+}
+EXPORT_SYMBOL_GPL(iio_hw_consumer_free);
+
+static void devm_iio_hw_consumer_release(struct device *dev, void *res)
+{
+	iio_hw_consumer_free(*(struct iio_hw_consumer **)res);
+}
+
+static int devm_iio_hw_consumer_match(struct device *dev, void *res, void *data)
+{
+	struct iio_hw_consumer **r = res;
+
+	if (!r || !*r) {
+		WARN_ON(!r || !*r);
+		return 0;
+	}
+	return *r == data;
+}
+
+/**
+ * devm_iio_hw_consumer_alloc - Resource-managed iio_hw_consumer_alloc()
+ * @dev: Pointer to consumer device.
+ *
+ * Managed iio_hw_consumer_alloc(). An iio_hw_consumer allocated with this
+ * function is automatically freed on driver detach.
+ *
+ * If an iio_hw_consumer allocated with this function needs to be freed
+ * separately, devm_iio_hw_consumer_free() must be used.
+ *
+ * Returns a pointer to the allocated iio_hw_consumer on success, NULL on failure.
+ */
+struct iio_hw_consumer *devm_iio_hw_consumer_alloc(struct device *dev)
+{
+	struct iio_hw_consumer **ptr, *iio_hwc;
+
+	ptr = devres_alloc(devm_iio_hw_consumer_release, sizeof(*ptr),
+			   GFP_KERNEL);
+	if (!ptr)
+		return NULL;
+
+	iio_hwc = iio_hw_consumer_alloc(dev);
+	if (IS_ERR(iio_hwc)) {
+		devres_free(ptr);
+	} else {
+		*ptr = iio_hwc;
+		devres_add(dev, ptr);
+	}
+
+	return iio_hwc;
+}
+EXPORT_SYMBOL_GPL(devm_iio_hw_consumer_alloc);
+
+/**
+ * devm_iio_hw_consumer_free - Resource-managed iio_hw_consumer_free()
+ * @dev: Pointer to consumer device.
+ * @hwc: iio_hw_consumer to free.
+ *
+ * Free iio_hw_consumer allocated with devm_iio_hw_consumer_alloc().
+ */
+void devm_iio_hw_consumer_free(struct device *dev, struct iio_hw_consumer *hwc)
+{
+	int rc;
+
+	rc = devres_release(dev, devm_iio_hw_consumer_release,
+			    devm_iio_hw_consumer_match, hwc);
+	WARN_ON(rc);
+}
+EXPORT_SYMBOL_GPL(devm_iio_hw_consumer_free);
+
+/**
+ * iio_hw_consumer_enable() - Enable IIO hardware consumer
+ * @hwc: iio_hw_consumer to enable.
+ *
+ * Returns 0 on success or a negative error code on failure.
+ */
+int iio_hw_consumer_enable(struct iio_hw_consumer *hwc)
+{
+	struct hw_consumer_buffer *buf;
+	int ret;
+
+	list_for_each_entry(buf, &hwc->buffers, head) {
+		ret = iio_update_buffers(buf->indio_dev, &buf->buffer, NULL);
+		if (ret)
+			goto err_disable_buffers;
+	}
+
+	return 0;
+
+err_disable_buffers:
+	list_for_each_entry_continue_reverse(buf, &hwc->buffers, head)
+		iio_update_buffers(buf->indio_dev, NULL, &buf->buffer);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(iio_hw_consumer_enable);
+
+/**
+ * iio_hw_consumer_disable() - Disable IIO hardware consumer
+ * @hwc: iio_hw_consumer to disable.
+ */
+void iio_hw_consumer_disable(struct iio_hw_consumer *hwc)
+{
+	struct hw_consumer_buffer *buf;
+
+	list_for_each_entry(buf, &hwc->buffers, head)
+		iio_update_buffers(buf->indio_dev, NULL, &buf->buffer);
+}
+EXPORT_SYMBOL_GPL(iio_hw_consumer_disable);
+
+MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
+MODULE_DESCRIPTION("Hardware consumer buffer the IIO framework");
+MODULE_LICENSE("GPL v2");
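A hedged sketch of the consumer side (hypothetical foo_* names, not part of the patch): a driver whose device is wired directly to one or more IIO providers allocates a hw consumer for its channel map and enables it, which in turn enables buffering on the providing devices:

#include <linux/err.h>
#include <linux/iio/hw-consumer.h>

static int foo_frontend_start(struct device *dev, struct iio_hw_consumer **hwc)
{
	*hwc = devm_iio_hw_consumer_alloc(dev);
	/* may return NULL or an ERR_PTR() on failure, so check for both */
	if (IS_ERR_OR_NULL(*hwc))
		return *hwc ? PTR_ERR(*hwc) : -ENOMEM;

	/* Data now flows over the hardware link; stop with
	 * iio_hw_consumer_disable(). */
	return iio_hw_consumer_enable(*hwc);
}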
diff --git a/drivers/iio/buffer/industrialio-triggered-buffer.c b/drivers/iio/buffer/industrialio-triggered-buffer.c
new file mode 100644
index 0000000..d3db1fc
--- /dev/null
+++ b/drivers/iio/buffer/industrialio-triggered-buffer.c
@@ -0,0 +1,145 @@
+/*
+ * Copyright (c) 2012 Analog Devices, Inc.
+ *  Author: Lars-Peter Clausen <lars@metafoo.de>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/export.h>
+#include <linux/module.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/buffer.h>
+#include <linux/iio/kfifo_buf.h>
+#include <linux/iio/triggered_buffer.h>
+#include <linux/iio/trigger_consumer.h>
+
+static const struct iio_buffer_setup_ops iio_triggered_buffer_setup_ops = {
+	.postenable = &iio_triggered_buffer_postenable,
+	.predisable = &iio_triggered_buffer_predisable,
+};
+
+/**
+ * iio_triggered_buffer_setup() - Setup triggered buffer and pollfunc
+ * @indio_dev:		IIO device structure
+ * @h:			Function which will be used as pollfunc top half
+ * @thread:		Function which will be used as pollfunc bottom half
+ * @setup_ops:		Buffer setup functions to use for this device.
+ *			If NULL the default setup functions for triggered
+ *			buffers will be used.
+ *
+ * This function combines some common tasks which will normally be performed
+ * when setting up a triggered buffer. It will allocate the buffer and the
+ * pollfunc.
+ *
+ * Before calling this function the indio_dev structure should already be
+ * completely initialized, but not yet registered. In practice this means that
+ * this function should be called right before iio_device_register().
+ *
+ * To free the resources allocated by this function call
+ * iio_triggered_buffer_cleanup().
+ */
+int iio_triggered_buffer_setup(struct iio_dev *indio_dev,
+	irqreturn_t (*h)(int irq, void *p),
+	irqreturn_t (*thread)(int irq, void *p),
+	const struct iio_buffer_setup_ops *setup_ops)
+{
+	struct iio_buffer *buffer;
+	int ret;
+
+	buffer = iio_kfifo_allocate();
+	if (!buffer) {
+		ret = -ENOMEM;
+		goto error_ret;
+	}
+
+	iio_device_attach_buffer(indio_dev, buffer);
+
+	indio_dev->pollfunc = iio_alloc_pollfunc(h,
+						 thread,
+						 IRQF_ONESHOT,
+						 indio_dev,
+						 "%s_consumer%d",
+						 indio_dev->name,
+						 indio_dev->id);
+	if (indio_dev->pollfunc == NULL) {
+		ret = -ENOMEM;
+		goto error_kfifo_free;
+	}
+
+	/* Ring buffer functions - here trigger setup related */
+	if (setup_ops)
+		indio_dev->setup_ops = setup_ops;
+	else
+		indio_dev->setup_ops = &iio_triggered_buffer_setup_ops;
+
+	/* Flag that polled ring buffering is possible */
+	indio_dev->modes |= INDIO_BUFFER_TRIGGERED;
+
+	return 0;
+
+error_kfifo_free:
+	iio_kfifo_free(indio_dev->buffer);
+error_ret:
+	return ret;
+}
+EXPORT_SYMBOL(iio_triggered_buffer_setup);
+
+/**
+ * iio_triggered_buffer_cleanup() - Free resources allocated by iio_triggered_buffer_setup()
+ * @indio_dev: IIO device structure
+ */
+void iio_triggered_buffer_cleanup(struct iio_dev *indio_dev)
+{
+	iio_dealloc_pollfunc(indio_dev->pollfunc);
+	iio_kfifo_free(indio_dev->buffer);
+}
+EXPORT_SYMBOL(iio_triggered_buffer_cleanup);
+
+static void devm_iio_triggered_buffer_clean(struct device *dev, void *res)
+{
+	iio_triggered_buffer_cleanup(*(struct iio_dev **)res);
+}
+
+int devm_iio_triggered_buffer_setup(struct device *dev,
+				    struct iio_dev *indio_dev,
+				    irqreturn_t (*h)(int irq, void *p),
+				    irqreturn_t (*thread)(int irq, void *p),
+				    const struct iio_buffer_setup_ops *ops)
+{
+	struct iio_dev **ptr;
+	int ret;
+
+	ptr = devres_alloc(devm_iio_triggered_buffer_clean, sizeof(*ptr),
+			   GFP_KERNEL);
+	if (!ptr)
+		return -ENOMEM;
+
+	*ptr = indio_dev;
+
+	ret = iio_triggered_buffer_setup(indio_dev, h, thread, ops);
+	if (!ret)
+		devres_add(dev, ptr);
+	else
+		devres_free(ptr);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(devm_iio_triggered_buffer_setup);
+
+void devm_iio_triggered_buffer_cleanup(struct device *dev,
+				       struct iio_dev *indio_dev)
+{
+	int rc;
+
+	rc = devres_release(dev, devm_iio_triggered_buffer_clean,
+			    devm_iio_device_match, indio_dev);
+	WARN_ON(rc);
+}
+EXPORT_SYMBOL_GPL(devm_iio_triggered_buffer_cleanup);
+
+MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
+MODULE_DESCRIPTION("IIO helper functions for setting up triggered buffers");
+MODULE_LICENSE("GPL");
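A hedged sketch of typical driver usage (hypothetical foo_* names, not part of the patch): the bottom-half handler reads one scan and pushes it to the buffer, and the buffer/pollfunc pair is set up right before iio_device_register():

#include <linux/iio/iio.h>
#include <linux/iio/trigger_consumer.h>
#include <linux/iio/triggered_buffer.h>

static irqreturn_t foo_trigger_handler(int irq, void *p)
{
	struct iio_poll_func *pf = p;
	struct iio_dev *indio_dev = pf->indio_dev;

	/* Read one scan from the device here and push it, e.g. with
	 * iio_push_to_buffers_with_timestamp(indio_dev, data, pf->timestamp). */

	iio_trigger_notify_done(indio_dev->trig);
	return IRQ_HANDLED;
}

/* In probe(), before iio_device_register():
 *
 *	ret = devm_iio_triggered_buffer_setup(dev, indio_dev,
 *					      iio_pollfunc_store_time,
 *					      foo_trigger_handler, NULL);
 *	if (ret)
 *		return ret;
 */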
diff --git a/drivers/iio/buffer/kfifo_buf.c b/drivers/iio/buffer/kfifo_buf.c
new file mode 100644
index 0000000..70c302a
--- /dev/null
+++ b/drivers/iio/buffer/kfifo_buf.c
@@ -0,0 +1,231 @@
+#include <linux/slab.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/workqueue.h>
+#include <linux/kfifo.h>
+#include <linux/mutex.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/buffer.h>
+#include <linux/iio/kfifo_buf.h>
+#include <linux/iio/buffer_impl.h>
+#include <linux/sched.h>
+#include <linux/poll.h>
+
+struct iio_kfifo {
+	struct iio_buffer buffer;
+	struct kfifo kf;
+	struct mutex user_lock;
+	int update_needed;
+};
+
+#define iio_to_kfifo(r) container_of(r, struct iio_kfifo, buffer)
+
+static inline int __iio_allocate_kfifo(struct iio_kfifo *buf,
+			size_t bytes_per_datum, unsigned int length)
+{
+	if ((length == 0) || (bytes_per_datum == 0))
+		return -EINVAL;
+
+	/*
+	 * Make sure we don't overflow an unsigned int after kfifo rounds up to
+	 * the next power of 2.
+	 */
+	if (roundup_pow_of_two(length) > UINT_MAX / bytes_per_datum)
+		return -EINVAL;
+
+	return __kfifo_alloc((struct __kfifo *)&buf->kf, length,
+			     bytes_per_datum, GFP_KERNEL);
+}
+
+static int iio_request_update_kfifo(struct iio_buffer *r)
+{
+	int ret = 0;
+	struct iio_kfifo *buf = iio_to_kfifo(r);
+
+	mutex_lock(&buf->user_lock);
+	if (buf->update_needed) {
+		kfifo_free(&buf->kf);
+		ret = __iio_allocate_kfifo(buf, buf->buffer.bytes_per_datum,
+				   buf->buffer.length);
+		if (ret >= 0)
+			buf->update_needed = false;
+	} else {
+		kfifo_reset_out(&buf->kf);
+	}
+	mutex_unlock(&buf->user_lock);
+
+	return ret;
+}
+
+static int iio_mark_update_needed_kfifo(struct iio_buffer *r)
+{
+	struct iio_kfifo *kf = iio_to_kfifo(r);
+	kf->update_needed = true;
+	return 0;
+}
+
+static int iio_set_bytes_per_datum_kfifo(struct iio_buffer *r, size_t bpd)
+{
+	if (r->bytes_per_datum != bpd) {
+		r->bytes_per_datum = bpd;
+		iio_mark_update_needed_kfifo(r);
+	}
+	return 0;
+}
+
+static int iio_set_length_kfifo(struct iio_buffer *r, unsigned int length)
+{
+	/* Avoid an invalid state */
+	if (length < 2)
+		length = 2;
+	if (r->length != length) {
+		r->length = length;
+		iio_mark_update_needed_kfifo(r);
+	}
+	return 0;
+}
+
+static int iio_store_to_kfifo(struct iio_buffer *r,
+			      const void *data)
+{
+	int ret;
+	struct iio_kfifo *kf = iio_to_kfifo(r);
+	ret = kfifo_in(&kf->kf, data, 1);
+	if (ret != 1)
+		return -EBUSY;
+	return 0;
+}
+
+static int iio_read_first_n_kfifo(struct iio_buffer *r,
+			   size_t n, char __user *buf)
+{
+	int ret, copied;
+	struct iio_kfifo *kf = iio_to_kfifo(r);
+
+	if (mutex_lock_interruptible(&kf->user_lock))
+		return -ERESTARTSYS;
+
+	if (!kfifo_initialized(&kf->kf) || n < kfifo_esize(&kf->kf))
+		ret = -EINVAL;
+	else
+		ret = kfifo_to_user(&kf->kf, buf, n, &copied);
+	mutex_unlock(&kf->user_lock);
+	if (ret < 0)
+		return ret;
+
+	return copied;
+}
+
+static size_t iio_kfifo_buf_data_available(struct iio_buffer *r)
+{
+	struct iio_kfifo *kf = iio_to_kfifo(r);
+	size_t samples;
+
+	mutex_lock(&kf->user_lock);
+	samples = kfifo_len(&kf->kf);
+	mutex_unlock(&kf->user_lock);
+
+	return samples;
+}
+
+static void iio_kfifo_buffer_release(struct iio_buffer *buffer)
+{
+	struct iio_kfifo *kf = iio_to_kfifo(buffer);
+
+	mutex_destroy(&kf->user_lock);
+	kfifo_free(&kf->kf);
+	kfree(kf);
+}
+
+static const struct iio_buffer_access_funcs kfifo_access_funcs = {
+	.store_to = &iio_store_to_kfifo,
+	.read_first_n = &iio_read_first_n_kfifo,
+	.data_available = iio_kfifo_buf_data_available,
+	.request_update = &iio_request_update_kfifo,
+	.set_bytes_per_datum = &iio_set_bytes_per_datum_kfifo,
+	.set_length = &iio_set_length_kfifo,
+	.release = &iio_kfifo_buffer_release,
+
+	.modes = INDIO_BUFFER_SOFTWARE | INDIO_BUFFER_TRIGGERED,
+};
+
+struct iio_buffer *iio_kfifo_allocate(void)
+{
+	struct iio_kfifo *kf;
+
+	kf = kzalloc(sizeof(*kf), GFP_KERNEL);
+	if (!kf)
+		return NULL;
+
+	kf->update_needed = true;
+	iio_buffer_init(&kf->buffer);
+	kf->buffer.access = &kfifo_access_funcs;
+	kf->buffer.length = 2;
+	mutex_init(&kf->user_lock);
+
+	return &kf->buffer;
+}
+EXPORT_SYMBOL(iio_kfifo_allocate);
+
+void iio_kfifo_free(struct iio_buffer *r)
+{
+	iio_buffer_put(r);
+}
+EXPORT_SYMBOL(iio_kfifo_free);
+
+static void devm_iio_kfifo_release(struct device *dev, void *res)
+{
+	iio_kfifo_free(*(struct iio_buffer **)res);
+}
+
+static int devm_iio_kfifo_match(struct device *dev, void *res, void *data)
+{
+	struct iio_buffer **r = res;
+
+	if (WARN_ON(!r || !*r))
+		return 0;
+
+	return *r == data;
+}
+
+/**
+ * devm_iio_kfifo_allocate - Resource-managed iio_kfifo_allocate()
+ * @dev:		Device to allocate kfifo buffer for
+ *
+ * RETURNS:
+ * Pointer to allocated iio_buffer on success, NULL on failure.
+ */
+struct iio_buffer *devm_iio_kfifo_allocate(struct device *dev)
+{
+	struct iio_buffer **ptr, *r;
+
+	ptr = devres_alloc(devm_iio_kfifo_release, sizeof(*ptr), GFP_KERNEL);
+	if (!ptr)
+		return NULL;
+
+	r = iio_kfifo_allocate();
+	if (r) {
+		*ptr = r;
+		devres_add(dev, ptr);
+	} else {
+		devres_free(ptr);
+	}
+
+	return r;
+}
+EXPORT_SYMBOL(devm_iio_kfifo_allocate);
+
+/**
+ * devm_iio_kfifo_free - Resource-managed iio_kfifo_free()
+ * @dev:		Device the buffer belongs to
+ * @r:			The buffer associated with the device
+ */
+void devm_iio_kfifo_free(struct device *dev, struct iio_buffer *r)
+{
+	WARN_ON(devres_release(dev, devm_iio_kfifo_release,
+			       devm_iio_kfifo_match, r));
+}
+EXPORT_SYMBOL(devm_iio_kfifo_free);
+
+MODULE_LICENSE("GPL");
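For completeness, a hedged sketch of direct kfifo buffer usage (hypothetical foo_* names, not part of the patch) for a device that fills its buffer from its own data path rather than through a trigger:

#include <linux/iio/iio.h>
#include <linux/iio/buffer.h>
#include <linux/iio/kfifo_buf.h>

static int foo_setup_buffer(struct device *dev, struct iio_dev *indio_dev)
{
	struct iio_buffer *buffer;

	buffer = devm_iio_kfifo_allocate(dev);
	if (!buffer)
		return -ENOMEM;

	iio_device_attach_buffer(indio_dev, buffer);
	indio_dev->modes |= INDIO_BUFFER_SOFTWARE;

	/* Push each scan from the device's data path (IRQ handler, URB
	 * completion, ...) with iio_push_to_buffers(indio_dev, scan). */
	return 0;
}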