Update Linux to v5.10.109
Sourced from [1]
[1] https://cdn.kernel.org/pub/linux/kernel/v5.x/linux-5.10.109.tar.xz
Change-Id: I19bca9fc6762d4e63bcf3e4cba88bbe560d9c76c
Signed-off-by: Olivier Deprez <olivier.deprez@arm.com>
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
index 8013562..dd357a7 100644
--- a/include/linux/dmaengine.h
+++ b/include/linux/dmaengine.h
@@ -39,6 +39,7 @@
DMA_IN_PROGRESS,
DMA_PAUSED,
DMA_ERROR,
+ DMA_OUT_OF_ORDER,
};
/**
@@ -61,6 +62,9 @@
DMA_SLAVE,
DMA_CYCLIC,
DMA_INTERLEAVE,
+ DMA_COMPLETION_NO_ORDER,
+ DMA_REPEAT,
+ DMA_LOAD_EOT,
/* last transaction type for creation of the capabilities mask */
DMA_TX_TYPE_END,
};
@@ -83,9 +87,9 @@
/**
* Interleaved Transfer Request
* ----------------------------
- * A chunk is collection of contiguous bytes to be transfered.
+ * A chunk is collection of contiguous bytes to be transferred.
* The gap(in bytes) between two chunks is called inter-chunk-gap(ICG).
- * ICGs may or maynot change between chunks.
+ * ICGs may or may not change between chunks.
* A FRAME is the smallest series of contiguous {chunk,icg} pairs,
* that when repeated an integral number of times, specifies the transfer.
* A transfer template is specification of a Frame, the number of times
@@ -153,7 +157,7 @@
bool dst_sgl;
size_t numf;
size_t frame_size;
- struct data_chunk sgl[0];
+ struct data_chunk sgl[];
};
/**
@@ -162,7 +166,7 @@
* @DMA_PREP_INTERRUPT - trigger an interrupt (callback) upon completion of
* this transaction
* @DMA_CTRL_ACK - if clear, the descriptor cannot be reused until the client
- * acknowledges receipt, i.e. has has a chance to establish any dependency
+ * acknowledges receipt, i.e. has a chance to establish any dependency
* chains
* @DMA_PREP_PQ_DISABLE_P - prevent generation of P while generating Q
* @DMA_PREP_PQ_DISABLE_Q - prevent generation of Q while generating P
@@ -176,6 +180,16 @@
* @DMA_PREP_CMD: tell the driver that the data passed to DMA API is command
* data and the descriptor should be in different format from normal
* data descriptors.
+ * @DMA_PREP_REPEAT: tell the driver that the transaction shall be automatically
+ * repeated when it ends until a transaction is issued on the same channel
+ * with the DMA_PREP_LOAD_EOT flag set. This flag is only applicable to
+ * interleaved transactions and is ignored for all other transaction types.
+ * @DMA_PREP_LOAD_EOT: tell the driver that the transaction shall replace any
+ * active repeated (as indicated by DMA_PREP_REPEAT) transaction when the
+ * repeated transaction ends. Not setting this flag when the previously queued
+ * transaction is marked with DMA_PREP_REPEAT will cause the new transaction
+ * to never be processed and stay in the issued queue forever. The flag is
+ * ignored if the previous transaction is not a repeated transaction.
*/
enum dma_ctrl_flags {
DMA_PREP_INTERRUPT = (1 << 0),
@@ -186,6 +200,8 @@
DMA_PREP_FENCE = (1 << 5),
DMA_CTRL_REUSE = (1 << 6),
DMA_PREP_CMD = (1 << 7),
+ DMA_PREP_REPEAT = (1 << 8),
+ DMA_PREP_LOAD_EOT = (1 << 9),
};
/**
@@ -219,6 +235,62 @@
* @bytes_transferred: byte counter
*/
+/**
+ * enum dma_desc_metadata_mode - per descriptor metadata mode types supported
+ * @DESC_METADATA_CLIENT - the metadata buffer is allocated/provided by the
+ * client driver and it is attached (via the dmaengine_desc_attach_metadata()
+ * helper) to the descriptor.
+ *
+ * Client drivers interested in using this mode can follow:
+ * - DMA_MEM_TO_DEV / DMA_MEM_TO_MEM:
+ * 1. prepare the descriptor (dmaengine_prep_*)
+ * construct the metadata in the client's buffer
+ * 2. use dmaengine_desc_attach_metadata() to attach the buffer to the
+ * descriptor
+ * 3. submit the transfer
+ * - DMA_DEV_TO_MEM:
+ * 1. prepare the descriptor (dmaengine_prep_*)
+ * 2. use dmaengine_desc_attach_metadata() to attach the buffer to the
+ * descriptor
+ * 3. submit the transfer
+ * 4. when the transfer is completed, the metadata should be available in the
+ * attached buffer
+ *
+ * @DESC_METADATA_ENGINE - the metadata buffer is allocated/managed by the DMA
+ * driver. The client driver can ask for the pointer, maximum size and the
+ * currently used size of the metadata and can directly update or read it.
+ * dmaengine_desc_get_metadata_ptr() and dmaengine_desc_set_metadata_len() are
+ * provided as helper functions.
+ *
+ * Note: the metadata area for the descriptor is no longer valid after the
+ * transfer has been completed (valid up to the point when the completion
+ * callback returns if used).
+ *
+ * Client drivers interested in using this mode can follow:
+ * - DMA_MEM_TO_DEV / DMA_MEM_TO_MEM:
+ * 1. prepare the descriptor (dmaengine_prep_*)
+ * 2. use dmaengine_desc_get_metadata_ptr() to get the pointer to the engine's
+ * metadata area
+ * 3. update the metadata at the pointer
+ * 4. use dmaengine_desc_set_metadata_len() to tell the DMA engine the amount
+ * of data the client has placed into the metadata buffer
+ * 5. submit the transfer
+ * - DMA_DEV_TO_MEM:
+ * 1. prepare the descriptor (dmaengine_prep_*)
+ * 2. submit the transfer
+ * 3. on transfer completion, use dmaengine_desc_get_metadata_ptr() to get the
+ * pointer to the engine's metadata area
+ * 4. Read out the metadata from the pointer
+ *
+ * Note: the two modes are not compatible and clients must use one mode per
+ * descriptor.
+ */
+enum dma_desc_metadata_mode {
+ DESC_METADATA_NONE = 0,
+ DESC_METADATA_CLIENT = BIT(0),
+ DESC_METADATA_ENGINE = BIT(1),
+};
+
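
The DESC_METADATA_CLIENT flow described above maps onto the client API roughly as follows. A minimal client-side sketch for a MEM_TO_DEV slave transfer (illustrative only, not part of this diff; the foo_* name, buffer handling and flag choices are assumptions):

#include <linux/dmaengine.h>

/* Client-mode metadata: prepare, attach the client-owned buffer, submit. */
static int foo_tx_with_metadata(struct dma_chan *chan, dma_addr_t buf,
				size_t len, void *md, size_t md_len)
{
	struct dma_async_tx_descriptor *desc;
	dma_cookie_t cookie;
	int ret;

	if (!dmaengine_is_metadata_mode_supported(chan, DESC_METADATA_CLIENT))
		return -EOPNOTSUPP;

	/* 1. prepare the descriptor */
	desc = dmaengine_prep_slave_single(chan, buf, len, DMA_MEM_TO_DEV,
					   DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc)
		return -EINVAL;

	/* 2. attach the metadata buffer constructed by the client */
	ret = dmaengine_desc_attach_metadata(desc, md, md_len);
	if (ret)
		return ret;

	/* 3. submit the transfer */
	cookie = dmaengine_submit(desc);
	if (dma_submit_error(cookie))
		return -EIO;

	dma_async_issue_pending(chan);
	return 0;
}
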
struct dma_chan_percpu {
/* stats */
unsigned long memcpy_count;
@@ -238,10 +310,14 @@
/**
* struct dma_chan - devices supply DMA channels, clients use them
* @device: ptr to the dma device who supplies this channel, always !%NULL
+ * @slave: ptr to the device using this channel
* @cookie: last cookie value returned to client
* @completed_cookie: last completed cookie for this channel
* @chan_id: channel ID for sysfs
* @dev: class device for sysfs
+ * @name: backlink name for sysfs
+ * @dbg_client_name: slave name for debugfs in format:
+ * dev_name(requester's dev):channel name, for example: "2b00000.mcasp:tx"
* @device_node: used to add this to the device chan list
* @local: per-cpu pointer to a struct dma_chan_percpu
* @client_count: how many clients are using this channel
@@ -252,12 +328,17 @@
*/
struct dma_chan {
struct dma_device *device;
+ struct device *slave;
dma_cookie_t cookie;
dma_cookie_t completed_cookie;
/* sysfs */
int chan_id;
struct dma_chan_dev *dev;
+ const char *name;
+#ifdef CONFIG_DEBUG_FS
+ char *dbg_client_name;
+#endif
struct list_head device_node;
struct dma_chan_percpu __percpu *local;
@@ -276,13 +357,11 @@
* @chan: driver channel device
* @device: sysfs device
* @dev_id: parent dma_device dev_id
- * @idr_ref: reference count to gate release of dma_device dev_id
*/
struct dma_chan_dev {
struct dma_chan *chan;
struct device device;
int dev_id;
- atomic_t *idr_ref;
};
/**
@@ -402,7 +481,11 @@
* Since the enum dma_transfer_direction is not defined as bit flag for
* each type, the dma controller should set BIT(<TYPE>) and same
* should be checked by controller as well
+ * @min_burst: min burst capability per-transfer
* @max_burst: max burst capability per-transfer
+ * @max_sg_burst: max number of SG list entries executed in a single burst
+ * DMA transaction with no software intervention for reinitialization.
+ * Zero value means unlimited number of entries.
* @cmd_pause: true, if pause is supported (i.e. for reading residue or
* for resume later)
* @cmd_resume: true, if resume is supported
@@ -415,7 +498,9 @@
u32 src_addr_widths;
u32 dst_addr_widths;
u32 directions;
+ u32 min_burst;
u32 max_burst;
+ u32 max_sg_burst;
bool cmd_pause;
bool cmd_resume;
bool cmd_terminate;
@@ -472,7 +557,19 @@
struct device *dev;
struct kref kref;
size_t len;
- dma_addr_t addr[0];
+ dma_addr_t addr[];
+};
+
+struct dma_async_tx_descriptor;
+
+struct dma_descriptor_metadata_ops {
+ int (*attach)(struct dma_async_tx_descriptor *desc, void *data,
+ size_t len);
+
+ void *(*get_ptr)(struct dma_async_tx_descriptor *desc,
+ size_t *payload_len, size_t *max_len);
+ int (*set_len)(struct dma_async_tx_descriptor *desc,
+ size_t payload_len);
};
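
On the provider side, a DMA driver that supports DESC_METADATA_ENGINE would implement these callbacks and point each prepared descriptor at them. A rough sketch with made-up foo_* names and stubbed bodies (illustrative only, not part of this diff):

static void *foo_get_metadata_ptr(struct dma_async_tx_descriptor *desc,
				  size_t *payload_len, size_t *max_len)
{
	/* a real driver returns its per-descriptor metadata area here */
	*payload_len = 0;
	*max_len = 0;
	return NULL;
}

static int foo_set_metadata_len(struct dma_async_tx_descriptor *desc,
				size_t payload_len)
{
	/* a real driver records how much metadata the client has written */
	return 0;
}

static struct dma_descriptor_metadata_ops foo_metadata_ops = {
	.get_ptr = foo_get_metadata_ptr,
	.set_len = foo_set_metadata_len,
};

/* in the prep_*() path, before handing the descriptor to the client: */
static void foo_init_txd(struct dma_async_tx_descriptor *txd)
{
	txd->metadata_ops = &foo_metadata_ops;
}

/* and once, before registration:
 *	dma_dev->desc_metadata_modes = DESC_METADATA_ENGINE;
 */
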
/**
@@ -481,13 +578,18 @@
* @cookie: tracking cookie for this transaction, set to -EBUSY if
* this tx is sitting on a dependency list
* @flags: flags to augment operation preparation, control completion, and
- * communicate status
+ * communicate status
* @phys: physical address of the descriptor
* @chan: target channel for this operation
* @tx_submit: accept the descriptor, assign ordered cookie and mark the
* descriptor pending. To be pushed on .issue_pending() call
* @callback: routine to call after this operation is complete
* @callback_param: general parameter to pass to the callback routine
+ * @desc_metadata_mode: core managed metadata mode to protect mixed use of
+ * DESC_METADATA_CLIENT or DESC_METADATA_ENGINE. Otherwise
+ * DESC_METADATA_NONE
+ * @metadata_ops: DMA driver provided metadata mode ops, need to be set by the
+ * DMA driver if metadata mode is supported with the descriptor
* ---async_tx api specific fields---
* @next: at completion submit this descriptor
* @parent: pointer to the next level up in the dependency chain
@@ -504,6 +606,8 @@
dma_async_tx_callback_result callback_result;
void *callback_param;
struct dmaengine_unmap_data *unmap;
+ enum dma_desc_metadata_mode desc_metadata_mode;
+ struct dma_descriptor_metadata_ops *metadata_ops;
#ifdef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH
struct dma_async_tx_descriptor *next;
struct dma_async_tx_descriptor *parent;
@@ -539,10 +643,11 @@
static inline void dma_descriptor_unmap(struct dma_async_tx_descriptor *tx)
{
- if (tx->unmap) {
- dmaengine_unmap_put(tx->unmap);
- tx->unmap = NULL;
- }
+ if (!tx->unmap)
+ return;
+
+ dmaengine_unmap_put(tx->unmap);
+ tx->unmap = NULL;
}
#ifndef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH
@@ -611,11 +716,13 @@
* @residue: the remaining number of bytes left to transmit
* on the selected transfer for states DMA_IN_PROGRESS and
* DMA_PAUSED if this is implemented in the driver, else 0
+ * @in_flight_bytes: amount of data in bytes cached by the DMA.
*/
struct dma_tx_state {
dma_cookie_t last;
dma_cookie_t used;
u32 residue;
+ u32 in_flight_bytes;
};
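
As a usage note, this state is typically obtained through dmaengine_tx_status(). A small sketch (illustrative only, not part of this diff; the foo_* name is an assumption):

/* How many bytes of a submitted transfer are still outstanding. */
static u32 foo_bytes_left(struct dma_chan *chan, dma_cookie_t cookie)
{
	struct dma_tx_state state;
	enum dma_status status;

	status = dmaengine_tx_status(chan, cookie, &state);
	if (status == DMA_COMPLETE)
		return 0;

	/* residue is only meaningful for DMA_IN_PROGRESS / DMA_PAUSED */
	return state.residue;
}
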
/**
@@ -666,6 +773,7 @@
* @global_node: list_head for global dma_device_list
* @filter: information for device/slave to filter function/param mapping
* @cap_mask: one or more dma_capability flags
+ * @desc_metadata_modes: supported metadata modes by the DMA device
* @max_xor: maximum number of xor sources, 0 if no capability
* @max_pq: maximum number of PQ sources and PQ-continue capability
* @copy_align: alignment shift for memcpy operations
@@ -683,7 +791,11 @@
* Since the enum dma_transfer_direction is not defined as bit flag for
* each type, the dma controller should set BIT(<TYPE>) and same
* should be checked by controller as well
+ * @min_burst: min burst capability per-transfer
* @max_burst: max burst capability per-transfer
+ * @max_sg_burst: max number of SG list entries executed in a single burst
+ * DMA transaction with no software intervention for reinitialization.
+ * Zero value means unlimited number of entries.
* @residue_granularity: granularity of the transfer residue reported
* by tx_status
* @device_alloc_chan_resources: allocate resources and return the
@@ -703,6 +815,8 @@
* be called after period_len bytes have been transferred.
* @device_prep_interleaved_dma: Transfer expression in a generic way.
* @device_prep_dma_imm_data: DMA's 8 byte immediate data to the dst address
+ * @device_caps: May be used to override the generic DMA slave capabilities
+ * with per-channel specific ones
* @device_config: Pushes a new configuration to a channel, return 0 or an error
* code
* @device_pause: Pauses any transfer happening on a channel. Returns
@@ -719,15 +833,23 @@
* will just return a simple status code
* @device_issue_pending: push pending transactions to hardware
* @descriptor_reuse: a submitted transfer can be resubmitted after completion
+ * @device_release: called sometime after dma_async_device_unregister() is
+ * called and there are no further references to this structure. This
+ * must be implemented to free resources; however, many existing drivers
+ * do not and are therefore not safe to unbind while in use.
+ * @dbg_summary_show: optional routine to show contents in debugfs; default code
+ * will be used when this is omitted, but custom code can show extra,
+ * controller specific information.
*/
struct dma_device {
-
+ struct kref ref;
unsigned int chancnt;
unsigned int privatecnt;
struct list_head channels;
struct list_head global_node;
struct dma_filter filter;
dma_cap_mask_t cap_mask;
+ enum dma_desc_metadata_mode desc_metadata_modes;
unsigned short max_xor;
unsigned short max_pq;
enum dmaengine_alignment copy_align;
@@ -739,11 +861,15 @@
int dev_id;
struct device *dev;
struct module *owner;
+ struct ida chan_ida;
+ struct mutex chan_mutex; /* to protect chan_ida */
u32 src_addr_widths;
u32 dst_addr_widths;
u32 directions;
+ u32 min_burst;
u32 max_burst;
+ u32 max_sg_burst;
bool descriptor_reuse;
enum dma_residue_granularity residue_granularity;
@@ -791,6 +917,8 @@
struct dma_chan *chan, dma_addr_t dst, u64 data,
unsigned long flags);
+ void (*device_caps)(struct dma_chan *chan,
+ struct dma_slave_caps *caps);
int (*device_config)(struct dma_chan *chan,
struct dma_slave_config *config);
int (*device_pause)(struct dma_chan *chan);
@@ -802,6 +930,12 @@
dma_cookie_t cookie,
struct dma_tx_state *txstate);
void (*device_issue_pending)(struct dma_chan *chan);
+ void (*device_release)(struct dma_device *dev);
+ /* debugfs support */
+#ifdef CONFIG_DEBUG_FS
+ void (*dbg_summary_show)(struct seq_file *s, struct dma_device *dev);
+ struct dentry *dbg_dev_root;
+#endif
};
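
The new device_caps hook lets a controller report per-channel capabilities that differ from the device-wide defaults. A hypothetical sketch (illustrative only, not part of this diff; the foo_* name and the channel-0 special case are assumptions):

static void foo_device_caps(struct dma_chan *chan, struct dma_slave_caps *caps)
{
	/* e.g. channel 0 is a slow, single-beat channel on this controller */
	if (chan->chan_id == 0) {
		caps->max_burst = 1;
		caps->max_sg_burst = 1;
	}
}

/* wired up by the driver before dma_async_device_register():
 *	dma_dev->device_caps = foo_device_caps;
 */
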
static inline int dmaengine_slave_config(struct dma_chan *chan,
@@ -878,6 +1012,9 @@
{
if (!chan || !chan->device || !chan->device->device_prep_interleaved_dma)
return NULL;
+ if (flags & DMA_PREP_REPEAT &&
+ !test_bit(DMA_REPEAT, chan->device->cap_mask.bits))
+ return NULL;
return chan->device->device_prep_interleaved_dma(chan, xt, flags);
}
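
Putting the new capability check together with the DMA_PREP_REPEAT/DMA_PREP_LOAD_EOT flags documented earlier, a client can keep a frame cycling until the next template is issued. A hypothetical sketch (illustrative only, not part of this diff; the foo_* name is an assumption):

/* Queue a template that repeats until the next DMA_PREP_LOAD_EOT transfer. */
static int foo_queue_repeated(struct dma_chan *chan,
			      struct dma_interleaved_template *xt)
{
	struct dma_async_tx_descriptor *desc;
	dma_cookie_t cookie;

	if (!test_bit(DMA_REPEAT, chan->device->cap_mask.bits))
		return -EOPNOTSUPP;

	/* LOAD_EOT makes this transfer replace a previously repeating one */
	desc = dmaengine_prep_interleaved_dma(chan, xt,
			DMA_PREP_REPEAT | DMA_PREP_LOAD_EOT);
	if (!desc)
		return -EINVAL;

	cookie = dmaengine_submit(desc);
	if (dma_submit_error(cookie))
		return -EIO;

	dma_async_issue_pending(chan);
	return 0;
}
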
@@ -904,6 +1041,41 @@
len, flags);
}
+static inline bool dmaengine_is_metadata_mode_supported(struct dma_chan *chan,
+ enum dma_desc_metadata_mode mode)
+{
+ if (!chan)
+ return false;
+
+ return !!(chan->device->desc_metadata_modes & mode);
+}
+
+#ifdef CONFIG_DMA_ENGINE
+int dmaengine_desc_attach_metadata(struct dma_async_tx_descriptor *desc,
+ void *data, size_t len);
+void *dmaengine_desc_get_metadata_ptr(struct dma_async_tx_descriptor *desc,
+ size_t *payload_len, size_t *max_len);
+int dmaengine_desc_set_metadata_len(struct dma_async_tx_descriptor *desc,
+ size_t payload_len);
+#else /* CONFIG_DMA_ENGINE */
+static inline int dmaengine_desc_attach_metadata(
+ struct dma_async_tx_descriptor *desc, void *data, size_t len)
+{
+ return -EINVAL;
+}
+static inline void *dmaengine_desc_get_metadata_ptr(
+ struct dma_async_tx_descriptor *desc, size_t *payload_len,
+ size_t *max_len)
+{
+ return NULL;
+}
+static inline int dmaengine_desc_set_metadata_len(
+ struct dma_async_tx_descriptor *desc, size_t payload_len)
+{
+ return -EINVAL;
+}
+#endif /* CONFIG_DMA_ENGINE */
+
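
For the DESC_METADATA_ENGINE DEV_TO_MEM flow, the metadata is read back before the completion callback returns. A hypothetical sketch (illustrative only, not part of this diff; the foo_* name and passing the descriptor as callback_param at submit time are assumptions):

static void foo_rx_complete(void *param, const struct dmaengine_result *result)
{
	struct dma_async_tx_descriptor *desc = param;	/* set as callback_param */
	size_t payload_len, max_len;
	void *md;

	if (result->result != DMA_TRANS_NOERROR)
		return;

	md = dmaengine_desc_get_metadata_ptr(desc, &payload_len, &max_len);
	if (IS_ERR_OR_NULL(md))
		return;

	/* consume up to payload_len bytes here; md is invalid after return */
}
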
/**
* dmaengine_terminate_all() - Terminate all active DMA transfers
* @chan: The channel for which to terminate the transfers
@@ -932,7 +1104,7 @@
* dmaengine_synchronize() needs to be called before it is safe to free
* any memory that is accessed by previously submitted descriptors or before
* freeing any resources accessed from within the completion callback of any
- * perviously submitted descriptors.
+ * previously submitted descriptors.
*
* This function can be called from atomic context as well as from within a
* complete callback of a descriptor submitted on the same channel.
@@ -954,7 +1126,7 @@
*
* Synchronizes to the DMA channel termination to the current context. When this
* function returns it is guaranteed that all transfers for previously issued
- * descriptors have stopped and and it is safe to free the memory assoicated
+ * descriptors have stopped and it is safe to free the memory associated
* with them. Furthermore it is guaranteed that all complete callback functions
* for a previously submitted descriptor have finished running and it is safe to
* free resources accessed from within the complete callbacks.
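
The ordering described above translates roughly into the following teardown sequence. A hypothetical sketch (illustrative only, not part of this diff; the foo_* name and the bounce buffer are assumed client resources):

static void foo_teardown(struct dma_chan *chan, void *bounce_buf)
{
	/* stop the channel; outstanding callbacks may still be running */
	dmaengine_terminate_async(chan);

	/* wait for them; must not be called from atomic context */
	dmaengine_synchronize(chan);

	/* now safe: no descriptor or callback can touch the buffer anymore */
	kfree(bounce_buf);
	dma_release_channel(chan);
}
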
@@ -1031,14 +1203,7 @@
static inline bool dmaengine_check_align(enum dmaengine_alignment align,
size_t off1, size_t off2, size_t len)
{
- size_t mask;
-
- if (!align)
- return true;
- mask = (1 << align) - 1;
- if (mask & (off1 | off2 | len))
- return false;
- return true;
+ return !(((1 << align) - 1) & (off1 | off2 | len));
}
static inline bool is_dma_copy_aligned(struct dma_device *dev, size_t off1,
@@ -1112,9 +1277,9 @@
{
if (dma_dev_has_pq_continue(dma) || !dmaf_continue(flags))
return dma_dev_to_maxpq(dma);
- else if (dmaf_p_disabled_continue(flags))
+ if (dmaf_p_disabled_continue(flags))
return dma_dev_to_maxpq(dma) - 1;
- else if (dmaf_continue(flags))
+ if (dmaf_continue(flags))
return dma_dev_to_maxpq(dma) - 3;
BUG();
}
@@ -1125,7 +1290,7 @@
if (inc) {
if (dir_icg)
return dir_icg;
- else if (sgl)
+ if (sgl)
return icg;
}
@@ -1291,11 +1456,12 @@
static inline void
dma_set_tx_state(struct dma_tx_state *st, dma_cookie_t last, dma_cookie_t used, u32 residue)
{
- if (st) {
- st->last = last;
- st->used = used;
- st->residue = residue;
- }
+ if (!st)
+ return;
+
+ st->last = last;
+ st->used = used;
+ st->residue = residue;
}
#ifdef CONFIG_DMA_ENGINE
@@ -1306,7 +1472,6 @@
struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask,
dma_filter_fn fn, void *fn_param,
struct device_node *np);
-struct dma_chan *dma_request_slave_channel(struct device *dev, const char *name);
struct dma_chan *dma_request_chan(struct device *dev, const char *name);
struct dma_chan *dma_request_chan_by_mask(const dma_cap_mask_t *mask);
@@ -1336,11 +1501,6 @@
{
return NULL;
}
-static inline struct dma_chan *dma_request_slave_channel(struct device *dev,
- const char *name)
-{
- return NULL;
-}
static inline struct dma_chan *dma_request_chan(struct device *dev,
const char *name)
{
@@ -1361,8 +1521,6 @@
}
#endif
-#define dma_request_slave_channel_reason(dev, name) dma_request_chan(dev, name)
-
static inline int dmaengine_desc_set_reuse(struct dma_async_tx_descriptor *tx)
{
struct dma_slave_caps caps;
@@ -1372,12 +1530,11 @@
if (ret)
return ret;
- if (caps.descriptor_reuse) {
- tx->flags |= DMA_CTRL_REUSE;
- return 0;
- } else {
+ if (!caps.descriptor_reuse)
return -EPERM;
- }
+
+ tx->flags |= DMA_CTRL_REUSE;
+ return 0;
}
static inline void dmaengine_desc_clear_reuse(struct dma_async_tx_descriptor *tx)
@@ -1393,10 +1550,10 @@
static inline int dmaengine_desc_free(struct dma_async_tx_descriptor *desc)
{
/* this is supported for reusable desc, so check that */
- if (dmaengine_desc_test_reuse(desc))
- return desc->desc_free(desc);
- else
+ if (!dmaengine_desc_test_reuse(desc))
return -EPERM;
+
+ return desc->desc_free(desc);
}
/* --- DMA device --- */
@@ -1404,16 +1561,25 @@
int dma_async_device_register(struct dma_device *device);
int dmaenginem_async_device_register(struct dma_device *device);
void dma_async_device_unregister(struct dma_device *device);
+int dma_async_device_channel_register(struct dma_device *device,
+ struct dma_chan *chan);
+void dma_async_device_channel_unregister(struct dma_device *device,
+ struct dma_chan *chan);
void dma_run_dependencies(struct dma_async_tx_descriptor *tx);
-struct dma_chan *dma_get_slave_channel(struct dma_chan *chan);
-struct dma_chan *dma_get_any_slave_channel(struct dma_device *device);
#define dma_request_channel(mask, x, y) \
__dma_request_channel(&(mask), x, y, NULL)
-#define dma_request_slave_channel_compat(mask, x, y, dev, name) \
- __dma_request_slave_channel_compat(&(mask), x, y, dev, name)
+
+/* Deprecated, please use dma_request_chan() directly */
+static inline struct dma_chan * __deprecated
+dma_request_slave_channel(struct device *dev, const char *name)
+{
+ struct dma_chan *ch = dma_request_chan(dev, name);
+
+ return IS_ERR(ch) ? NULL : ch;
+}
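
Clients using the deprecated wrapper can switch to dma_request_chan() directly and gain proper error reporting, including -EPROBE_DEFER. A hypothetical sketch (illustrative only, not part of this diff; the foo_* name and the "tx" channel name are assumptions):

static int foo_probe_dma(struct device *dev, struct dma_chan **chan_out)
{
	struct dma_chan *chan;

	chan = dma_request_chan(dev, "tx");
	if (IS_ERR(chan))
		return PTR_ERR(chan);	/* may be -EPROBE_DEFER */

	*chan_out = chan;
	return 0;
}
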
static inline struct dma_chan
-*__dma_request_slave_channel_compat(const dma_cap_mask_t *mask,
+*dma_request_slave_channel_compat(const dma_cap_mask_t mask,
dma_filter_fn fn, void *fn_param,
struct device *dev, const char *name)
{
@@ -1426,6 +1592,23 @@
if (!fn || !fn_param)
return NULL;
- return __dma_request_channel(mask, fn, fn_param, NULL);
+ return __dma_request_channel(&mask, fn, fn_param, NULL);
+}
+
+static inline char *
+dmaengine_get_direction_text(enum dma_transfer_direction dir)
+{
+ switch (dir) {
+ case DMA_DEV_TO_MEM:
+ return "DEV_TO_MEM";
+ case DMA_MEM_TO_DEV:
+ return "MEM_TO_DEV";
+ case DMA_MEM_TO_MEM:
+ return "MEM_TO_MEM";
+ case DMA_DEV_TO_DEV:
+ return "DEV_TO_DEV";
+ default:
+ return "invalid";
+ }
}
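
The helper above is mainly useful for debug output. A tiny sketch (illustrative only, not part of this diff; the foo_* name is an assumption):

static void foo_log_config(struct device *dev,
			   const struct dma_slave_config *cfg)
{
	dev_dbg(dev, "configured %s transfer\n",
		dmaengine_get_direction_text(cfg->direction));
}
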
#endif /* DMAENGINE_H */