/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 *  Copyright (c) by Jaroslav Kysela <perex@perex.cz>
 *                   Takashi Iwai <tiwai@suse.de>
 *
 *  Generic memory allocators
 */

#ifndef __SOUND_MEMALLOC_H
#define __SOUND_MEMALLOC_H

#include <asm/page.h>

struct device;

/*
 * buffer device info
 */
struct snd_dma_device {
        int type;                       /* SNDRV_DMA_TYPE_XXX */
        struct device *dev;             /* generic device */
};

#define snd_dma_continuous_data(x)      ((struct device *)(__force unsigned long)(x))
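/*
 * Usage sketch (illustrative only, not part of the API defined here):
 * SNDRV_DMA_TYPE_CONTINUOUS allocations have no backing struct device, so
 * the GFP mask is passed through the device argument via this macro.  The
 * 64 kB size below is an arbitrary example value.
 *
 *      struct snd_dma_buffer dmab;
 *      int err;
 *
 *      err = snd_dma_alloc_pages(SNDRV_DMA_TYPE_CONTINUOUS,
 *                                snd_dma_continuous_data(GFP_KERNEL),
 *                                64 * 1024, &dmab);
 */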

/*
 * buffer types
 */
#define SNDRV_DMA_TYPE_UNKNOWN          0       /* not defined */
#define SNDRV_DMA_TYPE_CONTINUOUS       1       /* continuous no-DMA memory */
#define SNDRV_DMA_TYPE_DEV              2       /* generic device continuous */
#define SNDRV_DMA_TYPE_DEV_UC           5       /* continuous non-cached */
#ifdef CONFIG_SND_DMA_SGBUF
#define SNDRV_DMA_TYPE_DEV_SG           3       /* generic device SG-buffer */
#define SNDRV_DMA_TYPE_DEV_UC_SG        6       /* SG non-cached */
#else
#define SNDRV_DMA_TYPE_DEV_SG           SNDRV_DMA_TYPE_DEV /* no SG-buf support */
#define SNDRV_DMA_TYPE_DEV_UC_SG        SNDRV_DMA_TYPE_DEV_UC
#endif
#ifdef CONFIG_GENERIC_ALLOCATOR
#define SNDRV_DMA_TYPE_DEV_IRAM         4       /* generic device iram-buffer */
#else
#define SNDRV_DMA_TYPE_DEV_IRAM         SNDRV_DMA_TYPE_DEV
#endif
#define SNDRV_DMA_TYPE_VMALLOC          7       /* vmalloc'ed buffer */

/*
 * info for buffer allocation
 */
struct snd_dma_buffer {
        struct snd_dma_device dev;      /* device type */
        unsigned char *area;            /* virtual pointer */
        dma_addr_t addr;                /* physical address */
        size_t bytes;                   /* buffer size in bytes */
        void *private_data;             /* private for allocator; don't touch */
};
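/*
 * Illustrative sketch (not taken from this header): after a successful
 * allocation, ->area is the CPU mapping and ->addr the bus address of the
 * same memory, so a driver can clear the buffer through the CPU pointer
 * and hand the DMA address to its hardware.  "chip->regs" and
 * EXAMPLE_DMA_BASE_REG are made-up placeholders.
 *
 *      memset(dmab->area, 0, dmab->bytes);
 *      writel(lower_32_bits(dmab->addr), chip->regs + EXAMPLE_DMA_BASE_REG);
 */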

/*
 * return the pages matching with the given byte size
 */
static inline unsigned int snd_sgbuf_aligned_pages(size_t size)
{
        return (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
}
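/*
 * Worked example (assuming the common 4 KiB PAGE_SIZE): a request of
 * 4097 bytes rounds up to two pages, while 4096 bytes needs exactly one:
 *
 *      snd_sgbuf_aligned_pages(4096);  -> 1
 *      snd_sgbuf_aligned_pages(4097);  -> 2
 */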

#ifdef CONFIG_SND_DMA_SGBUF
/*
 * Scatter-Gather generic device pages
 */
void *snd_malloc_sgbuf_pages(struct device *device,
                             size_t size, struct snd_dma_buffer *dmab,
                             size_t *res_size);
int snd_free_sgbuf_pages(struct snd_dma_buffer *dmab);

struct snd_sg_page {
        void *buf;
        dma_addr_t addr;
};

struct snd_sg_buf {
        int size;                       /* allocated byte size */
        int pages;                      /* allocated pages */
        int tblsize;                    /* allocated table size */
        struct snd_sg_page *table;      /* address table */
        struct page **page_table;       /* page table (for vmap/vunmap) */
        struct device *dev;
};

/*
 * return the physical address at the corresponding offset
 */
static inline dma_addr_t snd_sgbuf_get_addr(struct snd_dma_buffer *dmab,
                                            size_t offset)
{
        struct snd_sg_buf *sgbuf = dmab->private_data;
        dma_addr_t addr;

        if (!sgbuf)
                return dmab->addr + offset;
        addr = sgbuf->table[offset >> PAGE_SHIFT].addr;
        addr &= ~((dma_addr_t)PAGE_SIZE - 1);
        return addr + offset % PAGE_SIZE;
}

/*
 * return the virtual address at the corresponding offset
 */
static inline void *snd_sgbuf_get_ptr(struct snd_dma_buffer *dmab,
                                      size_t offset)
{
        struct snd_sg_buf *sgbuf = dmab->private_data;

        if (!sgbuf)
                return dmab->area + offset;
        return sgbuf->table[offset >> PAGE_SHIFT].buf + offset % PAGE_SIZE;
}

unsigned int snd_sgbuf_get_chunk_size(struct snd_dma_buffer *dmab,
                                      unsigned int ofs, unsigned int size);
#else
/* non-SG versions */
static inline dma_addr_t snd_sgbuf_get_addr(struct snd_dma_buffer *dmab,
                                            size_t offset)
{
        return dmab->addr + offset;
}

static inline void *snd_sgbuf_get_ptr(struct snd_dma_buffer *dmab,
                                      size_t offset)
{
        return dmab->area + offset;
}

#define snd_sgbuf_get_chunk_size(dmab, ofs, size)       (size)

#endif /* CONFIG_SND_DMA_SGBUF */
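/*
 * Illustrative sketch (assumptions: a descriptor-based DMA engine and a
 * helper program_descriptor() that is not a real kernel function): walking
 * a buffer in physically contiguous chunks.  With an SG buffer,
 * snd_sgbuf_get_chunk_size() may return less than the remaining size at a
 * page-contiguity break; with a linear buffer it simply returns the
 * requested size, so the loop runs once.
 *
 *      unsigned int ofs = 0, chunk;
 *
 *      while (ofs < dmab->bytes) {
 *              chunk = snd_sgbuf_get_chunk_size(dmab, ofs, dmab->bytes - ofs);
 *              program_descriptor(chip, snd_sgbuf_get_addr(dmab, ofs), chunk);
 *              ofs += chunk;
 *      }
 */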

/* allocate/release a buffer */
int snd_dma_alloc_pages(int type, struct device *dev, size_t size,
                        struct snd_dma_buffer *dmab);
int snd_dma_alloc_pages_fallback(int type, struct device *dev, size_t size,
                                 struct snd_dma_buffer *dmab);
void snd_dma_free_pages(struct snd_dma_buffer *dmab);
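/*
 * Usage sketch (illustrative; "chip->card->dev" and the 32 kB size are
 * assumptions, not values mandated by this API): allocate a device-coherent
 * buffer at probe time and release it on removal.
 *
 *      struct snd_dma_buffer dmab;
 *
 *      if (snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, chip->card->dev,
 *                              32 * 1024, &dmab) < 0)
 *              return -ENOMEM;
 *      ...
 *      snd_dma_free_pages(&dmab);
 *
 * snd_dma_alloc_pages_fallback() behaves the same but keeps reducing the
 * requested size on failure (down to one page) and reports the size that
 * was actually obtained in dmab.bytes.
 */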

#endif /* __SOUND_MEMALLOC_H */