David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 1 | /* SPDX-License-Identifier: GPL-2.0-or-later */ |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 2 | #ifndef __LINUX_CMA_H |
| 3 | #define __LINUX_CMA_H |
| 4 | |
| 5 | /* |
| 6 | * Contiguous Memory Allocator for DMA mapping framework |
| 7 | * Copyright (c) 2010-2011 by Samsung Electronics. |
| 8 | * Written by: |
| 9 | * Marek Szyprowski <m.szyprowski@samsung.com> |
| 10 | * Michal Nazarewicz <mina86@mina86.com> |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 11 | */ |
| 12 | |
| 13 | /* |
| 14 | * Contiguous Memory Allocator |
| 15 | * |
| 16 | * The Contiguous Memory Allocator (CMA) makes it possible to |
| 17 | * allocate big contiguous chunks of memory after the system has |
| 18 | * booted. |
| 19 | * |
| 20 | * Why is it needed? |
| 21 | * |
 * Various devices on embedded systems have no scatter-gather and/or
| 23 | * IO map support and require contiguous blocks of memory to |
| 24 | * operate. They include devices such as cameras, hardware video |
| 25 | * coders, etc. |
| 26 | * |
| 27 | * Such devices often require big memory buffers (a full HD frame |
 * is, for instance, more than 2 megapixels large, i.e. more than 6
| 29 | * MB of memory), which makes mechanisms such as kmalloc() or |
| 30 | * alloc_page() ineffective. |
| 31 | * |
 * At the same time, a solution where a big memory region is
 * reserved for a device is suboptimal since often more memory is
 * reserved than strictly required and, moreover, the memory is
 * inaccessible to the page system even if device drivers don't use it.
| 36 | * |
| 37 | * CMA tries to solve this issue by operating on memory regions |
| 38 | * where only movable pages can be allocated from. This way, kernel |
| 39 | * can use the memory for pagecache and when device driver requests |
| 40 | * it, allocated pages can be migrated. |
| 41 | * |
| 42 | * Driver usage |
| 43 | * |
| 44 | * CMA should not be used by the device drivers directly. It is |
| 45 | * only a helper framework for dma-mapping subsystem. |
| 46 | * |
| 47 | * For more information, see kernel-docs in kernel/dma/contiguous.c |
| 48 | */ |
| 49 | |
| 50 | #ifdef __KERNEL__ |
| 51 | |
| 52 | #include <linux/device.h> |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 53 | #include <linux/mm.h> |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 54 | |
| 55 | struct cma; |
| 56 | struct page; |
| 57 | |
| 58 | #ifdef CONFIG_DMA_CMA |
| 59 | |
| 60 | extern struct cma *dma_contiguous_default_area; |
| 61 | |
| 62 | static inline struct cma *dev_get_cma_area(struct device *dev) |
| 63 | { |
| 64 | if (dev && dev->cma_area) |
| 65 | return dev->cma_area; |
| 66 | return dma_contiguous_default_area; |
| 67 | } |
| 68 | |
| 69 | static inline void dev_set_cma_area(struct device *dev, struct cma *cma) |
| 70 | { |
| 71 | if (dev) |
| 72 | dev->cma_area = cma; |
| 73 | } |
| 74 | |
/*
 * Replace the system-wide default CMA area that dev_get_cma_area()
 * falls back to for devices without a dedicated area.
 */
static inline void dma_contiguous_set_default(struct cma *cma)
{
	dma_contiguous_default_area = cma;
}
| 79 | |
/* Reserve the default contiguous memory area during early boot, below @addr_limit. */
void dma_contiguous_reserve(phys_addr_t addr_limit);

/*
 * Reserve a CMA area of @size bytes below @limit, at @base if non-zero;
 * on success *@res_cma is set to the new area. NOTE(review): @fixed
 * presumably forces the exact @base address — confirm against the
 * definition in kernel/dma/contiguous.c.
 */
int __init dma_contiguous_reserve_area(phys_addr_t size, phys_addr_t base,
				       phys_addr_t limit, struct cma **res_cma,
				       bool fixed);
| 85 | |
| 86 | /** |
| 87 | * dma_declare_contiguous() - reserve area for contiguous memory handling |
| 88 | * for particular device |
| 89 | * @dev: Pointer to device structure. |
| 90 | * @size: Size of the reserved memory. |
| 91 | * @base: Start address of the reserved memory (optional, 0 for any). |
| 92 | * @limit: End address of the reserved memory (optional, 0 for any). |
| 93 | * |
| 94 | * This function reserves memory for specified device. It should be |
| 95 | * called by board specific code when early allocator (memblock or bootmem) |
 * is still active.
| 97 | */ |
| 98 | |
| 99 | static inline int dma_declare_contiguous(struct device *dev, phys_addr_t size, |
| 100 | phys_addr_t base, phys_addr_t limit) |
| 101 | { |
| 102 | struct cma *cma; |
| 103 | int ret; |
| 104 | ret = dma_contiguous_reserve_area(size, base, limit, &cma, true); |
| 105 | if (ret == 0) |
| 106 | dev_set_cma_area(dev, cma); |
| 107 | |
| 108 | return ret; |
| 109 | } |
| 110 | |
/* Allocate @count pages (aligned to 2^@order) from the device's CMA area. */
struct page *dma_alloc_from_contiguous(struct device *dev, size_t count,
				       unsigned int order, bool no_warn);
/* Return @count pages to the CMA area; true if they belonged to it. */
bool dma_release_from_contiguous(struct device *dev, struct page *pages,
				 int count);
/* Size-based CMA alloc/free pair used by the dma-mapping core. */
struct page *dma_alloc_contiguous(struct device *dev, size_t size, gfp_t gfp);
void dma_free_contiguous(struct device *dev, struct page *page, size_t size);
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 117 | |
| 118 | #else |
| 119 | |
/* CONFIG_DMA_CMA=n stub: no device-specific or default CMA area exists. */
static inline struct cma *dev_get_cma_area(struct device *dev)
{
	return NULL;
}
| 124 | |
/* CONFIG_DMA_CMA=n stub: no-op, there is no CMA area to assign. */
static inline void dev_set_cma_area(struct device *dev, struct cma *cma) { }
| 126 | |
/* CONFIG_DMA_CMA=n stub: no-op, no default CMA area exists. */
static inline void dma_contiguous_set_default(struct cma *cma) { }
| 128 | |
/* CONFIG_DMA_CMA=n stub: no-op, nothing to reserve at boot. */
static inline void dma_contiguous_reserve(phys_addr_t limit) { }
| 130 | |
/*
 * CONFIG_DMA_CMA=n stub: reservation is unsupported, so report -ENOSYS
 * and leave *@res_cma untouched.
 */
static inline int dma_contiguous_reserve_area(phys_addr_t size, phys_addr_t base,
				       phys_addr_t limit, struct cma **res_cma,
				       bool fixed)
{
	return -ENOSYS;
}
| 137 | |
/* CONFIG_DMA_CMA=n stub: per-device CMA reservation is unsupported. */
static inline
int dma_declare_contiguous(struct device *dev, phys_addr_t size,
			   phys_addr_t base, phys_addr_t limit)
{
	return -ENOSYS;
}
| 144 | |
/* CONFIG_DMA_CMA=n stub: no CMA pool, so allocation always fails (NULL). */
static inline
struct page *dma_alloc_from_contiguous(struct device *dev, size_t count,
				       unsigned int order, bool no_warn)
{
	return NULL;
}
| 151 | |
/*
 * CONFIG_DMA_CMA=n stub: always returns false, i.e. the pages did not
 * come from a CMA area.
 */
static inline
bool dma_release_from_contiguous(struct device *dev, struct page *pages,
				 int count)
{
	return false;
}
| 158 | |
/* Use fallback alloc() and free() when CONFIG_DMA_CMA=n */
static inline struct page *dma_alloc_contiguous(struct device *dev, size_t size,
		gfp_t gfp)
{
	/* NULL signals the caller to fall back to a plain page allocation. */
	return NULL;
}
| 165 | |
static inline void dma_free_contiguous(struct device *dev, struct page *page,
		size_t size)
{
	/*
	 * CONFIG_DMA_CMA=n: per the comment above, pages being freed here
	 * presumably came from the fallback page allocator (since
	 * dma_alloc_contiguous() returned NULL) — release them with
	 * __free_pages(). NOTE(review): confirm against the callers in
	 * kernel/dma/.
	 */
	__free_pages(page, get_order(size));
}
| 171 | |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 172 | #endif |
| 173 | |
| 174 | #endif |
| 175 | |
| 176 | #endif |