/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_DMA_DIRECT_H
#define _LINUX_DMA_DIRECT_H 1

#include <linux/dma-mapping.h>
#include <linux/mem_encrypt.h>

#ifdef CONFIG_ARCH_HAS_PHYS_TO_DMA
#include <asm/dma-direct.h>
#else
static inline dma_addr_t __phys_to_dma(struct device *dev, phys_addr_t paddr)
{
	dma_addr_t dev_addr = (dma_addr_t)paddr;

	return dev_addr - ((dma_addr_t)dev->dma_pfn_offset << PAGE_SHIFT);
}

static inline phys_addr_t __dma_to_phys(struct device *dev, dma_addr_t dev_addr)
{
	phys_addr_t paddr = (phys_addr_t)dev_addr;

	return paddr + ((phys_addr_t)dev->dma_pfn_offset << PAGE_SHIFT);
}
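
/*
 * Worked example (illustrative numbers, not from any real platform):
 * with PAGE_SHIFT == 12 and dev->dma_pfn_offset == 0x80000, the helpers
 * above subtract or add a byte offset of 0x80000 << 12 == 0x80000000, so
 *
 *	__phys_to_dma(dev, 0xC0000000) == 0x40000000
 *	__dma_to_phys(dev, 0x40000000) == 0xC0000000
 *
 * i.e. the two helpers are exact inverses for a given device.
 */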
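
/*
 * dma_capable() reports whether the device can address the whole bus
 * range [addr, addr + size - 1]. Illustrative case (values assumed for
 * this comment): with a 32-bit *dev->dma_mask and no bus_dma_mask, a
 * buffer at addr == 0xFFFFF000 with size == 0x2000 fails the check,
 * since addr + size - 1 == 0x100000FFF exceeds 0xFFFFFFFF.
 */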
static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
{
	if (!dev->dma_mask)
		return false;

	return addr + size - 1 <=
		min_not_zero(*dev->dma_mask, dev->bus_dma_mask);
}
#endif /* !CONFIG_ARCH_HAS_PHYS_TO_DMA */

#ifdef CONFIG_ARCH_HAS_FORCE_DMA_UNENCRYPTED
bool force_dma_unencrypted(struct device *dev);
#else
static inline bool force_dma_unencrypted(struct device *dev)
{
	return false;
}
#endif /* CONFIG_ARCH_HAS_FORCE_DMA_UNENCRYPTED */
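
/*
 * Architectures that select CONFIG_ARCH_HAS_FORCE_DMA_UNENCRYPTED
 * (e.g. x86 with AMD SEV, where guest memory is encrypted by default)
 * return true here so that dma-direct maps DMA buffers decrypted.
 */
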
/*
 * If memory encryption is supported, phys_to_dma will set the memory encryption
 * bit in the DMA address, and dma_to_phys will clear it. The raw __phys_to_dma
 * and __dma_to_phys versions should only be used on non-encrypted memory for
 * special occasions like DMA coherent buffers.
 */
static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
{
	return __sme_set(__phys_to_dma(dev, paddr));
}

static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr)
{
	return __sme_clr(__dma_to_phys(dev, daddr));
}
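
/*
 * Usage sketch (hypothetical caller; assumes "buf" came from kmalloc()
 * and that cache coherency is handled elsewhere):
 *
 *	phys_addr_t phys = virt_to_phys(buf);
 *	dma_addr_t dev_addr = phys_to_dma(dev, phys);
 *
 * The translation round-trips: dma_to_phys(dev, dev_addr) == phys.
 */
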
u64 dma_direct_get_required_mask(struct device *dev);
void *dma_direct_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
		gfp_t gfp, unsigned long attrs);
void dma_direct_free(struct device *dev, size_t size, void *cpu_addr,
		dma_addr_t dma_addr, unsigned long attrs);
void *dma_direct_alloc_pages(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs);
void dma_direct_free_pages(struct device *dev, size_t size, void *cpu_addr,
		dma_addr_t dma_addr, unsigned long attrs);
struct page *__dma_direct_alloc_pages(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs);
void __dma_direct_free_pages(struct device *dev, size_t size, struct page *page);
int dma_direct_supported(struct device *dev, u64 mask);
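
/*
 * Allocation sketch (hypothetical; mirrors how dma_alloc_attrs() ends up
 * in dma-direct when no IOMMU is involved):
 *
 *	dma_addr_t dma_handle;
 *	void *cpu_addr = dma_direct_alloc(dev, size, &dma_handle,
 *					  GFP_KERNEL, 0);
 *	if (cpu_addr) {
 *		... use cpu_addr and dma_handle ...
 *		dma_direct_free(dev, size, cpu_addr, dma_handle, 0);
 *	}
 */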
#endif /* _LINUX_DMA_DIRECT_H */