blob: 0a8fced6aaec4834d9a2edda370f394f63c9808d [file] [log] [blame]
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001/* SPDX-License-Identifier: GPL-2.0 */
2#ifndef __LINUX_SWIOTLB_H
3#define __LINUX_SWIOTLB_H
4
5#include <linux/dma-direction.h>
6#include <linux/init.h>
7#include <linux/types.h>
Olivier Deprez0e641232021-09-23 10:07:05 +02008#include <linux/limits.h>
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00009
10struct device;
11struct page;
12struct scatterlist;
13
/*
 * Command-line policy for the software IOMMU bounce buffer
 * (swiotlb= kernel parameter).
 */
enum swiotlb_force {
	SWIOTLB_NORMAL,		/* default: decided by HW DMA mask etc. */
	SWIOTLB_FORCE,		/* swiotlb=force */
	SWIOTLB_NO_FORCE,	/* swiotlb=noforce */
};
19
/*
 * Upper bound on the number of contiguous slabs a single mapping may
 * span.  Must be a power of 2; note that the cost of {map,unmap}_single
 * grows linearly with this value, so the "right" number is a trade-off.
 */
#define IO_TLB_SEGSIZE	128
26
/*
 * log2 of the size of one IO TLB slab (i.e. each slab is 2KB).  The
 * total slab count is tunable from the command line.
 */
#define IO_TLB_SHIFT	11
32
33extern void swiotlb_init(int verbose);
34int swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose);
35extern unsigned long swiotlb_nr_tbl(void);
36unsigned long swiotlb_size_or_default(void);
37extern int swiotlb_late_init_with_tbl(char *tlb, unsigned long nslabs);
38extern void __init swiotlb_update_mem_attributes(void);
39
/*
 * Which side of a DMA transfer a sync operation targets.
 */
enum dma_sync_target {
	SYNC_FOR_CPU	= 0,
	SYNC_FOR_DEVICE	= 1,
};
47
Andrew Scullb4b6d4a2019-01-02 15:54:55 +000048extern phys_addr_t swiotlb_tbl_map_single(struct device *hwdev,
49 dma_addr_t tbl_dma_addr,
David Brazdil0f672f62019-12-10 10:32:29 +000050 phys_addr_t phys,
51 size_t mapping_size,
52 size_t alloc_size,
Andrew Scullb4b6d4a2019-01-02 15:54:55 +000053 enum dma_data_direction dir,
54 unsigned long attrs);
55
56extern void swiotlb_tbl_unmap_single(struct device *hwdev,
57 phys_addr_t tlb_addr,
David Brazdil0f672f62019-12-10 10:32:29 +000058 size_t mapping_size,
59 size_t alloc_size,
60 enum dma_data_direction dir,
Andrew Scullb4b6d4a2019-01-02 15:54:55 +000061 unsigned long attrs);
62
63extern void swiotlb_tbl_sync_single(struct device *hwdev,
64 phys_addr_t tlb_addr,
65 size_t size, enum dma_data_direction dir,
66 enum dma_sync_target target);
67
Andrew Scullb4b6d4a2019-01-02 15:54:55 +000068#ifdef CONFIG_SWIOTLB
David Brazdil0f672f62019-12-10 10:32:29 +000069extern enum swiotlb_force swiotlb_force;
70extern phys_addr_t io_tlb_start, io_tlb_end;
71
72static inline bool is_swiotlb_buffer(phys_addr_t paddr)
73{
74 return paddr >= io_tlb_start && paddr < io_tlb_end;
75}
76
77bool swiotlb_map(struct device *dev, phys_addr_t *phys, dma_addr_t *dma_addr,
78 size_t size, enum dma_data_direction dir, unsigned long attrs);
79void __init swiotlb_exit(void);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +000080unsigned int swiotlb_max_segment(void);
David Brazdil0f672f62019-12-10 10:32:29 +000081size_t swiotlb_max_mapping_size(struct device *dev);
82bool is_swiotlb_active(void);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +000083#else
David Brazdil0f672f62019-12-10 10:32:29 +000084#define swiotlb_force SWIOTLB_NO_FORCE
85static inline bool is_swiotlb_buffer(phys_addr_t paddr)
86{
87 return false;
88}
89static inline bool swiotlb_map(struct device *dev, phys_addr_t *phys,
90 dma_addr_t *dma_addr, size_t size, enum dma_data_direction dir,
91 unsigned long attrs)
92{
93 return false;
94}
95static inline void swiotlb_exit(void)
96{
97}
98static inline unsigned int swiotlb_max_segment(void)
99{
100 return 0;
101}
102static inline size_t swiotlb_max_mapping_size(struct device *dev)
103{
104 return SIZE_MAX;
105}
106
107static inline bool is_swiotlb_active(void)
108{
109 return false;
110}
111#endif /* CONFIG_SWIOTLB */
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000112
/* Diagnostics and segment-size override (available in both configs). */
extern void swiotlb_print_info(void);
/* Keep the parameter named, matching every other prototype in this file. */
extern void swiotlb_set_max_segment(unsigned int val);
115
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000116#endif /* __LINUX_SWIOTLB_H */