/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (c) 2006, Intel Corporation.
 *
 * Copyright (C) 2006-2008 Intel Corporation
 * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 */

#ifndef _IOVA_H_
#define _IOVA_H_

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/rbtree.h>
#include <linux/atomic.h>
#include <linux/dma-mapping.h>

/* iova structure */
struct iova {
	struct rb_node	node;
	unsigned long	pfn_hi; /* Highest allocated pfn */
	unsigned long	pfn_lo; /* Lowest allocated pfn */
};

struct iova_magazine;
struct iova_cpu_rcache;

#define IOVA_RANGE_CACHE_MAX_SIZE 6	/* log of max cached IOVA range size (in pages) */
#define MAX_GLOBAL_MAGS 32	/* magazines per bin */

struct iova_rcache {
	spinlock_t lock;
	unsigned long depot_size;
	struct iova_magazine *depot[MAX_GLOBAL_MAGS];
	struct iova_cpu_rcache __percpu *cpu_rcaches;
};
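
/*
 * Sketch of how an allocation size maps to a cache bin (illustrative
 * only; the real helpers live in drivers/iommu/iova.c). Ranges of up
 * to 2^(IOVA_RANGE_CACHE_MAX_SIZE - 1) pages are cached, with one
 * iova_rcache per power-of-two size class:
 *
 *	unsigned int log_size = order_base_2(size);
 *
 *	if (log_size < IOVA_RANGE_CACHE_MAX_SIZE)
 *		rcache = &iovad->rcaches[log_size];
 *
 * Each rcache holds per-CPU magazines, backed by a global depot of at
 * most MAX_GLOBAL_MAGS full magazines protected by 'lock'.
 */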

struct iova_domain;

/* Call-Back from IOVA code into IOMMU drivers */
typedef void (* iova_flush_cb)(struct iova_domain *domain);

/* Destructor for per-entry data */
typedef void (* iova_entry_dtor)(unsigned long data);
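
/*
 * Illustrative callback pair a driver might hand to
 * init_iova_flush_queue() (hypothetical names, sketch only):
 *
 *	static void my_flush_cb(struct iova_domain *iovad)
 *	{
 *		... issue a domain-wide IOTLB flush ...
 *	}
 *
 *	static void my_entry_dtor(unsigned long data)
 *	{
 *		... release the driver state stashed in 'data' ...
 *	}
 */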

/* Number of entries per Flush Queue */
#define IOVA_FQ_SIZE	256

/* Timeout (in ms) after which entries are flushed from the Flush-Queue */
#define IOVA_FQ_TIMEOUT	10

/* Flush Queue entry for deferred flushing */
struct iova_fq_entry {
	unsigned long iova_pfn;
	unsigned long pages;
	unsigned long data;
	u64 counter; /* Flush counter when this entry was added */
};

/* Per-CPU Flush Queue structure */
struct iova_fq {
	struct iova_fq_entry entries[IOVA_FQ_SIZE];
	unsigned head, tail;
	spinlock_t lock;
};
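
/*
 * Sketch of the ring discipline (illustrative only; the real logic is
 * in drivers/iommu/iova.c): entries are pushed at 'tail' and reclaimed
 * from 'head', both wrapping modulo IOVA_FQ_SIZE:
 *
 *	fq->entries[fq->tail].counter =
 *		atomic64_read(&iovad->fq_flush_start_cnt);
 *	fq->tail = (fq->tail + 1) % IOVA_FQ_SIZE;
 *
 * An entry may be freed once fq_flush_finish_cnt has caught up with the
 * counter recorded when the entry was queued, i.e. once the IOTLB flush
 * that covers it has completed.
 */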

/* holds all the iova translations for a domain */
struct iova_domain {
	spinlock_t	iova_rbtree_lock; /* Lock to protect update of rbtree */
	struct rb_root	rbroot;		/* iova domain rbtree root */
	struct rb_node	*cached_node;	/* Save last allocated node */
	struct rb_node	*cached32_node; /* Save last 32-bit allocated node */
	unsigned long	granule;	/* pfn granularity for this domain */
	unsigned long	start_pfn;	/* Lower limit for this domain */
	unsigned long	dma_32bit_pfn;
	unsigned long	max32_alloc_size; /* Size of last failed allocation */
	struct iova_fq __percpu *fq;	/* Flush Queue */

	atomic64_t	fq_flush_start_cnt;	/* Number of TLB flushes that
						   have been started */

	atomic64_t	fq_flush_finish_cnt;	/* Number of TLB flushes that
						   have been finished */

	struct iova	anchor;		/* rbtree lookup anchor */
	struct iova_rcache rcaches[IOVA_RANGE_CACHE_MAX_SIZE];	/* IOVA range caches */

	iova_flush_cb	flush_cb;	/* Call-Back function to flush IOMMU
					   TLBs */

	iova_entry_dtor entry_dtor;	/* IOMMU driver specific destructor for
					   iova entry */

	struct timer_list fq_timer;		/* Timer to regularly empty the
						   flush-queues */
	atomic_t fq_timer_on;			/* 1 when timer is active, 0
						   when not */
};

static inline unsigned long iova_size(struct iova *iova)
{
	return iova->pfn_hi - iova->pfn_lo + 1;
}

static inline unsigned long iova_shift(struct iova_domain *iovad)
{
	return __ffs(iovad->granule);
}

static inline unsigned long iova_mask(struct iova_domain *iovad)
{
	return iovad->granule - 1;
}

static inline size_t iova_offset(struct iova_domain *iovad, dma_addr_t iova)
{
	return iova & iova_mask(iovad);
}

static inline size_t iova_align(struct iova_domain *iovad, size_t size)
{
	return ALIGN(size, iovad->granule);
}

static inline dma_addr_t iova_dma_addr(struct iova_domain *iovad, struct iova *iova)
{
	return (dma_addr_t)iova->pfn_lo << iova_shift(iovad);
}

static inline unsigned long iova_pfn(struct iova_domain *iovad, dma_addr_t iova)
{
	return iova >> iova_shift(iovad);
}
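
/*
 * Worked example (illustrative only): with a 4 KiB granule,
 * iova_shift() is 12 and iova_mask() is 0xfff, so for the DMA address
 * 0x12345678:
 *
 *	iova_pfn(iovad, 0x12345678)	== 0x12345
 *	iova_offset(iovad, 0x12345678)	== 0x678
 *	iova_align(iovad, 0x1001)	== 0x2000
 */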

#if IS_ENABLED(CONFIG_IOMMU_IOVA)
int iova_cache_get(void);
void iova_cache_put(void);

struct iova *alloc_iova_mem(void);
void free_iova_mem(struct iova *iova);
void free_iova(struct iova_domain *iovad, unsigned long pfn);
void __free_iova(struct iova_domain *iovad, struct iova *iova);
struct iova *alloc_iova(struct iova_domain *iovad, unsigned long size,
	unsigned long limit_pfn,
	bool size_aligned);
void free_iova_fast(struct iova_domain *iovad, unsigned long pfn,
		    unsigned long size);
void queue_iova(struct iova_domain *iovad,
		unsigned long pfn, unsigned long pages,
		unsigned long data);
unsigned long alloc_iova_fast(struct iova_domain *iovad, unsigned long size,
			      unsigned long limit_pfn, bool flush_rcache);
struct iova *reserve_iova(struct iova_domain *iovad, unsigned long pfn_lo,
	unsigned long pfn_hi);
void copy_reserved_iova(struct iova_domain *from, struct iova_domain *to);
void init_iova_domain(struct iova_domain *iovad, unsigned long granule,
	unsigned long start_pfn);
bool has_iova_flush_queue(struct iova_domain *iovad);
int init_iova_flush_queue(struct iova_domain *iovad,
			  iova_flush_cb flush_cb, iova_entry_dtor entry_dtor);
struct iova *find_iova(struct iova_domain *iovad, unsigned long pfn);
void put_iova_domain(struct iova_domain *iovad);
struct iova *split_and_remove_iova(struct iova_domain *iovad,
	struct iova *iova, unsigned long pfn_lo, unsigned long pfn_hi);
void free_cpu_cached_iovas(unsigned int cpu, struct iova_domain *iovad);
#else
static inline int iova_cache_get(void)
{
	return -ENOTSUPP;
}

static inline void iova_cache_put(void)
{
}

static inline struct iova *alloc_iova_mem(void)
{
	return NULL;
}

static inline void free_iova_mem(struct iova *iova)
{
}

static inline void free_iova(struct iova_domain *iovad, unsigned long pfn)
{
}

static inline void __free_iova(struct iova_domain *iovad, struct iova *iova)
{
}

static inline struct iova *alloc_iova(struct iova_domain *iovad,
				      unsigned long size,
				      unsigned long limit_pfn,
				      bool size_aligned)
{
	return NULL;
}

static inline void free_iova_fast(struct iova_domain *iovad,
				  unsigned long pfn,
				  unsigned long size)
{
}

static inline void queue_iova(struct iova_domain *iovad,
			      unsigned long pfn, unsigned long pages,
			      unsigned long data)
{
}

static inline unsigned long alloc_iova_fast(struct iova_domain *iovad,
					    unsigned long size,
					    unsigned long limit_pfn,
					    bool flush_rcache)
{
	return 0;
}

static inline struct iova *reserve_iova(struct iova_domain *iovad,
					unsigned long pfn_lo,
					unsigned long pfn_hi)
{
	return NULL;
}

static inline void copy_reserved_iova(struct iova_domain *from,
				      struct iova_domain *to)
{
}

static inline void init_iova_domain(struct iova_domain *iovad,
				    unsigned long granule,
				    unsigned long start_pfn)
{
}

static inline bool has_iova_flush_queue(struct iova_domain *iovad)
{
	return false;
}

static inline int init_iova_flush_queue(struct iova_domain *iovad,
					iova_flush_cb flush_cb,
					iova_entry_dtor entry_dtor)
{
	return -ENODEV;
}

static inline struct iova *find_iova(struct iova_domain *iovad,
				     unsigned long pfn)
{
	return NULL;
}

static inline void put_iova_domain(struct iova_domain *iovad)
{
}

static inline struct iova *split_and_remove_iova(struct iova_domain *iovad,
						 struct iova *iova,
						 unsigned long pfn_lo,
						 unsigned long pfn_hi)
{
	return NULL;
}

static inline void free_cpu_cached_iovas(unsigned int cpu,
					 struct iova_domain *iovad)
{
}
#endif
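
/*
 * Typical usage sketch (illustrative only; error handling elided and
 * the chosen granule/limit are arbitrary): take a reference on the
 * iova kmem_cache, set up a domain, then allocate and free ranges by
 * pfn:
 *
 *	struct iova_domain iovad;
 *	unsigned long pfn;
 *
 *	iova_cache_get();
 *	init_iova_domain(&iovad, SZ_4K, 1);
 *
 *	pfn = alloc_iova_fast(&iovad, 1,
 *			      DMA_BIT_MASK(32) >> iova_shift(&iovad), true);
 *	if (pfn)
 *		free_iova_fast(&iovad, pfn, 1);
 *
 *	put_iova_domain(&iovad);
 *	iova_cache_put();
 */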

#endif