blob: 72a7f03a59f4c44f0aef6f50f740f6b12df977ad [file] [log] [blame]
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001/* SPDX-License-Identifier: GPL-2.0 */
2#ifndef _LINUX_DAX_H
3#define _LINUX_DAX_H
4
5#include <linux/fs.h>
6#include <linux/mm.h>
7#include <linux/radix-tree.h>
8#include <asm/pgtable.h>
9
/* Flag for synchronous flush */
#define DAXDEV_F_SYNC (1UL << 0)

/* Opaque cookie handed out by dax_lock_page() and consumed by dax_unlock_page(). */
typedef unsigned long dax_entry_t;

struct iomap_ops;
struct dax_device;
/* Driver-provided operations backing a struct dax_device. */
struct dax_operations {
	/*
	 * direct_access: translate a device-relative
	 * logical-page-offset into an absolute physical pfn. Return the
	 * number of pages available for DAX at that pfn.
	 */
	long (*direct_access)(struct dax_device *, pgoff_t, long,
			void **, pfn_t *);
	/*
	 * Validate whether this device is usable as an fsdax backing
	 * device.
	 */
	bool (*dax_supported)(struct dax_device *, struct block_device *, int,
			sector_t, sector_t);
	/* copy_from_iter: required operation for fs-dax direct-i/o */
	size_t (*copy_from_iter)(struct dax_device *, pgoff_t, void *, size_t,
			struct iov_iter *);
	/* copy_to_iter: required operation for fs-dax direct-i/o */
	size_t (*copy_to_iter)(struct dax_device *, pgoff_t, void *, size_t,
			struct iov_iter *);
};

extern struct attribute_group dax_attribute_group;

/* Core dax_device lifecycle API, available when CONFIG_DAX is enabled. */
#if IS_ENABLED(CONFIG_DAX)
struct dax_device *dax_get_by_host(const char *host);
struct dax_device *alloc_dax(void *private, const char *host,
		const struct dax_operations *ops, unsigned long flags);
void put_dax(struct dax_device *dax_dev);
void kill_dax(struct dax_device *dax_dev);
void dax_write_cache(struct dax_device *dax_dev, bool wc);
bool dax_write_cache_enabled(struct dax_device *dax_dev);
bool __dax_synchronous(struct dax_device *dax_dev);
/*
 * Thin wrapper around __dax_synchronous(); presumably reports whether the
 * device carries DAXDEV_F_SYNC (see flag above) — NOTE(review): confirm
 * against the out-of-line definition.
 */
static inline bool dax_synchronous(struct dax_device *dax_dev)
{
	return __dax_synchronous(dax_dev);
}
void __set_dax_synchronous(struct dax_device *dax_dev);
/* Mark the device synchronous; thin wrapper around __set_dax_synchronous(). */
static inline void set_dax_synchronous(struct dax_device *dax_dev)
{
	__set_dax_synchronous(dax_dev);
}
bool dax_supported(struct dax_device *dax_dev, struct block_device *bdev,
		int blocksize, sector_t start, sector_t len);
/*
 * Check if given mapping is supported by the file / underlying device.
 */
64static inline bool daxdev_mapping_supported(struct vm_area_struct *vma,
65 struct dax_device *dax_dev)
66{
67 if (!(vma->vm_flags & VM_SYNC))
68 return true;
69 if (!IS_DAX(file_inode(vma->vm_file)))
70 return false;
71 return dax_synchronous(dax_dev);
72}
Andrew Scullb4b6d4a2019-01-02 15:54:55 +000073#else
/* CONFIG_DAX=n stub: no dax devices exist. */
static inline struct dax_device *dax_get_by_host(const char *host)
{
	return NULL;
}
/* CONFIG_DAX=n stub. */
static inline struct dax_device *alloc_dax(void *private, const char *host,
		const struct dax_operations *ops, unsigned long flags)
{
	/*
	 * Callers should check IS_ENABLED(CONFIG_DAX) to know if this
	 * NULL is an error or expected.
	 */
	return NULL;
}
/* CONFIG_DAX=n stub: nothing to release. */
static inline void put_dax(struct dax_device *dax_dev)
{
}
/* CONFIG_DAX=n stub: nothing to kill. */
static inline void kill_dax(struct dax_device *dax_dev)
{
}
/* CONFIG_DAX=n stub: write-cache control is a no-op. */
static inline void dax_write_cache(struct dax_device *dax_dev, bool wc)
{
}
/* CONFIG_DAX=n stub: no write cache to report. */
static inline bool dax_write_cache_enabled(struct dax_device *dax_dev)
{
	return false;
}
/* CONFIG_DAX=n stub: vacuously synchronous (there is no device to flush). */
static inline bool dax_synchronous(struct dax_device *dax_dev)
{
	return true;
}
/* CONFIG_DAX=n stub: nothing to mark. */
static inline void set_dax_synchronous(struct dax_device *dax_dev)
{
}
/* CONFIG_DAX=n stub: nothing can be DAX-capable. */
static inline bool dax_supported(struct dax_device *dax_dev,
		struct block_device *bdev, int blocksize, sector_t start,
		sector_t len)
{
	return false;
}
David Brazdil0f672f62019-12-10 10:32:29 +0000113static inline bool daxdev_mapping_supported(struct vm_area_struct *vma,
114 struct dax_device *dax_dev)
115{
116 return !(vma->vm_flags & VM_SYNC);
117}
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000118#endif
119
struct writeback_control;
/*
 * Presumably translates a sector range on @bdev into a page offset into the
 * backing dax device — NOTE(review): confirm against the definition.
 */
int bdev_dax_pgoff(struct block_device *, sector_t, size_t, pgoff_t *pgoff);
#if IS_ENABLED(CONFIG_FS_DAX)
bool __bdev_dax_supported(struct block_device *bdev, int blocksize);
/* Thin wrapper around __bdev_dax_supported(). */
static inline bool bdev_dax_supported(struct block_device *bdev, int blocksize)
{
	return __bdev_dax_supported(bdev, blocksize);
}
128
bool __generic_fsdax_supported(struct dax_device *dax_dev,
		struct block_device *bdev, int blocksize, sector_t start,
		sector_t sectors);
/* Thin wrapper around __generic_fsdax_supported(). */
static inline bool generic_fsdax_supported(struct dax_device *dax_dev,
		struct block_device *bdev, int blocksize, sector_t start,
		sector_t sectors)
{
	return __generic_fsdax_supported(dax_dev, bdev, blocksize, start,
			sectors);
}
139
/* fs-dax alias for dax_get_by_host(). */
static inline struct dax_device *fs_dax_get_by_host(const char *host)
{
	return dax_get_by_host(host);
}
144
/* fs-dax alias for put_dax(). */
static inline void fs_put_dax(struct dax_device *dax_dev)
{
	put_dax(dax_dev);
}
149
struct dax_device *fs_dax_get_by_bdev(struct block_device *bdev);
int dax_writeback_mapping_range(struct address_space *mapping,
		struct block_device *bdev, struct writeback_control *wbc);

struct page *dax_layout_busy_page(struct address_space *mapping);
/* dax_lock_page() returns a cookie; pass it back to dax_unlock_page(). */
dax_entry_t dax_lock_page(struct page *page);
void dax_unlock_page(struct page *page, dax_entry_t cookie);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000157#else
/* CONFIG_FS_DAX=n stub: no block device supports fs-dax. */
static inline bool bdev_dax_supported(struct block_device *bdev,
		int blocksize)
{
	return false;
}
163
/* CONFIG_FS_DAX=n stub. */
static inline bool generic_fsdax_supported(struct dax_device *dax_dev,
		struct block_device *bdev, int blocksize, sector_t start,
		sector_t sectors)
{
	return false;
}
170
/* CONFIG_FS_DAX=n stub. */
static inline struct dax_device *fs_dax_get_by_host(const char *host)
{
	return NULL;
}
175
/* CONFIG_FS_DAX=n stub: nothing to release. */
static inline void fs_put_dax(struct dax_device *dax_dev)
{
}
179
/* CONFIG_FS_DAX=n stub. */
static inline struct dax_device *fs_dax_get_by_bdev(struct block_device *bdev)
{
	return NULL;
}
184
/* CONFIG_FS_DAX=n stub: no DAX pages can be busy. */
static inline struct page *dax_layout_busy_page(struct address_space *mapping)
{
	return NULL;
}
189
/* CONFIG_FS_DAX=n stub: writeback of DAX entries is unsupported. */
static inline int dax_writeback_mapping_range(struct address_space *mapping,
		struct block_device *bdev, struct writeback_control *wbc)
{
	return -EOPNOTSUPP;
}
195
David Brazdil0f672f62019-12-10 10:32:29 +0000196static inline dax_entry_t dax_lock_page(struct page *page)
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000197{
198 if (IS_DAX(page->mapping->host))
David Brazdil0f672f62019-12-10 10:32:29 +0000199 return ~0UL;
200 return 0;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000201}
202
/* CONFIG_FS_DAX=n stub: nothing was locked, so nothing to unlock. */
static inline void dax_unlock_page(struct page *page, dax_entry_t cookie)
{
}
206#endif
207
#if IS_ENABLED(CONFIG_DAX)
/* dax_read_lock() returns an id to pass back to dax_read_unlock(). */
int dax_read_lock(void);
void dax_read_unlock(int id);
Olivier Deprez0e641232021-09-23 10:07:05 +0200211#else
/* CONFIG_DAX=n stub: dummy lock id. */
static inline int dax_read_lock(void)
{
	return 0;
}
216
/* CONFIG_DAX=n stub: nothing to unlock. */
static inline void dax_read_unlock(int id)
{
}
220#endif /* CONFIG_DAX */
bool dax_alive(struct dax_device *dax_dev);
void *dax_get_private(struct dax_device *dax_dev);
/* The following three mirror the corresponding struct dax_operations hooks. */
long dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff, long nr_pages,
		void **kaddr, pfn_t *pfn);
size_t dax_copy_from_iter(struct dax_device *dax_dev, pgoff_t pgoff, void *addr,
		size_t bytes, struct iov_iter *i);
size_t dax_copy_to_iter(struct dax_device *dax_dev, pgoff_t pgoff, void *addr,
		size_t bytes, struct iov_iter *i);
void dax_flush(struct dax_device *dax_dev, void *addr, size_t size);

ssize_t dax_iomap_rw(struct kiocb *iocb, struct iov_iter *iter,
		const struct iomap_ops *ops);
vm_fault_t dax_iomap_fault(struct vm_fault *vmf, enum page_entry_size pe_size,
		pfn_t *pfnp, int *errp, const struct iomap_ops *ops);
vm_fault_t dax_finish_sync_fault(struct vm_fault *vmf,
		enum page_entry_size pe_size, pfn_t pfn);
int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index);
int dax_invalidate_mapping_entry_sync(struct address_space *mapping,
		pgoff_t index);
240
#ifdef CONFIG_FS_DAX
int __dax_zero_page_range(struct block_device *bdev,
		struct dax_device *dax_dev, sector_t sector,
		unsigned int offset, unsigned int length);
245#else
/* CONFIG_FS_DAX=n stub: zeroing through DAX is unavailable. */
static inline int __dax_zero_page_range(struct block_device *bdev,
		struct dax_device *dax_dev, sector_t sector,
		unsigned int offset, unsigned int length)
{
	return -ENXIO;
}
252#endif
253
254static inline bool dax_mapping(struct address_space *mapping)
255{
256 return mapping->host && IS_DAX(mapping->host);
257}
258
259#endif