// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#ifndef __XFS_BUF_H__
#define __XFS_BUF_H__

#include <linux/list.h>
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/dax.h>
#include <linux/uio.h>
#include <linux/list_lru.h>

/*
 * Base types
 */
struct xfs_buf;

#define XFS_BUF_DADDR_NULL	((xfs_daddr_t) (-1LL))

#define XBF_READ	(1 << 0) /* buffer intended for reading from device */
#define XBF_WRITE	(1 << 1) /* buffer intended for writing to device */
#define XBF_READ_AHEAD	(1 << 2) /* asynchronous read-ahead */
#define XBF_NO_IOACCT	(1 << 3) /* bypass I/O accounting (non-LRU bufs) */
#define XBF_ASYNC	(1 << 4) /* initiator will not wait for completion */
#define XBF_DONE	(1 << 5) /* all pages in the buffer uptodate */
#define XBF_STALE	(1 << 6) /* buffer has been staled, do not find it */
#define XBF_WRITE_FAIL	(1 << 7) /* async writes have failed on this buffer */

/* buffer type flags for write callbacks */
#define _XBF_INODES	(1 << 16) /* inode buffer */
#define _XBF_DQUOTS	(1 << 17) /* dquot buffer */
#define _XBF_LOGRECOVERY	(1 << 18) /* log recovery buffer */

/* flags used only internally */
#define _XBF_PAGES	(1 << 20) /* backed by refcounted pages */
#define _XBF_KMEM	(1 << 21) /* backed by heap memory */
#define _XBF_DELWRI_Q	(1 << 22) /* buffer on a delwri queue */

/* flags used only as arguments to access routines */
#define XBF_TRYLOCK	(1 << 30) /* lock requested, but do not wait */
#define XBF_UNMAPPED	(1 << 31) /* do not map the buffer */

typedef unsigned int xfs_buf_flags_t;

#define XFS_BUF_FLAGS \
	{ XBF_READ,		"READ" }, \
	{ XBF_WRITE,		"WRITE" }, \
	{ XBF_READ_AHEAD,	"READ_AHEAD" }, \
	{ XBF_NO_IOACCT,	"NO_IOACCT" }, \
	{ XBF_ASYNC,		"ASYNC" }, \
	{ XBF_DONE,		"DONE" }, \
	{ XBF_STALE,		"STALE" }, \
	{ XBF_WRITE_FAIL,	"WRITE_FAIL" }, \
	{ _XBF_INODES,		"INODES" }, \
	{ _XBF_DQUOTS,		"DQUOTS" }, \
	{ _XBF_LOGRECOVERY,	"LOG_RECOVERY" }, \
	{ _XBF_PAGES,		"PAGES" }, \
	{ _XBF_KMEM,		"KMEM" }, \
	{ _XBF_DELWRI_Q,	"DELWRI_Q" }, \
	/* The following interface flags should never be set */ \
	{ XBF_TRYLOCK,		"TRYLOCK" }, \
	{ XBF_UNMAPPED,		"UNMAPPED" }
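/*
 * Illustrative note (a sketch, not copied from the tracepoint
 * definitions): the XFS_BUF_FLAGS table above is laid out as a
 * flag-name table for the tracing code's flag decoder, so a
 * tracepoint can render b_flags symbolically with something like:
 *
 *	__print_flags(bp->b_flags, "|", XFS_BUF_FLAGS)
 *
 * turning, e.g., XBF_READ | XBF_ASYNC into "READ|ASYNC".
 */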

/*
 * Internal state flags.
 */
#define XFS_BSTATE_DISPOSE	(1 << 0)	/* buffer being discarded */
#define XFS_BSTATE_IN_FLIGHT	(1 << 1)	/* I/O in flight */

/*
 * The xfs_buftarg contains 2 notions of "sector size" -
 *
 * 1) The metadata sector size, which is the minimum unit and
 *    alignment of IO which will be performed by metadata operations.
 * 2) The device logical sector size
 *
 * The first is specified at mkfs time, and is stored on-disk in the
 * superblock's sb_sectsize.
 *
 * The latter is derived from the underlying device, and controls direct IO
 * alignment constraints.
 */
typedef struct xfs_buftarg {
	dev_t			bt_dev;
	struct block_device	*bt_bdev;
	struct dax_device	*bt_daxdev;
	struct xfs_mount	*bt_mount;
	unsigned int		bt_meta_sectorsize;
	size_t			bt_meta_sectormask;
	size_t			bt_logical_sectorsize;
	size_t			bt_logical_sectormask;

	/* LRU control structures */
	struct shrinker		bt_shrinker;
	struct list_lru		bt_lru;

	struct percpu_counter	bt_io_count;
	struct ratelimit_state	bt_ioerror_rl;
} xfs_buftarg_t;
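
/*
 * A minimal sketch (this helper is not part of the original header and
 * the name is hypothetical): metadata I/O must be aligned to
 * bt_meta_sectorsize, and the mask fields above hold sectorsize - 1,
 * so an alignment check reduces to a single AND:
 */
static inline bool
xfs_buftarg_meta_aligned(struct xfs_buftarg *bt, size_t offset)
{
	/* any bits set below the metadata sector size mean misalignment */
	return (offset & bt->bt_meta_sectormask) == 0;
}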

#define XB_PAGES	2

struct xfs_buf_map {
	xfs_daddr_t		bm_bn;	/* block number for I/O */
	int			bm_len;	/* size of I/O */
};

#define DEFINE_SINGLE_BUF_MAP(map, blkno, numblk) \
	struct xfs_buf_map (map) = { .bm_bn = (blkno), .bm_len = (numblk) };

struct xfs_buf_ops {
	char *name;
	union {
		__be32 magic[2];	/* v4 and v5 on disk magic values */
		__be16 magic16[2];	/* v4 and v5 on disk magic values */
	};
	void (*verify_read)(struct xfs_buf *);
	void (*verify_write)(struct xfs_buf *);
	xfs_failaddr_t (*verify_struct)(struct xfs_buf *bp);
};
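
/*
 * Sketch of how a verifier table is wired up (hypothetical names and
 * magic values; real tables such as xfs_sb_buf_ops live in libxfs).
 * The magic array carries both the v4 and v5 on-disk magic numbers so
 * xfs_verify_magic() below can match either format:
 *
 *	static const struct xfs_buf_ops xfs_example_buf_ops = {
 *		.name		= "example",
 *		.magic		= { cpu_to_be32(XFS_EXAMPLE_MAGIC),
 *				    cpu_to_be32(XFS_EXAMPLE_CRC_MAGIC) },
 *		.verify_read	= xfs_example_read_verify,
 *		.verify_write	= xfs_example_write_verify,
 *	};
 */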

typedef struct xfs_buf {
	/*
	 * The first cacheline holds all the fields needed for an uncontended
	 * cache hit to be fully processed. The semaphore straddles the
	 * cacheline boundary, but the counter and lock sit on the first
	 * cacheline, which is the only bit that is touched if we hit the
	 * semaphore fast-path on locking.
	 */
	struct rhash_head	b_rhash_head;	/* pag buffer hash node */
	xfs_daddr_t		b_bn;		/* block number of buffer */
	int			b_length;	/* size of buffer in BBs */
	atomic_t		b_hold;		/* reference count */
	atomic_t		b_lru_ref;	/* lru reclaim ref count */
	xfs_buf_flags_t		b_flags;	/* status flags */
	struct semaphore	b_sema;		/* semaphore for lockables */

	/*
	 * Concurrent access to b_lru and b_lru_flags is protected by
	 * bt_lru_lock and not by b_sema.
	 */
	struct list_head	b_lru;		/* lru list */
	spinlock_t		b_lock;		/* internal state lock */
	unsigned int		b_state;	/* internal state flags */
	int			b_io_error;	/* internal IO error state */
	wait_queue_head_t	b_waiters;	/* unpin waiters */
	struct list_head	b_list;
	struct xfs_perag	*b_pag;		/* contains rbtree root */
	struct xfs_mount	*b_mount;
	xfs_buftarg_t		*b_target;	/* buffer target (device) */
	void			*b_addr;	/* virtual address of buffer */
	struct work_struct	b_ioend_work;
	struct completion	b_iowait;	/* queue for I/O waiters */
	struct xfs_buf_log_item	*b_log_item;
	struct list_head	b_li_list;	/* Log items list head */
	struct xfs_trans	*b_transp;
	struct page		**b_pages;	/* array of page pointers */
	struct page		*b_page_array[XB_PAGES]; /* inline pages */
	struct xfs_buf_map	*b_maps;	/* compound buffer map */
	struct xfs_buf_map	__b_map;	/* inline compound buffer map */
	int			b_map_count;
	atomic_t		b_pin_count;	/* pin count */
	atomic_t		b_io_remaining;	/* #outstanding I/O requests */
	unsigned int		b_page_count;	/* size of page array */
	unsigned int		b_offset;	/* page offset in first page */
	int			b_error;	/* error code on I/O */

	/*
	 * Async write failure retry count. Initialised to zero on the first
	 * failure, then when it exceeds the maximum configured without a
	 * success the write is considered to be failed permanently and the
	 * iodone handler will take appropriate action.
	 *
	 * For retry timeouts, we record the jiffie of the first failure. This
	 * means that we can change the retry timeout for buffers already under
	 * I/O and thus avoid getting stuck in a retry loop with a long timeout.
	 *
	 * b_last_error is used to ensure that we are getting repeated errors,
	 * not different errors. e.g. a block device might change ENOSPC to EIO
	 * when a failure timeout occurs, so we want to re-initialise the error
	 * retry behaviour appropriately when that happens. A sketch of how
	 * these fields combine follows this structure definition.
	 */
	int			b_retries;
	unsigned long		b_first_retry_time; /* in jiffies */
	int			b_last_error;

	const struct xfs_buf_ops	*b_ops;
} xfs_buf_t;
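
/*
 * Sketch of how the retry fields above combine (comment-only example;
 * the real policy lives in the buffer I/O error handling in xfs_buf.c
 * and is driven by the per-mount error configuration). A failed async
 * write is retried until either limit is exhausted, and the counters
 * are reset when the error changes:
 *
 *	if (bp->b_last_error != bp->b_error)
 *		reset b_retries and b_first_retry_time;	// new error type
 *	if (max_retries set && ++bp->b_retries > max_retries)
 *		fail permanently;
 *	if (retry_timeout set &&
 *	    time_after(jiffies, bp->b_first_retry_time + retry_timeout))
 *		fail permanently;
 */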

/* Finding and Reading Buffers */
struct xfs_buf *xfs_buf_incore(struct xfs_buftarg *target,
			   xfs_daddr_t blkno, size_t numblks,
			   xfs_buf_flags_t flags);

int xfs_buf_get_map(struct xfs_buftarg *target, struct xfs_buf_map *map,
		int nmaps, xfs_buf_flags_t flags, struct xfs_buf **bpp);
int xfs_buf_read_map(struct xfs_buftarg *target, struct xfs_buf_map *map,
		int nmaps, xfs_buf_flags_t flags, struct xfs_buf **bpp,
		const struct xfs_buf_ops *ops, xfs_failaddr_t fa);
void xfs_buf_readahead_map(struct xfs_buftarg *target,
			       struct xfs_buf_map *map, int nmaps,
			       const struct xfs_buf_ops *ops);

static inline int
xfs_buf_get(
	struct xfs_buftarg	*target,
	xfs_daddr_t		blkno,
	size_t			numblks,
	struct xfs_buf		**bpp)
{
	DEFINE_SINGLE_BUF_MAP(map, blkno, numblks);

	return xfs_buf_get_map(target, &map, 1, 0, bpp);
}

static inline int
xfs_buf_read(
	struct xfs_buftarg	*target,
	xfs_daddr_t		blkno,
	size_t			numblks,
	xfs_buf_flags_t		flags,
	struct xfs_buf		**bpp,
	const struct xfs_buf_ops *ops)
{
	DEFINE_SINGLE_BUF_MAP(map, blkno, numblks);

	return xfs_buf_read_map(target, &map, 1, flags, bpp, ops,
			__builtin_return_address(0));
}
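
/*
 * Typical cached-read pattern (illustrative only; "mp" stands in for a
 * real struct xfs_mount and the verifier table name is hypothetical):
 *
 *	struct xfs_buf	*bp;
 *	int		error;
 *
 *	error = xfs_buf_read(mp->m_ddev_targp, blkno, numblks, 0, &bp,
 *			&xfs_example_buf_ops);
 *	if (error)
 *		return error;
 *	// ... read metadata through bp->b_addr ...
 *	xfs_buf_relse(bp);	// unlock and drop the reference
 */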

static inline void
xfs_buf_readahead(
	struct xfs_buftarg	*target,
	xfs_daddr_t		blkno,
	size_t			numblks,
	const struct xfs_buf_ops *ops)
{
	DEFINE_SINGLE_BUF_MAP(map, blkno, numblks);
	xfs_buf_readahead_map(target, &map, 1, ops);
}

int xfs_buf_get_uncached(struct xfs_buftarg *target, size_t numblks, int flags,
		struct xfs_buf **bpp);
int xfs_buf_read_uncached(struct xfs_buftarg *target, xfs_daddr_t daddr,
			  size_t numblks, int flags, struct xfs_buf **bpp,
			  const struct xfs_buf_ops *ops);
int _xfs_buf_read(struct xfs_buf *bp, xfs_buf_flags_t flags);
void xfs_buf_hold(struct xfs_buf *bp);

/* Releasing Buffers */
extern void xfs_buf_rele(xfs_buf_t *);

/* Locking and Unlocking Buffers */
extern int xfs_buf_trylock(xfs_buf_t *);
extern void xfs_buf_lock(xfs_buf_t *);
extern void xfs_buf_unlock(xfs_buf_t *);
#define xfs_buf_islocked(bp) \
	((bp)->b_sema.count <= 0)

static inline void xfs_buf_relse(xfs_buf_t *bp)
{
	xfs_buf_unlock(bp);
	xfs_buf_rele(bp);
}

/* Buffer Read and Write Routines */
extern int xfs_bwrite(struct xfs_buf *bp);

extern void __xfs_buf_ioerror(struct xfs_buf *bp, int error,
		xfs_failaddr_t failaddr);
#define xfs_buf_ioerror(bp, err) __xfs_buf_ioerror((bp), (err), __this_address)
extern void xfs_buf_ioerror_alert(struct xfs_buf *bp, xfs_failaddr_t fa);
void xfs_buf_ioend_fail(struct xfs_buf *);
void xfs_buf_zero(struct xfs_buf *bp, size_t boff, size_t bsize);
void __xfs_buf_mark_corrupt(struct xfs_buf *bp, xfs_failaddr_t fa);
#define xfs_buf_mark_corrupt(bp) __xfs_buf_mark_corrupt((bp), __this_address)

/* Buffer Utility Routines */
extern void *xfs_buf_offset(struct xfs_buf *, size_t);
extern void xfs_buf_stale(struct xfs_buf *bp);

/* Delayed Write Buffer Routines */
extern void xfs_buf_delwri_cancel(struct list_head *);
extern bool xfs_buf_delwri_queue(struct xfs_buf *, struct list_head *);
extern int xfs_buf_delwri_submit(struct list_head *);
extern int xfs_buf_delwri_submit_nowait(struct list_head *);
extern int xfs_buf_delwri_pushbuf(struct xfs_buf *, struct list_head *);
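
/*
 * Illustrative delwri usage (a sketch of the common pattern): callers
 * batch dirty buffers on a private list and submit them in one pass.
 *
 *	LIST_HEAD(buffer_list);
 *
 *	// for each modified buffer:
 *	xfs_buf_delwri_queue(bp, &buffer_list);
 *
 *	error = xfs_buf_delwri_submit(&buffer_list);
 *
 * xfs_buf_delwri_submit() waits for the I/O to complete and returns
 * the first error encountered, while the _nowait variant issues the
 * I/O asynchronously and returns immediately.
 */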

/* Buffer Daemon Setup Routines */
extern int xfs_buf_init(void);
extern void xfs_buf_terminate(void);

/*
 * These macros use the IO block map rather than b_bn. b_bn is now really
 * just for the buffer cache index for cached buffers. As IO does not use b_bn
 * anymore, uncached buffers do not use b_bn at all and hence must modify the IO
 * map directly. Uncached buffers are not allowed to be discontiguous, so this
 * is safe to do.
 *
 * In future, uncached buffers will pass the block number directly to the io
 * request function and hence these macros will go away at that point.
 */
#define XFS_BUF_ADDR(bp)		((bp)->b_maps[0].bm_bn)
#define XFS_BUF_SET_ADDR(bp, bno)	((bp)->b_maps[0].bm_bn = (xfs_daddr_t)(bno))
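
/*
 * Sketch of the uncached pattern the comment above describes (loosely
 * modelled on xfs_buf_read_uncached() in xfs_buf.c; names illustrative):
 * uncached buffers keep XFS_BUF_DADDR_NULL in b_bn, since they are never
 * indexed in the cache, and set the real disk address through the map:
 *
 *	xfs_buf_get_uncached(target, numblks, flags, &bp);
 *	ASSERT(bp->b_map_count == 1);
 *	bp->b_bn = XFS_BUF_DADDR_NULL;	// never used for cache lookup
 *	XFS_BUF_SET_ADDR(bp, daddr);	// I/O goes to the map address
 */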

void xfs_buf_set_ref(struct xfs_buf *bp, int lru_ref);

/*
 * If the buffer is already on the LRU, do nothing. Otherwise set the buffer
 * up with a reference count of 0 so it will be tossed from the cache when
 * released.
 */
static inline void xfs_buf_oneshot(struct xfs_buf *bp)
{
	if (!list_empty(&bp->b_lru) || atomic_read(&bp->b_lru_ref) > 1)
		return;
	atomic_set(&bp->b_lru_ref, 0);
}

static inline int xfs_buf_ispinned(struct xfs_buf *bp)
{
	return atomic_read(&bp->b_pin_count);
}

static inline int
xfs_buf_verify_cksum(struct xfs_buf *bp, unsigned long cksum_offset)
{
	return xfs_verify_cksum(bp->b_addr, BBTOB(bp->b_length),
				cksum_offset);
}

static inline void
xfs_buf_update_cksum(struct xfs_buf *bp, unsigned long cksum_offset)
{
	xfs_update_cksum(bp->b_addr, BBTOB(bp->b_length),
			 cksum_offset);
}

/*
 * Handling of buftargs.
 */
extern xfs_buftarg_t *xfs_alloc_buftarg(struct xfs_mount *,
			struct block_device *, struct dax_device *);
extern void xfs_free_buftarg(struct xfs_buftarg *);
extern void xfs_wait_buftarg(xfs_buftarg_t *);
extern int xfs_setsize_buftarg(xfs_buftarg_t *, unsigned int);

#define xfs_getsize_buftarg(buftarg)	block_size((buftarg)->bt_bdev)
#define xfs_readonly_buftarg(buftarg)	bdev_read_only((buftarg)->bt_bdev)

static inline int
xfs_buftarg_dma_alignment(struct xfs_buftarg *bt)
{
	return queue_dma_alignment(bt->bt_bdev->bd_disk->queue);
}

int xfs_buf_reverify(struct xfs_buf *bp, const struct xfs_buf_ops *ops);
bool xfs_verify_magic(struct xfs_buf *bp, __be32 dmagic);
bool xfs_verify_magic16(struct xfs_buf *bp, __be16 dmagic);

#endif /* __XFS_BUF_H__ */