/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2001 Jens Axboe <axboe@suse.de>
 */
#ifndef __LINUX_BIO_H
#define __LINUX_BIO_H

#include <linux/highmem.h>
#include <linux/mempool.h>
#include <linux/ioprio.h>

#ifdef CONFIG_BLOCK
/* struct bio, bio_vec and BIO_* flags are defined in blk_types.h */
#include <linux/blk_types.h>

#define BIO_DEBUG

#ifdef BIO_DEBUG
#define BIO_BUG_ON	BUG_ON
#else
#define BIO_BUG_ON
#endif

#define BIO_MAX_PAGES		256

#define bio_prio(bio)			(bio)->bi_ioprio
#define bio_set_prio(bio, prio)		((bio)->bi_ioprio = prio)

#define bio_iter_iovec(bio, iter)				\
	bvec_iter_bvec((bio)->bi_io_vec, (iter))

#define bio_iter_page(bio, iter)				\
	bvec_iter_page((bio)->bi_io_vec, (iter))
#define bio_iter_len(bio, iter)					\
	bvec_iter_len((bio)->bi_io_vec, (iter))
#define bio_iter_offset(bio, iter)				\
	bvec_iter_offset((bio)->bi_io_vec, (iter))

#define bio_page(bio)		bio_iter_page((bio), (bio)->bi_iter)
#define bio_offset(bio)		bio_iter_offset((bio), (bio)->bi_iter)
#define bio_iovec(bio)		bio_iter_iovec((bio), (bio)->bi_iter)

#define bio_multiple_segments(bio)				\
	((bio)->bi_iter.bi_size != bio_iovec(bio).bv_len)

#define bvec_iter_sectors(iter)	((iter).bi_size >> 9)
#define bvec_iter_end_sector(iter) ((iter).bi_sector + bvec_iter_sectors((iter)))

#define bio_sectors(bio)	bvec_iter_sectors((bio)->bi_iter)
#define bio_end_sector(bio)	bvec_iter_end_sector((bio)->bi_iter)
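/*
 * Example: a 12 KiB bio starting at sector 2048 spans 24 512-byte sectors
 * (12288 >> 9 == 24), so bio_sectors() returns 24 and bio_end_sector()
 * returns 2072.
 */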

/*
 * Return the data direction, READ or WRITE.
 */
#define bio_data_dir(bio) \
	(op_is_write(bio_op(bio)) ? WRITE : READ)

/*
 * Check whether this bio carries any data or not. A NULL bio is allowed.
 */
static inline bool bio_has_data(struct bio *bio)
{
	if (bio &&
	    bio->bi_iter.bi_size &&
	    bio_op(bio) != REQ_OP_DISCARD &&
	    bio_op(bio) != REQ_OP_SECURE_ERASE &&
	    bio_op(bio) != REQ_OP_WRITE_ZEROES)
		return true;

	return false;
}

static inline bool bio_no_advance_iter(struct bio *bio)
{
	return bio_op(bio) == REQ_OP_DISCARD ||
	       bio_op(bio) == REQ_OP_SECURE_ERASE ||
	       bio_op(bio) == REQ_OP_WRITE_SAME ||
	       bio_op(bio) == REQ_OP_WRITE_ZEROES;
}

static inline bool bio_mergeable(struct bio *bio)
{
	if (bio->bi_opf & REQ_NOMERGE_FLAGS)
		return false;

	return true;
}

static inline unsigned int bio_cur_bytes(struct bio *bio)
{
	if (bio_has_data(bio))
		return bio_iovec(bio).bv_len;
	else /* dataless requests such as discard */
		return bio->bi_iter.bi_size;
}

static inline void *bio_data(struct bio *bio)
{
	if (bio_has_data(bio))
		return page_address(bio_page(bio)) + bio_offset(bio);

	return NULL;
}

/**
 * bio_full - check if the bio is full
 * @bio:	bio to check
 * @len:	length of one segment to be added
 *
 * Return true if @bio is full and one segment with @len bytes can't be
 * added to the bio, otherwise return false
 */
static inline bool bio_full(struct bio *bio, unsigned len)
{
	if (bio->bi_vcnt >= bio->bi_max_vecs)
		return true;

	if (bio->bi_iter.bi_size > UINT_MAX - len)
		return true;

	return false;
}
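/*
 * Example (illustrative sketch, not kernel code): callers of
 * __bio_add_page() are expected to check bio_full() first, e.g. when
 * building a bio one page at a time:
 *
 *	while (npages && !bio_full(bio, PAGE_SIZE)) {
 *		__bio_add_page(bio, *pagep++, PAGE_SIZE, 0);
 *		npages--;
 *	}
 *
 * 'npages' and 'pagep' are hypothetical locals.
 */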

static inline bool bio_next_segment(const struct bio *bio,
				    struct bvec_iter_all *iter)
{
	if (iter->idx >= bio->bi_vcnt)
		return false;

	bvec_advance(&bio->bi_io_vec[iter->idx], iter);
	return true;
}

/*
 * drivers should _never_ use the all version - the bio may have been split
 * before it got to the driver and the driver won't own all of it
 */
#define bio_for_each_segment_all(bvl, bio, iter) \
	for (bvl = bvec_init_iter_all(&iter); bio_next_segment((bio), &iter); )

static inline void bio_advance_iter(struct bio *bio, struct bvec_iter *iter,
				    unsigned bytes)
{
	iter->bi_sector += bytes >> 9;

	if (bio_no_advance_iter(bio))
		iter->bi_size -= bytes;
	else
		bvec_iter_advance(bio->bi_io_vec, iter, bytes);
		/* TODO: It is reasonable to complete bio with error here. */
}

#define __bio_for_each_segment(bvl, bio, iter, start)			\
	for (iter = (start);						\
	     (iter).bi_size &&						\
		((bvl = bio_iter_iovec((bio), (iter))), 1);		\
	     bio_advance_iter((bio), &(iter), (bvl).bv_len))

#define bio_for_each_segment(bvl, bio, iter)				\
	__bio_for_each_segment(bvl, bio, iter, (bio)->bi_iter)
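/*
 * Example (illustrative sketch): walking each single-page segment of a
 * bio the caller owns, e.g. to checksum the data. 'csum' and the use of
 * crc32() here are illustrative only:
 *
 *	struct bio_vec bvec;
 *	struct bvec_iter iter;
 *
 *	bio_for_each_segment(bvec, bio, iter) {
 *		void *p = kmap_atomic(bvec.bv_page);
 *
 *		csum = crc32(csum, p + bvec.bv_offset, bvec.bv_len);
 *		kunmap_atomic(p);
 *	}
 */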

#define __bio_for_each_bvec(bvl, bio, iter, start)			\
	for (iter = (start);						\
	     (iter).bi_size &&						\
		((bvl = mp_bvec_iter_bvec((bio)->bi_io_vec, (iter))), 1); \
	     bio_advance_iter((bio), &(iter), (bvl).bv_len))

/* iterate over multi-page bvec */
#define bio_for_each_bvec(bvl, bio, iter)				\
	__bio_for_each_bvec(bvl, bio, iter, (bio)->bi_iter)
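/*
 * Example (sketch): unlike bio_for_each_segment(), each step here may
 * yield a multi-page bvec, which suits drivers building scatterlists.
 * setup_one_range() is a hypothetical driver helper:
 *
 *	struct bio_vec bv;
 *	struct bvec_iter iter;
 *
 *	bio_for_each_bvec(bv, bio, iter)
 *		setup_one_range(sg, bv.bv_page, bv.bv_len, bv.bv_offset);
 */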

#define bio_iter_last(bvec, iter) ((iter).bi_size == (bvec).bv_len)

static inline unsigned bio_segments(struct bio *bio)
{
	unsigned segs = 0;
	struct bio_vec bv;
	struct bvec_iter iter;

	/*
	 * We special case discard/write same/write zeroes, because they
	 * interpret bi_size differently:
	 */

	switch (bio_op(bio)) {
	case REQ_OP_DISCARD:
	case REQ_OP_SECURE_ERASE:
	case REQ_OP_WRITE_ZEROES:
		return 0;
	case REQ_OP_WRITE_SAME:
		return 1;
	default:
		break;
	}

	bio_for_each_segment(bv, bio, iter)
		segs++;

	return segs;
}
/*
 * Get a reference to a bio so it won't disappear. The intended use is
 * something like:
 *
 *	bio_get(bio);
 *	submit_bio(bio);
 *	if (bio->bi_flags ...)
 *		do_something
 *	bio_put(bio);
 *
 * Without the bio_get(), the I/O could complete before submit_bio()
 * returns, and the bio would then be freed memory by the time the
 * "if (bio->bi_flags ...)" check runs.
 */
static inline void bio_get(struct bio *bio)
{
	bio->bi_flags |= (1 << BIO_REFFED);
	smp_mb__before_atomic();
	atomic_inc(&bio->__bi_cnt);
}

static inline void bio_cnt_set(struct bio *bio, unsigned int count)
{
	if (count != 1) {
		bio->bi_flags |= (1 << BIO_REFFED);
		smp_mb();
	}
	atomic_set(&bio->__bi_cnt, count);
}

static inline bool bio_flagged(struct bio *bio, unsigned int bit)
{
	return (bio->bi_flags & (1U << bit)) != 0;
}

static inline void bio_set_flag(struct bio *bio, unsigned int bit)
{
	bio->bi_flags |= (1U << bit);
}

static inline void bio_clear_flag(struct bio *bio, unsigned int bit)
{
	bio->bi_flags &= ~(1U << bit);
}

static inline void bio_get_first_bvec(struct bio *bio, struct bio_vec *bv)
{
	*bv = bio_iovec(bio);
}

static inline void bio_get_last_bvec(struct bio *bio, struct bio_vec *bv)
{
	struct bvec_iter iter = bio->bi_iter;
	int idx;

	if (unlikely(!bio_multiple_segments(bio))) {
		*bv = bio_iovec(bio);
		return;
	}

	bio_advance_iter(bio, &iter, iter.bi_size);

	if (!iter.bi_bvec_done)
		idx = iter.bi_idx - 1;
	else	/* in the middle of bvec */
		idx = iter.bi_idx;

	*bv = bio->bi_io_vec[idx];

	/*
	 * iter.bi_bvec_done records actual length of the last bvec
	 * if this bio ends in the middle of one io vector
	 */
	if (iter.bi_bvec_done)
		bv->bv_len = iter.bi_bvec_done;
}

static inline struct bio_vec *bio_first_bvec_all(struct bio *bio)
{
	WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED));
	return bio->bi_io_vec;
}

static inline struct page *bio_first_page_all(struct bio *bio)
{
	return bio_first_bvec_all(bio)->bv_page;
}

static inline struct bio_vec *bio_last_bvec_all(struct bio *bio)
{
	WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED));
	return &bio->bi_io_vec[bio->bi_vcnt - 1];
}

enum bip_flags {
	BIP_BLOCK_INTEGRITY	= 1 << 0, /* block layer owns integrity data */
	BIP_MAPPED_INTEGRITY	= 1 << 1, /* ref tag has been remapped */
	BIP_CTRL_NOCHECK	= 1 << 2, /* disable HBA integrity checking */
	BIP_DISK_NOCHECK	= 1 << 3, /* disable disk integrity checking */
	BIP_IP_CHECKSUM		= 1 << 4, /* IP checksum */
};

/*
 * bio integrity payload
 */
struct bio_integrity_payload {
	struct bio		*bip_bio;	/* parent bio */

	struct bvec_iter	bip_iter;

	unsigned short		bip_slab;	/* slab the bip came from */
	unsigned short		bip_vcnt;	/* # of integrity bio_vecs */
	unsigned short		bip_max_vcnt;	/* integrity bio_vec slots */
	unsigned short		bip_flags;	/* control flags */

	struct bvec_iter	bio_iter;	/* for rewinding parent bio */

	struct work_struct	bip_work;	/* I/O completion */

	struct bio_vec		*bip_vec;
	struct bio_vec		bip_inline_vecs[0];	/* embedded bvec array */
};

#if defined(CONFIG_BLK_DEV_INTEGRITY)

static inline struct bio_integrity_payload *bio_integrity(struct bio *bio)
{
	if (bio->bi_opf & REQ_INTEGRITY)
		return bio->bi_integrity;

	return NULL;
}

static inline bool bio_integrity_flagged(struct bio *bio, enum bip_flags flag)
{
	struct bio_integrity_payload *bip = bio_integrity(bio);

	if (bip)
		return bip->bip_flags & flag;

	return false;
}

static inline sector_t bip_get_seed(struct bio_integrity_payload *bip)
{
	return bip->bip_iter.bi_sector;
}

static inline void bip_set_seed(struct bio_integrity_payload *bip,
				sector_t seed)
{
	bip->bip_iter.bi_sector = seed;
}

#endif /* CONFIG_BLK_DEV_INTEGRITY */

extern void bio_trim(struct bio *bio, int offset, int size);
extern struct bio *bio_split(struct bio *bio, int sectors,
			     gfp_t gfp, struct bio_set *bs);

/**
 * bio_next_split - get next @sectors from a bio, splitting if necessary
 * @bio:	bio to split
 * @sectors:	number of sectors to split from the front of @bio
 * @gfp:	gfp mask
 * @bs:		bio set to allocate from
 *
 * Returns a bio representing the next @sectors of @bio - if the bio is smaller
 * than @sectors, returns the original bio unchanged.
 */
static inline struct bio *bio_next_split(struct bio *bio, int sectors,
					 gfp_t gfp, struct bio_set *bs)
{
	if (sectors >= bio_sectors(bio))
		return bio;

	return bio_split(bio, sectors, gfp, bs);
}
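/*
 * Example (illustrative sketch): a remapping driver peeling fixed-size
 * chunks off the front of an incoming bio. 'chunk_sectors' and
 * 'my_bioset' are hypothetical:
 *
 *	struct bio *split;
 *
 *	do {
 *		split = bio_next_split(bio, chunk_sectors, GFP_NOIO,
 *				       &my_bioset);
 *		if (split != bio) {
 *			bio_chain(split, bio);
 *			submit_bio(split);
 *		}
 *	} while (split != bio);
 *
 * The loop ends once the remainder fits in one chunk; 'bio' then covers
 * the final piece and completes only after all chained splits have.
 */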

enum {
	BIOSET_NEED_BVECS = BIT(0),
	BIOSET_NEED_RESCUER = BIT(1),
};
extern int bioset_init(struct bio_set *, unsigned int, unsigned int, int flags);
extern void bioset_exit(struct bio_set *);
extern int biovec_init_pool(mempool_t *pool, int pool_entries);
extern int bioset_init_from_src(struct bio_set *bs, struct bio_set *src);

extern struct bio *bio_alloc_bioset(gfp_t, unsigned int, struct bio_set *);
extern void bio_put(struct bio *);

extern void __bio_clone_fast(struct bio *, struct bio *);
extern struct bio *bio_clone_fast(struct bio *, gfp_t, struct bio_set *);

extern struct bio_set fs_bio_set;

static inline struct bio *bio_alloc(gfp_t gfp_mask, unsigned int nr_iovecs)
{
	return bio_alloc_bioset(gfp_mask, nr_iovecs, &fs_bio_set);
}

static inline struct bio *bio_kmalloc(gfp_t gfp_mask, unsigned int nr_iovecs)
{
	return bio_alloc_bioset(gfp_mask, nr_iovecs, NULL);
}

extern blk_qc_t submit_bio(struct bio *);
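/*
 * Example (illustrative sketch): allocating and submitting a simple
 * one-segment read. Error handling is omitted and 'bdev', 'sector' and
 * 'page' are hypothetical:
 *
 *	struct bio *bio = bio_alloc(GFP_NOIO, 1);
 *
 *	bio_set_dev(bio, bdev);
 *	bio->bi_iter.bi_sector = sector;
 *	bio->bi_opf = REQ_OP_READ;
 *	bio_add_page(bio, page, PAGE_SIZE, 0);
 *	submit_bio(bio);
 */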

extern void bio_endio(struct bio *);

static inline void bio_io_error(struct bio *bio)
{
	bio->bi_status = BLK_STS_IOERR;
	bio_endio(bio);
}

static inline void bio_wouldblock_error(struct bio *bio)
{
	bio->bi_status = BLK_STS_AGAIN;
	bio_endio(bio);
}

struct request_queue;

extern int submit_bio_wait(struct bio *bio);
extern void bio_advance(struct bio *, unsigned);

extern void bio_init(struct bio *bio, struct bio_vec *table,
		     unsigned short max_vecs);
extern void bio_uninit(struct bio *);
extern void bio_reset(struct bio *);
void bio_chain(struct bio *, struct bio *);
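/*
 * Example (sketch): when one logical operation spans several bios, each
 * full bio is chained to a freshly allocated one and submitted, so the
 * caller's completion only runs once the whole chain is done.
 * 'more_data_to_map' is a hypothetical condition:
 *
 *	while (more_data_to_map) {
 *		struct bio *next = bio_alloc(GFP_NOIO, BIO_MAX_PAGES);
 *
 *		bio_chain(bio, next);	"next" becomes bio's parent
 *		submit_bio(bio);
 *		bio = next;		continue filling the new bio
 *	}
 *	return submit_bio_wait(bio);	or submit_bio() for async completion
 */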

extern int bio_add_page(struct bio *, struct page *, unsigned int,
			unsigned int);
extern int bio_add_pc_page(struct request_queue *, struct bio *, struct page *,
			   unsigned int, unsigned int);
bool __bio_try_merge_page(struct bio *bio, struct page *page,
		unsigned int len, unsigned int off, bool *same_page);
void __bio_add_page(struct bio *bio, struct page *page,
		unsigned int len, unsigned int off);
int bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter);
void bio_release_pages(struct bio *bio, bool mark_dirty);
struct rq_map_data;
extern struct bio *bio_map_user_iov(struct request_queue *,
				    struct iov_iter *, gfp_t);
extern void bio_unmap_user(struct bio *);
extern struct bio *bio_map_kern(struct request_queue *, void *, unsigned int,
				gfp_t);
extern struct bio *bio_copy_kern(struct request_queue *, void *, unsigned int,
				 gfp_t, int);
extern void bio_set_pages_dirty(struct bio *bio);
extern void bio_check_pages_dirty(struct bio *bio);

void generic_start_io_acct(struct request_queue *q, int op,
			   unsigned long sectors, struct hd_struct *part);
void generic_end_io_acct(struct request_queue *q, int op,
			 struct hd_struct *part,
			 unsigned long start_time);

extern void bio_copy_data_iter(struct bio *dst, struct bvec_iter *dst_iter,
			       struct bio *src, struct bvec_iter *src_iter);
extern void bio_copy_data(struct bio *dst, struct bio *src);
extern void bio_list_copy_data(struct bio *dst, struct bio *src);
extern void bio_free_pages(struct bio *bio);

extern struct bio *bio_copy_user_iov(struct request_queue *,
				     struct rq_map_data *,
				     struct iov_iter *,
				     gfp_t);
extern int bio_uncopy_user(struct bio *);
void zero_fill_bio_iter(struct bio *bio, struct bvec_iter iter);

static inline void zero_fill_bio(struct bio *bio)
{
	zero_fill_bio_iter(bio, bio->bi_iter);
}

extern struct bio_vec *bvec_alloc(gfp_t, int, unsigned long *, mempool_t *);
extern void bvec_free(mempool_t *, struct bio_vec *, unsigned int);
extern unsigned int bvec_nr_vecs(unsigned short idx);
extern const char *bio_devname(struct bio *bio, char *buffer);

#define bio_set_dev(bio, bdev)			\
do {						\
	if ((bio)->bi_disk != (bdev)->bd_disk)	\
		bio_clear_flag(bio, BIO_THROTTLED);\
	(bio)->bi_disk = (bdev)->bd_disk;	\
	(bio)->bi_partno = (bdev)->bd_partno;	\
	bio_associate_blkg(bio);		\
} while (0)

#define bio_copy_dev(dst, src)			\
do {						\
	(dst)->bi_disk = (src)->bi_disk;	\
	(dst)->bi_partno = (src)->bi_partno;	\
	bio_clone_blkg_association(dst, src);	\
} while (0)

#define bio_dev(bio) \
	disk_devt((bio)->bi_disk)

#if defined(CONFIG_MEMCG) && defined(CONFIG_BLK_CGROUP)
void bio_associate_blkg_from_page(struct bio *bio, struct page *page);
#else
static inline void bio_associate_blkg_from_page(struct bio *bio,
						struct page *page) { }
#endif

#ifdef CONFIG_BLK_CGROUP
void bio_disassociate_blkg(struct bio *bio);
void bio_associate_blkg(struct bio *bio);
void bio_associate_blkg_from_css(struct bio *bio,
				 struct cgroup_subsys_state *css);
void bio_clone_blkg_association(struct bio *dst, struct bio *src);
#else	/* CONFIG_BLK_CGROUP */
static inline void bio_disassociate_blkg(struct bio *bio) { }
static inline void bio_associate_blkg(struct bio *bio) { }
static inline void bio_associate_blkg_from_css(struct bio *bio,
					       struct cgroup_subsys_state *css)
{ }
static inline void bio_clone_blkg_association(struct bio *dst,
					      struct bio *src) { }
#endif	/* CONFIG_BLK_CGROUP */

#ifdef CONFIG_HIGHMEM
/*
 * remember never ever reenable interrupts between a bvec_kmap_irq and
 * bvec_kunmap_irq!
 */
static inline char *bvec_kmap_irq(struct bio_vec *bvec, unsigned long *flags)
{
	unsigned long addr;

	/*
	 * might not be a highmem page, but the preempt/irq count
	 * balancing is a lot nicer this way
	 */
	local_irq_save(*flags);
	addr = (unsigned long) kmap_atomic(bvec->bv_page);

	BUG_ON(addr & ~PAGE_MASK);

	return (char *) addr + bvec->bv_offset;
}

static inline void bvec_kunmap_irq(char *buffer, unsigned long *flags)
{
	unsigned long ptr = (unsigned long) buffer & PAGE_MASK;

	kunmap_atomic((void *) ptr);
	local_irq_restore(*flags);
}

#else
static inline char *bvec_kmap_irq(struct bio_vec *bvec, unsigned long *flags)
{
	return page_address(bvec->bv_page) + bvec->bv_offset;
}

static inline void bvec_kunmap_irq(char *buffer, unsigned long *flags)
{
	*flags = 0;
}
#endif
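/*
 * Example (illustrative sketch): zeroing the data covered by one bvec.
 * The mapping and unmapping must stay paired, and interrupts must not be
 * re-enabled in between:
 *
 *	unsigned long flags;
 *	char *data = bvec_kmap_irq(bvec, &flags);
 *
 *	memset(data, 0, bvec->bv_len);
 *	bvec_kunmap_irq(data, &flags);
 */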

/*
 * BIO list management for use by remapping drivers (e.g. DM or MD) and loop.
 *
 * A bio_list anchors a singly-linked list of bios chained through the bi_next
 * member of the bio. The bio_list also caches the last list member to allow
 * fast access to the tail.
 */
struct bio_list {
	struct bio *head;
	struct bio *tail;
};

static inline int bio_list_empty(const struct bio_list *bl)
{
	return bl->head == NULL;
}

static inline void bio_list_init(struct bio_list *bl)
{
	bl->head = bl->tail = NULL;
}

#define BIO_EMPTY_LIST	{ NULL, NULL }

#define bio_list_for_each(bio, bl) \
	for (bio = (bl)->head; bio; bio = bio->bi_next)

static inline unsigned bio_list_size(const struct bio_list *bl)
{
	unsigned sz = 0;
	struct bio *bio;

	bio_list_for_each(bio, bl)
		sz++;

	return sz;
}

static inline void bio_list_add(struct bio_list *bl, struct bio *bio)
{
	bio->bi_next = NULL;

	if (bl->tail)
		bl->tail->bi_next = bio;
	else
		bl->head = bio;

	bl->tail = bio;
}

static inline void bio_list_add_head(struct bio_list *bl, struct bio *bio)
{
	bio->bi_next = bl->head;

	bl->head = bio;

	if (!bl->tail)
		bl->tail = bio;
}

static inline void bio_list_merge(struct bio_list *bl, struct bio_list *bl2)
{
	if (!bl2->head)
		return;

	if (bl->tail)
		bl->tail->bi_next = bl2->head;
	else
		bl->head = bl2->head;

	bl->tail = bl2->tail;
}

static inline void bio_list_merge_head(struct bio_list *bl,
				       struct bio_list *bl2)
{
	if (!bl2->head)
		return;

	if (bl->head)
		bl2->tail->bi_next = bl->head;
	else
		bl->tail = bl2->tail;

	bl->head = bl2->head;
}

static inline struct bio *bio_list_peek(struct bio_list *bl)
{
	return bl->head;
}

static inline struct bio *bio_list_pop(struct bio_list *bl)
{
	struct bio *bio = bl->head;

	if (bio) {
		bl->head = bl->head->bi_next;
		if (!bl->head)
			bl->tail = NULL;

		bio->bi_next = NULL;
	}

	return bio;
}

static inline struct bio *bio_list_get(struct bio_list *bl)
{
	struct bio *bio = bl->head;

	bl->head = bl->tail = NULL;

	return bio;
}
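/*
 * Example (sketch): a worker draining bios that were queued for deferred
 * processing. 'deferred' and handle_bio() are hypothetical, and the lock
 * protecting 'deferred' is elided:
 *
 *	struct bio_list list;
 *	struct bio *bio;
 *
 *	bio_list_init(&list);
 *	bio_list_merge(&list, &deferred);
 *	bio_list_init(&deferred);
 *
 *	while ((bio = bio_list_pop(&list)))
 *		handle_bio(bio);
 */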

/*
 * Increment chain count for the bio. Make sure the CHAIN flag update
 * is visible before the raised count.
 */
static inline void bio_inc_remaining(struct bio *bio)
{
	bio_set_flag(bio, BIO_CHAIN);
	smp_mb__before_atomic();
	atomic_inc(&bio->__bi_remaining);
}

/*
 * bio_set is used to allow other portions of the IO system to
 * allocate their own private memory pools for bio and iovec structures.
 * These memory pools in turn all allocate from the bio_slab
 * and the bvec_slabs[].
 */
#define BIO_POOL_SIZE 2

struct bio_set {
	struct kmem_cache *bio_slab;
	unsigned int front_pad;

	mempool_t bio_pool;
	mempool_t bvec_pool;
#if defined(CONFIG_BLK_DEV_INTEGRITY)
	mempool_t bio_integrity_pool;
	mempool_t bvec_integrity_pool;
#endif

	/*
	 * Deadlock avoidance for stacking block drivers: see comments in
	 * bio_alloc_bioset() for details
	 */
	spinlock_t		rescue_lock;
	struct bio_list		rescue_list;
	struct work_struct	rescue_work;
	struct workqueue_struct	*rescue_workqueue;
};

struct biovec_slab {
	int nr_vecs;
	char *name;
	struct kmem_cache *slab;
};

static inline bool bioset_initialized(struct bio_set *bs)
{
	return bs->bio_slab != NULL;
}
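/*
 * Example (illustrative sketch): a stacking driver initializing a private
 * bio_set at load time so its clones never compete with fs_bio_set. The
 * pool size and flags shown are just one plausible choice:
 *
 *	static struct bio_set my_bioset;
 *
 *	if (bioset_init(&my_bioset, BIO_POOL_SIZE, 0, BIOSET_NEED_BVECS))
 *		return -ENOMEM;
 *	...
 *	bioset_exit(&my_bioset);	on driver teardown
 */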

/*
 * a small number of entries is fine, not going to be performance critical.
 * basically we just need to survive
 */
#define BIO_SPLIT_ENTRIES 2

#if defined(CONFIG_BLK_DEV_INTEGRITY)

#define bip_for_each_vec(bvl, bip, iter)				\
	for_each_bvec(bvl, (bip)->bip_vec, iter, (bip)->bip_iter)

#define bio_for_each_integrity_vec(_bvl, _bio, _iter)			\
	for_each_bio(_bio)						\
		bip_for_each_vec(_bvl, _bio->bi_integrity, _iter)

extern struct bio_integrity_payload *bio_integrity_alloc(struct bio *, gfp_t, unsigned int);
extern int bio_integrity_add_page(struct bio *, struct page *, unsigned int, unsigned int);
extern bool bio_integrity_prep(struct bio *);
extern void bio_integrity_advance(struct bio *, unsigned int);
extern void bio_integrity_trim(struct bio *);
extern int bio_integrity_clone(struct bio *, struct bio *, gfp_t);
extern int bioset_integrity_create(struct bio_set *, int);
extern void bioset_integrity_free(struct bio_set *);
extern void bio_integrity_init(void);

#else /* CONFIG_BLK_DEV_INTEGRITY */

static inline void *bio_integrity(struct bio *bio)
{
	return NULL;
}

static inline int bioset_integrity_create(struct bio_set *bs, int pool_size)
{
	return 0;
}

static inline void bioset_integrity_free(struct bio_set *bs)
{
	return;
}

static inline bool bio_integrity_prep(struct bio *bio)
{
	return true;
}

static inline int bio_integrity_clone(struct bio *bio, struct bio *bio_src,
				      gfp_t gfp_mask)
{
	return 0;
}

static inline void bio_integrity_advance(struct bio *bio,
					 unsigned int bytes_done)
{
	return;
}

static inline void bio_integrity_trim(struct bio *bio)
{
	return;
}

static inline void bio_integrity_init(void)
{
	return;
}

static inline bool bio_integrity_flagged(struct bio *bio, enum bip_flags flag)
{
	return false;
}

static inline void *bio_integrity_alloc(struct bio *bio, gfp_t gfp,
					unsigned int nr)
{
	return ERR_PTR(-EINVAL);
}

static inline int bio_integrity_add_page(struct bio *bio, struct page *page,
					 unsigned int len, unsigned int offset)
{
	return 0;
}

#endif /* CONFIG_BLK_DEV_INTEGRITY */

/*
 * Mark a bio as polled. Note that for async polled IO, the caller must
 * expect -EWOULDBLOCK if we cannot allocate a request (or other resources).
 * We cannot block waiting for requests on polled IO, as those completions
 * must be found by the caller. This is different than IRQ driven IO, where
 * it's safe to wait for IO to complete.
 */
static inline void bio_set_polled(struct bio *bio, struct kiocb *kiocb)
{
	bio->bi_opf |= REQ_HIPRI;
	if (!is_sync_kiocb(kiocb))
		bio->bi_opf |= REQ_NOWAIT;
}
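/*
 * Example (sketch): a direct-I/O path marking its bio for polled
 * completion when the caller asked for it, then polling until done.
 * 'iocb' is the caller's kiocb, 'q' the target request queue, and
 * 'bio_done' stands for whatever completion state the caller tracks:
 *
 *	blk_qc_t qc;
 *
 *	if (iocb->ki_flags & IOCB_HIPRI)
 *		bio_set_polled(bio, iocb);
 *	qc = submit_bio(bio);
 *	while (!bio_done)
 *		blk_poll(q, qc, true);
 */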

#endif /* CONFIG_BLOCK */
#endif /* __LINUX_BIO_H */