/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2001 Jens Axboe <axboe@suse.de>
 */
#ifndef __LINUX_BIO_H
#define __LINUX_BIO_H

#include <linux/highmem.h>
#include <linux/mempool.h>
#include <linux/ioprio.h>
/* struct bio, bio_vec and BIO_* flags are defined in blk_types.h */
#include <linux/blk_types.h>

#define BIO_DEBUG

#ifdef BIO_DEBUG
#define BIO_BUG_ON	BUG_ON
#else
#define BIO_BUG_ON
#endif

#define BIO_MAX_PAGES		256

#define bio_prio(bio)			(bio)->bi_ioprio
#define bio_set_prio(bio, prio)		((bio)->bi_ioprio = prio)

#define bio_iter_iovec(bio, iter)				\
	bvec_iter_bvec((bio)->bi_io_vec, (iter))

#define bio_iter_page(bio, iter)				\
	bvec_iter_page((bio)->bi_io_vec, (iter))
#define bio_iter_len(bio, iter)					\
	bvec_iter_len((bio)->bi_io_vec, (iter))
#define bio_iter_offset(bio, iter)				\
	bvec_iter_offset((bio)->bi_io_vec, (iter))

#define bio_page(bio)		bio_iter_page((bio), (bio)->bi_iter)
#define bio_offset(bio)		bio_iter_offset((bio), (bio)->bi_iter)
#define bio_iovec(bio)		bio_iter_iovec((bio), (bio)->bi_iter)

#define bvec_iter_sectors(iter)	((iter).bi_size >> 9)
#define bvec_iter_end_sector(iter) ((iter).bi_sector + bvec_iter_sectors((iter)))

#define bio_sectors(bio)	bvec_iter_sectors((bio)->bi_iter)
#define bio_end_sector(bio)	bvec_iter_end_sector((bio)->bi_iter)

/*
 * Return the data direction, READ or WRITE.
 */
#define bio_data_dir(bio) \
	(op_is_write(bio_op(bio)) ? WRITE : READ)

/*
 * Check whether this bio carries any data or not. A NULL bio is allowed.
 */
static inline bool bio_has_data(struct bio *bio)
{
	if (bio &&
	    bio->bi_iter.bi_size &&
	    bio_op(bio) != REQ_OP_DISCARD &&
	    bio_op(bio) != REQ_OP_SECURE_ERASE &&
	    bio_op(bio) != REQ_OP_WRITE_ZEROES)
		return true;

	return false;
}

static inline bool bio_no_advance_iter(const struct bio *bio)
{
	return bio_op(bio) == REQ_OP_DISCARD ||
	       bio_op(bio) == REQ_OP_SECURE_ERASE ||
	       bio_op(bio) == REQ_OP_WRITE_SAME ||
	       bio_op(bio) == REQ_OP_WRITE_ZEROES;
}

static inline bool bio_mergeable(struct bio *bio)
{
	if (bio->bi_opf & REQ_NOMERGE_FLAGS)
		return false;

	return true;
}

static inline unsigned int bio_cur_bytes(struct bio *bio)
{
	if (bio_has_data(bio))
		return bio_iovec(bio).bv_len;
	else /* dataless requests such as discard */
		return bio->bi_iter.bi_size;
}

static inline void *bio_data(struct bio *bio)
{
	if (bio_has_data(bio))
		return page_address(bio_page(bio)) + bio_offset(bio);

	return NULL;
}

/**
 * bio_full - check if the bio is full
 * @bio:	bio to check
 * @len:	length of one segment to be added
 *
 * Return true if @bio is full and a segment of @len bytes can't be added
 * to it; otherwise return false.
 */
static inline bool bio_full(struct bio *bio, unsigned len)
{
	if (bio->bi_vcnt >= bio->bi_max_vecs)
		return true;

	if (bio->bi_iter.bi_size > UINT_MAX - len)
		return true;

	return false;
}

static inline bool bio_next_segment(const struct bio *bio,
				    struct bvec_iter_all *iter)
{
	if (iter->idx >= bio->bi_vcnt)
		return false;

	bvec_advance(&bio->bi_io_vec[iter->idx], iter);
	return true;
}

/*
 * drivers should _never_ use the all version - the bio may have been split
 * before it got to the driver and the driver won't own all of it
 */
#define bio_for_each_segment_all(bvl, bio, iter) \
	for (bvl = bvec_init_iter_all(&iter); bio_next_segment((bio), &iter); )

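/*
 * A typical use (an illustrative sketch only; the handler name is
 * hypothetical) is a bio owner releasing the pages it attached itself,
 * from its ->bi_end_io handler:
 *
 *	static void my_end_io(struct bio *bio)
 *	{
 *		struct bio_vec *bvec;
 *		struct bvec_iter_all iter_all;
 *
 *		bio_for_each_segment_all(bvec, bio, iter_all)
 *			put_page(bvec->bv_page);
 *		bio_put(bio);
 *	}
 */
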
static inline void bio_advance_iter(const struct bio *bio,
				    struct bvec_iter *iter, unsigned int bytes)
{
	iter->bi_sector += bytes >> 9;

	if (bio_no_advance_iter(bio))
		iter->bi_size -= bytes;
	else
		bvec_iter_advance(bio->bi_io_vec, iter, bytes);
		/* TODO: It is reasonable to complete the bio with an error here. */
}

#define __bio_for_each_segment(bvl, bio, iter, start)			\
	for (iter = (start);						\
	     (iter).bi_size &&						\
		((bvl = bio_iter_iovec((bio), (iter))), 1);		\
	     bio_advance_iter((bio), &(iter), (bvl).bv_len))

#define bio_for_each_segment(bvl, bio, iter)				\
	__bio_for_each_segment(bvl, bio, iter, (bio)->bi_iter)

#define __bio_for_each_bvec(bvl, bio, iter, start)		\
	for (iter = (start);						\
	     (iter).bi_size &&						\
		((bvl = mp_bvec_iter_bvec((bio)->bi_io_vec, (iter))), 1); \
	     bio_advance_iter((bio), &(iter), (bvl).bv_len))

/* iterate over multi-page bvec */
#define bio_for_each_bvec(bvl, bio, iter)			\
	__bio_for_each_bvec(bvl, bio, iter, (bio)->bi_iter)

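/*
 * A minimal iteration example (illustrative only, assuming lowmem
 * pages so that page_address() is valid): walk the data a single-page
 * segment at a time, without touching bio->bi_iter, here to feed a
 * crc32. bio_for_each_bvec() is used the same way but yields
 * multi-page bvecs, so bv_len may span several pages.
 *
 *	struct bio_vec bv;
 *	struct bvec_iter iter;
 *	u32 csum = 0;
 *
 *	bio_for_each_segment(bv, bio, iter)
 *		csum = crc32(csum, page_address(bv.bv_page) + bv.bv_offset,
 *			     bv.bv_len);
 */
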
/*
 * Iterate over all multi-page bvecs. Drivers shouldn't use this version for the
 * same reasons as bio_for_each_segment_all().
 */
#define bio_for_each_bvec_all(bvl, bio, i)		\
	for (i = 0, bvl = bio_first_bvec_all(bio);	\
	     i < (bio)->bi_vcnt; i++, bvl++)

#define bio_iter_last(bvec, iter) ((iter).bi_size == (bvec).bv_len)

static inline unsigned bio_segments(struct bio *bio)
{
	unsigned segs = 0;
	struct bio_vec bv;
	struct bvec_iter iter;

	/*
	 * We special case discard/write same/write zeroes, because they
	 * interpret bi_size differently:
	 */

	switch (bio_op(bio)) {
	case REQ_OP_DISCARD:
	case REQ_OP_SECURE_ERASE:
	case REQ_OP_WRITE_ZEROES:
		return 0;
	case REQ_OP_WRITE_SAME:
		return 1;
	default:
		break;
	}

	bio_for_each_segment(bv, bio, iter)
		segs++;

	return segs;
}

/*
 * Get a reference to a bio, so it won't disappear. The intended use is
 * something like:
 *
 *	bio_get(bio);
 *	submit_bio(rw, bio);
 *	if (bio->bi_flags ...)
 *		do_something
 *	bio_put(bio);
 *
 * Without the bio_get(), the I/O could complete before submit_bio()
 * returns, and the bio would then be freed memory by the time the
 * if (bio->bi_flags ...) check runs.
 */
static inline void bio_get(struct bio *bio)
{
	bio->bi_flags |= (1 << BIO_REFFED);
	smp_mb__before_atomic();
	atomic_inc(&bio->__bi_cnt);
}

static inline void bio_cnt_set(struct bio *bio, unsigned int count)
{
	if (count != 1) {
		bio->bi_flags |= (1 << BIO_REFFED);
		smp_mb();
	}
	atomic_set(&bio->__bi_cnt, count);
}

static inline bool bio_flagged(struct bio *bio, unsigned int bit)
{
	return (bio->bi_flags & (1U << bit)) != 0;
}

static inline void bio_set_flag(struct bio *bio, unsigned int bit)
{
	bio->bi_flags |= (1U << bit);
}

static inline void bio_clear_flag(struct bio *bio, unsigned int bit)
{
	bio->bi_flags &= ~(1U << bit);
}

static inline void bio_get_first_bvec(struct bio *bio, struct bio_vec *bv)
{
	*bv = mp_bvec_iter_bvec(bio->bi_io_vec, bio->bi_iter);
}

static inline void bio_get_last_bvec(struct bio *bio, struct bio_vec *bv)
{
	struct bvec_iter iter = bio->bi_iter;
	int idx;

	bio_get_first_bvec(bio, bv);
	if (bv->bv_len == bio->bi_iter.bi_size)
		return;		/* this bio only has a single bvec */

	bio_advance_iter(bio, &iter, iter.bi_size);

	if (!iter.bi_bvec_done)
		idx = iter.bi_idx - 1;
	else	/* in the middle of bvec */
		idx = iter.bi_idx;

	*bv = bio->bi_io_vec[idx];

	/*
	 * iter.bi_bvec_done records actual length of the last bvec
	 * if this bio ends in the middle of one io vector
	 */
	if (iter.bi_bvec_done)
		bv->bv_len = iter.bi_bvec_done;
}

static inline struct bio_vec *bio_first_bvec_all(struct bio *bio)
{
	WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED));
	return bio->bi_io_vec;
}

static inline struct page *bio_first_page_all(struct bio *bio)
{
	return bio_first_bvec_all(bio)->bv_page;
}

static inline struct bio_vec *bio_last_bvec_all(struct bio *bio)
{
	WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED));
	return &bio->bi_io_vec[bio->bi_vcnt - 1];
}

enum bip_flags {
	BIP_BLOCK_INTEGRITY	= 1 << 0, /* block layer owns integrity data */
	BIP_MAPPED_INTEGRITY	= 1 << 1, /* ref tag has been remapped */
	BIP_CTRL_NOCHECK	= 1 << 2, /* disable HBA integrity checking */
	BIP_DISK_NOCHECK	= 1 << 3, /* disable disk integrity checking */
	BIP_IP_CHECKSUM		= 1 << 4, /* IP checksum */
};

/*
 * bio integrity payload
 */
struct bio_integrity_payload {
	struct bio		*bip_bio;	/* parent bio */

	struct bvec_iter	bip_iter;

	unsigned short		bip_slab;	/* slab the bip came from */
	unsigned short		bip_vcnt;	/* # of integrity bio_vecs */
	unsigned short		bip_max_vcnt;	/* integrity bio_vec slots */
	unsigned short		bip_flags;	/* control flags */

	struct bvec_iter	bio_iter;	/* for rewinding parent bio */

	struct work_struct	bip_work;	/* I/O completion */

	struct bio_vec		*bip_vec;
	struct bio_vec		bip_inline_vecs[];/* embedded bvec array */
};

#if defined(CONFIG_BLK_DEV_INTEGRITY)

static inline struct bio_integrity_payload *bio_integrity(struct bio *bio)
{
	if (bio->bi_opf & REQ_INTEGRITY)
		return bio->bi_integrity;

	return NULL;
}

static inline bool bio_integrity_flagged(struct bio *bio, enum bip_flags flag)
{
	struct bio_integrity_payload *bip = bio_integrity(bio);

	if (bip)
		return bip->bip_flags & flag;

	return false;
}

static inline sector_t bip_get_seed(struct bio_integrity_payload *bip)
{
	return bip->bip_iter.bi_sector;
}

static inline void bip_set_seed(struct bio_integrity_payload *bip,
				sector_t seed)
{
	bip->bip_iter.bi_sector = seed;
}

#endif /* CONFIG_BLK_DEV_INTEGRITY */

extern void bio_trim(struct bio *bio, int offset, int size);
extern struct bio *bio_split(struct bio *bio, int sectors,
			     gfp_t gfp, struct bio_set *bs);

/**
 * bio_next_split - get next @sectors from a bio, splitting if necessary
 * @bio:	bio to split
 * @sectors:	number of sectors to split from the front of @bio
 * @gfp:	gfp mask
 * @bs:		bio set to allocate from
 *
 * Returns a bio representing the next @sectors of @bio - if the bio is smaller
 * than @sectors, returns the original bio unchanged.
 */
static inline struct bio *bio_next_split(struct bio *bio, int sectors,
					 gfp_t gfp, struct bio_set *bs)
{
	if (sectors >= bio_sectors(bio))
		return bio;

	return bio_split(bio, sectors, gfp, bs);
}

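/*
 * A remapping driver carving bounded chunks off an incoming bio might
 * do (an illustrative sketch; "my_bioset" and "max_sectors" are
 * hypothetical, and submit_bio() stands in for however the driver
 * resubmits):
 *
 *	struct bio *split;
 *
 *	while ((split = bio_next_split(bio, max_sectors, GFP_NOIO,
 *				       &my_bioset)) != bio) {
 *		bio_chain(split, bio);
 *		submit_bio(split);
 *	}
 *	submit_bio(bio);
 */
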
enum {
	BIOSET_NEED_BVECS = BIT(0),
	BIOSET_NEED_RESCUER = BIT(1),
};
extern int bioset_init(struct bio_set *, unsigned int, unsigned int, int flags);
extern void bioset_exit(struct bio_set *);
extern int biovec_init_pool(mempool_t *pool, int pool_entries);
extern int bioset_init_from_src(struct bio_set *bs, struct bio_set *src);

extern struct bio *bio_alloc_bioset(gfp_t, unsigned int, struct bio_set *);
extern void bio_put(struct bio *);

extern void __bio_clone_fast(struct bio *, struct bio *);
extern struct bio *bio_clone_fast(struct bio *, gfp_t, struct bio_set *);

extern struct bio_set fs_bio_set;

static inline struct bio *bio_alloc(gfp_t gfp_mask, unsigned int nr_iovecs)
{
	return bio_alloc_bioset(gfp_mask, nr_iovecs, &fs_bio_set);
}

static inline struct bio *bio_kmalloc(gfp_t gfp_mask, unsigned int nr_iovecs)
{
	return bio_alloc_bioset(gfp_mask, nr_iovecs, NULL);
}

extern blk_qc_t submit_bio(struct bio *);

extern void bio_endio(struct bio *);

static inline void bio_io_error(struct bio *bio)
{
	bio->bi_status = BLK_STS_IOERR;
	bio_endio(bio);
}

static inline void bio_wouldblock_error(struct bio *bio)
{
	bio_set_flag(bio, BIO_QUIET);
	bio->bi_status = BLK_STS_AGAIN;
	bio_endio(bio);
}

struct request_queue;

extern int submit_bio_wait(struct bio *bio);
extern void bio_advance(struct bio *, unsigned);

extern void bio_init(struct bio *bio, struct bio_vec *table,
		     unsigned short max_vecs);
extern void bio_uninit(struct bio *);
extern void bio_reset(struct bio *);
void bio_chain(struct bio *, struct bio *);

extern int bio_add_page(struct bio *, struct page *, unsigned int, unsigned int);
extern int bio_add_pc_page(struct request_queue *, struct bio *, struct page *,
			   unsigned int, unsigned int);
bool __bio_try_merge_page(struct bio *bio, struct page *page,
		unsigned int len, unsigned int off, bool *same_page);
void __bio_add_page(struct bio *bio, struct page *page,
		unsigned int len, unsigned int off);
int bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter);
void bio_release_pages(struct bio *bio, bool mark_dirty);
extern void bio_set_pages_dirty(struct bio *bio);
extern void bio_check_pages_dirty(struct bio *bio);

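/*
 * Putting the pieces together (an illustrative sketch; "bdev",
 * "sector" and "page" are caller-supplied, and error handling is
 * trimmed): build and issue a single-page synchronous read.
 *
 *	struct bio *bio = bio_alloc(GFP_KERNEL, 1);
 *	int ret;
 *
 *	bio_set_dev(bio, bdev);
 *	bio->bi_iter.bi_sector = sector;
 *	bio->bi_opf = REQ_OP_READ;
 *	bio_add_page(bio, page, PAGE_SIZE, 0); // can't fail: fresh bio with room
 *	ret = submit_bio_wait(bio);
 *	bio_put(bio);
 */
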
extern void bio_copy_data_iter(struct bio *dst, struct bvec_iter *dst_iter,
			       struct bio *src, struct bvec_iter *src_iter);
extern void bio_copy_data(struct bio *dst, struct bio *src);
extern void bio_list_copy_data(struct bio *dst, struct bio *src);
extern void bio_free_pages(struct bio *bio);
void zero_fill_bio_iter(struct bio *bio, struct bvec_iter iter);
void bio_truncate(struct bio *bio, unsigned new_size);
void guard_bio_eod(struct bio *bio);

static inline void zero_fill_bio(struct bio *bio)
{
	zero_fill_bio_iter(bio, bio->bi_iter);
}

extern struct bio_vec *bvec_alloc(gfp_t, int, unsigned long *, mempool_t *);
extern void bvec_free(mempool_t *, struct bio_vec *, unsigned int);
extern unsigned int bvec_nr_vecs(unsigned short idx);
extern const char *bio_devname(struct bio *bio, char *buffer);

#define bio_set_dev(bio, bdev) 			\
do {						\
	if ((bio)->bi_disk != (bdev)->bd_disk)	\
		bio_clear_flag(bio, BIO_THROTTLED);\
	(bio)->bi_disk = (bdev)->bd_disk;	\
	(bio)->bi_partno = (bdev)->bd_partno;	\
	bio_associate_blkg(bio);		\
} while (0)

#define bio_copy_dev(dst, src)			\
do {						\
	(dst)->bi_disk = (src)->bi_disk;	\
	(dst)->bi_partno = (src)->bi_partno;	\
	bio_clone_blkg_association(dst, src);	\
} while (0)

#define bio_dev(bio) \
	disk_devt((bio)->bi_disk)

#ifdef CONFIG_BLK_CGROUP
void bio_associate_blkg(struct bio *bio);
void bio_associate_blkg_from_css(struct bio *bio,
				 struct cgroup_subsys_state *css);
void bio_clone_blkg_association(struct bio *dst, struct bio *src);
#else	/* CONFIG_BLK_CGROUP */
static inline void bio_associate_blkg(struct bio *bio) { }
static inline void bio_associate_blkg_from_css(struct bio *bio,
					       struct cgroup_subsys_state *css)
{ }
static inline void bio_clone_blkg_association(struct bio *dst,
					      struct bio *src) { }
#endif	/* CONFIG_BLK_CGROUP */

#ifdef CONFIG_HIGHMEM
/*
 * remember never ever reenable interrupts between a bvec_kmap_irq and
 * bvec_kunmap_irq!
 */
static inline char *bvec_kmap_irq(struct bio_vec *bvec, unsigned long *flags)
{
	unsigned long addr;

	/*
	 * might not be a highmem page, but the preempt/irq count
	 * balancing is a lot nicer this way
	 */
	local_irq_save(*flags);
	addr = (unsigned long) kmap_atomic(bvec->bv_page);

	BUG_ON(addr & ~PAGE_MASK);

	return (char *) addr + bvec->bv_offset;
}

static inline void bvec_kunmap_irq(char *buffer, unsigned long *flags)
{
	unsigned long ptr = (unsigned long) buffer & PAGE_MASK;

	kunmap_atomic((void *) ptr);
	local_irq_restore(*flags);
}

#else
static inline char *bvec_kmap_irq(struct bio_vec *bvec, unsigned long *flags)
{
	return page_address(bvec->bv_page) + bvec->bv_offset;
}

static inline void bvec_kunmap_irq(char *buffer, unsigned long *flags)
{
	*flags = 0;
}
#endif

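/*
 * A minimal bvec_kmap_irq()/bvec_kunmap_irq() pairing (illustrative
 * only). Interrupts stay disabled for the whole mapping, so keep the
 * critical section short:
 *
 *	unsigned long flags;
 *	char *buf = bvec_kmap_irq(bvec, &flags);
 *
 *	memset(buf, 0, bvec->bv_len);
 *	bvec_kunmap_irq(buf, &flags);
 */
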
/*
 * BIO list management for use by remapping drivers (e.g. DM or MD) and loop.
 *
 * A bio_list anchors a singly-linked list of bios chained through the bi_next
 * member of the bio.  The bio_list also caches the last list member to allow
 * fast access to the tail.
 */
struct bio_list {
	struct bio *head;
	struct bio *tail;
};

static inline int bio_list_empty(const struct bio_list *bl)
{
	return bl->head == NULL;
}

static inline void bio_list_init(struct bio_list *bl)
{
	bl->head = bl->tail = NULL;
}

#define BIO_EMPTY_LIST	{ NULL, NULL }

#define bio_list_for_each(bio, bl) \
	for (bio = (bl)->head; bio; bio = bio->bi_next)

static inline unsigned bio_list_size(const struct bio_list *bl)
{
	unsigned sz = 0;
	struct bio *bio;

	bio_list_for_each(bio, bl)
		sz++;

	return sz;
}

static inline void bio_list_add(struct bio_list *bl, struct bio *bio)
{
	bio->bi_next = NULL;

	if (bl->tail)
		bl->tail->bi_next = bio;
	else
		bl->head = bio;

	bl->tail = bio;
}

static inline void bio_list_add_head(struct bio_list *bl, struct bio *bio)
{
	bio->bi_next = bl->head;

	bl->head = bio;

	if (!bl->tail)
		bl->tail = bio;
}

static inline void bio_list_merge(struct bio_list *bl, struct bio_list *bl2)
{
	if (!bl2->head)
		return;

	if (bl->tail)
		bl->tail->bi_next = bl2->head;
	else
		bl->head = bl2->head;

	bl->tail = bl2->tail;
}

static inline void bio_list_merge_head(struct bio_list *bl,
				       struct bio_list *bl2)
{
	if (!bl2->head)
		return;

	if (bl->head)
		bl2->tail->bi_next = bl->head;
	else
		bl->tail = bl2->tail;

	bl->head = bl2->head;
}

static inline struct bio *bio_list_peek(struct bio_list *bl)
{
	return bl->head;
}

static inline struct bio *bio_list_pop(struct bio_list *bl)
{
	struct bio *bio = bl->head;

	if (bio) {
		bl->head = bl->head->bi_next;
		if (!bl->head)
			bl->tail = NULL;

		bio->bi_next = NULL;
	}

	return bio;
}

static inline struct bio *bio_list_get(struct bio_list *bl)
{
	struct bio *bio = bl->head;

	bl->head = bl->tail = NULL;

	return bio;
}

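/*
 * Typical bio_list use in a remapping driver (an illustrative sketch;
 * locking elided, and bio1/bio2 stand for previously built bios):
 * queue deferred bios, then drain them in order.
 *
 *	struct bio_list list;
 *	struct bio *bio;
 *
 *	bio_list_init(&list);
 *	bio_list_add(&list, bio1);
 *	bio_list_add(&list, bio2);
 *
 *	while ((bio = bio_list_pop(&list)))
 *		submit_bio(bio);
 */
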
/*
 * Increment chain count for the bio. Make sure the CHAIN flag update
 * is visible before the raised count.
 */
static inline void bio_inc_remaining(struct bio *bio)
{
	bio_set_flag(bio, BIO_CHAIN);
	smp_mb__before_atomic();
	atomic_inc(&bio->__bi_remaining);
}

/*
 * bio_set is used to allow other portions of the IO system to
 * allocate their own private memory pools for bio and iovec structures.
 * These memory pools in turn all allocate from the bio_slab
 * and the bvec_slabs[].
 */
#define BIO_POOL_SIZE 2

struct bio_set {
	struct kmem_cache *bio_slab;
	unsigned int front_pad;

	mempool_t bio_pool;
	mempool_t bvec_pool;
#if defined(CONFIG_BLK_DEV_INTEGRITY)
	mempool_t bio_integrity_pool;
	mempool_t bvec_integrity_pool;
#endif

	/*
	 * Deadlock avoidance for stacking block drivers: see comments in
	 * bio_alloc_bioset() for details
	 */
	spinlock_t		rescue_lock;
	struct bio_list		rescue_list;
	struct work_struct	rescue_work;
	struct workqueue_struct	*rescue_workqueue;
};

struct biovec_slab {
	int nr_vecs;
	char *name;
	struct kmem_cache *slab;
};

static inline bool bioset_initialized(struct bio_set *bs)
{
	return bs->bio_slab != NULL;
}

/*
 * A small number of entries is fine, this is not going to be
 * performance critical; basically we just need to survive.
 */
#define BIO_SPLIT_ENTRIES 2

#if defined(CONFIG_BLK_DEV_INTEGRITY)

#define bip_for_each_vec(bvl, bip, iter)				\
	for_each_bvec(bvl, (bip)->bip_vec, iter, (bip)->bip_iter)

#define bio_for_each_integrity_vec(_bvl, _bio, _iter)			\
	for_each_bio(_bio)						\
		bip_for_each_vec(_bvl, _bio->bi_integrity, _iter)

extern struct bio_integrity_payload *bio_integrity_alloc(struct bio *, gfp_t, unsigned int);
extern int bio_integrity_add_page(struct bio *, struct page *, unsigned int, unsigned int);
extern bool bio_integrity_prep(struct bio *);
extern void bio_integrity_advance(struct bio *, unsigned int);
extern void bio_integrity_trim(struct bio *);
extern int bio_integrity_clone(struct bio *, struct bio *, gfp_t);
extern int bioset_integrity_create(struct bio_set *, int);
extern void bioset_integrity_free(struct bio_set *);
extern void bio_integrity_init(void);

#else /* CONFIG_BLK_DEV_INTEGRITY */

static inline void *bio_integrity(struct bio *bio)
{
	return NULL;
}

static inline int bioset_integrity_create(struct bio_set *bs, int pool_size)
{
	return 0;
}

static inline void bioset_integrity_free(struct bio_set *bs)
{
	return;
}

static inline bool bio_integrity_prep(struct bio *bio)
{
	return true;
}

static inline int bio_integrity_clone(struct bio *bio, struct bio *bio_src,
				      gfp_t gfp_mask)
{
	return 0;
}

static inline void bio_integrity_advance(struct bio *bio,
					 unsigned int bytes_done)
{
	return;
}

static inline void bio_integrity_trim(struct bio *bio)
{
	return;
}

static inline void bio_integrity_init(void)
{
	return;
}

static inline bool bio_integrity_flagged(struct bio *bio, enum bip_flags flag)
{
	return false;
}

static inline void *bio_integrity_alloc(struct bio *bio, gfp_t gfp,
					unsigned int nr)
{
	return ERR_PTR(-EINVAL);
}

static inline int bio_integrity_add_page(struct bio *bio, struct page *page,
					 unsigned int len, unsigned int offset)
{
	return 0;
}

#endif /* CONFIG_BLK_DEV_INTEGRITY */

/*
 * Mark a bio as polled. Note that for async polled IO, the caller must
 * expect -EWOULDBLOCK if we cannot allocate a request (or other resources).
 * We cannot block waiting for requests on polled IO, as those completions
 * must be found by the caller. This is different from IRQ-driven IO, where
 * it's safe to wait for IO to complete.
 */
static inline void bio_set_polled(struct bio *bio, struct kiocb *kiocb)
{
	bio->bi_opf |= REQ_HIPRI;
	if (!is_sync_kiocb(kiocb))
		bio->bi_opf |= REQ_NOWAIT;
}

#endif /* __LINUX_BIO_H */