/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2001 Jens Axboe <axboe@suse.de>
 */
#ifndef __LINUX_BIO_H
#define __LINUX_BIO_H

#include <linux/highmem.h>
#include <linux/mempool.h>
#include <linux/ioprio.h>

#ifdef CONFIG_BLOCK
/* struct bio, bio_vec and BIO_* flags are defined in blk_types.h */
#include <linux/blk_types.h>

#define BIO_DEBUG

#ifdef BIO_DEBUG
#define BIO_BUG_ON	BUG_ON
#else
#define BIO_BUG_ON
#endif

#define BIO_MAX_PAGES		256

#define bio_prio(bio)			(bio)->bi_ioprio
#define bio_set_prio(bio, prio)		((bio)->bi_ioprio = prio)

#define bio_iter_iovec(bio, iter)				\
	bvec_iter_bvec((bio)->bi_io_vec, (iter))

#define bio_iter_page(bio, iter)				\
	bvec_iter_page((bio)->bi_io_vec, (iter))
#define bio_iter_len(bio, iter)					\
	bvec_iter_len((bio)->bi_io_vec, (iter))
#define bio_iter_offset(bio, iter)				\
	bvec_iter_offset((bio)->bi_io_vec, (iter))

#define bio_page(bio)		bio_iter_page((bio), (bio)->bi_iter)
#define bio_offset(bio)		bio_iter_offset((bio), (bio)->bi_iter)
#define bio_iovec(bio)		bio_iter_iovec((bio), (bio)->bi_iter)

#define bvec_iter_sectors(iter)	((iter).bi_size >> 9)
#define bvec_iter_end_sector(iter) ((iter).bi_sector + bvec_iter_sectors((iter)))

#define bio_sectors(bio)	bvec_iter_sectors((bio)->bi_iter)
#define bio_end_sector(bio)	bvec_iter_end_sector((bio)->bi_iter)

/*
 * Return the data direction, READ or WRITE.
 */
#define bio_data_dir(bio) \
	(op_is_write(bio_op(bio)) ? WRITE : READ)

/*
 * Check whether this bio carries any data or not. A NULL bio is allowed.
 */
static inline bool bio_has_data(struct bio *bio)
{
	if (bio &&
	    bio->bi_iter.bi_size &&
	    bio_op(bio) != REQ_OP_DISCARD &&
	    bio_op(bio) != REQ_OP_SECURE_ERASE &&
	    bio_op(bio) != REQ_OP_WRITE_ZEROES)
		return true;

	return false;
}

static inline bool bio_no_advance_iter(struct bio *bio)
{
	return bio_op(bio) == REQ_OP_DISCARD ||
	       bio_op(bio) == REQ_OP_SECURE_ERASE ||
	       bio_op(bio) == REQ_OP_WRITE_SAME ||
	       bio_op(bio) == REQ_OP_WRITE_ZEROES;
}

static inline bool bio_mergeable(struct bio *bio)
{
	if (bio->bi_opf & REQ_NOMERGE_FLAGS)
		return false;

	return true;
}

static inline unsigned int bio_cur_bytes(struct bio *bio)
{
	if (bio_has_data(bio))
		return bio_iovec(bio).bv_len;
	else /* dataless requests such as discard */
		return bio->bi_iter.bi_size;
}

static inline void *bio_data(struct bio *bio)
{
	if (bio_has_data(bio))
		return page_address(bio_page(bio)) + bio_offset(bio);

	return NULL;
}

/**
 * bio_full - check if the bio is full
 * @bio:	bio to check
 * @len:	length of one segment to be added
 *
 * Return true if @bio is full and one segment with @len bytes can't be
 * added to the bio, otherwise return false.
 */
static inline bool bio_full(struct bio *bio, unsigned len)
{
	if (bio->bi_vcnt >= bio->bi_max_vecs)
		return true;

	if (bio->bi_iter.bi_size > UINT_MAX - len)
		return true;

	return false;
}

static inline bool bio_next_segment(const struct bio *bio,
				    struct bvec_iter_all *iter)
{
	if (iter->idx >= bio->bi_vcnt)
		return false;

	bvec_advance(&bio->bi_io_vec[iter->idx], iter);
	return true;
}

/*
 * drivers should _never_ use the all version - the bio may have been split
 * before it got to the driver and the driver won't own all of it
 */
#define bio_for_each_segment_all(bvl, bio, iter) \
	for (bvl = bvec_init_iter_all(&iter); bio_next_segment((bio), &iter); )
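/*
 * Illustrative sketch (not part of this header's API contract): the "all"
 * iterator is for code that owns the bio, e.g. walking every page on the
 * completion side. The helper name below is hypothetical.
 *
 *	static void example_mark_pages_dirty(struct bio *bio)
 *	{
 *		struct bio_vec *bvec;
 *		struct bvec_iter_all iter_all;
 *
 *		bio_for_each_segment_all(bvec, bio, iter_all)
 *			set_page_dirty_lock(bvec->bv_page);
 *	}
 */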

static inline void bio_advance_iter(struct bio *bio, struct bvec_iter *iter,
				    unsigned bytes)
{
	iter->bi_sector += bytes >> 9;

	if (bio_no_advance_iter(bio))
		iter->bi_size -= bytes;
	else
		bvec_iter_advance(bio->bi_io_vec, iter, bytes);
		/* TODO: It is reasonable to complete bio with error here. */
}

#define __bio_for_each_segment(bvl, bio, iter, start)			\
	for (iter = (start);						\
	     (iter).bi_size &&						\
		((bvl = bio_iter_iovec((bio), (iter))), 1);		\
	     bio_advance_iter((bio), &(iter), (bvl).bv_len))

#define bio_for_each_segment(bvl, bio, iter)				\
	__bio_for_each_segment(bvl, bio, iter, (bio)->bi_iter)
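/*
 * Illustrative sketch (not part of this header's API contract):
 * bio_for_each_segment() walks single-page segments under the bio's own
 * iterator, so it is safe on a bio you merely received. The checksum helper
 * below is hypothetical.
 *
 *	static u32 example_csum_bio(struct bio *bio)
 *	{
 *		struct bio_vec bvec;
 *		struct bvec_iter iter;
 *		u32 csum = 0;
 *
 *		bio_for_each_segment(bvec, bio, iter) {
 *			void *p = kmap_atomic(bvec.bv_page);
 *
 *			csum = crc32(csum, p + bvec.bv_offset, bvec.bv_len);
 *			kunmap_atomic(p);
 *		}
 *		return csum;
 *	}
 */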

#define __bio_for_each_bvec(bvl, bio, iter, start)			\
	for (iter = (start);						\
	     (iter).bi_size &&						\
		((bvl = mp_bvec_iter_bvec((bio)->bi_io_vec, (iter))), 1); \
	     bio_advance_iter((bio), &(iter), (bvl).bv_len))

/* iterate over multi-page bvec */
#define bio_for_each_bvec(bvl, bio, iter)				\
	__bio_for_each_bvec(bvl, bio, iter, (bio)->bi_iter)
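/*
 * Illustrative sketch (not part of this header's API contract): unlike the
 * segment iterator above, bio_for_each_bvec() yields multi-page bvecs, which
 * is typically what DMA mapping paths want. The helper is hypothetical.
 *
 *	static unsigned example_count_dma_segs(struct bio *bio)
 *	{
 *		struct bio_vec bv;
 *		struct bvec_iter iter;
 *		unsigned segs = 0;
 *
 *		bio_for_each_bvec(bv, bio, iter)
 *			segs++;		// each bv may span several pages
 *		return segs;
 *	}
 */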

#define bio_iter_last(bvec, iter) ((iter).bi_size == (bvec).bv_len)

static inline unsigned bio_segments(struct bio *bio)
{
	unsigned segs = 0;
	struct bio_vec bv;
	struct bvec_iter iter;

	/*
	 * We special case discard/write same/write zeroes, because they
	 * interpret bi_size differently:
	 */

	switch (bio_op(bio)) {
	case REQ_OP_DISCARD:
	case REQ_OP_SECURE_ERASE:
	case REQ_OP_WRITE_ZEROES:
		return 0;
	case REQ_OP_WRITE_SAME:
		return 1;
	default:
		break;
	}

	bio_for_each_segment(bv, bio, iter)
		segs++;

	return segs;
}

/*
 * Get a reference to a bio, so it won't disappear. The intended use is
 * something like:
 *
 *	bio_get(bio);
 *	submit_bio(bio);
 *	if (bio->bi_flags ...)
 *		do_something
 *	bio_put(bio);
 *
 * Without the bio_get(), the I/O could complete before submit_bio returns,
 * and the bio would already have been freed by the time the
 * if (bio->bi_flags ...) check runs.
 */
static inline void bio_get(struct bio *bio)
{
	bio->bi_flags |= (1 << BIO_REFFED);
	smp_mb__before_atomic();
	atomic_inc(&bio->__bi_cnt);
}

static inline void bio_cnt_set(struct bio *bio, unsigned int count)
{
	if (count != 1) {
		bio->bi_flags |= (1 << BIO_REFFED);
		smp_mb();
	}
	atomic_set(&bio->__bi_cnt, count);
}

static inline bool bio_flagged(struct bio *bio, unsigned int bit)
{
	return (bio->bi_flags & (1U << bit)) != 0;
}

static inline void bio_set_flag(struct bio *bio, unsigned int bit)
{
	bio->bi_flags |= (1U << bit);
}

static inline void bio_clear_flag(struct bio *bio, unsigned int bit)
{
	bio->bi_flags &= ~(1U << bit);
}

static inline void bio_get_first_bvec(struct bio *bio, struct bio_vec *bv)
{
	*bv = mp_bvec_iter_bvec(bio->bi_io_vec, bio->bi_iter);
}

static inline void bio_get_last_bvec(struct bio *bio, struct bio_vec *bv)
{
	struct bvec_iter iter = bio->bi_iter;
	int idx;

	bio_get_first_bvec(bio, bv);
	if (bv->bv_len == bio->bi_iter.bi_size)
		return;		/* this bio only has a single bvec */

	bio_advance_iter(bio, &iter, iter.bi_size);

	if (!iter.bi_bvec_done)
		idx = iter.bi_idx - 1;
	else	/* in the middle of bvec */
		idx = iter.bi_idx;

	*bv = bio->bi_io_vec[idx];

	/*
	 * iter.bi_bvec_done records actual length of the last bvec
	 * if this bio ends in the middle of one io vector
	 */
	if (iter.bi_bvec_done)
		bv->bv_len = iter.bi_bvec_done;
}

static inline struct bio_vec *bio_first_bvec_all(struct bio *bio)
{
	WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED));
	return bio->bi_io_vec;
}

static inline struct page *bio_first_page_all(struct bio *bio)
{
	return bio_first_bvec_all(bio)->bv_page;
}

static inline struct bio_vec *bio_last_bvec_all(struct bio *bio)
{
	WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED));
	return &bio->bi_io_vec[bio->bi_vcnt - 1];
}

enum bip_flags {
	BIP_BLOCK_INTEGRITY	= 1 << 0, /* block layer owns integrity data */
	BIP_MAPPED_INTEGRITY	= 1 << 1, /* ref tag has been remapped */
	BIP_CTRL_NOCHECK	= 1 << 2, /* disable HBA integrity checking */
	BIP_DISK_NOCHECK	= 1 << 3, /* disable disk integrity checking */
	BIP_IP_CHECKSUM		= 1 << 4, /* IP checksum */
};

/*
 * bio integrity payload
 */
struct bio_integrity_payload {
	struct bio		*bip_bio;	/* parent bio */

	struct bvec_iter	bip_iter;

	unsigned short		bip_slab;	/* slab the bip came from */
	unsigned short		bip_vcnt;	/* # of integrity bio_vecs */
	unsigned short		bip_max_vcnt;	/* integrity bio_vec slots */
	unsigned short		bip_flags;	/* control flags */

	struct bvec_iter	bio_iter;	/* for rewinding parent bio */

	struct work_struct	bip_work;	/* I/O completion */

	struct bio_vec		*bip_vec;
	struct bio_vec		bip_inline_vecs[0]; /* embedded bvec array */
};

#if defined(CONFIG_BLK_DEV_INTEGRITY)

static inline struct bio_integrity_payload *bio_integrity(struct bio *bio)
{
	if (bio->bi_opf & REQ_INTEGRITY)
		return bio->bi_integrity;

	return NULL;
}

static inline bool bio_integrity_flagged(struct bio *bio, enum bip_flags flag)
{
	struct bio_integrity_payload *bip = bio_integrity(bio);

	if (bip)
		return bip->bip_flags & flag;

	return false;
}

static inline sector_t bip_get_seed(struct bio_integrity_payload *bip)
{
	return bip->bip_iter.bi_sector;
}

static inline void bip_set_seed(struct bio_integrity_payload *bip,
				sector_t seed)
{
	bip->bip_iter.bi_sector = seed;
}

#endif /* CONFIG_BLK_DEV_INTEGRITY */

extern void bio_trim(struct bio *bio, int offset, int size);
extern struct bio *bio_split(struct bio *bio, int sectors,
			     gfp_t gfp, struct bio_set *bs);

/**
 * bio_next_split - get next @sectors from a bio, splitting if necessary
 * @bio:	bio to split
 * @sectors:	number of sectors to split from the front of @bio
 * @gfp:	gfp mask
 * @bs:		bio set to allocate from
 *
 * Returns a bio representing the next @sectors of @bio - if the bio is smaller
 * than @sectors, returns the original bio unchanged.
 */
static inline struct bio *bio_next_split(struct bio *bio, int sectors,
					 gfp_t gfp, struct bio_set *bs)
{
	if (sectors >= bio_sectors(bio))
		return bio;

	return bio_split(bio, sectors, gfp, bs);
}
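/*
 * Illustrative sketch (not part of this header's API contract): a stacking
 * driver carving a large bio into fixed-size chunks. The chunk size and the
 * bio_set name are hypothetical.
 *
 *	while (bio_sectors(bio) > EXAMPLE_CHUNK_SECTORS) {
 *		struct bio *split = bio_next_split(bio, EXAMPLE_CHUNK_SECTORS,
 *						   GFP_NOIO, &example_bio_set);
 *
 *		bio_chain(split, bio);	// parent completes after all splits
 *		submit_bio(split);
 *	}
 *	submit_bio(bio);		// the remainder
 */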

enum {
	BIOSET_NEED_BVECS = BIT(0),
	BIOSET_NEED_RESCUER = BIT(1),
};
extern int bioset_init(struct bio_set *, unsigned int, unsigned int, int flags);
extern void bioset_exit(struct bio_set *);
extern int biovec_init_pool(mempool_t *pool, int pool_entries);
extern int bioset_init_from_src(struct bio_set *bs, struct bio_set *src);

extern struct bio *bio_alloc_bioset(gfp_t, unsigned int, struct bio_set *);
extern void bio_put(struct bio *);

extern void __bio_clone_fast(struct bio *, struct bio *);
extern struct bio *bio_clone_fast(struct bio *, gfp_t, struct bio_set *);

extern struct bio_set fs_bio_set;

static inline struct bio *bio_alloc(gfp_t gfp_mask, unsigned int nr_iovecs)
{
	return bio_alloc_bioset(gfp_mask, nr_iovecs, &fs_bio_set);
}

static inline struct bio *bio_kmalloc(gfp_t gfp_mask, unsigned int nr_iovecs)
{
	return bio_alloc_bioset(gfp_mask, nr_iovecs, NULL);
}

extern blk_qc_t submit_bio(struct bio *);

extern void bio_endio(struct bio *);

static inline void bio_io_error(struct bio *bio)
{
	bio->bi_status = BLK_STS_IOERR;
	bio_endio(bio);
}

static inline void bio_wouldblock_error(struct bio *bio)
{
	bio->bi_status = BLK_STS_AGAIN;
	bio_endio(bio);
}

struct request_queue;

extern int submit_bio_wait(struct bio *bio);
extern void bio_advance(struct bio *, unsigned);

extern void bio_init(struct bio *bio, struct bio_vec *table,
		     unsigned short max_vecs);
extern void bio_uninit(struct bio *);
extern void bio_reset(struct bio *);
void bio_chain(struct bio *, struct bio *);

extern int bio_add_page(struct bio *, struct page *, unsigned int, unsigned int);
extern int bio_add_pc_page(struct request_queue *, struct bio *, struct page *,
			   unsigned int, unsigned int);
bool __bio_try_merge_page(struct bio *bio, struct page *page,
		unsigned int len, unsigned int off, bool *same_page);
void __bio_add_page(struct bio *bio, struct page *page,
		unsigned int len, unsigned int off);
int bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter);
void bio_release_pages(struct bio *bio, bool mark_dirty);
struct rq_map_data;
extern struct bio *bio_map_user_iov(struct request_queue *,
				    struct iov_iter *, gfp_t);
extern void bio_unmap_user(struct bio *);
extern struct bio *bio_map_kern(struct request_queue *, void *, unsigned int,
				gfp_t);
extern struct bio *bio_copy_kern(struct request_queue *, void *, unsigned int,
				 gfp_t, int);
extern void bio_set_pages_dirty(struct bio *bio);
extern void bio_check_pages_dirty(struct bio *bio);
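/*
 * Illustrative sketch (not part of this header's API contract): synchronously
 * reading one page. The bdev, sector and page are hypothetical and error
 * handling is elided.
 *
 *	struct bio *bio = bio_alloc(GFP_KERNEL, 1);
 *
 *	bio_set_dev(bio, bdev);
 *	bio->bi_iter.bi_sector = sector;
 *	bio->bi_opf = REQ_OP_READ;
 *	bio_add_page(bio, page, PAGE_SIZE, 0);
 *	if (submit_bio_wait(bio))
 *		pr_warn("example: read failed\n");
 *	bio_put(bio);
 */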

void generic_start_io_acct(struct request_queue *q, int op,
				unsigned long sectors, struct hd_struct *part);
void generic_end_io_acct(struct request_queue *q, int op,
				struct hd_struct *part,
				unsigned long start_time);

extern void bio_copy_data_iter(struct bio *dst, struct bvec_iter *dst_iter,
			       struct bio *src, struct bvec_iter *src_iter);
extern void bio_copy_data(struct bio *dst, struct bio *src);
extern void bio_list_copy_data(struct bio *dst, struct bio *src);
extern void bio_free_pages(struct bio *bio);

extern struct bio *bio_copy_user_iov(struct request_queue *,
				     struct rq_map_data *,
				     struct iov_iter *,
				     gfp_t);
extern int bio_uncopy_user(struct bio *);
void zero_fill_bio_iter(struct bio *bio, struct bvec_iter iter);
void bio_truncate(struct bio *bio, unsigned new_size);

static inline void zero_fill_bio(struct bio *bio)
{
	zero_fill_bio_iter(bio, bio->bi_iter);
}

extern struct bio_vec *bvec_alloc(gfp_t, int, unsigned long *, mempool_t *);
extern void bvec_free(mempool_t *, struct bio_vec *, unsigned int);
extern unsigned int bvec_nr_vecs(unsigned short idx);
extern const char *bio_devname(struct bio *bio, char *buffer);

#define bio_set_dev(bio, bdev)			\
do {						\
	if ((bio)->bi_disk != (bdev)->bd_disk)	\
		bio_clear_flag(bio, BIO_THROTTLED);\
	(bio)->bi_disk = (bdev)->bd_disk;	\
	(bio)->bi_partno = (bdev)->bd_partno;	\
	bio_associate_blkg(bio);		\
} while (0)

#define bio_copy_dev(dst, src)			\
do {						\
	(dst)->bi_disk = (src)->bi_disk;	\
	(dst)->bi_partno = (src)->bi_partno;	\
	bio_clone_blkg_association(dst, src);	\
} while (0)

#define bio_dev(bio) \
	disk_devt((bio)->bi_disk)

#if defined(CONFIG_MEMCG) && defined(CONFIG_BLK_CGROUP)
void bio_associate_blkg_from_page(struct bio *bio, struct page *page);
#else
static inline void bio_associate_blkg_from_page(struct bio *bio,
						struct page *page) { }
#endif

#ifdef CONFIG_BLK_CGROUP
void bio_disassociate_blkg(struct bio *bio);
void bio_associate_blkg(struct bio *bio);
void bio_associate_blkg_from_css(struct bio *bio,
				 struct cgroup_subsys_state *css);
void bio_clone_blkg_association(struct bio *dst, struct bio *src);
#else	/* CONFIG_BLK_CGROUP */
static inline void bio_disassociate_blkg(struct bio *bio) { }
static inline void bio_associate_blkg(struct bio *bio) { }
static inline void bio_associate_blkg_from_css(struct bio *bio,
					       struct cgroup_subsys_state *css)
{ }
static inline void bio_clone_blkg_association(struct bio *dst,
					      struct bio *src) { }
#endif	/* CONFIG_BLK_CGROUP */

#ifdef CONFIG_HIGHMEM
/*
 * remember never ever reenable interrupts between a bvec_kmap_irq and
 * bvec_kunmap_irq!
 */
static inline char *bvec_kmap_irq(struct bio_vec *bvec, unsigned long *flags)
{
	unsigned long addr;

	/*
	 * might not be a highmem page, but the preempt/irq count
	 * balancing is a lot nicer this way
	 */
	local_irq_save(*flags);
	addr = (unsigned long) kmap_atomic(bvec->bv_page);

	BUG_ON(addr & ~PAGE_MASK);

	return (char *) addr + bvec->bv_offset;
}

static inline void bvec_kunmap_irq(char *buffer, unsigned long *flags)
{
	unsigned long ptr = (unsigned long) buffer & PAGE_MASK;

	kunmap_atomic((void *) ptr);
	local_irq_restore(*flags);
}

#else
static inline char *bvec_kmap_irq(struct bio_vec *bvec, unsigned long *flags)
{
	return page_address(bvec->bv_page) + bvec->bv_offset;
}

static inline void bvec_kunmap_irq(char *buffer, unsigned long *flags)
{
	*flags = 0;
}
#endif
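/*
 * Illustrative sketch (not part of this header's API contract):
 * bvec_kmap_irq() and bvec_kunmap_irq() must be strictly paired, with
 * interrupts kept disabled in between. The memset is just an example payload.
 *
 *	unsigned long flags;
 *	char *buf = bvec_kmap_irq(bvec, &flags);
 *
 *	memset(buf, 0, bvec->bv_len);
 *	bvec_kunmap_irq(buf, &flags);
 */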

/*
 * BIO list management for use by remapping drivers (e.g. DM or MD) and loop.
 *
 * A bio_list anchors a singly-linked list of bios chained through the bi_next
 * member of the bio.  The bio_list also caches the last list member to allow
 * fast access to the tail.
 */
struct bio_list {
	struct bio *head;
	struct bio *tail;
};

static inline int bio_list_empty(const struct bio_list *bl)
{
	return bl->head == NULL;
}

static inline void bio_list_init(struct bio_list *bl)
{
	bl->head = bl->tail = NULL;
}

#define BIO_EMPTY_LIST	{ NULL, NULL }
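/*
 * Illustrative sketch (not part of this header's API contract): a driver
 * deferring bios to a worker, which drains them in FIFO order. The list and
 * the handler name are hypothetical.
 *
 *	struct bio_list deferred = BIO_EMPTY_LIST;
 *
 *	bio_list_add(&deferred, bio);		// queue at the tail
 *	...
 *	while ((bio = bio_list_pop(&deferred)))
 *		example_handle_bio(bio);	// drain from the head
 */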

#define bio_list_for_each(bio, bl) \
	for (bio = (bl)->head; bio; bio = bio->bi_next)

static inline unsigned bio_list_size(const struct bio_list *bl)
{
	unsigned sz = 0;
	struct bio *bio;

	bio_list_for_each(bio, bl)
		sz++;

	return sz;
}

static inline void bio_list_add(struct bio_list *bl, struct bio *bio)
{
	bio->bi_next = NULL;

	if (bl->tail)
		bl->tail->bi_next = bio;
	else
		bl->head = bio;

	bl->tail = bio;
}

static inline void bio_list_add_head(struct bio_list *bl, struct bio *bio)
{
	bio->bi_next = bl->head;

	bl->head = bio;

	if (!bl->tail)
		bl->tail = bio;
}

static inline void bio_list_merge(struct bio_list *bl, struct bio_list *bl2)
{
	if (!bl2->head)
		return;

	if (bl->tail)
		bl->tail->bi_next = bl2->head;
	else
		bl->head = bl2->head;

	bl->tail = bl2->tail;
}

static inline void bio_list_merge_head(struct bio_list *bl,
				       struct bio_list *bl2)
{
	if (!bl2->head)
		return;

	if (bl->head)
		bl2->tail->bi_next = bl->head;
	else
		bl->tail = bl2->tail;

	bl->head = bl2->head;
}

static inline struct bio *bio_list_peek(struct bio_list *bl)
{
	return bl->head;
}

static inline struct bio *bio_list_pop(struct bio_list *bl)
{
	struct bio *bio = bl->head;

	if (bio) {
		bl->head = bl->head->bi_next;
		if (!bl->head)
			bl->tail = NULL;

		bio->bi_next = NULL;
	}

	return bio;
}

static inline struct bio *bio_list_get(struct bio_list *bl)
{
	struct bio *bio = bl->head;

	bl->head = bl->tail = NULL;

	return bio;
}

/*
 * Increment chain count for the bio. Make sure the CHAIN flag update
 * is visible before the raised count.
 */
static inline void bio_inc_remaining(struct bio *bio)
{
	bio_set_flag(bio, BIO_CHAIN);
	smp_mb__before_atomic();
	atomic_inc(&bio->__bi_remaining);
}
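/*
 * Illustrative sketch (not part of this header's API contract): a driver
 * that completes a bio in two parts takes an extra remaining reference so
 * the first bio_endio() does not finish the bio. The two-phase handlers are
 * hypothetical.
 *
 *	bio_inc_remaining(bio);
 *	example_phase_one(bio);		// ends with bio_endio(bio)
 *	example_phase_two(bio);		// ends with the final bio_endio(bio)
 */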

/*
 * bio_set is used to allow other portions of the IO system to
 * allocate their own private memory pools for bio and iovec structures.
 * These memory pools in turn all allocate from the bio_slab
 * and the bvec_slabs[].
 */
#define BIO_POOL_SIZE 2

struct bio_set {
	struct kmem_cache *bio_slab;
	unsigned int front_pad;

	mempool_t bio_pool;
	mempool_t bvec_pool;
#if defined(CONFIG_BLK_DEV_INTEGRITY)
	mempool_t bio_integrity_pool;
	mempool_t bvec_integrity_pool;
#endif

	/*
	 * Deadlock avoidance for stacking block drivers: see comments in
	 * bio_alloc_bioset() for details
	 */
	spinlock_t		rescue_lock;
	struct bio_list		rescue_list;
	struct work_struct	rescue_work;
	struct workqueue_struct	*rescue_workqueue;
};

struct biovec_slab {
	int nr_vecs;
	char *name;
	struct kmem_cache *slab;
};

static inline bool bioset_initialized(struct bio_set *bs)
{
	return bs->bio_slab != NULL;
}

/*
 * A small number of entries is fine; this is not performance critical,
 * we basically just need to survive.
 */
#define BIO_SPLIT_ENTRIES 2

#if defined(CONFIG_BLK_DEV_INTEGRITY)

#define bip_for_each_vec(bvl, bip, iter)				\
	for_each_bvec(bvl, (bip)->bip_vec, iter, (bip)->bip_iter)

#define bio_for_each_integrity_vec(_bvl, _bio, _iter)			\
	for_each_bio(_bio)						\
		bip_for_each_vec(_bvl, _bio->bi_integrity, _iter)

extern struct bio_integrity_payload *bio_integrity_alloc(struct bio *, gfp_t, unsigned int);
extern int bio_integrity_add_page(struct bio *, struct page *, unsigned int, unsigned int);
extern bool bio_integrity_prep(struct bio *);
extern void bio_integrity_advance(struct bio *, unsigned int);
extern void bio_integrity_trim(struct bio *);
extern int bio_integrity_clone(struct bio *, struct bio *, gfp_t);
extern int bioset_integrity_create(struct bio_set *, int);
extern void bioset_integrity_free(struct bio_set *);
extern void bio_integrity_init(void);

#else /* CONFIG_BLK_DEV_INTEGRITY */

static inline void *bio_integrity(struct bio *bio)
{
	return NULL;
}

static inline int bioset_integrity_create(struct bio_set *bs, int pool_size)
{
	return 0;
}

static inline void bioset_integrity_free(struct bio_set *bs)
{
	return;
}

static inline bool bio_integrity_prep(struct bio *bio)
{
	return true;
}

static inline int bio_integrity_clone(struct bio *bio, struct bio *bio_src,
				      gfp_t gfp_mask)
{
	return 0;
}

static inline void bio_integrity_advance(struct bio *bio,
					 unsigned int bytes_done)
{
	return;
}

static inline void bio_integrity_trim(struct bio *bio)
{
	return;
}

static inline void bio_integrity_init(void)
{
	return;
}

static inline bool bio_integrity_flagged(struct bio *bio, enum bip_flags flag)
{
	return false;
}

static inline void *bio_integrity_alloc(struct bio *bio, gfp_t gfp,
					unsigned int nr)
{
	return ERR_PTR(-EINVAL);
}

static inline int bio_integrity_add_page(struct bio *bio, struct page *page,
					 unsigned int len, unsigned int offset)
{
	return 0;
}

#endif /* CONFIG_BLK_DEV_INTEGRITY */

/*
 * Mark a bio as polled. Note that for async polled IO, the caller must
 * expect -EWOULDBLOCK if we cannot allocate a request (or other resources).
 * We cannot block waiting for requests on polled IO, as those completions
 * must be found by the caller. This is different than IRQ driven IO, where
 * it's safe to wait for IO to complete.
 */
static inline void bio_set_polled(struct bio *bio, struct kiocb *kiocb)
{
	bio->bi_opf |= REQ_HIPRI;
	if (!is_sync_kiocb(kiocb))
		bio->bi_opf |= REQ_NOWAIT;
}
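/*
 * Illustrative sketch (not part of this header's API contract): a direct IO
 * path opting into polled completion when the iocb asks for it, then stashing
 * the cookie for later polling. The surrounding submission code is
 * hypothetical.
 *
 *	if (iocb->ki_flags & IOCB_HIPRI)
 *		bio_set_polled(bio, iocb);
 *	qc = submit_bio(bio);
 *	WRITE_ONCE(iocb->ki_cookie, qc);
 */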

#endif /* CONFIG_BLOCK */
#endif /* __LINUX_BIO_H */