/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BLK_CGROUP_H
#define _BLK_CGROUP_H
/*
 * Common Block IO controller cgroup interface
 *
 * Based on ideas and code from CFQ, CFS and BFQ:
 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
 *
 * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
 *                    Paolo Valente <paolo.valente@unimore.it>
 *
 * Copyright (C) 2009 Vivek Goyal <vgoyal@redhat.com>
 *                    Nauman Rafique <nauman@google.com>
 */

#include <linux/cgroup.h>
#include <linux/percpu.h>
#include <linux/percpu_counter.h>
#include <linux/u64_stats_sync.h>
#include <linux/seq_file.h>
#include <linux/radix-tree.h>
#include <linux/blkdev.h>
#include <linux/atomic.h>
#include <linux/kthread.h>
#include <linux/fs.h>
#include <linux/blk-mq.h>

/* percpu_counter batch for blkg_[rw]stats, per-cpu drift doesn't matter */
#define BLKG_STAT_CPU_BATCH	(INT_MAX / 2)

/* Max limits for throttle policy */
#define THROTL_IOPS_MAX		UINT_MAX

#ifdef CONFIG_BLK_CGROUP

enum blkg_iostat_type {
	BLKG_IOSTAT_READ,
	BLKG_IOSTAT_WRITE,
	BLKG_IOSTAT_DISCARD,

	BLKG_IOSTAT_NR,
};

struct blkcg_gq;

struct blkcg {
	struct cgroup_subsys_state	css;
	spinlock_t			lock;
	refcount_t			online_pin;

	struct radix_tree_root		blkg_tree;
	struct blkcg_gq	__rcu		*blkg_hint;
	struct hlist_head		blkg_list;

	struct blkcg_policy_data	*cpd[BLKCG_MAX_POLS];

	struct list_head		all_blkcgs_node;
#ifdef CONFIG_CGROUP_WRITEBACK
	struct list_head		cgwb_list;
#endif
};

struct blkg_iostat {
	u64				bytes[BLKG_IOSTAT_NR];
	u64				ios[BLKG_IOSTAT_NR];
};

struct blkg_iostat_set {
	struct u64_stats_sync		sync;
	struct blkg_iostat		cur;
	struct blkg_iostat		last;
};

/*
 * A blkcg_gq (blkg) is an association between a block cgroup (blkcg) and a
 * request_queue (q).  This is used by blkcg policies which need to track
 * information per blkcg - q pair.
 *
 * There can be multiple active blkcg policies and each blkg:policy pair is
 * represented by a blkg_policy_data which is allocated and freed by each
 * policy's pd_alloc/free_fn() methods.  A policy can allocate a private data
 * area by allocating a larger data structure which embeds blkg_policy_data
 * at the beginning.
 */
struct blkg_policy_data {
	/* the blkg and policy id this per-policy data belongs to */
	struct blkcg_gq			*blkg;
	int				plid;
};

/*
 * Policies that need to keep per-blkcg data which is independent of any
 * request_queue associated with it should implement cpd_alloc/free_fn()
 * methods.  A policy can allocate a private data area by allocating a larger
 * data structure which embeds blkcg_policy_data at the beginning.
 * cpd_init() is invoked to let each policy handle per-blkcg data.
 */
struct blkcg_policy_data {
	/* the blkcg and policy id this per-policy data belongs to */
	struct blkcg			*blkcg;
	int				plid;
};

/* association between a blk cgroup and a request queue */
struct blkcg_gq {
	/* Pointer to the associated request_queue */
	struct request_queue		*q;
	struct list_head		q_node;
	struct hlist_node		blkcg_node;
	struct blkcg			*blkcg;

	/* all non-root blkcg_gq's are guaranteed to have access to parent */
	struct blkcg_gq			*parent;

	/* reference count */
	struct percpu_ref		refcnt;

	/* is this blkg online? protected by both blkcg and q locks */
	bool				online;

	struct blkg_iostat_set __percpu	*iostat_cpu;
	struct blkg_iostat_set		iostat;

	struct blkg_policy_data		*pd[BLKCG_MAX_POLS];

	spinlock_t			async_bio_lock;
	struct bio_list			async_bios;
	struct work_struct		async_bio_work;

	atomic_t			use_delay;
	atomic64_t			delay_nsec;
	atomic64_t			delay_start;
	u64				last_delay;
	int				last_use;

	struct rcu_head			rcu_head;
};

typedef struct blkcg_policy_data *(blkcg_pol_alloc_cpd_fn)(gfp_t gfp);
typedef void (blkcg_pol_init_cpd_fn)(struct blkcg_policy_data *cpd);
typedef void (blkcg_pol_free_cpd_fn)(struct blkcg_policy_data *cpd);
typedef void (blkcg_pol_bind_cpd_fn)(struct blkcg_policy_data *cpd);
typedef struct blkg_policy_data *(blkcg_pol_alloc_pd_fn)(gfp_t gfp,
				struct request_queue *q, struct blkcg *blkcg);
typedef void (blkcg_pol_init_pd_fn)(struct blkg_policy_data *pd);
typedef void (blkcg_pol_online_pd_fn)(struct blkg_policy_data *pd);
typedef void (blkcg_pol_offline_pd_fn)(struct blkg_policy_data *pd);
typedef void (blkcg_pol_free_pd_fn)(struct blkg_policy_data *pd);
typedef void (blkcg_pol_reset_pd_stats_fn)(struct blkg_policy_data *pd);
typedef size_t (blkcg_pol_stat_pd_fn)(struct blkg_policy_data *pd, char *buf,
				      size_t size);

struct blkcg_policy {
	int				plid;
	/* cgroup files for the policy */
	struct cftype			*dfl_cftypes;
	struct cftype			*legacy_cftypes;

	/* operations */
	blkcg_pol_alloc_cpd_fn		*cpd_alloc_fn;
	blkcg_pol_init_cpd_fn		*cpd_init_fn;
	blkcg_pol_free_cpd_fn		*cpd_free_fn;
	blkcg_pol_bind_cpd_fn		*cpd_bind_fn;

	blkcg_pol_alloc_pd_fn		*pd_alloc_fn;
	blkcg_pol_init_pd_fn		*pd_init_fn;
	blkcg_pol_online_pd_fn		*pd_online_fn;
	blkcg_pol_offline_pd_fn		*pd_offline_fn;
	blkcg_pol_free_pd_fn		*pd_free_fn;
	blkcg_pol_reset_pd_stats_fn	*pd_reset_stats_fn;
	blkcg_pol_stat_pd_fn		*pd_stat_fn;
};

extern struct blkcg blkcg_root;
extern struct cgroup_subsys_state * const blkcg_root_css;
extern bool blkcg_debug_stats;

struct blkcg_gq *blkg_lookup_slowpath(struct blkcg *blkcg,
				      struct request_queue *q, bool update_hint);
int blkcg_init_queue(struct request_queue *q);
void blkcg_exit_queue(struct request_queue *q);

/* Blkio controller policy registration */
int blkcg_policy_register(struct blkcg_policy *pol);
void blkcg_policy_unregister(struct blkcg_policy *pol);
int blkcg_activate_policy(struct request_queue *q,
			  const struct blkcg_policy *pol);
void blkcg_deactivate_policy(struct request_queue *q,
			     const struct blkcg_policy *pol);
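
/*
 * Registration sketch (illustrative only, not part of this header): a policy
 * typically fills in a struct blkcg_policy with its pd_*_fn callbacks and
 * registers it from module init; the "example" names below are hypothetical.
 *
 *	static struct blkcg_policy blkcg_policy_example = {
 *		.pd_alloc_fn	= example_pd_alloc,
 *		.pd_init_fn	= example_pd_init,
 *		.pd_free_fn	= example_pd_free,
 *	};
 *
 *	static int __init example_init(void)
 *	{
 *		return blkcg_policy_register(&blkcg_policy_example);
 *	}
 *
 * blkcg_activate_policy() is then called per request_queue (e.g. when the
 * policy is enabled for a device) and blkcg_deactivate_policy() on teardown.
 */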

const char *blkg_dev_name(struct blkcg_gq *blkg);
void blkcg_print_blkgs(struct seq_file *sf, struct blkcg *blkcg,
		       u64 (*prfill)(struct seq_file *,
				     struct blkg_policy_data *, int),
		       const struct blkcg_policy *pol, int data,
		       bool show_total);
u64 __blkg_prfill_u64(struct seq_file *sf, struct blkg_policy_data *pd, u64 v);

struct blkg_conf_ctx {
	struct gendisk			*disk;
	struct blkcg_gq			*blkg;
	char				*body;
};

struct gendisk *blkcg_conf_get_disk(char **inputp);
int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
		   char *input, struct blkg_conf_ctx *ctx);
void blkg_conf_finish(struct blkg_conf_ctx *ctx);
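
/*
 * Usage sketch (illustrative only): a per-device configuration write handler
 * would normally bracket its work with blkg_conf_prep()/blkg_conf_finish(),
 * e.g. (hypothetical handler, reusing blkcg_policy_example from above):
 *
 *	ret = blkg_conf_prep(blkcg, &blkcg_policy_example, buf, &ctx);
 *	if (ret)
 *		return ret;
 *	... ctx.blkg and ctx.body are valid between prep and finish ...
 *	blkg_conf_finish(&ctx);
 */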

/**
 * blkcg_css - find the current css
 *
 * Find the css associated with either the kthread or the current task.
 * This may return a dying css, so it is up to the caller to use tryget logic
 * to confirm it is alive and well.
 */
static inline struct cgroup_subsys_state *blkcg_css(void)
{
	struct cgroup_subsys_state *css;

	css = kthread_blkcg();
	if (css)
		return css;
	return task_css(current, io_cgrp_id);
}
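
/*
 * Tryget sketch (illustrative only): because blkcg_css() may return a dying
 * css, a caller that needs to hold on to it would typically do something like
 * the following under RCU (css_tryget_online()/css_put() come from cgroup.h):
 *
 *	rcu_read_lock();
 *	css = blkcg_css();
 *	if (css && css_tryget_online(css)) {
 *		... use the css ...
 *		css_put(css);
 *	}
 *	rcu_read_unlock();
 */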

static inline struct blkcg *css_to_blkcg(struct cgroup_subsys_state *css)
{
	return css ? container_of(css, struct blkcg, css) : NULL;
}

/**
 * __bio_blkcg - internal, inconsistent version to get blkcg
 *
 * DO NOT USE.
 * This function is inconsistent and consequently is dangerous to use.  The
 * first part of the function returns a blkcg where a reference is owned by the
 * bio.  This means it does not need to be rcu protected as it cannot go away
 * with the bio owning a reference to it.  However, the latter potentially gets
 * it from task_css().  This can race against task migration and the cgroup
 * dying.  It is also semantically different as it must be called rcu protected
 * and is susceptible to failure when trying to get a reference to it.
 * Therefore, it is not ok to assume that *_get() will always succeed on the
 * blkcg returned here.
 */
static inline struct blkcg *__bio_blkcg(struct bio *bio)
{
	if (bio && bio->bi_blkg)
		return bio->bi_blkg->blkcg;
	return css_to_blkcg(blkcg_css());
}

/**
 * bio_blkcg - grab the blkcg associated with a bio
 * @bio: target bio
 *
 * This returns the blkcg associated with a bio, %NULL if not associated.
 * Callers are expected to either handle %NULL or know association has been
 * done prior to calling this.
 */
static inline struct blkcg *bio_blkcg(struct bio *bio)
{
	if (bio && bio->bi_blkg)
		return bio->bi_blkg->blkcg;
	return NULL;
}

static inline bool blk_cgroup_congested(void)
{
	struct cgroup_subsys_state *css;
	bool ret = false;

	rcu_read_lock();
	css = kthread_blkcg();
	if (!css)
		css = task_css(current, io_cgrp_id);
	while (css) {
		if (atomic_read(&css->cgroup->congestion_count)) {
			ret = true;
			break;
		}
		css = css->parent;
	}
	rcu_read_unlock();
	return ret;
}

/**
 * bio_issue_as_root_blkg - see if this bio needs to be issued as root blkg
 * @return: true if this bio needs to be submitted with the root blkg context.
 *
 * In order to avoid priority inversions we sometimes need to issue a bio as if
 * it were attached to the root blkg, and then backcharge to the actual owning
 * blkg.  The idea is we do bio_blkcg() to look up the actual context for the
 * bio and attach the appropriate blkg to the bio.  Then we call this helper
 * and if it is true run with the root blkg for that queue and then do any
 * backcharging to the originating cgroup once the io is complete.
 */
static inline bool bio_issue_as_root_blkg(struct bio *bio)
{
	return (bio->bi_opf & (REQ_META | REQ_SWAP)) != 0;
}
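
/*
 * Issue-path sketch (illustrative only): a throttling policy following the
 * scheme described above might special-case such bios roughly like this
 * (hypothetical code, details vary per policy):
 *
 *	if (bio_issue_as_root_blkg(bio))
 *		blkg = blkg->q->root_blkg;	* don't throttle, backcharge later *
 */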

/**
 * blkcg_parent - get the parent of a blkcg
 * @blkcg: blkcg of interest
 *
 * Return the parent blkcg of @blkcg.  Can be called anytime.
 */
static inline struct blkcg *blkcg_parent(struct blkcg *blkcg)
{
	return css_to_blkcg(blkcg->css.parent);
}

/**
 * __blkg_lookup - internal version of blkg_lookup()
 * @blkcg: blkcg of interest
 * @q: request_queue of interest
 * @update_hint: whether to update lookup hint with the result or not
 *
 * This is the internal version and shouldn't be used by policy
 * implementations.  Looks up blkgs for the @blkcg - @q pair regardless of
 * @q's bypass state.  If @update_hint is %true, the caller should be
 * holding @q->queue_lock and lookup hint is updated on success.
 */
static inline struct blkcg_gq *__blkg_lookup(struct blkcg *blkcg,
					     struct request_queue *q,
					     bool update_hint)
{
	struct blkcg_gq *blkg;

	if (blkcg == &blkcg_root)
		return q->root_blkg;

	blkg = rcu_dereference(blkcg->blkg_hint);
	if (blkg && blkg->q == q)
		return blkg;

	return blkg_lookup_slowpath(blkcg, q, update_hint);
}

/**
 * blkg_lookup - lookup blkg for the specified blkcg - q pair
 * @blkcg: blkcg of interest
 * @q: request_queue of interest
 *
 * Lookup blkg for the @blkcg - @q pair.  This function should be called
 * under RCU read lock.
 */
static inline struct blkcg_gq *blkg_lookup(struct blkcg *blkcg,
					   struct request_queue *q)
{
	WARN_ON_ONCE(!rcu_read_lock_held());
	return __blkg_lookup(blkcg, q, false);
}
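
/*
 * Lookup sketch (illustrative only): a typical reader pins the blkg it finds
 * before leaving the RCU section, using blkg_tryget()/blkg_put() declared
 * further down in this header:
 *
 *	rcu_read_lock();
 *	blkg = blkg_lookup(blkcg, q);
 *	if (blkg && blkg_tryget(blkg)) {
 *		... use the pinned blkg ...
 *		blkg_put(blkg);
 *	}
 *	rcu_read_unlock();
 */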

/**
 * blk_queue_root_blkg - return blkg for the (blkcg_root, @q) pair
 * @q: request_queue of interest
 *
 * Lookup blkg for @q at the root level.  See also blkg_lookup().
 */
static inline struct blkcg_gq *blk_queue_root_blkg(struct request_queue *q)
{
	return q->root_blkg;
}

/**
 * blkg_to_pd - get policy private data
 * @blkg: blkg of interest
 * @pol: policy of interest
 *
 * Return pointer to private data associated with the @blkg-@pol pair.
 */
static inline struct blkg_policy_data *blkg_to_pd(struct blkcg_gq *blkg,
						  struct blkcg_policy *pol)
{
	return blkg ? blkg->pd[pol->plid] : NULL;
}

static inline struct blkcg_policy_data *blkcg_to_cpd(struct blkcg *blkcg,
						     struct blkcg_policy *pol)
{
	return blkcg ? blkcg->cpd[pol->plid] : NULL;
}

/**
 * pd_to_blkg - get blkg associated with policy private data
 * @pd: policy private data of interest
 *
 * @pd is policy private data.  Determine the blkg it's associated with.
 */
static inline struct blkcg_gq *pd_to_blkg(struct blkg_policy_data *pd)
{
	return pd ? pd->blkg : NULL;
}

static inline struct blkcg *cpd_to_blkcg(struct blkcg_policy_data *cpd)
{
	return cpd ? cpd->blkcg : NULL;
}
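
/*
 * Conversion sketch (illustrative only): a policy usually embeds
 * blkg_policy_data at the start of its own per-blkg structure and converts
 * between the two with container_of() plus the helpers above, e.g.
 * (hypothetical structure and callback names):
 *
 *	struct example_blkg_data {
 *		struct blkg_policy_data	pd;	* must be at the beginning *
 *		u64			budget;
 *	};
 *
 *	static void example_pd_init(struct blkg_policy_data *pd)
 *	{
 *		struct example_blkg_data *ed =
 *			container_of(pd, struct example_blkg_data, pd);
 *		struct blkcg_gq *blkg = pd_to_blkg(pd);
 *		...
 *	}
 */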

extern void blkcg_destroy_blkgs(struct blkcg *blkcg);

/**
 * blkcg_pin_online - pin online state
 * @blkcg: blkcg of interest
 *
 * While pinned, a blkcg is kept online.  This is primarily used to
 * impedance-match blkg and cgwb lifetimes so that blkg doesn't go offline
 * while an associated cgwb is still active.
 */
static inline void blkcg_pin_online(struct blkcg *blkcg)
{
	refcount_inc(&blkcg->online_pin);
}

/**
 * blkcg_unpin_online - unpin online state
 * @blkcg: blkcg of interest
 *
 * This is primarily used to impedance-match blkg and cgwb lifetimes so
 * that blkg doesn't go offline while an associated cgwb is still active.
 * When this count goes to zero, all active cgwbs have finished so the
 * blkcg can continue destruction by calling blkcg_destroy_blkgs().
 */
static inline void blkcg_unpin_online(struct blkcg *blkcg)
{
	do {
		if (!refcount_dec_and_test(&blkcg->online_pin))
			break;
		blkcg_destroy_blkgs(blkcg);
		blkcg = blkcg_parent(blkcg);
	} while (blkcg);
}

/**
 * blkg_path - format cgroup path of blkg
 * @blkg: blkg of interest
 * @buf: target buffer
 * @buflen: target buffer length
 *
 * Format the path of the cgroup of @blkg into @buf.
 */
static inline int blkg_path(struct blkcg_gq *blkg, char *buf, int buflen)
{
	return cgroup_path(blkg->blkcg->css.cgroup, buf, buflen);
}

/**
 * blkg_get - get a blkg reference
 * @blkg: blkg to get
 *
 * The caller should be holding an existing reference.
 */
static inline void blkg_get(struct blkcg_gq *blkg)
{
	percpu_ref_get(&blkg->refcnt);
}

/**
 * blkg_tryget - try and get a blkg reference
 * @blkg: blkg to get
 *
 * This is for use when doing an RCU lookup of the blkg.  We may be in the
 * midst of freeing this blkg, so we can only use it if the refcnt is not zero.
 */
static inline bool blkg_tryget(struct blkcg_gq *blkg)
{
	return blkg && percpu_ref_tryget(&blkg->refcnt);
}

/**
 * blkg_put - put a blkg reference
 * @blkg: blkg to put
 */
static inline void blkg_put(struct blkcg_gq *blkg)
{
	percpu_ref_put(&blkg->refcnt);
}

/**
 * blkg_for_each_descendant_pre - pre-order walk of a blkg's descendants
 * @d_blkg: loop cursor pointing to the current descendant
 * @pos_css: used for iteration
 * @p_blkg: target blkg to walk descendants of
 *
 * Walk @d_blkg through the descendants of @p_blkg.  Must be used with RCU
 * read locked.  If called under either blkcg or queue lock, the iteration
 * is guaranteed to include all and only online blkgs.  The caller may
 * update @pos_css by calling css_rightmost_descendant() to skip subtree.
 * @p_blkg is included in the iteration and the first node to be visited.
 */
#define blkg_for_each_descendant_pre(d_blkg, pos_css, p_blkg)		\
	css_for_each_descendant_pre((pos_css), &(p_blkg)->blkcg->css)	\
		if (((d_blkg) = __blkg_lookup(css_to_blkcg(pos_css),	\
					      (p_blkg)->q, false)))

/**
 * blkg_for_each_descendant_post - post-order walk of a blkg's descendants
 * @d_blkg: loop cursor pointing to the current descendant
 * @pos_css: used for iteration
 * @p_blkg: target blkg to walk descendants of
 *
 * Similar to blkg_for_each_descendant_pre() but performs post-order
 * traversal instead.  Synchronization rules are the same.  @p_blkg is
 * included in the iteration and the last node to be visited.
 */
#define blkg_for_each_descendant_post(d_blkg, pos_css, p_blkg)		\
	css_for_each_descendant_post((pos_css), &(p_blkg)->blkcg->css)	\
		if (((d_blkg) = __blkg_lookup(css_to_blkcg(pos_css),	\
					      (p_blkg)->q, false)))
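
/*
 * Iteration sketch (illustrative only): walking a blkg subtree under RCU,
 * e.g. when propagating a configuration change down the hierarchy:
 *
 *	struct cgroup_subsys_state *pos_css;
 *	struct blkcg_gq *d_blkg;
 *
 *	rcu_read_lock();
 *	blkg_for_each_descendant_pre(d_blkg, pos_css, blkg) {
 *		... d_blkg visits blkg and each online descendant ...
 *	}
 *	rcu_read_unlock();
 */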

bool __blkcg_punt_bio_submit(struct bio *bio);

static inline bool blkcg_punt_bio_submit(struct bio *bio)
{
	if (bio->bi_opf & REQ_CGROUP_PUNT)
		return __blkcg_punt_bio_submit(bio);
	else
		return false;
}

static inline void blkcg_bio_issue_init(struct bio *bio)
{
	bio_issue_init(&bio->bi_issue, bio_sectors(bio));
}

static inline void blkcg_use_delay(struct blkcg_gq *blkg)
{
	if (WARN_ON_ONCE(atomic_read(&blkg->use_delay) < 0))
		return;
	if (atomic_add_return(1, &blkg->use_delay) == 1)
		atomic_inc(&blkg->blkcg->css.cgroup->congestion_count);
}

static inline int blkcg_unuse_delay(struct blkcg_gq *blkg)
{
	int old = atomic_read(&blkg->use_delay);

	if (WARN_ON_ONCE(old < 0))
		return 0;
	if (old == 0)
		return 0;

	/*
	 * We do this song and dance because we can race with somebody else
	 * adding or removing delay.  If we just did an atomic_dec we'd end up
	 * negative and we'd already be in trouble.  We need to subtract 1 and
	 * then check to see if we were the last delay so we can drop the
	 * congestion count on the cgroup.
	 */
	while (old) {
		int cur = atomic_cmpxchg(&blkg->use_delay, old, old - 1);
		if (cur == old)
			break;
		old = cur;
	}

	if (old == 0)
		return 0;
	if (old == 1)
		atomic_dec(&blkg->blkcg->css.cgroup->congestion_count);
	return 1;
}

/**
 * blkcg_set_delay - Enable allocator delay mechanism with the specified delay amount
 * @blkg: target blkg
 * @delay: delay duration in nsecs
 *
 * When enabled with this function, the delay is not decayed and must be
 * explicitly cleared with blkcg_clear_delay().  Must not be mixed with
 * blkcg_[un]use_delay() and blkcg_add_delay() usages.
 */
static inline void blkcg_set_delay(struct blkcg_gq *blkg, u64 delay)
{
	int old = atomic_read(&blkg->use_delay);

	/* We only want 1 person setting the congestion count for this blkg. */
	if (!old && atomic_cmpxchg(&blkg->use_delay, old, -1) == old)
		atomic_inc(&blkg->blkcg->css.cgroup->congestion_count);

	atomic64_set(&blkg->delay_nsec, delay);
}

/**
 * blkcg_clear_delay - Disable allocator delay mechanism
 * @blkg: target blkg
 *
 * Disable use_delay mechanism.  See blkcg_set_delay().
 */
static inline void blkcg_clear_delay(struct blkcg_gq *blkg)
{
	int old = atomic_read(&blkg->use_delay);

	/* We only want 1 person clearing the congestion count for this blkg. */
	if (old && atomic_cmpxchg(&blkg->use_delay, old, 0) == old)
		atomic_dec(&blkg->blkcg->css.cgroup->congestion_count);
}
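
/*
 * Delay sketch (illustrative only): a policy that wants a fixed, non-decaying
 * throttling delay pairs these two helpers, e.g. (hypothetical policy code):
 *
 *	blkcg_set_delay(blkg, NSEC_PER_MSEC);	* start delaying this group *
 *	...
 *	blkcg_clear_delay(blkg);		* stop delaying it again *
 *
 * Policies that want a decaying delay use blkcg_use_delay(),
 * blkcg_add_delay() and blkcg_unuse_delay() instead; the two schemes must not
 * be mixed on the same blkg.
 */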

/**
 * blk_cgroup_mergeable - Determine whether to allow or disallow merges
 * @rq: request to merge into
 * @bio: bio to merge
 *
 * @bio and @rq should belong to the same cgroup and their issue_as_root should
 * match.  The latter is necessary as we don't want to throttle e.g. a metadata
 * update because it happens to be next to a regular IO.
 */
static inline bool blk_cgroup_mergeable(struct request *rq, struct bio *bio)
{
	return rq->bio->bi_blkg == bio->bi_blkg &&
		bio_issue_as_root_blkg(rq->bio) == bio_issue_as_root_blkg(bio);
}
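
/*
 * Merge-path sketch (illustrative only): callers in the merge path gate the
 * attempt on this helper before any other checks, roughly:
 *
 *	if (!blk_cgroup_mergeable(rq, bio))
 *		return false;	* don't merge across cgroups *
 */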

void blk_cgroup_bio_start(struct bio *bio);
void blkcg_add_delay(struct blkcg_gq *blkg, u64 now, u64 delta);
void blkcg_schedule_throttle(struct request_queue *q, bool use_memdelay);
void blkcg_maybe_throttle_current(void);
#else	/* CONFIG_BLK_CGROUP */

struct blkcg {
};

struct blkg_policy_data {
};

struct blkcg_policy_data {
};

struct blkcg_gq {
};

struct blkcg_policy {
};

#define blkcg_root_css	((struct cgroup_subsys_state *)ERR_PTR(-EINVAL))

static inline void blkcg_maybe_throttle_current(void) { }
static inline bool blk_cgroup_congested(void) { return false; }

#ifdef CONFIG_BLOCK

static inline void blkcg_schedule_throttle(struct request_queue *q, bool use_memdelay) { }

static inline struct blkcg_gq *blkg_lookup(struct blkcg *blkcg, void *key) { return NULL; }
static inline struct blkcg_gq *blk_queue_root_blkg(struct request_queue *q)
{ return NULL; }
static inline int blkcg_init_queue(struct request_queue *q) { return 0; }
static inline void blkcg_exit_queue(struct request_queue *q) { }
static inline int blkcg_policy_register(struct blkcg_policy *pol) { return 0; }
static inline void blkcg_policy_unregister(struct blkcg_policy *pol) { }
static inline int blkcg_activate_policy(struct request_queue *q,
					const struct blkcg_policy *pol) { return 0; }
static inline void blkcg_deactivate_policy(struct request_queue *q,
					   const struct blkcg_policy *pol) { }

static inline struct blkcg *__bio_blkcg(struct bio *bio) { return NULL; }
static inline struct blkcg *bio_blkcg(struct bio *bio) { return NULL; }

static inline struct blkg_policy_data *blkg_to_pd(struct blkcg_gq *blkg,
						  struct blkcg_policy *pol) { return NULL; }
static inline struct blkcg_gq *pd_to_blkg(struct blkg_policy_data *pd) { return NULL; }
static inline char *blkg_path(struct blkcg_gq *blkg) { return NULL; }
static inline void blkg_get(struct blkcg_gq *blkg) { }
static inline void blkg_put(struct blkcg_gq *blkg) { }

static inline bool blkcg_punt_bio_submit(struct bio *bio) { return false; }
static inline void blkcg_bio_issue_init(struct bio *bio) { }
static inline void blk_cgroup_bio_start(struct bio *bio) { }
static inline bool blk_cgroup_mergeable(struct request *rq, struct bio *bio) { return true; }

#define blk_queue_for_each_rl(rl, q)	\
	for ((rl) = &(q)->root_rl; (rl); (rl) = NULL)

#endif	/* CONFIG_BLOCK */
#endif	/* CONFIG_BLK_CGROUP */
#endif	/* _BLK_CGROUP_H */